id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
155,892 | import math
import torch
import torch.nn as nn
The provided code snippet includes necessary dependencies for implementing the `get_sine_pos_embed` function. Write a Python function `def get_sine_pos_embed( pos_tensor: torch.Tensor, num_pos_feats: int = 128, temperature: int = 10000, exchange_xy: bool = True, ) -> torch.Tensor` to solve the following problem:
generate sine position embedding from a position tensor Args: pos_tensor (torch.Tensor): Shape as `(None, n)`. num_pos_feats (int): projected shape for each float in the tensor. Default: 128 temperature (int): The temperature used for scaling the position embedding. Default: 10000. exchange_xy (bool, optional): exchange pos x and pos y. \ For example, input tensor is `[x, y]`, the results will # noqa be `[pos(y), pos(x)]`. Defaults: True. Returns: torch.Tensor: Returned position embedding # noqa with shape `(None, n * num_pos_feats)`.
Here is the function:
def get_sine_pos_embed(
    pos_tensor: torch.Tensor,
    num_pos_feats: int = 128,
    temperature: int = 10000,
    exchange_xy: bool = True,
) -> torch.Tensor:
    """Generate a sine position embedding from a position tensor.

    Args:
        pos_tensor (torch.Tensor): Position tensor whose last dimension holds
            the ``n`` scalar coordinates to embed.
            NOTE(review): the slicing below (``[:, :, 0::2]``) requires a 3D
            tensor ``(bs, num_queries, n)``; the original docstring claimed
            ``(None, n)`` — confirm against callers.
        num_pos_feats (int): Projected size for each scalar coordinate.
            Default: 128.
        temperature (int): Temperature used for scaling the position
            embedding. Default: 10000.
        exchange_xy (bool, optional): If True, swap the embeddings of the
            first two coordinates, i.e. input ``[x, y]`` yields
            ``[pos(y), pos(x)]``. Default: True.

    Returns:
        torch.Tensor: Position embedding with last dimension
        ``n * num_pos_feats``.
    """
    scale = 2 * math.pi
    # Frequencies: temperature ** (2 * floor(i / 2) / num_pos_feats),
    # so consecutive (sin, cos) channel pairs share a frequency.
    dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=pos_tensor.device)
    dim_t = temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / num_pos_feats)

    def embed_one(coord: torch.Tensor) -> torch.Tensor:
        scaled = coord * scale / dim_t
        # Interleave sin on even channels with cos on odd channels.
        return torch.stack(
            (scaled[:, :, 0::2].sin(), scaled[:, :, 1::2].cos()), dim=3
        ).flatten(2)

    coords = pos_tensor.split([1] * pos_tensor.shape[-1], dim=-1)
    pos_res = [embed_one(coord) for coord in coords]
    if exchange_xy:
        pos_res[0], pos_res[1] = pos_res[1], pos_res[0]
    return torch.cat(pos_res, dim=2)
155,893 | import torch
import torch.nn as nn
from detrex.utils import inverse_sigmoid
The provided code snippet includes necessary dependencies for implementing the `apply_label_noise` function. Write a Python function `def apply_label_noise( labels: torch.Tensor, label_noise_prob: float = 0.2, num_classes: int = 80, )` to solve the following problem:
Args: labels (torch.Tensor): Classification labels with ``(num_labels, )``. label_noise_prob (float): The probability of the label being noised. Default: 0.2. num_classes (int): Number of total categories. Returns: torch.Tensor: The noised labels the same shape as ``labels``.
Here is the function:
def apply_label_noise(
    labels: torch.Tensor,
    label_noise_prob: float = 0.2,
    num_classes: int = 80,
):
    """Randomly replace a fraction of the labels with random category ids.

    Args:
        labels (torch.Tensor): Classification labels with shape ``(num_labels, )``.
        label_noise_prob (float): The probability of each label being noised.
            Default: 0.2.
        num_classes (int): Number of total categories. Default: 80.

    Returns:
        torch.Tensor: The noised labels with the same shape as ``labels``.
            The input tensor is left unmodified.
    """
    if label_noise_prob <= 0:
        return labels
    # Draw one uniform sample per label; labels whose sample falls below the
    # threshold are replaced with a random category id in [0, num_classes).
    p = torch.rand_like(labels.float())
    noised_index = torch.nonzero(p < label_noise_prob).view(-1)
    # Fixed typo: was ``new_lebels``.
    new_labels = torch.randint_like(noised_index, 0, num_classes)
    # Use out-of-place ``scatter`` so the caller's tensor is not mutated
    # (the original in-place ``scatter_`` silently modified ``labels``).
    return labels.scatter(0, noised_index, new_labels)
155,894 | import torch
import torch.nn as nn
from detrex.utils import inverse_sigmoid
The provided code snippet includes necessary dependencies for implementing the `apply_box_noise` function. Write a Python function `def apply_box_noise( boxes: torch.Tensor, box_noise_scale: float = 0.4, )` to solve the following problem:
Args: boxes (torch.Tensor): Bounding boxes in format ``(x_c, y_c, w, h)`` with shape ``(num_boxes, 4)`` box_noise_scale (float): Scaling factor for box noising. Default: 0.4.
Here is the function:
def apply_box_noise(
    boxes: torch.Tensor,
    box_noise_scale: float = 0.4,
):
    """Jitter box centers and sizes with uniform noise.

    Args:
        boxes (torch.Tensor): Bounding boxes in ``(x_c, y_c, w, h)`` format with
            shape ``(num_boxes, 4)``. Coordinates are assumed normalized to
            [0, 1] (the result is clamped to that range).
        box_noise_scale (float): Scaling factor for box noising. Default: 0.4.

    Returns:
        torch.Tensor: Noised boxes clamped to [0, 1]. The input tensor is left
            unmodified.
    """
    if box_noise_scale <= 0:
        return boxes
    # Per-coordinate noise amplitude: half the box size for the center,
    # the full box size for width/height.
    diff = torch.zeros_like(boxes)
    diff[:, :2] = boxes[:, 2:] / 2
    diff[:, 2:] = boxes[:, 2:]
    # Uniform noise in [-1, 1); out-of-place add so the caller's tensor is
    # not mutated (the original ``+=`` modified ``boxes`` in place).
    noise = (torch.rand_like(boxes) * 2 - 1.0) * diff * box_noise_scale
    return (boxes + noise).clamp(min=0.0, max=1.0)
155,895 | import json
import logging
import numpy as np
import os
from PIL import Image
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data.datasets.coco import load_coco_json, register_coco_instances
from detectron2.utils.file_io import PathManager
# Predefined ADE20k instance splits: name -> (image_root, json_file),
# both relative to the dataset root passed to ``register_all_ade20k_instance``.
_PREDEFINED_SPLITS = {
    # point annotations without masks
    "ade20k_instance_train": (
        "ADEChallengeData2016/images/training",
        "ADEChallengeData2016/ade20k_instance_train.json",
    ),
    "ade20k_instance_val": (
        "ADEChallengeData2016/images/validation",
        "ADEChallengeData2016/ade20k_instance_val.json",
    ),
}
def _get_ade_instances_meta():
    """Build detectron2 "thing" metadata for the 100 ADE20k instance classes."""
    thing_ids = [category["id"] for category in ADE_CATEGORIES]
    assert len(thing_ids) == 100, len(thing_ids)
    # Map the non-contiguous ADE category ids onto contiguous ids in [0, 99].
    contiguous_id_map = {ade_id: idx for idx, ade_id in enumerate(thing_ids)}
    return {
        "thing_dataset_id_to_contiguous_id": contiguous_id_map,
        "thing_classes": [category["name"] for category in ADE_CATEGORIES],
    }
def register_all_ade20k_instance(root):
    """Register every predefined ADE20k instance split under ``root``."""
    for key, (image_root, json_file) in _PREDEFINED_SPLITS.items():
        # Assume pre-defined datasets live in `./datasets`; URI-style paths
        # (containing "://") are passed through unchanged.
        json_path = json_file if "://" in json_file else os.path.join(root, json_file)
        register_coco_instances(
            key,
            _get_ade_instances_meta(),
            json_path,
            os.path.join(root, image_root),
        )
155,896 | import json
import os
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.utils.file_io import PathManager
def register_ade20k_panoptic(
    name, metadata, image_root, panoptic_root, semantic_root, panoptic_json, instances_json=None
):
    """
    Register a "standard" version of ADE20k panoptic segmentation dataset named `name`.
    The dictionaries in this registered dataset follows detectron2's standard format.
    Hence it's called "standard".
    Args:
        name (str): the name that identifies a dataset,
            e.g. "ade20k_panoptic_train"
        metadata (dict): extra metadata associated with this dataset.
        image_root (str): directory which contains all the images
        panoptic_root (str): directory which contains panoptic annotation images in COCO format
        semantic_root (str): directory which contains the semantic annotation images.
        panoptic_json (str): path to the json panoptic annotation file in COCO format
        instances_json (str): path to the json instance annotation file
    """
    panoptic_name = name
    # Register lazily: the json is only loaded when the dataset is requested.
    # NOTE(review): `load_ade20k_panoptic_json` is resolved at call time; it is
    # not defined in this chunk — confirm it is importable in this module.
    DatasetCatalog.register(
        panoptic_name,
        lambda: load_ade20k_panoptic_json(
            panoptic_json, image_root, panoptic_root, semantic_root, metadata
        ),
    )
    MetadataCatalog.get(panoptic_name).set(
        panoptic_root=panoptic_root,
        image_root=image_root,
        panoptic_json=panoptic_json,
        json_file=instances_json,
        evaluator_type="ade20k_panoptic_seg",
        ignore_label=255,
        label_divisor=1000,
        **metadata,
    )
# name -> (image_root, panoptic_root, panoptic_json, semantic_root,
# instance_json), all relative to the dataset root; unpacked in
# ``register_all_ade20k_panoptic``.
_PREDEFINED_SPLITS_ADE20K_PANOPTIC = {
    "ade20k_panoptic_train": (
        "ADEChallengeData2016/images/training",
        "ADEChallengeData2016/ade20k_panoptic_train",
        "ADEChallengeData2016/ade20k_panoptic_train.json",
        "ADEChallengeData2016/annotations_detectron2/training",
        "ADEChallengeData2016/ade20k_instance_train.json",
    ),
    "ade20k_panoptic_val": (
        "ADEChallengeData2016/images/validation",
        "ADEChallengeData2016/ade20k_panoptic_val",
        "ADEChallengeData2016/ade20k_panoptic_val.json",
        "ADEChallengeData2016/annotations_detectron2/validation",
        "ADEChallengeData2016/ade20k_instance_val.json",
    ),
}
def get_metadata():
    """Assemble ADE20k-150 panoptic metadata: class names, colors, and id maps."""
    meta = {}
    # The following metadata maps contiguous id from [0, #thing categories +
    # #stuff categories) to their names and colors. We have to keep a replica
    # of the same name and color under "thing_*" and "stuff_*" because the
    # current visualization function in D2 handles thing and stuff classes
    # differently due to some heuristic used in Panoptic FPN. We keep the same
    # naming to enable reusing existing visualization functions.
    thing_classes = [k["name"] for k in ADE20K_150_CATEGORIES if k["isthing"] == 1]
    thing_colors = [k["color"] for k in ADE20K_150_CATEGORIES if k["isthing"] == 1]
    stuff_classes = [k["name"] for k in ADE20K_150_CATEGORIES]
    stuff_colors = [k["color"] for k in ADE20K_150_CATEGORIES]
    meta["thing_classes"] = thing_classes
    meta["thing_colors"] = thing_colors
    meta["stuff_classes"] = stuff_classes
    meta["stuff_colors"] = stuff_colors
    # Convert category id for training:
    #   category id: like semantic segmentation, it is the class id for each
    #   pixel. Since there are some classes not used in evaluation, the category
    #   id is not always contiguous and thus we have two sets of category ids:
    #     - original category id: category id in the original dataset, mainly
    #       used for evaluation.
    #     - contiguous category id: [0, #classes), in order to train the linear
    #       softmax classifier.
    thing_dataset_id_to_contiguous_id = {}
    stuff_dataset_id_to_contiguous_id = {}
    for i, cat in enumerate(ADE20K_150_CATEGORIES):
        if cat["isthing"]:
            thing_dataset_id_to_contiguous_id[cat["id"]] = i
        # else:
        #   stuff_dataset_id_to_contiguous_id[cat["id"]] = i
        # Every category (thing or stuff) goes into the stuff map so the
        # sem_seg evaluator can be reused over all 150 classes.
        stuff_dataset_id_to_contiguous_id[cat["id"]] = i
    meta["thing_dataset_id_to_contiguous_id"] = thing_dataset_id_to_contiguous_id
    meta["stuff_dataset_id_to_contiguous_id"] = stuff_dataset_id_to_contiguous_id
    return meta
def register_all_ade20k_panoptic(root):
    """Register all predefined ADE20k panoptic splits relative to ``root``."""
    metadata = get_metadata()
    for (
        prefix,
        (image_root, panoptic_root, panoptic_json, semantic_root, instance_json),
    ) in _PREDEFINED_SPLITS_ADE20K_PANOPTIC.items():
        # The "standard" version of COCO panoptic segmentation dataset,
        # e.g. used by Panoptic-DeepLab
        register_ade20k_panoptic(
            prefix,
            metadata,
            os.path.join(root, image_root),
            os.path.join(root, panoptic_root),
            os.path.join(root, semantic_root),
            os.path.join(root, panoptic_json),
            os.path.join(root, instance_json),
        )
155,897 | import os
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data.datasets import load_sem_seg
def _get_ade20k_full_meta():
    """Build "stuff" metadata for the full 847-class ADE20k benchmark."""
    # Id 0 is reserved for ignore_label, we change ignore_label for 0
    # to 255 in our pre-processing, so all ids are shifted by 1.
    stuff_ids = [k["id"] for k in ADE20K_SEM_SEG_FULL_CATEGORIES]
    assert len(stuff_ids) == 847, len(stuff_ids)
    # For semantic segmentation, this mapping maps from contiguous stuff id
    # (in [0, 846], used in models) to ids in the dataset (used for
    # processing results). (The original comment said [0, 91] — a copy-paste
    # from a 92-class dataset; there are 847 ids here per the assert above.)
    stuff_dataset_id_to_contiguous_id = {k: i for i, k in enumerate(stuff_ids)}
    stuff_classes = [k["name"] for k in ADE20K_SEM_SEG_FULL_CATEGORIES]
    ret = {
        "stuff_dataset_id_to_contiguous_id": stuff_dataset_id_to_contiguous_id,
        "stuff_classes": stuff_classes,
    }
    return ret
def register_all_ade20k_full(root):
    """Register ADE20k-full (847 classes) semantic segmentation train/val splits."""
    root = os.path.join(root, "ADE20K_2021_17_01")
    meta = _get_ade20k_full_meta()
    for name, dirname in [("train", "training"), ("val", "validation")]:
        image_dir = os.path.join(root, "images_detectron2", dirname)
        gt_dir = os.path.join(root, "annotations_detectron2", dirname)
        name = f"ade20k_full_sem_seg_{name}"
        # Bind image_dir/gt_dir as lambda defaults to avoid the late-binding
        # closure pitfall across loop iterations.
        DatasetCatalog.register(
            name, lambda x=image_dir, y=gt_dir: load_sem_seg(y, x, gt_ext="tif", image_ext="jpg")
        )
        MetadataCatalog.get(name).set(
            stuff_classes=meta["stuff_classes"][:],
            image_root=image_dir,
            sem_seg_root=gt_dir,
            evaluator_type="sem_seg",
            ignore_label=65535,  # NOTE: gt is saved in 16-bit TIFF images
        )
155,898 | import os
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data.datasets import load_sem_seg
def _get_mapillary_vistas_meta():
    """Build "stuff" metadata for the 65 evaluated Mapillary Vistas classes."""
    evaluated = [cat for cat in MAPILLARY_VISTAS_SEM_SEG_CATEGORIES if cat["evaluate"]]
    stuff_classes = [cat["readable"] for cat in evaluated]
    assert len(stuff_classes) == 65
    stuff_colors = [cat["color"] for cat in evaluated]
    assert len(stuff_colors) == 65
    return {
        "stuff_classes": stuff_classes,
        "stuff_colors": stuff_colors,
    }
def register_all_mapillary_vistas(root):
    """Register Mapillary Vistas semantic segmentation train/val splits."""
    root = os.path.join(root, "mapillary_vistas")
    meta = _get_mapillary_vistas_meta()
    for name, dirname in [("train", "training"), ("val", "validation")]:
        image_dir = os.path.join(root, dirname, "images")
        gt_dir = os.path.join(root, dirname, "labels")
        name = f"mapillary_vistas_sem_seg_{name}"
        # Bind image_dir/gt_dir as lambda defaults to avoid the late-binding
        # closure pitfall across loop iterations.
        DatasetCatalog.register(
            name, lambda x=image_dir, y=gt_dir: load_sem_seg(y, x, gt_ext="png", image_ext="jpg")
        )
        MetadataCatalog.get(name).set(
            image_root=image_dir,
            sem_seg_root=gt_dir,
            evaluator_type="sem_seg",
            ignore_label=65,  # different from other datasets, Mapillary Vistas sets ignore_label to 65
            **meta,
        )
155,899 | import json
import os
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.utils.file_io import PathManager
def register_mapillary_vistas_panoptic(
    name, metadata, image_root, panoptic_root, semantic_root, panoptic_json, instances_json=None
):
    """
    Register a "standard" version of the Mapillary Vistas panoptic segmentation
    dataset named `name`. (The original docstring said "ADE20k" — a copy-paste
    from the ADE20k registration helper.)
    The dictionaries in this registered dataset follows detectron2's standard format.
    Hence it's called "standard".
    Args:
        name (str): the name that identifies a dataset,
            e.g. "mapillary_vistas_panoptic_train"
        metadata (dict): extra metadata associated with this dataset.
        image_root (str): directory which contains all the images
        panoptic_root (str): directory which contains panoptic annotation images in COCO format
        semantic_root (str): directory which contains the semantic annotation images.
        panoptic_json (str): path to the json panoptic annotation file in COCO format
        instances_json (str): path to the json instance annotation file
    """
    panoptic_name = name
    # Register lazily; `load_mapillary_vistas_panoptic_json` is resolved at
    # call time (defined elsewhere in this module).
    DatasetCatalog.register(
        panoptic_name,
        lambda: load_mapillary_vistas_panoptic_json(
            panoptic_json, image_root, panoptic_root, semantic_root, metadata
        ),
    )
    MetadataCatalog.get(panoptic_name).set(
        panoptic_root=panoptic_root,
        image_root=image_root,
        panoptic_json=panoptic_json,
        json_file=instances_json,
        evaluator_type="mapillary_vistas_panoptic_seg",
        ignore_label=65,  # different from other datasets, Mapillary Vistas sets ignore_label to 65
        label_divisor=1000,
        **metadata,
    )
# name -> (image_root, panoptic_root, panoptic_json, semantic_root), relative
# to the dataset root; unpacked in ``register_all_mapillary_vistas_panoptic``.
# NOTE(review): the variable name was copy-pasted from the ADE20k registration
# file — it actually holds Mapillary Vistas splits. Renaming it would require
# updating its use site below in the same change.
_PREDEFINED_SPLITS_ADE20K_PANOPTIC = {
    "mapillary_vistas_panoptic_train": (
        "mapillary_vistas/training/images",
        "mapillary_vistas/training/panoptic",
        "mapillary_vistas/training/panoptic/panoptic_2018.json",
        "mapillary_vistas/training/labels",
    ),
    "mapillary_vistas_panoptic_val": (
        "mapillary_vistas/validation/images",
        "mapillary_vistas/validation/panoptic",
        "mapillary_vistas/validation/panoptic/panoptic_2018.json",
        "mapillary_vistas/validation/labels",
    ),
}
def get_metadata():
    """Assemble Mapillary Vistas panoptic metadata: names, colors, and id maps."""
    meta = {}
    # The following metadata maps contiguous id from [0, #thing categories +
    # #stuff categories) to their names and colors. We have to keep a replica
    # of the same name and color under "thing_*" and "stuff_*" because the
    # current visualization function in D2 handles thing and stuff classes
    # differently due to some heuristic used in Panoptic FPN. We keep the same
    # naming to enable reusing existing visualization functions.
    # NOTE(review): this reads k["name"] while `_get_mapillary_vistas_meta`
    # reads k["readable"] — confirm the category dicts provide both keys.
    thing_classes = [k["name"] for k in MAPILLARY_VISTAS_SEM_SEG_CATEGORIES]
    thing_colors = [k["color"] for k in MAPILLARY_VISTAS_SEM_SEG_CATEGORIES]
    stuff_classes = [k["name"] for k in MAPILLARY_VISTAS_SEM_SEG_CATEGORIES]
    stuff_colors = [k["color"] for k in MAPILLARY_VISTAS_SEM_SEG_CATEGORIES]
    meta["thing_classes"] = thing_classes
    meta["thing_colors"] = thing_colors
    meta["stuff_classes"] = stuff_classes
    meta["stuff_colors"] = stuff_colors
    # Convert category id for training:
    #   category id: like semantic segmentation, it is the class id for each
    #   pixel. Since there are some classes not used in evaluation, the category
    #   id is not always contiguous and thus we have two sets of category ids:
    #     - original category id: category id in the original dataset, mainly
    #       used for evaluation.
    #     - contiguous category id: [0, #classes), in order to train the linear
    #       softmax classifier.
    thing_dataset_id_to_contiguous_id = {}
    stuff_dataset_id_to_contiguous_id = {}
    for i, cat in enumerate(MAPILLARY_VISTAS_SEM_SEG_CATEGORIES):
        if cat["isthing"]:
            thing_dataset_id_to_contiguous_id[cat["id"]] = i
        # else:
        #   stuff_dataset_id_to_contiguous_id[cat["id"]] = i
        # Every category (thing or stuff) goes into the stuff map so the
        # sem_seg evaluator can be reused.
        stuff_dataset_id_to_contiguous_id[cat["id"]] = i
    meta["thing_dataset_id_to_contiguous_id"] = thing_dataset_id_to_contiguous_id
    meta["stuff_dataset_id_to_contiguous_id"] = stuff_dataset_id_to_contiguous_id
    return meta
def register_all_mapillary_vistas_panoptic(root):
    """Register all predefined Mapillary Vistas panoptic splits under ``root``."""
    metadata = get_metadata()
    for (
        prefix,
        (image_root, panoptic_root, panoptic_json, semantic_root),
    ) in _PREDEFINED_SPLITS_ADE20K_PANOPTIC.items():
        # The "standard" version of COCO panoptic segmentation dataset,
        # e.g. used by Panoptic-DeepLab
        register_mapillary_vistas_panoptic(
            prefix,
            metadata,
            os.path.join(root, image_root),
            os.path.join(root, panoptic_root),
            os.path.join(root, semantic_root),
            os.path.join(root, panoptic_json),
        )
155,900 | import json
import os
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data.datasets import load_sem_seg
from detectron2.data.datasets.builtin_meta import COCO_CATEGORIES
from detectron2.utils.file_io import PathManager
# name -> (panoptic_root, panoptic_json, semantic_root), relative to the
# dataset root; unpacked in ``register_all_coco_panoptic_annos_sem_seg``.
_PREDEFINED_SPLITS_COCO_PANOPTIC = {
    "coco_2017_train_panoptic": (
        # This is the original panoptic annotation directory
        "coco/panoptic_train2017",
        "coco/annotations/panoptic_train2017.json",
        # This directory contains semantic annotations that are
        # converted from panoptic annotations.
        # It is used by PanopticFPN.
        # You can use the script at detectron2/datasets/prepare_panoptic_fpn.py
        # to create these directories.
        "coco/panoptic_semseg_train2017",
    ),
    "coco_2017_val_panoptic": (
        "coco/panoptic_val2017",
        "coco/annotations/panoptic_val2017.json",
        "coco/panoptic_semseg_val2017",
    ),
}
def get_metadata():
def register_coco_panoptic_annos_sem_seg(
name, metadata, image_root, panoptic_root, panoptic_json, sem_seg_root, instances_json
):
def register_all_coco_panoptic_annos_sem_seg(root):
    """Register COCO panoptic+semantic splits.

    The image root and instances json are taken from the already-registered
    COCO instance datasets (e.g. "coco_2017_train"), so those must be
    registered first.
    """
    for (
        prefix,
        (panoptic_root, panoptic_json, semantic_root),
    ) in _PREDEFINED_SPLITS_COCO_PANOPTIC.items():
        # e.g. "coco_2017_train_panoptic" -> "coco_2017_train"
        prefix_instances = prefix[: -len("_panoptic")]
        instances_meta = MetadataCatalog.get(prefix_instances)
        image_root, instances_json = instances_meta.image_root, instances_meta.json_file
        register_coco_panoptic_annos_sem_seg(
            prefix,
            get_metadata(),
            image_root,
            os.path.join(root, panoptic_root),
            os.path.join(root, panoptic_json),
            os.path.join(root, semantic_root),
            instances_json,
        )
155,901 | import os
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data.datasets import load_sem_seg
def _get_coco_stuff_meta():
    """Build "stuff" metadata for the 171 COCO-Stuff classes."""
    # Id 0 is reserved for ignore_label, we change ignore_label for 0
    # to 255 in our pre-processing.
    stuff_ids = [k["id"] for k in COCO_CATEGORIES]
    assert len(stuff_ids) == 171, len(stuff_ids)
    # For semantic segmentation, this mapping maps from contiguous stuff id
    # (in [0, 170], used in models) to ids in the dataset (used for
    # processing results). (The original comment said [0, 91] — a copy-paste;
    # there are 171 ids here per the assert above.)
    stuff_dataset_id_to_contiguous_id = {k: i for i, k in enumerate(stuff_ids)}
    stuff_classes = [k["name"] for k in COCO_CATEGORIES]
    ret = {
        "stuff_dataset_id_to_contiguous_id": stuff_dataset_id_to_contiguous_id,
        "stuff_classes": stuff_classes,
    }
    return ret
def register_all_coco_stuff_10k(root):
    """Register COCO-Stuff-10k semantic segmentation train/test splits."""
    root = os.path.join(root, "coco", "coco_stuff_10k")
    meta = _get_coco_stuff_meta()
    for name, image_dirname, sem_seg_dirname in [
        ("train", "images_detectron2/train", "annotations_detectron2/train"),
        ("test", "images_detectron2/test", "annotations_detectron2/test"),
    ]:
        image_dir = os.path.join(root, image_dirname)
        gt_dir = os.path.join(root, sem_seg_dirname)
        name = f"coco_2017_{name}_stuff_10k_sem_seg"
        # Bind image_dir/gt_dir as lambda defaults to avoid the late-binding
        # closure pitfall across loop iterations.
        DatasetCatalog.register(
            name, lambda x=image_dir, y=gt_dir: load_sem_seg(y, x, gt_ext="png", image_ext="jpg")
        )
        MetadataCatalog.get(name).set(
            image_root=image_dir,
            sem_seg_root=gt_dir,
            evaluator_type="sem_seg",
            ignore_label=255,
            **meta,
        )
155,902 | import copy
import logging
import numpy as np
import torch
from detectron2.structures import Instances, Boxes, PolygonMasks
from detectron2.data import detection_utils as utils
from detectron2.data import transforms as T
from pycocotools import mask as coco_mask
def convert_coco_poly_to_mask(segmentations, height, width):
    """Rasterize COCO polygon segmentations into an (N, H, W) mask tensor.

    Each element of ``segmentations`` is the polygon list for one instance;
    the per-polygon masks of an instance are unioned into a single mask.
    Returns a uint8 zero tensor of shape (0, H, W) when the input is empty.
    """
    instance_masks = []
    for polygons in segmentations:
        rles = coco_mask.frPyObjects(polygons, height, width)
        decoded = coco_mask.decode(rles)
        # A single polygon decodes to (H, W); add a channel axis so the
        # union below works uniformly.
        if len(decoded.shape) < 3:
            decoded = decoded[..., None]
        mask_tensor = torch.as_tensor(decoded, dtype=torch.uint8)
        # Union over the per-polygon channels of this instance.
        instance_masks.append(mask_tensor.any(dim=2))
    if not instance_masks:
        return torch.zeros((0, height, width), dtype=torch.uint8)
    return torch.stack(instance_masks, dim=0)
155,903 | import copy
import logging
import numpy as np
import torch
from detectron2.structures import Instances, Boxes, PolygonMasks
from detectron2.data import detection_utils as utils
from detectron2.data import transforms as T
from pycocotools import mask as coco_mask
The provided code snippet includes necessary dependencies for implementing the `build_transform_gen` function. Write a Python function `def build_transform_gen( image_size, min_scale, max_scale, random_flip: str = "horizontal", is_train: bool = True, )` to solve the following problem:
Create a list of default :class:`Augmentation`. Now it includes resizing and flipping. Returns: list[Augmentation]
Here is the function:
def build_transform_gen(
    image_size,
    min_scale,
    max_scale,
    random_flip: str = "horizontal",
    is_train: bool = True,
):
    """
    Create a list of default :class:`Augmentation`.
    Now it includes resizing and flipping.
    Returns:
        list[Augmentation]
    """
    assert is_train, "Only support training augmentation."
    assert random_flip in ["none", "horizontal", "vertical"], f"Only support none/horizontal/vertical flip, but got {random_flip}"
    augmentation = []
    if random_flip != "none":
        flip = T.RandomFlip(
            horizontal=random_flip == "horizontal",
            vertical=random_flip == "vertical",
        )
        augmentation.append(flip)
    # Large-scale jitter followed by a fixed-size square crop/pad.
    augmentation.append(
        T.ResizeScale(
            min_scale=min_scale,
            max_scale=max_scale,
            target_height=image_size,
            target_width=image_size,
        )
    )
    augmentation.append(T.FixedSizeCrop(crop_size=(image_size, image_size)))
    return augmentation
155,904 | import copy
import logging
import numpy as np
import torch
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.data import MetadataCatalog
from detectron2.data import detection_utils as utils
from detectron2.data import transforms as T
from detectron2.structures import BitMasks, Instances
from detectron2.projects.point_rend import ColorAugSSDTransform
def build_transform_gen(
    min_size_train,
    max_size_train,
    min_size_train_sampling,
    enabled_crop: bool,
    crop_params: dict,
    color_aug_ssd: bool,
    img_format: str,
    is_train: bool = True
):
    """Build training augmentations: shortest-edge resize, optional
    category-area-constrained crop, optional SSD color jitter, random flip.
    """
    assert is_train, "Only support training augmentation."
    augmentations = [
        T.ResizeShortestEdge(min_size_train, max_size_train, min_size_train_sampling)
    ]
    if enabled_crop:
        augmentations.append(T.RandomCrop_CategoryAreaConstraint(**crop_params))
    if color_aug_ssd:
        augmentations.append(ColorAugSSDTransform(img_format=img_format))
    augmentations.append(T.RandomFlip())
    return augmentations
155,905 | import copy
import logging
import numpy as np
import pycocotools.mask as mask_util
import torch
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.data import detection_utils as utils
from detectron2.data import transforms as T
from detectron2.structures import BitMasks, Instances, polygons_to_bitmask
from detrex.data.transforms import ColorAugSSDTransform
def build_transform_gen(
    min_size_train,
    max_size_train,
    min_size_train_sampling,
    enabled_crop: bool,
    crop_type: str,
    crop_size: str,
    color_aug_ssd: bool,
    img_format: str,
    is_train: bool = True
):
    """Build training augmentations: shortest-edge resize, optional random
    crop, optional SSD color jitter, and a random flip.

    Returns:
        list[Augmentation]: a flat list of augmentations.
    """
    assert is_train, "Only support training augmentation."
    augmentations = []
    # BUG FIX: the resize transform was previously appended wrapped in a list
    # (``augmentations.append([T.ResizeShortestEdge(...)])``), producing a
    # nested list whose first element is not an Augmentation. Append it
    # directly, matching the sibling build_transform_gen implementations.
    augmentations.append(
        T.ResizeShortestEdge(
            min_size_train,
            max_size_train,
            min_size_train_sampling
        )
    )
    if enabled_crop:
        augmentations.append(
            T.RandomCrop(
                crop_type=crop_type,
                crop_size=crop_size,
            )
        )
    if color_aug_ssd:
        augmentations.append(ColorAugSSDTransform(img_format=img_format))
    augmentations.append(T.RandomFlip())
    return augmentations
155,906 | import copy
import logging
import numpy as np
import torch
from detectron2.config import configurable
from detectron2.data import detection_utils as utils
from detectron2.data import transforms as T
from detectron2.data.transforms import TransformGen
from detectron2.structures import BitMasks, Boxes, Instances
The provided code snippet includes necessary dependencies for implementing the `build_transform_gen` function. Write a Python function `def build_transform_gen( image_size, min_scale, max_scale, random_flip: str = "horizontal", is_train: bool = True, )` to solve the following problem:
Create a list of default :class:`Augmentation` from config. Now it includes resizing and flipping. Returns: list[Augmentation]
Here is the function:
def build_transform_gen(
image_size,
min_scale,
max_scale,
random_flip: str = "horizontal",
is_train: bool = True,
):
"""
Create a list of default :class:`Augmentation` from config.
Now it includes resizing and flipping.
Returns:
list[Augmentation]
"""
assert is_train, "Only support training augmentation"
augmentation = []
if random_flip != "none":
augmentation.append(
T.RandomFlip(
horizontal=random_flip == "horizontal",
vertical=random_flip == "vertical",
)
)
augmentation.extend([
T.ResizeScale(
min_scale=min_scale, max_scale=max_scale, target_height=image_size, target_width=image_size
),
T.FixedSizeCrop(crop_size=(image_size, image_size)),
])
return augmentation | Create a list of default :class:`Augmentation` from config. Now it includes resizing and flipping. Returns: list[Augmentation] |
155,907 | import torch
import torch.nn as nn
from .utils import weight_reduce_loss
def weight_reduce_loss(loss, weight=None, reduction="mean", avg_factor=None):
    """Apply element-wise weight and reduce loss.
    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.
    Returns:
        Tensor: Processed loss values.
    """
    # Apply the element-wise weight first, if one was provided.
    if weight is not None:
        loss = loss * weight
    if avg_factor is None:
        # No explicit normalizer: fall back to the standard reduction.
        return reduce_loss(loss, reduction)
    if reduction == "mean":
        # Normalize by avg_factor; eps avoids ZeroDivisionError at 0.0.
        eps = torch.finfo(loss.dtype).eps
        return loss.sum() / (avg_factor + eps)
    if reduction == "none":
        return loss
    # avg_factor is incompatible with any other reduction.
    raise ValueError('avg_factor can not be used with reduction="sum"')
The provided code snippet includes necessary dependencies for implementing the `smooth_l1_loss` function. Write a Python function `def smooth_l1_loss( preds, targets, weight=None, beta: float = 1.0, reduction: str = "mean", avg_factor: int = None, )` to solve the following problem:
Smooth L1 loss. Args: preds (torch.Tensor): The prediction. targets (torch.Tensor): The learning target of the prediction. beta (float, optional): The threshold in the piecewise function. Defaults to 1.0. Returns: torch.Tensor: Calculated loss
Here is the function:
def smooth_l1_loss(
    preds,
    targets,
    weight=None,
    beta: float = 1.0,
    reduction: str = "mean",
    avg_factor: int = None,
):
    """Smooth L1 loss.
    Args:
        preds (torch.Tensor): The prediction.
        targets (torch.Tensor): The learning target of the prediction.
        beta (float, optional): The threshold in the piecewise function.
            Defaults to 1.0.
    Returns:
        torch.Tensor: Calculated loss
    """
    assert beta > 0
    # Empty targets: return a zero that keeps the autograd graph on preds.
    if targets.numel() == 0:
        return preds.sum() * 0
    assert preds.size() == targets.size()
    diff = torch.abs(preds - targets)
    if beta < 1e-5:
        # Degenerate beta: plain L1.
        loss = diff
    else:
        # Quadratic inside |diff| < beta, linear outside.
        quadratic = 0.5 * diff * diff / beta
        linear = diff - 0.5 * beta
        loss = torch.where(diff < beta, quadratic, linear)
    if weight is not None:
        assert weight.ndim == loss.ndim
    loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
    return loss
155,908 | import torch
import torch.nn as nn
from .utils import weight_reduce_loss
def weight_reduce_loss(loss, weight=None, reduction="mean", avg_factor=None):
    """Apply element-wise weight and reduce loss.
    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.
    Returns:
        Tensor: Processed loss values.
    """
    # if weight is specified, apply element-wise weight
    if weight is not None:
        loss = loss * weight
    # if avg_factor is not specified, just reduce the loss
    if avg_factor is None:
        # NOTE(review): `reduce_loss` is not defined in this chunk — confirm
        # it is imported/defined in this module.
        loss = reduce_loss(loss, reduction)
    else:
        # if reduction is mean, then average the loss by avg_factor
        if reduction == "mean":
            # Avoid causing ZeroDivisionError when avg_factor is 0.0.
            eps = torch.finfo(loss.dtype).eps
            loss = loss.sum() / (avg_factor + eps)
        # if reduction is 'none', then do nothing, otherwise raise an error
        elif reduction != "none":
            raise ValueError('avg_factor can not be used with reduction="sum"')
    return loss
def l1_loss(
    preds,
    targets,
    weight=None,
    reduction: str = "mean",
    avg_factor: int = None,
):
    """Element-wise L1 loss with optional weighting and reduction.

    Args:
        preds (torch.Tensor): The prediction.
        targets (torch.Tensor): The learning target, same shape as preds.
    Returns:
        torch.Tensor: Reduced loss.
    """
    # Empty targets: return a zero that keeps the autograd graph on preds.
    if targets.numel() == 0:
        return preds.sum() * 0
    assert preds.size() == targets.size()
    absolute_error = torch.abs(preds - targets)
    if weight is not None:
        assert weight.ndim == absolute_error.ndim
    return weight_reduce_loss(absolute_error, weight, reduction, avg_factor)
155,909 | import torch
import torch.nn as nn
import torch.nn.functional as F
from .utils import weight_reduce_loss
def weight_reduce_loss(loss, weight=None, reduction="mean", avg_factor=None):
    """Apply element-wise weight and reduce loss.
    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.
    Returns:
        Tensor: Processed loss values.
    """
    # if weight is specified, apply element-wise weight
    if weight is not None:
        loss = loss * weight
    # if avg_factor is not specified, just reduce the loss
    if avg_factor is None:
        # NOTE(review): `reduce_loss` is not defined in this chunk — confirm
        # it is imported/defined in this module.
        loss = reduce_loss(loss, reduction)
    else:
        # if reduction is mean, then average the loss by avg_factor
        if reduction == "mean":
            # Avoid causing ZeroDivisionError when avg_factor is 0.0.
            eps = torch.finfo(loss.dtype).eps
            loss = loss.sum() / (avg_factor + eps)
        # if reduction is 'none', then do nothing, otherwise raise an error
        elif reduction != "none":
            raise ValueError('avg_factor can not be used with reduction="sum"')
    return loss
The provided code snippet includes necessary dependencies for implementing the `sigmoid_focal_loss` function. Write a Python function `def sigmoid_focal_loss( preds, targets, weight=None, alpha: float = 0.25, gamma: float = 2, reduction: str = "mean", avg_factor: int = None, )` to solve the following problem:
Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002. Args: preds (torch.Tensor): A float tensor of arbitrary shape. The predictions for each example. targets (torch.Tensor): A float tensor with the same shape as inputs. Stores the binary classification label for each element in inputs (0 for the negative class and 1 for the positive class). alpha (float, optional): Weighting factor in range (0, 1) to balance positive vs negative examples. Default: 0.25. gamma (float): Exponent of the modulating factor (1 - p_t) to balance easy vs hard examples. Default: 2. reduction: 'none' | 'mean' | 'sum' 'none': No reduction will be applied to the output. 'mean': The output will be averaged. 'sum': The output will be summed. avg_factor (int): Average factor that is used to average the loss. Default: None. Returns: torch.Tensor: The computed sigmoid focal loss with the reduction option applied.
Here is the function:
def sigmoid_focal_loss(
    preds,
    targets,
    weight=None,
    alpha: float = 0.25,
    gamma: float = 2,
    reduction: str = "mean",
    avg_factor: int = None,
):
    """RetinaNet focal loss computed on raw logits.

    See `Focal Loss for Dense Object Detection
    <https://arxiv.org/abs/1708.02002>`_.

    Args:
        preds (torch.Tensor): Predicted logits of arbitrary shape.
        targets (torch.Tensor): Binary targets with the same shape as
            ``preds`` (1 for the positive class, 0 for the negative class).
        weight (torch.Tensor, optional): Element-wise loss weights; must have
            the same number of dims as the loss.
        alpha (float): Weighting factor in (0, 1) balancing positives vs.
            negatives; a negative value disables it. Default: 0.25.
        gamma (float): Exponent of the modulating factor ``(1 - p_t)``
            balancing easy vs. hard examples. Default: 2.
        reduction (str): 'none' | 'mean' | 'sum'. Default: 'mean'.
        avg_factor (int, optional): Custom denominator used when averaging.

    Returns:
        torch.Tensor: The focal loss with the chosen reduction applied.
    """
    preds = preds.float()
    targets = targets.float()
    prob = torch.sigmoid(preds)
    # Per-element BCE on the logits; focal modulation is layered on top.
    bce = F.binary_cross_entropy_with_logits(preds, targets, reduction="none")
    # Probability the model assigns to the ground-truth class of each element.
    p_t = prob * targets + (1 - prob) * (1 - targets)
    loss = bce * (1 - p_t) ** gamma
    if alpha >= 0:
        # Class-balancing term: alpha for positives, (1 - alpha) for negatives.
        alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
        loss = alpha_t * loss
    if weight is not None:
        assert weight.ndim == loss.ndim
    return weight_reduce_loss(loss, weight, reduction, avg_factor)
155,910 | import torch
import torch.nn as nn
import torch.nn.functional as F
from .utils import weight_reduce_loss
def weight_reduce_loss(loss, weight=None, reduction="mean", avg_factor=None):
    """Weight a loss tensor element-wise, then reduce it.

    Args:
        loss (Tensor): Element-wise loss values.
        weight (Tensor, optional): Element-wise weights multiplied into
            ``loss`` before any reduction.
        reduction (str): "none", "mean" or "sum", as in built-in PyTorch losses.
        avg_factor (float, optional): Custom denominator used instead of the
            element count when averaging. Only meaningful with "mean"
            (ignored for "none", rejected otherwise).

    Returns:
        Tensor: The weighted and reduced loss.
    """
    if weight is not None:
        loss = loss * weight
    if avg_factor is None:
        # No custom denominator: defer to the standard reduction helper.
        return reduce_loss(loss, reduction)
    if reduction == "mean":
        # Tiny epsilon keeps avg_factor == 0.0 from raising ZeroDivisionError.
        return loss.sum() / (avg_factor + torch.finfo(loss.dtype).eps)
    if reduction != "none":
        raise ValueError('avg_factor can not be used with reduction="sum"')
    return loss
The provided code snippet includes necessary dependencies for implementing the `focal_loss_with_prob` function. Write a Python function `def focal_loss_with_prob( preds, targets, weight=None, alpha=0.25, gamma=2.0, reduction="mean", avg_factor=None, )` to solve the following problem:
PyTorch version of `Focal Loss <https://arxiv.org/abs/1708.02002>`_. Different from `sigmoid_focal_loss`, this function accepts probability as input. Args: preds (torch.Tensor): The prediction probability with shape (N, C), C is the number of classes. targets (torch.Tensor): The learning label of the prediction. weight (torch.Tensor, optional): Sample-wise loss weight. gamma (float, optional): The gamma for calculating the modulating factor. Defaults to 2.0. alpha (float, optional): A balanced form for Focal Loss. Defaults to 0.25. reduction (str, optional): The method used to reduce the loss into a scalar. Defaults to 'mean'. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None.
Here is the function:
def focal_loss_with_prob(
    preds,
    targets,
    weight=None,
    alpha=0.25,
    gamma=2.0,
    reduction="mean",
    avg_factor=None,
):
    """PyTorch version of `Focal Loss <https://arxiv.org/abs/1708.02002>`_.

    Unlike ``sigmoid_focal_loss`` this expects ``preds`` to already be
    probabilities (post-sigmoid), not raw logits.

    Args:
        preds (torch.Tensor): Prediction probabilities of shape (N, C),
            where C is the number of classes.
        targets (torch.Tensor): Integer class labels; the value C encodes
            "background" and yields an all-zero one-hot row.
        weight (torch.Tensor, optional): Element-wise loss weight.
        alpha (float, optional): Balanced form of focal loss. Default: 0.25.
        gamma (float, optional): Modulating-factor exponent. Default: 2.0.
        reduction (str, optional): 'none' | 'mean' | 'sum'. Default: 'mean'.
        avg_factor (int, optional): Custom denominator used when averaging.

    Returns:
        torch.Tensor: The focal loss with the chosen reduction applied.
    """
    num_classes = preds.size(1)
    # One extra class lets the label ``num_classes`` mark background; that
    # column is sliced off, leaving an all-zero row for background samples.
    one_hot = F.one_hot(targets, num_classes=num_classes + 1)[:, :num_classes]
    one_hot = one_hot.type_as(preds)
    prob_t = preds * one_hot + (1 - preds) * (1 - one_hot)
    bce = F.binary_cross_entropy(preds, one_hot, reduction="none")
    loss = bce * (1 - prob_t) ** gamma
    if alpha >= 0:
        balance = alpha * one_hot + (1 - alpha) * (1 - one_hot)
        loss = balance * loss
    if weight is not None:
        assert weight.ndim == loss.ndim
    return weight_reduce_loss(loss, weight, reduction, avg_factor)
155,911 | import torch
import torch.nn as nn
from .utils import weight_reduce_loss
def weight_reduce_loss(loss, weight=None, reduction="mean", avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch
            ("none" | "mean" | "sum").
        avg_factor (float): Average factor when computing the mean of losses;
            when given it replaces the element count as the denominator.

    Returns:
        Tensor: Processed loss values.
    """
    # if weight is specified, apply element-wise weight
    if weight is not None:
        loss = loss * weight
    # if avg_factor is not specified, just reduce the loss
    if avg_factor is None:
        # NOTE: `reduce_loss` is a sibling helper defined elsewhere in this module.
        loss = reduce_loss(loss, reduction)
    else:
        # if reduction is mean, then average the loss by avg_factor
        if reduction == "mean":
            # Avoid causing ZeroDivisionError when avg_factor is 0.0.
            eps = torch.finfo(loss.dtype).eps
            loss = loss.sum() / (avg_factor + eps)
        # if reduction is 'none', then do nothing, otherwise raise an error
        # (this branch also rejects any unrecognized reduction string)
        elif reduction != "none":
            raise ValueError('avg_factor can not be used with reduction="sum"')
    return loss
The provided code snippet includes necessary dependencies for implementing the `dice_loss` function. Write a Python function `def dice_loss( preds, targets, weight=None, eps: float = 1e-4, reduction: str = "mean", avg_factor: int = None, )` to solve the following problem:
Compute the DICE loss, similar to generalized IOU for masks Args: preds (torch.Tensor): A float tensor of arbitrary shape. The predictions for each example. targets (torch.Tensor): A float tensor with the same shape as inputs. Stores the binary classification label for each element in inputs (0 for the negative class and 1 for the positive class). weight (torch.Tensor, optional): The weight of loss for each prediction, has a shape (n,). Defaults to None. eps (float): Avoid dividing by zero. Default: 1e-4. avg_factor (int, optional): Average factor that is used to average the loss. Default: None. Return: torch.Tensor: The computed dice loss.
Here is the function:
def dice_loss(
    preds,
    targets,
    weight=None,
    eps: float = 1e-4,
    reduction: str = "mean",
    avg_factor: int = None,
):
    """Soft Dice loss (1 - Dice coefficient), a generalized-IoU-style
    objective commonly used for mask prediction.

    Args:
        preds (torch.Tensor): Predictions of arbitrary shape; flattened to
            (N, -1) internally.
        targets (torch.Tensor): Binary targets with the same shape as
            ``preds`` (1 positive, 0 negative).
        weight (torch.Tensor, optional): Per-sample loss weight of shape (n,).
        eps (float): Smoothing term avoiding division by zero. Default: 1e-4.
        reduction (str): 'none' | 'mean' | 'sum'. Default: 'mean'.
        avg_factor (int, optional): Custom denominator used when averaging.

    Returns:
        torch.Tensor: The computed dice loss.
    """
    flat_preds = preds.flatten(1)
    flat_targets = targets.flatten(1).float()
    # Per-sample soft intersection and union, smoothed by ``eps``.
    numerator = 2 * torch.sum(flat_preds * flat_targets, 1) + eps
    denominator = torch.sum(flat_preds, 1) + torch.sum(flat_targets, 1) + eps
    # The extra "+ 1" Laplace smoothing mirrors the original implementation.
    loss = 1 - (numerator + 1) / (denominator + 1)
    if weight is not None:
        assert weight.ndim == loss.ndim
        assert len(weight) == len(flat_preds)
    return weight_reduce_loss(loss, weight, reduction, avg_factor)
155,912 | import torch
import torch.nn as nn
from .utils import weight_reduce_loss
def weight_reduce_loss(loss, weight=None, reduction="mean", avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch
            ("none" | "mean" | "sum").
        avg_factor (float): Average factor when computing the mean of losses;
            when given it replaces the element count as the denominator.

    Returns:
        Tensor: Processed loss values.
    """
    # if weight is specified, apply element-wise weight
    if weight is not None:
        loss = loss * weight
    # if avg_factor is not specified, just reduce the loss
    if avg_factor is None:
        # NOTE: `reduce_loss` is a sibling helper defined elsewhere in this module.
        loss = reduce_loss(loss, reduction)
    else:
        # if reduction is mean, then average the loss by avg_factor
        if reduction == "mean":
            # Avoid causing ZeroDivisionError when avg_factor is 0.0.
            eps = torch.finfo(loss.dtype).eps
            loss = loss.sum() / (avg_factor + eps)
        # if reduction is 'none', then do nothing, otherwise raise an error
        # (this branch also rejects any unrecognized reduction string)
        elif reduction != "none":
            raise ValueError('avg_factor can not be used with reduction="sum"')
    return loss
The provided code snippet includes necessary dependencies for implementing the `giou_loss` function. Write a Python function `def giou_loss( preds: torch.Tensor, targets: torch.Tensor, weight=None, eps: float = 1e-6, reduction: str = "mean", avg_factor: int = None, )` to solve the following problem:
r"""`Generalized Intersection over Union: A Metric and A Loss for Bounding Box Regression <https://arxiv.org/abs/1902.09630>`_. Args: preds (torch.Tensor): Predicted bboxes of format (x1, y1, x2, y2), shape (n, 4). targets (torch.Tensor): Corresponding gt bboxes, shape (n, 4). eps (float): Eps to avoid log(0). Return: Tensor: Loss tensor.
Here is the function:
def giou_loss(
    preds: torch.Tensor,
    targets: torch.Tensor,
    weight=None,
    eps: float = 1e-6,
    reduction: str = "mean",
    avg_factor: int = None,
):
    r"""`Generalized Intersection over Union: A Metric and A Loss for Bounding
    Box Regression <https://arxiv.org/abs/1902.09630>`_.

    Args:
        preds (torch.Tensor): Predicted bboxes in (x1, y1, x2, y2) format,
            shape (n, 4).
        targets (torch.Tensor): Corresponding gt bboxes, shape (n, 4).
        weight (torch.Tensor, optional): Element-wise loss weights.
        eps (float): Eps to avoid division by zero.
        reduction (str): 'none' | 'mean' | 'sum'. Default: 'mean'.
        avg_factor (int, optional): Custom denominator used when averaging.

    Return:
        Tensor: Loss tensor.
    """
    if targets.numel() == 0:
        # No ground truth: keep the graph connected with a zero-valued loss.
        return preds.sum() * 0

    px1, py1, px2, py2 = preds.unbind(dim=-1)
    gx1, gy1, gx2, gy2 = targets.unbind(dim=-1)

    assert (px2 >= px1).all(), "bad box: x1 larger than x2"
    assert (py2 >= py1).all(), "bad box: y1 larger than y2"

    # Intersection rectangle corners.
    ix1 = torch.max(px1, gx1)
    iy1 = torch.max(py1, gy1)
    ix2 = torch.min(px2, gx2)
    iy2 = torch.min(py2, gy2)

    inter = torch.zeros_like(px1)
    overlap = (iy2 > iy1) & (ix2 > ix1)
    inter[overlap] = (ix2[overlap] - ix1[overlap]) * (iy2[overlap] - iy1[overlap])

    union = (px2 - px1) * (py2 - py1) + (gx2 - gx1) * (gy2 - gy1) - inter
    iou = inter / (union + eps)

    # Smallest axis-aligned box enclosing both prediction and target.
    ex1 = torch.min(px1, gx1)
    ey1 = torch.min(py1, gy1)
    ex2 = torch.max(px2, gx2)
    ey2 = torch.max(py2, gy2)
    enclose_area = (ex2 - ex1) * (ey2 - ey1)

    giou = iou - ((enclose_area - union) / (enclose_area + eps))
    loss = 1 - giou
    if weight is not None:
        assert weight.ndim == loss.ndim
    return weight_reduce_loss(loss, weight, reduction, avg_factor)
155,913 | import warnings
import torch.nn as nn
import torch.nn.functional as F
from .utils import weight_reduce_loss
def weight_reduce_loss(loss, weight=None, reduction="mean", avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch
            ("none" | "mean" | "sum").
        avg_factor (float): Average factor when computing the mean of losses;
            when given it replaces the element count as the denominator.

    Returns:
        Tensor: Processed loss values.
    """
    # if weight is specified, apply element-wise weight
    if weight is not None:
        loss = loss * weight
    # if avg_factor is not specified, just reduce the loss
    if avg_factor is None:
        # NOTE: `reduce_loss` is a sibling helper defined elsewhere in this module.
        loss = reduce_loss(loss, reduction)
    else:
        # if reduction is mean, then average the loss by avg_factor
        if reduction == "mean":
            # Avoid causing ZeroDivisionError when avg_factor is 0.0.
            eps = torch.finfo(loss.dtype).eps
            loss = loss.sum() / (avg_factor + eps)
        # if reduction is 'none', then do nothing, otherwise raise an error
        # (this branch also rejects any unrecognized reduction string)
        elif reduction != "none":
            raise ValueError('avg_factor can not be used with reduction="sum"')
    return loss
def cross_entropy(
    preds,
    targets,
    weight=None,
    class_weight=None,
    reduction="mean",
    avg_factor=None,
    ignore_index=-100,
    avg_non_ignore=False,
):
    """Cross-entropy classification loss with optional sample weighting.

    Args:
        preds (torch.Tensor): Predicted logits.
        targets (torch.Tensor): Integer class labels.
        weight (torch.Tensor, optional): Sample-wise loss weight.
        class_weight (torch.Tensor, optional): Per-class rescaling weight.
        reduction (str): 'none' | 'mean' | 'sum'. Default: 'mean'.
        avg_factor (int, optional): Custom denominator used when averaging.
        ignore_index (int | None): Target value whose loss is ignored;
            ``None`` maps to F.cross_entropy's default of -100.
        avg_non_ignore (bool): If True (and ``avg_factor`` is unset with
            'mean' reduction), average only over non-ignored targets,
            matching PyTorch's native cross_entropy behavior.

    Returns:
        torch.Tensor: The reduced cross-entropy loss.
    """
    # The default value of ignore_index is the same as F.cross_entropy.
    if ignore_index is None:
        ignore_index = -100
    raw = F.cross_entropy(
        preds,
        targets,
        weight=class_weight,
        reduction="none",
        ignore_index=ignore_index,
    )
    # average loss over non-ignored elements
    # pytorch's official cross_entropy average loss over non-ignored elements
    # refer to https://github.com/pytorch/pytorch/blob/56b43f4fec1f76953f15a627694d4bba34588969/torch/nn/functional.py#L2660 # noqa
    if avg_factor is None and avg_non_ignore and reduction == "mean":
        avg_factor = targets.numel() - (targets == ignore_index).sum().item()
    # apply weights and do the reduction
    if weight is not None:
        weight = weight.float()
    return weight_reduce_loss(raw, weight=weight, reduction=reduction, avg_factor=avg_factor)
155,918 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from timm.models.layers import trunc_normal_, DropPath
from detectron2.utils.logger import setup_logger
from detectron2.modeling.backbone import Backbone
from detrex.layers import dcn_v3 as opsm
class to_channels_first(nn.Module):
    """Permute a channels-last tensor (N, H, W, C) to channels-first (N, C, H, W)."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        # (N, H, W, C) -> (N, C, H, W)
        return torch.permute(x, (0, 3, 1, 2))
class to_channels_last(nn.Module):
    """Permute a channels-first tensor (N, C, H, W) to channels-last (N, H, W, C)."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        # (N, C, H, W) -> (N, H, W, C)
        return torch.permute(x, (0, 2, 3, 1))
def build_norm_layer(dim,
                     norm_layer,
                     in_format='channels_last',
                     out_format='channels_last',
                     eps=1e-6):
    """Build a normalization layer wrapped with any layout permutes it needs.

    BatchNorm2d operates on channels-first tensors while LayerNorm (as used
    here) normalizes the trailing dim (channels-last), so permute modules are
    inserted whenever ``in_format``/``out_format`` disagree with the norm's
    native layout.

    Args:
        dim (int): Number of channels to normalize.
        norm_layer (str): 'BN' or 'LN'.
        in_format (str): 'channels_last' or 'channels_first' input layout.
        out_format (str): 'channels_last' or 'channels_first' output layout.
        eps (float): Epsilon for LayerNorm. Default: 1e-6.

    Returns:
        nn.Sequential: The (possibly permute-wrapped) normalization layer.

    Raises:
        NotImplementedError: For unsupported ``norm_layer`` values.
    """
    if norm_layer not in ('BN', 'LN'):
        raise NotImplementedError(
            f'build_norm_layer does not support {norm_layer}')
    modules = []
    if norm_layer == 'BN':
        # BatchNorm2d is channels-first; convert on the way in/out as needed.
        if in_format == 'channels_last':
            modules.append(to_channels_first())
        modules.append(nn.BatchNorm2d(dim))
        if out_format == 'channels_last':
            modules.append(to_channels_last())
    else:  # 'LN'
        if in_format == 'channels_first':
            modules.append(to_channels_last())
        modules.append(nn.LayerNorm(dim, eps=eps))
        if out_format == 'channels_first':
            modules.append(to_channels_first())
    return nn.Sequential(*modules)
155,919 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from timm.models.layers import trunc_normal_, DropPath
from detectron2.utils.logger import setup_logger
from detectron2.modeling.backbone import Backbone
from detrex.layers import dcn_v3 as opsm
def build_act_layer(act_layer):
    """Instantiate an activation module by name.

    Args:
        act_layer (str): One of 'ReLU', 'SiLU' or 'GELU'.

    Returns:
        nn.Module: A freshly constructed activation layer; ReLU and SiLU
        are created with ``inplace=True``.

    Raises:
        NotImplementedError: For any unsupported name.
    """
    if act_layer == 'ReLU':
        return nn.ReLU(inplace=True)
    if act_layer == 'SiLU':
        return nn.SiLU(inplace=True)
    if act_layer == 'GELU':
        return nn.GELU()
    raise NotImplementedError(f'build_act_layer does not support {act_layer}')
155,920 | import warnings
from typing import Tuple
import torch.nn as nn
from detectron2.modeling.backbone import Backbone
from detectron2.utils import comm
from detectron2.utils.logger import setup_logger
The provided code snippet includes necessary dependencies for implementing the `log_timm_feature_info` function. Write a Python function `def log_timm_feature_info(feature_info)` to solve the following problem:
Print feature_info of timm backbone to help development and debug. Args: feature_info (list[dict] | timm.models.features.FeatureInfo | None): feature_info of timm backbone.
Here is the function:
def log_timm_feature_info(feature_info):
    """Log the feature metadata reported by a timm backbone.

    Useful during development and debugging to see which stages a backbone
    exposes and with what channels/strides.

    Args:
        feature_info (list[dict] | timm.models.features.FeatureInfo | None):
            feature_info of timm backbone.
    """
    logger = setup_logger(name="timm backbone")
    if feature_info is None:
        logger.warning("This backbone does not have feature_info")
        return
    if isinstance(feature_info, list):
        for feat_idx, each_info in enumerate(feature_info):
            logger.info(f"backbone feature_info[{feat_idx}]: {each_info}")
        return
    # Assume a FeatureInfo-like object; fall back to a warning otherwise.
    try:
        logger.info(f"backbone out_indices: {feature_info.out_indices}")
        logger.info(f"backbone out_channels: {feature_info.channels()}")
        logger.info(f"backbone out_strides: {feature_info.reduction()}")
    except AttributeError:
        logger.warning("Unexpected format of backbone feature_info")
155,921 | import logging
import math
import fvcore.nn.weight_init as weight_init
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor, Size
from typing import Union, List
from torch.nn.parameter import Parameter
import numbers
from detectron2.layers import CNNBlockBase, Conv2d, get_norm
from detectron2.modeling.backbone.fpn import _assert_strides_are_log2_contiguous
from fairscale.nn.checkpoint import checkpoint_wrapper
from timm.models.layers import DropPath, Mlp, trunc_normal_
from detectron2.modeling.backbone import Backbone
from .utils import (
PatchEmbed,
add_decomposed_rel_pos,
get_abs_pos,
window_partition,
window_unpartition,
)
The provided code snippet includes necessary dependencies for implementing the `get_vit_lr_decay_rate` function. Write a Python function `def get_vit_lr_decay_rate(name, lr_decay_rate=1.0, num_layers=12)` to solve the following problem:
Calculate lr decay rate for different ViT blocks. Args: name (string): parameter name. lr_decay_rate (float): base lr decay rate. num_layers (int): number of ViT blocks. Returns: lr decay rate for the given parameter.
Here is the function:
def get_vit_lr_decay_rate(name, lr_decay_rate=1.0, num_layers=12):
    """
    Calculate lr decay rate for different ViT blocks.

    Parameters closer to the input (patch/pos embeddings, early blocks)
    receive a smaller multiplier; everything outside the backbone keeps 1.0.

    Args:
        name (string): parameter name.
        lr_decay_rate (float): base lr decay rate.
        num_layers (int): number of ViT blocks.

    Returns:
        lr decay rate for the given parameter.
    """
    depth = num_layers + 1
    layer_id = depth
    if name.startswith("backbone"):
        if ".pos_embed" in name or ".patch_embed" in name:
            # Embedding layers decay the most (treated as layer 0).
            layer_id = 0
        elif ".blocks." in name and ".residual." not in name:
            # Extract the block index, e.g. "3" from "...blocks.3.attn.qkv".
            block_idx = name.partition(".blocks.")[2].split(".")[0]
            layer_id = int(block_idx) + 1
    return lr_decay_rate ** (depth - layer_id)
155,922 | import logging
import math
from functools import partial
import fvcore.nn.weight_init as weight_init
import torch
import torch.nn as nn
import torch.nn.functional as F
from detectron2.layers import CNNBlockBase, Conv2d, get_norm
from detectron2.modeling.backbone.fpn import _assert_strides_are_log2_contiguous
from detectron2.modeling.backbone import Backbone
from .eva_02_utils import (
PatchEmbed,
add_decomposed_rel_pos,
get_abs_pos,
window_partition,
window_unpartition,
VisionRotaryEmbeddingFast,
)
The provided code snippet includes necessary dependencies for implementing the `get_vit_lr_decay_rate` function. Write a Python function `def get_vit_lr_decay_rate(name, lr_decay_rate=1.0, num_layers=12)` to solve the following problem:
Calculate lr decay rate for different ViT blocks. Args: name (string): parameter name. lr_decay_rate (float): base lr decay rate. num_layers (int): number of ViT blocks. Returns: lr decay rate for the given parameter.
Here is the function:
def get_vit_lr_decay_rate(name, lr_decay_rate=1.0, num_layers=12):
    """
    Calculate lr decay rate for different ViT blocks.

    Args:
        name (string): parameter name.
        lr_decay_rate (float): base lr decay rate.
        num_layers (int): number of ViT blocks.

    Returns:
        lr decay rate for the given parameter.
    """
    # Default: treat the parameter as "after the last block" -> multiplier 1.0.
    layer_id = num_layers + 1
    if name.startswith("backbone"):
        if ".pos_embed" in name or ".patch_embed" in name:
            # Embedding layers decay the most (layer 0).
            layer_id = 0
        elif ".blocks." in name and ".residual." not in name:
            # Parse the block index from names like "backbone.blocks.<i>....".
            layer_id = int(name[name.find(".blocks.") :].split(".")[2]) + 1
    return lr_decay_rate ** (num_layers + 1 - layer_id)
155,929 | import numpy as np
from typing import List
import fvcore.nn.weight_init as weight_init
import torch
import torch.nn.functional as F
from torch import nn
from detectron2.layers import (
CNNBlockBase,
Conv2d,
DeformConv,
ModulatedDeformConv,
ShapeSpec,
get_norm,
)
from detectron2.modeling.backbone import Backbone
class BasicBlock(CNNBlockBase):
    """
    The basic residual block for ResNet-18 and ResNet-34 defined in :paper:`ResNet`,
    with two 3x3 conv layers and a projection shortcut if needed.

    Args:
        in_channels (int): Number of input channels.
        out_channels (int): Number of output channels.
        stride (int): Stride for the first conv.
        norm (str or callable): normalization for all conv layers.
            See :func:`detectron2.layers.get_norm` for supported format.
    """

    def __init__(self, in_channels, out_channels, *, stride=1, norm="BN"):
        super().__init__(in_channels, out_channels, stride)

        # 1x1 projection shortcut only when the channel count changes;
        # otherwise the identity path is used.
        if in_channels != out_channels:
            self.shortcut = Conv2d(
                in_channels,
                out_channels,
                kernel_size=1,
                stride=stride,
                bias=False,
                norm=get_norm(norm, out_channels),
            )
        else:
            self.shortcut = None

        self.conv1 = Conv2d(
            in_channels,
            out_channels,
            kernel_size=3,
            stride=stride,
            padding=1,
            bias=False,
            norm=get_norm(norm, out_channels),
        )
        self.conv2 = Conv2d(
            out_channels,
            out_channels,
            kernel_size=3,
            stride=1,
            padding=1,
            bias=False,
            norm=get_norm(norm, out_channels),
        )

        # MSRA (He) initialization for all conv layers in the block.
        for layer in [self.conv1, self.conv2, self.shortcut]:
            if layer is not None:  # shortcut can be None
                weight_init.c2_msra_fill(layer)

    def forward(self, x):
        """Run conv1 -> relu -> conv2, add the (projected) shortcut, relu."""
        out = self.conv1(x)
        out = F.relu_(out)
        out = self.conv2(out)

        if self.shortcut is not None:
            shortcut = self.shortcut(x)
        else:
            shortcut = x

        out += shortcut
        out = F.relu_(out)
        return out
class BottleneckBlock(CNNBlockBase):
    """
    The standard bottleneck residual block used by ResNet-50, 101 and 152
    defined in :paper:`ResNet`. It contains 3 conv layers with kernels
    1x1, 3x3, 1x1, and a projection shortcut if needed.

    Args:
        in_channels (int): Number of input channels.
        out_channels (int): Number of output channels.
        bottleneck_channels (int): number of output channels for the 3x3
            "bottleneck" conv layers.
        stride (int): Stride of the block.
        num_groups (int): number of groups for the 3x3 conv layer.
        norm (str or callable): normalization for all conv layers.
            See :func:`layers.get_norm` for supported format.
        stride_in_1x1 (bool): when stride>1, whether to put stride in the
            first 1x1 convolution or the bottleneck 3x3 convolution.
        dilation (int): the dilation rate of the 3x3 conv layer.
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        *,
        bottleneck_channels,
        stride=1,
        num_groups=1,
        norm="BN",
        stride_in_1x1=False,
        dilation=1,
    ):
        super().__init__(in_channels, out_channels, stride)

        # 1x1 projection shortcut only when the channel count changes.
        if in_channels != out_channels:
            self.shortcut = Conv2d(
                in_channels,
                out_channels,
                kernel_size=1,
                stride=stride,
                bias=False,
                norm=get_norm(norm, out_channels),
            )
        else:
            self.shortcut = None

        # The original MSRA ResNet models have stride in the first 1x1 conv
        # The subsequent fb.torch.resnet and Caffe2 ResNe[X]t implementations have
        # stride in the 3x3 conv
        stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride)

        self.conv1 = Conv2d(
            in_channels,
            bottleneck_channels,
            kernel_size=1,
            stride=stride_1x1,
            bias=False,
            norm=get_norm(norm, bottleneck_channels),
        )

        self.conv2 = Conv2d(
            bottleneck_channels,
            bottleneck_channels,
            kernel_size=3,
            stride=stride_3x3,
            padding=1 * dilation,
            bias=False,
            groups=num_groups,
            dilation=dilation,
            norm=get_norm(norm, bottleneck_channels),
        )

        self.conv3 = Conv2d(
            bottleneck_channels,
            out_channels,
            kernel_size=1,
            bias=False,
            norm=get_norm(norm, out_channels),
        )

        # MSRA (He) initialization for all conv layers in the block.
        for layer in [self.conv1, self.conv2, self.conv3, self.shortcut]:
            if layer is not None:  # shortcut can be None
                weight_init.c2_msra_fill(layer)

        # Zero-initialize the last normalization in each residual branch,
        # so that at the beginning, the residual branch starts with zeros,
        # and each residual block behaves like an identity.
        # See Sec 5.1 in "Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour":
        # "For BN layers, the learnable scaling coefficient γ is initialized
        # to be 1, except for each residual block's last BN
        # where γ is initialized to be 0."

        # nn.init.constant_(self.conv3.norm.weight, 0)
        # TODO this somehow hurts performance when training GN models from scratch.
        # Add it as an option when we need to use this code to train a backbone.

    def forward(self, x):
        """Forward function of `BottleneckBlock`."""
        out = self.conv1(x)
        out = F.relu_(out)

        out = self.conv2(out)
        out = F.relu_(out)

        out = self.conv3(out)

        if self.shortcut is not None:
            shortcut = self.shortcut(x)
        else:
            shortcut = x

        out += shortcut
        out = F.relu_(out)
        return out
class DeformBottleneckBlock(CNNBlockBase):
    """
    Similar to :class:`BottleneckBlock`, but with
    paper `Deformable Convolutional Networks
    <https://arxiv.org/pdf/1703.06211.pdf>`_ in the 3x3 convolution.
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        *,
        bottleneck_channels,
        stride=1,
        num_groups=1,
        norm="BN",
        stride_in_1x1=False,
        dilation=1,
        deform_modulated=False,
        deform_num_groups=1,
    ):
        super().__init__(in_channels, out_channels, stride)
        # Whether to use modulated (DCNv2) or plain (DCNv1) deformable conv.
        self.deform_modulated = deform_modulated

        # 1x1 projection shortcut only when the channel count changes.
        if in_channels != out_channels:
            self.shortcut = Conv2d(
                in_channels,
                out_channels,
                kernel_size=1,
                stride=stride,
                bias=False,
                norm=get_norm(norm, out_channels),
            )
        else:
            self.shortcut = None

        stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride)

        self.conv1 = Conv2d(
            in_channels,
            bottleneck_channels,
            kernel_size=1,
            stride=stride_1x1,
            bias=False,
            norm=get_norm(norm, bottleneck_channels),
        )

        if deform_modulated:
            deform_conv_op = ModulatedDeformConv
            # offset channels are 2 or 3 (if with modulated) * kernel_size * kernel_size
            offset_channels = 27
        else:
            deform_conv_op = DeformConv
            offset_channels = 18

        # Regular conv that predicts the sampling offsets (and mask for DCNv2).
        self.conv2_offset = Conv2d(
            bottleneck_channels,
            offset_channels * deform_num_groups,
            kernel_size=3,
            stride=stride_3x3,
            padding=1 * dilation,
            dilation=dilation,
        )
        self.conv2 = deform_conv_op(
            bottleneck_channels,
            bottleneck_channels,
            kernel_size=3,
            stride=stride_3x3,
            padding=1 * dilation,
            bias=False,
            groups=num_groups,
            dilation=dilation,
            deformable_groups=deform_num_groups,
            norm=get_norm(norm, bottleneck_channels),
        )

        self.conv3 = Conv2d(
            bottleneck_channels,
            out_channels,
            kernel_size=1,
            bias=False,
            norm=get_norm(norm, out_channels),
        )

        for layer in [self.conv1, self.conv2, self.conv3, self.shortcut]:
            if layer is not None:  # shortcut can be None
                weight_init.c2_msra_fill(layer)

        # Zero offsets so the deformable conv initially samples the same
        # locations as a regular 3x3 convolution.
        nn.init.constant_(self.conv2_offset.weight, 0)
        nn.init.constant_(self.conv2_offset.bias, 0)

    def forward(self, x):
        """Forward function of `DeformBottleneckBlock`."""
        out = self.conv1(x)
        out = F.relu_(out)

        if self.deform_modulated:
            # The offset branch packs (offset_x, offset_y, modulation mask).
            offset_mask = self.conv2_offset(out)
            offset_x, offset_y, mask = torch.chunk(offset_mask, 3, dim=1)
            offset = torch.cat((offset_x, offset_y), dim=1)
            mask = mask.sigmoid()
            out = self.conv2(out, offset, mask)
        else:
            offset = self.conv2_offset(out)
            out = self.conv2(out, offset)
        out = F.relu_(out)

        out = self.conv3(out)

        if self.shortcut is not None:
            shortcut = self.shortcut(x)
        else:
            shortcut = x

        out += shortcut
        out = F.relu_(out)
        return out
class ResNet(Backbone):
    """
    Implement paper `Deep Residual Learning for Image Recognition
    <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        stem (nn.Module): a stem module.
        stages (list[list[detectron2.layers.CNNBlockBase]]): several (typically 4) stages,
            each contains multiple :class:`detectron2.layers.CNNBlockBase`.
        num_classes (None or int): if None, will not perform classification.
            Otherwise, will create a linear layer.
        out_features (list[str]): name of the layers whose outputs should
            be returned in forward. Can be anything in "stem", "linear", or "res2" ...
            If None, will return the output of the last layer.
        freeze_at (int): The number of stages at the beginning to freeze.
            see :meth:`freeze` for detailed explanation.
    """

    def __init__(self, stem, stages, num_classes=None, out_features=None, freeze_at=0):
        super().__init__()
        self.stem = stem
        self.num_classes = num_classes

        current_stride = self.stem.stride
        self._out_feature_strides = {"stem": current_stride}
        self._out_feature_channels = {"stem": self.stem.out_channels}

        self.stage_names, self.stages = [], []

        if out_features is not None:
            # Avoid keeping unused layers in this module. They consume extra memory
            # and may cause allreduce to fail
            num_stages = max(
                [{"res2": 1, "res3": 2, "res4": 3, "res5": 4}.get(f, 0) for f in out_features]
            )
            stages = stages[:num_stages]
        for i, blocks in enumerate(stages):
            assert len(blocks) > 0, len(blocks)
            for block in blocks:
                assert isinstance(block, CNNBlockBase), block

            # Stages are registered as "res2" ... "res5", matching FPN naming.
            name = "res" + str(i + 2)
            stage = nn.Sequential(*blocks)

            self.add_module(name, stage)
            self.stage_names.append(name)
            self.stages.append(stage)

            # The stage's output stride is the product of its blocks' strides.
            self._out_feature_strides[name] = current_stride = int(
                current_stride * np.prod([k.stride for k in blocks])
            )
            self._out_feature_channels[name] = curr_channels = blocks[-1].out_channels
        self.stage_names = tuple(self.stage_names)  # Make it static for scripting

        if num_classes is not None:
            self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
            self.linear = nn.Linear(curr_channels, num_classes)

            # Sec 5.1 in "Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour":
            # "The 1000-way fully-connected layer is initialized by
            # drawing weights from a zero-mean Gaussian with standard deviation of 0.01."
            nn.init.normal_(self.linear.weight, std=0.01)
            name = "linear"

        if out_features is None:
            out_features = [name]
        self._out_features = out_features
        assert len(self._out_features)
        children = [x[0] for x in self.named_children()]
        for out_feature in self._out_features:
            assert out_feature in children, "Available children: {}".format(", ".join(children))
        self.freeze(freeze_at)

    def forward(self, x):
        """
        Args:
            x: Tensor of shape (N,C,H,W). H, W must be a multiple of ``self.size_divisibility``.

        Returns:
            dict[str->Tensor]: names and the corresponding features
        """
        assert x.dim() == 4, f"ResNet takes an input of shape (N, C, H, W). Got {x.shape} instead!"
        outputs = {}
        x = self.stem(x)
        if "stem" in self._out_features:
            outputs["stem"] = x
        for name, stage in zip(self.stage_names, self.stages):
            x = stage(x)
            if name in self._out_features:
                outputs[name] = x
        if self.num_classes is not None:
            x = self.avgpool(x)
            x = torch.flatten(x, 1)
            x = self.linear(x)
            if "linear" in self._out_features:
                outputs["linear"] = x
        return outputs

    def output_shape(self):
        """Return a {feature name: ShapeSpec} mapping for the requested outputs."""
        return {
            name: ShapeSpec(
                channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]
            )
            for name in self._out_features
        }

    def freeze(self, freeze_at=0):
        """
        Freeze the first several stages of the ResNet. Commonly used in
        fine-tuning.

        Layers that produce the same feature map spatial size are defined as one
        "stage" by paper `Feature Pyramid Networks for Object Detection
        <https://arxiv.org/pdf/1612.03144.pdf>`_.

        Args:
            freeze_at (int): number of stages to freeze.
                `1` means freezing the stem. `2` means freezing the stem and
                one residual stage, etc.

        Returns:
            nn.Module: this ResNet itself
        """
        if freeze_at >= 1:
            self.stem.freeze()
        # Stages are numbered from 2 ("res2"), so enumerate starts at 2.
        for idx, stage in enumerate(self.stages, start=2):
            if freeze_at >= idx:
                for block in stage.children():
                    block.freeze()
        return self
def make_stage(block_class, num_blocks, *, in_channels, out_channels, **kwargs):
"""
Create a list of blocks of the same type that forms one ResNet stage.
Args:
block_class (type): a subclass of ``detectron2.layers.CNNBlockBase`` that's
used to create all blocks in this stage. A module of this type
must not change spatial resolution of inputs unless its stride != 1.
num_blocks (int): number of blocks in this stage
in_channels (int): input channels of the entire stage.
out_channels (int): output channels of **every block** in the stage.
kwargs: other arguments passed to the constructor of
`block_class`. If the argument name is "xx_per_block", the
argument is a list of values to be passed to each block in the
stage. Otherwise, the same argument is passed to every block
in the stage.
Returns:
list[detectron2.layers.CNNBlockBase]: a list of block module.
Examples:
::
stage = ResNet.make_stage(
BottleneckBlock, 3, in_channels=16, out_channels=64,
bottleneck_channels=16, num_groups=1,
stride_per_block=[2, 1, 1],
dilations_per_block=[1, 1, 2]
)
Usually, layers that produce the same feature map spatial size are defined as one
"stage" (in paper `Feature Pyramid Networks for Object Detection
<https://arxiv.org/pdf/1612.03144.pdf>`_).
Under such definition, ``stride_per_block[1:]`` should all be 1.
"""
blocks = []
for i in range(num_blocks):
curr_kwargs = {}
for k, v in kwargs.items():
if k.endswith("_per_block"):
assert len(v) == num_blocks, (
f"Argument '{k}' of make_stage should have the "
f"same length as num_blocks={num_blocks}."
)
newk = k[: -len("_per_block")]
assert newk not in kwargs, f"Cannot call make_stage with both {k} and {newk}!"
curr_kwargs[newk] = v[i]
else:
curr_kwargs[k] = v
blocks.append(
block_class(in_channels=in_channels, out_channels=out_channels, **curr_kwargs)
)
in_channels = out_channels
return blocks
def make_default_stages(depth, block_class=None, **kwargs):
"""
Created list of ResNet stages from pre-defined depth (one of 18, 34, 50, 101, 152).
If it doesn't create the ResNet variant you need, please use :meth:`make_stage`
instead for fine-grained customization.
Args:
depth (int): depth of ResNet
block_class (type): the CNN block class. Has to accept
`bottleneck_channels` argument for depth > 50.
By default it is BasicBlock or BottleneckBlock, based on the
depth.
kwargs:
other arguments to pass to `make_stage`. Should not contain
stride and channels, as they are predefined for each depth.
Returns:
list[list[detectron2.layers.CNNBlockBase]]: modules in all stages; see arguments of
:class:`ResNet`.
"""
num_blocks_per_stage = {
18: [2, 2, 2, 2],
34: [3, 4, 6, 3],
50: [3, 4, 6, 3],
101: [3, 4, 23, 3],
152: [3, 8, 36, 3],
}[depth]
if block_class is None:
block_class = BasicBlock if depth < 50 else BottleneckBlock
if depth < 50:
in_channels = [64, 64, 128, 256]
out_channels = [64, 128, 256, 512]
else:
in_channels = [64, 256, 512, 1024]
out_channels = [256, 512, 1024, 2048]
ret = []
for (n, s, i, o) in zip(num_blocks_per_stage, [1, 2, 2, 2], in_channels, out_channels):
if depth >= 50:
kwargs["bottleneck_channels"] = o // 4
ret.append(
ResNet.make_stage(
block_class=block_class,
num_blocks=n,
stride_per_block=[s] + [1] * (n - 1),
in_channels=i,
out_channels=o,
**kwargs,
)
)
return ret
The provided code snippet includes necessary dependencies for implementing the `make_stage` function. Write a Python function `def make_stage( depth: int = 50, norm: float = "FrozenBN", num_groups: int = 1, width_per_group: int = 64, in_channels: int = 64, out_channels: int = 256, stride_in_1x1: bool = False, res5_dilation: int = 1, deform_on_per_stage: List[bool] = [False, False, False, False], deform_modulated: bool = False, deform_num_groups: int = 1, )` to solve the following problem:
Modified from `detectron2.modeling.backbone.build_resnet_backbone <https://github.com/facebookresearch/detectron2/blob/717ab9f0aeca216a2f800e43d705766251ba3a55/detectron2/modeling/backbone/resnet.py#L614>`_ Create a list of blocks of the same type that forms one ResNet stage. Args: depth (int): The depth of ResNet. Default: 50. norm (str or callable): Normalization for all conv layers. See :func:`detectron2.layers.get_norm` for supported format. Default: `FrozenBN`. num_groups (int): The number of groups for the 3x3 conv layer. Default: 1. width_per_group (int): Baseline width of each group. Scaling this parameters will scale the width of all bottleneck layers. Default: 64. in_channels (int): Output feature channels of the `Stem` Block. Needs to be set to 64 for R18 and R34. Default: 64. out_channels (int): Output width of res2. Scaling this parameters will scale the width of all 1x1 convs in ResNet. Default: 256. stride_in_1x1 (bool): Place the stride 2 conv on the 1x1 filter. Use True only for the original MSRA ResNet; use False for C2 and Torch models. Default: False. res5_dilation (int): Apply dilation in stage "res5". Default: 1. deform_on_per_stage (List[bool]): Apply Deformable Convolution in stages. Specify if apply `deform_conv` on Res2, Res3, Res4, Res5. Default: `[False, False, False, False]`. deform_modulated: Use True to use modulated deform_conv (DeformableV2, https://arxiv.org/abs/1811.11168); Use False for DeformableV1. Default: False. deform_num_groups (int): Number of groups in deformable conv. Default: 1. Returns: list[detectron2.layers.CNNBlockBase]: a list of block module. Examples: :: from detrex.modeling.backbone import make_stage, ResNet, BasicStem resnet50_dc5 = ResNet( stem=BasicStem(in_channels=3, out_channels=64, norm="FrozenBN"), stages=make_stage( depth=50, norm="FrozenBN", in_channels=64, out_channels=256, res5_dilation=2, ), out_features=["res2", "res3", "res4", "res5"], freeze_at=1, )
Here is the function:
def make_stage(
    depth: int = 50,
    norm: str = "FrozenBN",
    num_groups: int = 1,
    width_per_group: int = 64,
    in_channels: int = 64,
    out_channels: int = 256,
    stride_in_1x1: bool = False,
    res5_dilation: int = 1,
    deform_on_per_stage: List[bool] = None,
    deform_modulated: bool = False,
    deform_num_groups: int = 1,
):
    """
    Modified from `detectron2.modeling.backbone.build_resnet_backbone
    <https://github.com/facebookresearch/detectron2/blob/717ab9f0aeca216a2f800e43d705766251ba3a55/detectron2/modeling/backbone/resnet.py#L614>`_
    Create the list of ResNet stages (res2-res5).
    Args:
        depth (int): The depth of ResNet. Default: 50.
        norm (str or callable): Normalization for all conv layers.
            See :func:`detectron2.layers.get_norm` for supported format.
            Default: `FrozenBN`.
        num_groups (int): The number of groups for the 3x3 conv layer. Default: 1.
        width_per_group (int): Baseline width of each group.
            Scaling this parameters will scale the width of all bottleneck layers.
            Default: 64.
        in_channels (int): Output feature channels of the `Stem` Block. Needs
            to be set to 64 for R18 and R34. Default: 64.
        out_channels (int): Output width of res2. Scaling this parameters
            will scale the width of all 1x1 convs in ResNet. Default: 256.
        stride_in_1x1 (bool): Place the stride 2 conv on the 1x1 filter.
            Use True only for the original MSRA ResNet;
            use False for C2 and Torch models. Default: False.
        res5_dilation (int): Apply dilation in stage "res5". Default: 1.
        deform_on_per_stage (List[bool]): Apply Deformable Convolution in stages.
            Specify if apply `deform_conv` on Res2, Res3, Res4, Res5.
            Default: None, meaning `[False, False, False, False]`.
        deform_modulated: Use True to use modulated deform_conv
            (DeformableV2, https://arxiv.org/abs/1811.11168); Use False for DeformableV1.
            Default: False.
        deform_num_groups (int): Number of groups in deformable conv. Default: 1.
    Returns:
        list[detectron2.layers.CNNBlockBase]: a list of block module.
    Examples:
    ::
        from detrex.modeling.backbone import make_stage, ResNet, BasicStem
        resnet50_dc5 = ResNet(
            stem=BasicStem(in_channels=3, out_channels=64, norm="FrozenBN"),
            stages=make_stage(
                depth=50,
                norm="FrozenBN",
                in_channels=64,
                out_channels=256,
                res5_dilation=2,
            ),
            out_features=["res2", "res3", "res4", "res5"],
            freeze_at=1,
        )
    """
    # `deform_on_per_stage` defaults to None (instead of a shared mutable list)
    # and falls back to "no deformable conv anywhere"; `norm` was previously
    # annotated as `float`, but it is a norm-layer name or callable.
    if deform_on_per_stage is None:
        deform_on_per_stage = [False, False, False, False]
    bottleneck_channels = num_groups * width_per_group
    assert res5_dilation in {1, 2}, "res5_dilation cannot be {}.".format(res5_dilation)
    num_blocks_per_stage = {
        18: [2, 2, 2, 2],
        34: [3, 4, 6, 3],
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3],
    }[depth]
    if depth in [18, 34]:
        assert out_channels == 64, "Must set out_channels = 64 for R18/R34"
        assert not any(deform_on_per_stage), "deform_on_per_stage unsupported for R18/R34"
        assert res5_dilation == 1, "Must set res5_dilation = 1 for R18/R34"
        assert num_groups == 1, "Must set num_groups = 1 for R18/R34"
    stages = []
    for idx, stage_idx in enumerate(range(2, 6)):
        # res5_dilation == 2 replaces the res5 stride with dilation (DC5 backbones).
        dilation = res5_dilation if stage_idx == 5 else 1
        first_stride = 1 if idx == 0 or (stage_idx == 5 and dilation == 2) else 2
        stage_kargs = {
            "num_blocks": num_blocks_per_stage[idx],
            "stride_per_block": [first_stride] + [1] * (num_blocks_per_stage[idx] - 1),
            "in_channels": in_channels,
            "out_channels": out_channels,
            "norm": norm,
        }
        # Use BasicBlock for R18 and R34.
        if depth in [18, 34]:
            stage_kargs["block_class"] = BasicBlock
        else:
            stage_kargs["bottleneck_channels"] = bottleneck_channels
            stage_kargs["stride_in_1x1"] = stride_in_1x1
            stage_kargs["dilation"] = dilation
            stage_kargs["num_groups"] = num_groups
            if deform_on_per_stage[idx]:
                stage_kargs["block_class"] = DeformBottleneckBlock
                stage_kargs["deform_modulated"] = deform_modulated
                stage_kargs["deform_num_groups"] = deform_num_groups
            else:
                stage_kargs["block_class"] = BottleneckBlock
        blocks = ResNet.make_stage(**stage_kargs)
        # Each stage doubles the channel widths of the next one.
        in_channels = out_channels
        out_channels *= 2
        bottleneck_channels *= 2
        stages.append(blocks)
    return stages
155,932 | import os
import pkg_resources
from omegaconf import OmegaConf
from detectron2.config import LazyConfig
The provided code snippet includes necessary dependencies for implementing the `try_get_key` function. Write a Python function `def try_get_key(cfg, *keys, default=None)` to solve the following problem:
Try to select keys from the lazy cfg until the first key that exists; otherwise return default.
Here is the function:
def try_get_key(cfg, *keys, default=None):
    """
    Try to select keys from a lazy config until the first key that exists;
    otherwise return ``default``.

    Args:
        cfg: an omegaconf config object.
        *keys (str): dotted key paths, tried in order.
        default: value returned when none of the keys exist.
    """
    # A private sentinel distinguishes "key missing" from a stored None value.
    # Create it once instead of once per loop iteration.
    _missing = object()
    for k in keys:
        value = OmegaConf.select(cfg, k, default=_missing)
        if value is not _missing:
            return value
    return default
155,933 | import os
import pkg_resources
from omegaconf import OmegaConf
from detectron2.config import LazyConfig
The provided code snippet includes necessary dependencies for implementing the `get_config` function. Write a Python function `def get_config(config_path)` to solve the following problem:
Returns a config object from a config_path. Args: config_path (str): config file name relative to detrex's "configs/" directory, e.g., "common/train.py" Returns: omegaconf.DictConfig: a config object
Here is the function:
def get_config(config_path):
    """
    Load a lazy config shipped with detrex, addressed relative to the
    package's "configs/" directory.

    Args:
        config_path (str): config file name relative to detrex's "configs/"
            directory, e.g., "common/train.py"
    Returns:
        omegaconf.DictConfig: a config object
    Raises:
        RuntimeError: if the requested config file does not exist in the package.
    """
    cfg_file = pkg_resources.resource_filename(
        "detrex.config", os.path.join("configs", config_path)
    )
    if not os.path.exists(cfg_file):
        raise RuntimeError("{} not available in detrex configs!".format(config_path))
    return LazyConfig.load(cfg_file)
155,934 | import torch.distributed as dist
import os
import torch
import builtins
import datetime
import subprocess
from detectron2.utils import comm
def get_rank() -> int:
    """Return the distributed rank of this process, or 0 when
    torch.distributed is unavailable or not initialized (single-process runs)."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_rank()
    return 0
155,935 | import torch.distributed as dist
import os
import torch
import builtins
import datetime
import subprocess
from detectron2.utils import comm
def setup_for_distributed(is_master):
    """
    Replace the builtin ``print`` so that only the master process prints.

    Non-master processes stay silent unless the caller passes ``force=True``
    or the world size is larger than 8; every printed line is prefixed with
    a timestamp.
    """
    builtin_print = builtins.print

    def print(*args, **kwargs):
        # Short-circuit keeps get_world_size() from being called when force is set.
        force = kwargs.pop('force', False) or (get_world_size() > 8)
        if is_master or force:
            timestamp = datetime.datetime.now().time()
            builtin_print('[{}] '.format(timestamp), end='')  # print with time stamp
            builtin_print(*args, **kwargs)

    builtins.print = print
def slurm_init_distributed_mode(args):
    """
    Initialize torch.distributed for a job launched through SLURM.

    Reads SLURM environment variables to derive the rank/GPU of this process,
    initializes the NCCL process group, and rebuilds detectron2's per-machine
    local process group. Mutates ``args`` in place (``dist_url``, ``rank``,
    ``gpu``) and expects ``args.slurm`` (ddp_comm_mode, master_port, ngpus)
    and ``args.world_size`` to be set by the caller.
    """
    assert 'SLURM_PROCID' in os.environ
    assert hasattr(args, 'slurm')
    ######################################
    # NOTE: using file://xxxx as dis_url is not stable
    # https://shomy.top/2022/01/05/torch-ddp-intro/
    if args.slurm.ddp_comm_mode == 'tcp':
        node_list = os.environ['SLURM_NODELIST']
        # The first host in the SLURM node list acts as the rendezvous master.
        master_addr = subprocess.getoutput(f'scontrol show hostname {node_list} | head -n1')
        # explicit tcp url
        args.dist_url = "tcp://%s:%s"%(master_addr, args.slurm.master_port)
        # alternatively, use env vars as below
        # os.environ['MASTER_ADDR'] = master_addr
        # os.environ['MASTER_PORT'] = f'{args.slurm.master_port}'
        # os.environ['RANK'] = str(args.rank)
        # os.environ['LOCAL_RANK'] = str(args.rank % torch.cuda.device_count())
        # os.environ['WORLD_SIZE'] = str(args.world_size)
        # args.dist_url = "env://"
    ######################################
    args.rank = int(os.environ['SLURM_PROCID'])
    # Map the global rank onto a local GPU index.
    args.gpu = args.rank % torch.cuda.device_count()
    torch.cuda.set_device(args.gpu)
    print('| distributed init (rank {}): {}, gpu {}'.format(
        args.rank, args.dist_url, args.gpu), flush=True)
    dist.init_process_group(backend='nccl', init_method=args.dist_url,
                            world_size=args.world_size, rank=args.rank)
    assert comm._LOCAL_PROCESS_GROUP is None
    # Rebuild detectron2's per-machine process group: one subgroup per machine,
    # presumably so comm.* local-group helpers work under SLURM — confirm against
    # detectron2.utils.comm internals when upgrading detectron2.
    n_gpus_per_machine = args.slurm.ngpus
    num_machines = args.world_size // n_gpus_per_machine
    machine_rank = args.rank // n_gpus_per_machine
    for i in range(num_machines):
        ranks_on_i = list(range(i * n_gpus_per_machine, (i + 1) * n_gpus_per_machine))
        pg = dist.new_group(ranks_on_i)
        if i == machine_rank:
            comm._LOCAL_PROCESS_GROUP = pg
    comm.synchronize()
    # torch.distributed.barrier()
    setup_for_distributed(args.rank == 0)
155,936 | from typing import List, Optional
import torch
import torchvision
from torch import Tensor
The provided code snippet includes necessary dependencies for implementing the `interpolate` function. Write a Python function `def interpolate(input, size=None, scale_factor=None, mode="nearest", align_corners=None)` to solve the following problem:
Equivalent to ``torch.nn.functional.interpolate``.
Here is the function:
def interpolate(input, size=None, scale_factor=None, mode="nearest", align_corners=None):
    # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor
    """
    Equivalent to ``torch.nn.functional.interpolate``.
    """
    # NOTE(review): torchvision.ops.misc.interpolate has been removed in newer
    # torchvision releases; torch.nn.functional.interpolate is the drop-in
    # replacement — confirm the pinned torchvision version before upgrading.
    return torchvision.ops.misc.interpolate(
        input, size=size, scale_factor=scale_factor, mode=mode, align_corners=align_corners
    )
155,937 | from typing import List, Optional
import torch
import torchvision
from torch import Tensor
The provided code snippet includes necessary dependencies for implementing the `inverse_sigmoid` function. Write a Python function `def inverse_sigmoid(x, eps=1e-3)` to solve the following problem:
The inverse function for the sigmoid activation function. Note: It might face numerical issues with fp16 and small eps.
Here is the function:
def inverse_sigmoid(x, eps=1e-3):
    """
    The inverse of the sigmoid activation (logit), computed as
    ``log(x / (1 - x))`` with numerator and denominator clamped to ``eps``
    for numerical stability.

    Note: It might face numerical issues with fp16 and small eps.
    """
    # Restrict x to valid probabilities, then keep both operands away from 0.
    x = x.clamp(min=0, max=1)
    numerator = x.clamp(min=eps)
    denominator = (1 - x).clamp(min=eps)
    return torch.log(numerator / denominator)
155,939 | import argparse
import os
from itertools import chain
import cv2
import tqdm
from detectron2.config import LazyConfig, instantiate
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data import detection_utils as utils
from detectron2.utils.logger import setup_logger
from detectron2.utils.visualizer import Visualizer
def setup(args):
    """
    Build the lazy config for the visualization script: load the file from
    ``args.config_file``, apply the command-line overrides in ``args.opts``,
    and force the training dataloader to load in the main process
    (``num_workers = 0``).
    """
    config = LazyConfig.load(args.config_file)
    config = LazyConfig.apply_overrides(config, args.opts)
    config.dataloader.train.num_workers = 0
    return config
155,940 | import argparse
import os
from itertools import chain
import cv2
import tqdm
from detectron2.config import LazyConfig, instantiate
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data import detection_utils as utils
from detectron2.utils.logger import setup_logger
from detectron2.utils.visualizer import Visualizer
def parse_args(in_args=None):
    """
    Parse command-line options for the ground-truth visualization script.

    Args:
        in_args (list[str] | None): argument strings; defaults to ``sys.argv[1:]``.
    Returns:
        argparse.Namespace: the parsed options.
    """
    arg_parser = argparse.ArgumentParser(description="Visualize ground-truth data")
    arg_parser.add_argument(
        "--source",
        choices=["annotation", "dataloader"],
        required=True,
        help="visualize the annotations or the data loader (with pre-processing)",
    )
    arg_parser.add_argument("--config-file", metavar="FILE", help="path to config file")
    arg_parser.add_argument("--output-dir", default="./", help="path to output directory")
    arg_parser.add_argument("--show", action="store_true", help="show output in a window")
    # Everything after the recognized options is forwarded as config overrides.
    arg_parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    return arg_parser.parse_args(in_args)
155,941 | import argparse
import os
from itertools import chain
import cv2
import tqdm
from detectron2.config import LazyConfig, instantiate
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data import detection_utils as utils
from detectron2.utils.logger import setup_logger
from detectron2.utils.visualizer import Visualizer
def output(vis, fname):
    """
    Show a visualization in a window or save it to disk.

    Relies on module-level globals set by the script body: ``args`` (for the
    ``--show`` flag) and ``dirname`` (output directory) — this helper is only
    meant to be called from this script.

    Args:
        vis: object exposing ``get_image()`` and ``save()`` (a Visualizer output).
        fname (str): file name used for logging and for the saved file.
    """
    if args.show:
        print(fname)
        # Reverse the channel axis — presumably RGB -> BGR for cv2.imshow; confirm
        # against the Visualizer output format.
        cv2.imshow("window", vis.get_image()[:, :, ::-1])
        cv2.waitKey()
    else:
        filepath = os.path.join(dirname, fname)
        print("Saving to {} ...".format(filepath))
        vis.save(filepath)
155,942 | import logging
import numpy as np
from collections import Counter
import tqdm
from fvcore.nn import flop_count_table
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import LazyConfig, instantiate
from detectron2.engine import default_argument_parser
from detectron2.utils.analysis import (
FlopCountAnalysis,
activation_count_operators,
parameter_count_table,
)
from detectron2.utils.logger import setup_logger
def setup(args):
    """
    Build the lazy config for the analysis script: load ``args.config_file``,
    apply command-line overrides from ``args.opts``, and set up the fvcore and
    detectron2 loggers.
    """
    config = LazyConfig.load(args.config_file)
    config = LazyConfig.apply_overrides(config, args.opts)
    setup_logger(name="fvcore")
    setup_logger()
    return config
155,943 | import logging
import numpy as np
from collections import Counter
import tqdm
from fvcore.nn import flop_count_table
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import LazyConfig, instantiate
from detectron2.engine import default_argument_parser
from detectron2.utils.analysis import (
FlopCountAnalysis,
activation_count_operators,
parameter_count_table,
)
from detectron2.utils.logger import setup_logger
logger = logging.getLogger("detectron2")
def do_flop(cfg):
    """
    Measure the FLOPs of ``cfg.model`` over the first ``args.num_inputs``
    samples of the test dataloader and log per-operator and total GFlops.

    Relies on the module-level ``args`` (command line) and ``logger``.
    """
    data_loader = instantiate(cfg.dataloader.test)
    model = instantiate(cfg.model)
    model.to(cfg.train.device)
    DetectionCheckpointer(model).load(cfg.train.init_checkpoint)
    model.eval()
    counts = Counter()
    total_flops = []
    for idx, data in zip(tqdm.trange(args.num_inputs), data_loader):  # noqa
        flops = FlopCountAnalysis(model, data)
        if idx > 0:
            # Only emit unsupported/uncalled-op warnings for the first sample.
            flops.unsupported_ops_warnings(False).uncalled_modules_warnings(False)
        counts += flops.by_operator()
        total_flops.append(flops.total())
    # NOTE(review): `flops` and `idx` hold values from the last loop iteration;
    # this raises NameError if the dataloader yields nothing — assumes
    # num_inputs >= 1 and a non-empty dataset.
    logger.info("Flops table computed from only one input sample:\n" + flop_count_table(flops))
    logger.info(
        "Average GFlops for each type of operators:\n"
        + str([(k, v / (idx + 1) / 1e9) for k, v in counts.items()])
    )
    logger.info(
        "Total GFlops: {:.1f}±{:.1f}".format(np.mean(total_flops) / 1e9, np.std(total_flops) / 1e9)
    )
155,944 | import logging
import numpy as np
from collections import Counter
import tqdm
from fvcore.nn import flop_count_table
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import LazyConfig, instantiate
from detectron2.engine import default_argument_parser
from detectron2.utils.analysis import (
FlopCountAnalysis,
activation_count_operators,
parameter_count_table,
)
from detectron2.utils.logger import setup_logger
logger = logging.getLogger("detectron2")
def do_activation(cfg):
    """
    Count the activations of ``cfg.model`` over the first ``args.num_inputs``
    samples of the test dataloader and log per-operator averages and totals.

    Relies on the module-level ``args`` (command line) and ``logger``.
    """
    data_loader = instantiate(cfg.dataloader.test)
    model = instantiate(cfg.model)
    model.to(cfg.train.device)
    DetectionCheckpointer(model).load(cfg.train.init_checkpoint)
    model.eval()
    counts = Counter()
    total_activations = []
    for idx, data in zip(tqdm.trange(args.num_inputs), data_loader):  # noqa
        count = activation_count_operators(model, data)
        counts += count
        total_activations.append(sum(count.values()))
    logger.info(
        "(Million) Activations for Each Type of Operators:\n"
        # BUGFIX: average over the number of processed inputs (idx + 1) instead
        # of idx, which raised ZeroDivisionError when num_inputs == 1 and was
        # off by one otherwise (do_flop above already divides by idx + 1).
        + str([(k, v / (idx + 1)) for k, v in counts.items()])
    )
    logger.info(
        "Total (Million) Activations: {}±{}".format(
            np.mean(total_activations), np.std(total_activations)
        )
    )
155,945 | import logging
import numpy as np
from collections import Counter
import tqdm
from fvcore.nn import flop_count_table
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import LazyConfig, instantiate
from detectron2.engine import default_argument_parser
from detectron2.utils.analysis import (
FlopCountAnalysis,
activation_count_operators,
parameter_count_table,
)
from detectron2.utils.logger import setup_logger
logger = logging.getLogger("detectron2")
def do_parameter(cfg):
    """Instantiate the model and log a per-module parameter-count table."""
    model = instantiate(cfg.model)
    table = parameter_count_table(model, max_depth=5)
    logger.info("Parameter Count:\n" + table)
155,946 | import logging
import numpy as np
from collections import Counter
import tqdm
from fvcore.nn import flop_count_table
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import LazyConfig, instantiate
from detectron2.engine import default_argument_parser
from detectron2.utils.analysis import (
FlopCountAnalysis,
activation_count_operators,
parameter_count_table,
)
from detectron2.utils.logger import setup_logger
logger = logging.getLogger("detectron2")
def do_structure(cfg):
    """Instantiate the model and log its module structure (its repr)."""
    logger.info("Model Structure:\n" + str(instantiate(cfg.model)))
155,947 | import logging
import os
import sys
import time
import torch
from torch.nn.parallel import DataParallel, DistributedDataParallel
from detectron2.config import LazyConfig, instantiate
from detectron2.engine import (
SimpleTrainer,
default_argument_parser,
default_setup,
hooks,
launch,
)
from detectron2.engine.defaults import create_ddp_model
from detectron2.evaluation import inference_on_dataset, print_csv_format
from detectron2.utils import comm
from detectron2.utils.file_io import PathManager
from detectron2.utils.events import (
CommonMetricPrinter,
JSONWriter,
TensorboardXWriter
)
from detectron2.checkpoint import DetectionCheckpointer
from detrex.utils import WandbWriter
from detrex.modeling import ema
class Trainer(SimpleTrainer):
    """
    A trainer combining detectron2's SimpleTrainer and AMPTrainer: the
    forward/backward pass optionally runs under ``torch.cuda.amp`` autocast
    with gradient scaling, and gradients can optionally be clipped.
    """
    def __init__(
        self,
        model,
        dataloader,
        optimizer,
        amp=False,
        clip_grad_params=None,
        grad_scaler=None,
    ):
        """
        Args:
            model: module to train; must return a loss dict (or a single loss
                tensor) in training mode.
            dataloader: an iterable yielding training batches.
            optimizer: a torch optimizer.
            amp (bool): enable automatic mixed-precision training.
            clip_grad_params (dict): kwargs for
                ``torch.nn.utils.clip_grad_norm_``; None disables clipping.
            grad_scaler: optional custom ``torch.cuda.amp.GradScaler``; built
                automatically when ``amp=True`` and none is given.
        """
        super().__init__(model=model, data_loader=dataloader, optimizer=optimizer)
        unsupported = "AMPTrainer does not support single-process multi-device training!"
        if isinstance(model, DistributedDataParallel):
            assert not (model.device_ids and len(model.device_ids) > 1), unsupported
        assert not isinstance(model, DataParallel), unsupported
        if amp:
            if grad_scaler is None:
                from torch.cuda.amp import GradScaler
                grad_scaler = GradScaler()
            self.grad_scaler = grad_scaler
        else:
            # BUGFIX: always define the attribute. state_dict()/load_state_dict()
            # read `self.grad_scaler` even when AMP is disabled and previously
            # raised AttributeError in that case.
            self.grad_scaler = None
        # set True to use amp training
        self.amp = amp
        # gradient clip hyper-params
        self.clip_grad_params = clip_grad_params

    def run_step(self):
        """
        Run one iteration: fetch data, forward (optionally under autocast),
        backward, optional gradient clipping, optimizer step, and metric logging.
        """
        assert self.model.training, "[Trainer] model was changed to eval mode!"
        assert torch.cuda.is_available(), "[Trainer] CUDA is required for AMP training!"
        from torch.cuda.amp import autocast
        start = time.perf_counter()
        # If you want to do something with the data, you can wrap the dataloader.
        data = next(self._data_loader_iter)
        data_time = time.perf_counter() - start
        # If you want to do something with the losses, you can wrap the model.
        with autocast(enabled=self.amp):
            loss_dict = self.model(data)
            if isinstance(loss_dict, torch.Tensor):
                losses = loss_dict
                loss_dict = {"total_loss": loss_dict}
            else:
                losses = sum(loss_dict.values())
        # If you need to accumulate gradients or do something similar, you can
        # wrap the optimizer with your custom `zero_grad()` method.
        self.optimizer.zero_grad()
        if self.amp:
            self.grad_scaler.scale(losses).backward()
            if self.clip_grad_params is not None:
                # Unscale first so clipping applies to the true gradient magnitudes.
                self.grad_scaler.unscale_(self.optimizer)
                self.clip_grads(self.model.parameters())
            self.grad_scaler.step(self.optimizer)
            self.grad_scaler.update()
        else:
            losses.backward()
            if self.clip_grad_params is not None:
                self.clip_grads(self.model.parameters())
            self.optimizer.step()
        self._write_metrics(loss_dict, data_time)

    def clip_grads(self, params):
        # Clip only parameters that actually received gradients this step.
        params = list(filter(lambda p: p.requires_grad and p.grad is not None, params))
        if len(params) > 0:
            return torch.nn.utils.clip_grad_norm_(
                parameters=params,
                **self.clip_grad_params,
            )

    def state_dict(self):
        # Persist the GradScaler state alongside the base trainer state when
        # AMP is active, so resumed runs keep the loss-scale schedule.
        ret = super().state_dict()
        if self.grad_scaler and self.amp:
            ret["grad_scaler"] = self.grad_scaler.state_dict()
        return ret

    def load_state_dict(self, state_dict):
        super().load_state_dict(state_dict)
        if self.grad_scaler and self.amp:
            self.grad_scaler.load_state_dict(state_dict["grad_scaler"])
def do_test(cfg, model, eval_only=False):
    """
    Evaluate ``model`` on ``cfg.dataloader.test`` (when an evaluator is
    configured); during training, additionally evaluate with EMA weights.

    Args:
        cfg: lazy config providing ``dataloader`` and ``train.model_ema``.
        model: the model to evaluate.
        eval_only (bool): set for `--eval-only` runs; evaluates once and returns.
    Returns:
        dict: evaluation results (EMA results merged in when EMA is enabled).
    """
    logger = logging.getLogger("detectron2")
    if eval_only:
        logger.info("Run evaluation under eval-only mode")
        if cfg.train.model_ema.enabled and cfg.train.model_ema.use_ema_weights_for_eval_only:
            # NOTE(review): this branch only logs — it assumes the EMA weights
            # were already loaded into `model` by the checkpointer; confirm in
            # the caller.
            logger.info("Run evaluation with EMA.")
        else:
            logger.info("Run evaluation without EMA.")
        if "evaluator" in cfg.dataloader:
            ret = inference_on_dataset(
                model, instantiate(cfg.dataloader.test), instantiate(cfg.dataloader.evaluator)
            )
            print_csv_format(ret)
            return ret
    # Training-time evaluation: first with the raw model weights...
    logger.info("Run evaluation without EMA.")
    if "evaluator" in cfg.dataloader:
        ret = inference_on_dataset(
            model, instantiate(cfg.dataloader.test), instantiate(cfg.dataloader.evaluator)
        )
        print_csv_format(ret)
    # ...then with the EMA weights temporarily swapped into the model.
    if cfg.train.model_ema.enabled:
        logger.info("Run evaluation with EMA.")
        with ema.apply_model_ema_and_restore(model):
            if "evaluator" in cfg.dataloader:
                ema_ret = inference_on_dataset(
                    model, instantiate(cfg.dataloader.test), instantiate(cfg.dataloader.evaluator)
                )
                print_csv_format(ema_ret)
                # NOTE(review): `ret` is unbound here (and at the return below)
                # when no evaluator is configured — this path would raise
                # UnboundLocalError; confirm an evaluator is always present.
                ret.update(ema_ret)
    return ret
The provided code snippet includes necessary dependencies for implementing the `do_train` function. Write a Python function `def do_train(args, cfg)` to solve the following problem:
Args: cfg: an object with the following attributes: model: instantiate to a module dataloader.{train,test}: instantiate to dataloaders dataloader.evaluator: instantiate to evaluator for test set optimizer: instantiate to an optimizer lr_multiplier: instantiate to a fvcore scheduler train: other misc config defined in `configs/common/train.py`, including: output_dir (str) init_checkpoint (str) amp.enabled (bool) max_iter (int) eval_period, log_period (int) device (str) checkpointer (dict) ddp (dict)
Here is the function:
def do_train(args, cfg):
    """
    Args:
        cfg: an object with the following attributes:
            model: instantiate to a module
            dataloader.{train,test}: instantiate to dataloaders
            dataloader.evaluator: instantiate to evaluator for test set
            optimizer: instantiate to an optimizer
            lr_multiplier: instantiate to a fvcore scheduler
            train: other misc config defined in `configs/common/train.py`, including:
                output_dir (str)
                init_checkpoint (str)
                amp.enabled (bool)
                max_iter (int)
                eval_period, log_period (int)
                device (str)
                checkpointer (dict)
                ddp (dict)
    """
    model = instantiate(cfg.model)
    logger = logging.getLogger("detectron2")
    logger.info("Model:\n{}".format(model))
    model.to(cfg.train.device)
    # instantiate optimizer (the optimizer config needs the live model to
    # build its parameter groups)
    cfg.optimizer.params.model = model
    optim = instantiate(cfg.optimizer)
    # build training loader
    train_loader = instantiate(cfg.dataloader.train)
    # create ddp model
    model = create_ddp_model(model, **cfg.train.ddp)
    # build model ema
    ema.may_build_model_ema(cfg, model)
    trainer = Trainer(
        model=model,
        dataloader=train_loader,
        optimizer=optim,
        amp=cfg.train.amp.enabled,
        clip_grad_params=cfg.train.clip_grad.params if cfg.train.clip_grad.enabled else None,
    )
    checkpointer = DetectionCheckpointer(
        model,
        cfg.train.output_dir,
        trainer=trainer,
        # save model ema
        **ema.may_get_ema_checkpointer(cfg, model)
    )
    # Metric writers only exist on the main process; worker processes get the
    # writer-related hooks replaced by None below.
    if comm.is_main_process():
        # writers = default_writers(cfg.train.output_dir, cfg.train.max_iter)
        output_dir = cfg.train.output_dir
        PathManager.mkdirs(output_dir)
        writers = [
            CommonMetricPrinter(cfg.train.max_iter),
            JSONWriter(os.path.join(output_dir, "metrics.json")),
            TensorboardXWriter(output_dir),
        ]
        if cfg.train.wandb.enabled:
            PathManager.mkdirs(cfg.train.wandb.params.dir)
            writers.append(WandbWriter(cfg))
    trainer.register_hooks(
        [
            hooks.IterationTimer(),
            ema.EMAHook(cfg, model) if cfg.train.model_ema.enabled else None,
            hooks.LRScheduler(scheduler=instantiate(cfg.lr_multiplier)),
            hooks.PeriodicCheckpointer(checkpointer, **cfg.train.checkpointer)
            if comm.is_main_process()
            else None,
            hooks.EvalHook(cfg.train.eval_period, lambda: do_test(cfg, model)),
            hooks.PeriodicWriter(
                writers,
                period=cfg.train.log_period,
            )
            if comm.is_main_process()
            else None,
        ]
    )
    checkpointer.resume_or_load(cfg.train.init_checkpoint, resume=args.resume)
    if args.resume and checkpointer.has_checkpoint():
        # The checkpoint stores the training iteration that just finished, thus we start
        # at the next iteration
        start_iter = trainer.iter + 1
    else:
        start_iter = 0
    trainer.train(start_iter, cfg.train.max_iter)
155,948 | import itertools
import logging
import psutil
import torch
import tqdm
from fvcore.common.timer import Timer
from torch.nn.parallel import DistributedDataParallel
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import LazyConfig, instantiate
from detectron2.data import (
DatasetFromList,
build_detection_test_loader,
build_detection_train_loader,
)
from detectron2.data.benchmark import DataLoaderBenchmark
from detectron2.engine import AMPTrainer, SimpleTrainer, default_argument_parser, hooks, launch
from detectron2.modeling import build_model
from detectron2.utils import comm
from detectron2.utils.collect_env import collect_env_info
from detectron2.utils.events import CommonMetricPrinter
from detectron2.utils.logger import setup_logger
logger = logging.getLogger("detectron2")
def setup(args):
def create_data_benchmark(cfg, args):
def RAM_msg():
def benchmark_data(args):
    """Benchmark the distributed dataloader.

    Runs one longer warmup/measurement pass, then ten short passes,
    logging process RAM usage before each to surface memory growth.
    """
    cfg = setup(args)
    logger.info("After spawning " + RAM_msg())
    bench = create_data_benchmark(cfg, args)
    bench.benchmark_distributed(250, 10)
    # A few extra short rounds to observe memory behavior over time.
    for round_idx in range(10):
        logger.info(f"Iteration {round_idx} " + RAM_msg())
        bench.benchmark_distributed(250, 1)
155,949 | import itertools
import logging
import psutil
import torch
import tqdm
from fvcore.common.timer import Timer
from torch.nn.parallel import DistributedDataParallel
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import LazyConfig, instantiate
from detectron2.data import (
DatasetFromList,
build_detection_test_loader,
build_detection_train_loader,
)
from detectron2.data.benchmark import DataLoaderBenchmark
from detectron2.engine import AMPTrainer, SimpleTrainer, default_argument_parser, hooks, launch
from detectron2.modeling import build_model
from detectron2.utils import comm
from detectron2.utils.collect_env import collect_env_info
from detectron2.utils.events import CommonMetricPrinter
from detectron2.utils.logger import setup_logger
logger = logging.getLogger("detectron2")
def setup(args):
    """Load the LazyConfig named by ``args.config_file``, apply the
    command-line overrides in ``args.opts``, set up per-rank logging,
    and return the resulting config object."""
    loaded = LazyConfig.load(args.config_file)
    loaded = LazyConfig.apply_overrides(loaded, args.opts)
    rank = comm.get_rank()
    setup_logger(distributed_rank=rank)
    return loaded
def create_data_benchmark(cfg, args):
    """Build a ``DataLoaderBenchmark`` reusing the training-dataloader
    settings from either a LazyConfig (``.py``) or a yacs-style config."""
    if args.config_file.endswith(".py"):
        # LazyConfig path: retarget the train-dataloader node at the benchmark.
        train_dl = cfg.dataloader.train
        train_dl._target_ = DataLoaderBenchmark
        return instantiate(train_dl)
    # yacs path: recover the loader kwargs, drop the unsupported option,
    # and retarget them at the benchmark class.
    loader_kwargs = build_detection_train_loader.from_config(cfg)
    loader_kwargs.pop("aspect_ratio_grouping", None)
    loader_kwargs["_target_"] = DataLoaderBenchmark
    return instantiate(loader_kwargs)
def benchmark_data_advanced(args):
    """Detailed dataloader benchmark to help locate performance bottlenecks.

    Rank 0 profiles each pipeline stage separately (dataset, mapper,
    workers, IPC); when running distributed, all ranks then time the full
    pipeline twice.
    """
    cfg = setup(args)
    bench = create_data_benchmark(cfg, args)
    if comm.get_rank() == 0:
        # Per-stage timings are only meaningful on a single process.
        bench.benchmark_dataset(100)
        bench.benchmark_mapper(100)
        bench.benchmark_workers(100, warmup=10)
        bench.benchmark_IPC(100, warmup=10)
    if comm.get_world_size() > 1:
        bench.benchmark_distributed(100)
        logger.info("Rerun ...")
        bench.benchmark_distributed(100)
155,950 | import itertools
import logging
import psutil
import torch
import tqdm
from fvcore.common.timer import Timer
from torch.nn.parallel import DistributedDataParallel
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import LazyConfig, instantiate
from detectron2.data import (
DatasetFromList,
build_detection_test_loader,
build_detection_train_loader,
)
from detectron2.data.benchmark import DataLoaderBenchmark
from detectron2.engine import AMPTrainer, SimpleTrainer, default_argument_parser, hooks, launch
from detectron2.modeling import build_model
from detectron2.utils import comm
from detectron2.utils.collect_env import collect_env_info
from detectron2.utils.events import CommonMetricPrinter
from detectron2.utils.logger import setup_logger
logger = logging.getLogger("detectron2")
def setup(args):
def benchmark_train(args):
    """Benchmark the training loop over a fixed set of cached batches.

    Builds the model/optimizer from the config, caches 100 batches from
    the real dataloader (so data loading is excluded from the timing),
    then times 400 training iterations, writing a torch profiler trace on
    the final one.
    """
    cfg = setup(args)
    model = instantiate(cfg.model)
    logger.info(f"Model:\n{model}")
    model.to(cfg.train.device)
    if comm.get_world_size() > 1:
        # Multi-GPU: wrap in DDP; buffers are not re-broadcast each step.
        model = DistributedDataParallel(
            model, device_ids=[comm.get_local_rank()], broadcast_buffers=False
        )
    cfg.optimizer.params.model = model
    optimizer = instantiate(cfg.optimizer)
    checkpointer = DetectionCheckpointer(model, optimizer=optimizer)
    checkpointer.load(cfg.train.init_checkpoint)
    cfg.dataloader.train.num_workers = 2
    data_loader = instantiate(cfg.dataloader.train)
    cached_batches = list(itertools.islice(data_loader, 100))

    def infinite_batches():
        # Serve the cached batches forever without copying or serializing.
        dataset = DatasetFromList(cached_batches, copy=False, serialize=False)
        while True:
            yield from dataset

    max_iter = 400
    trainer_cls = AMPTrainer if cfg.train.amp.enabled else SimpleTrainer
    trainer = trainer_cls(model, infinite_batches(), optimizer)
    trainer.register_hooks(
        [
            hooks.IterationTimer(),
            hooks.PeriodicWriter([CommonMetricPrinter(max_iter)]),
            # Capture a profiler trace only on the last iteration.
            hooks.TorchProfiler(
                lambda trainer: trainer.iter == max_iter - 1,
                cfg.train.output_dir,
                save_tensorboard=True,
            ),
        ]
    )
    trainer.train(1, max_iter)
155,951 | import itertools
import logging
import psutil
import torch
import tqdm
from fvcore.common.timer import Timer
from torch.nn.parallel import DistributedDataParallel
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import LazyConfig, instantiate
from detectron2.data import (
DatasetFromList,
build_detection_test_loader,
build_detection_train_loader,
)
from detectron2.data.benchmark import DataLoaderBenchmark
from detectron2.engine import AMPTrainer, SimpleTrainer, default_argument_parser, hooks, launch
from detectron2.modeling import build_model
from detectron2.utils import comm
from detectron2.utils.collect_env import collect_env_info
from detectron2.utils.events import CommonMetricPrinter
from detectron2.utils.logger import setup_logger
logger = logging.getLogger("detectron2")
def setup(args):
    """Load the LazyConfig, apply CLI overrides from ``args.opts``,
    set up per-rank logging, and return the config."""
    cfg = LazyConfig.load(args.config_file)
    cfg = LazyConfig.apply_overrides(cfg, args.opts)
    setup_logger(distributed_rank=comm.get_rank())
    return cfg
def benchmark_eval(args):
    """Benchmark inference speed on 100 cached inputs.

    Supports both yacs (``.yaml``) and LazyConfig (``.py``) configs; runs
    5 warmup forward passes and then times 300 iterations.
    """
    cfg = setup(args)
    if args.config_file.endswith(".yaml"):
        model = build_model(cfg)
        DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)
        cfg.defrost()
        cfg.DATALOADER.NUM_WORKERS = 0
        data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])
    else:
        model = instantiate(cfg.model)
        model.to(cfg.train.device)
        DetectionCheckpointer(model).load(cfg.train.init_checkpoint)
        cfg.dataloader.num_workers = 0
        data_loader = instantiate(cfg.dataloader.test)
    model.eval()
    logger.info(f"Model:\n{model}")

    # Cache 100 inputs so data loading is excluded from the measurement.
    cached_inputs = DatasetFromList(list(itertools.islice(data_loader, 100)), copy=False)

    def endless():
        while True:
            yield from cached_inputs

    # warmup
    for warm_idx in range(5):
        model(cached_inputs[warm_idx])
    max_iter = 300
    timer = Timer()
    with tqdm.tqdm(total=max_iter) as progress:
        for step, batch in enumerate(endless()):
            if step == max_iter:
                break
            model(batch)
            progress.update()
    logger.info("{} iters in {} seconds.".format(max_iter, timer.seconds()))
155,952 | import sys
import os
from tools.train_net import main
import hydra
from hydra.utils import get_original_cwd
from omegaconf import OmegaConf, DictConfig
from detectron2.engine import launch
from detectron2.config import LazyConfig
import os.path as osp
import submitit
import uuid
from pathlib import Path
from detrex.utils.dist import slurm_init_distributed_mode
def _find_free_port():
import socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Binding to port 0 will cause the OS to find an available port for us
sock.bind(("", 0))
port = sock.getsockname()[1]
sock.close()
# NOTE: there is still a chance the port could be taken by other processes.
return port
def get_dist_url(ddp_comm_mode='tcp', share_root=None):
    """Return the ``init_method`` URL for torch.distributed initialization.

    Args:
        ddp_comm_mode: ``'tcp'`` to initialize from environment variables
            (``env://``), or ``'file'`` to rendezvous via a shared file
            under ``share_root``.
        share_root: directory on a shared filesystem; required when
            ``ddp_comm_mode == 'file'``.

    Returns:
        str: a URL accepted by ``torch.distributed.init_process_group``.

    Raises:
        ValueError: if ``ddp_comm_mode`` is unknown, or ``'file'`` mode is
            requested without ``share_root``.
    """
    if ddp_comm_mode == 'tcp':
        return 'env://'
    if ddp_comm_mode == 'file':
        if share_root is None:
            # Raise (not assert) so the check survives `python -O`.
            raise ValueError("share_root must be set when ddp_comm_mode='file'")
        return get_init_file(share_root).as_uri()
    # Include the offending value so misconfigurations are easy to diagnose.
    raise ValueError(f"Unknown DDP communication mode: {ddp_comm_mode!r}")
class Trainer(object):
    """Submitit-compatible job callable for launching training on SLURM.

    The instance is pickled by submitit and executed on each cluster task;
    ``checkpoint()`` implements submitit's automatic-requeue protocol for
    timeouts and preemption.
    """
    def __init__(self, args):
        self.args = args  # run configuration; mutated with per-task info in _setup_gpu_args
    def __call__(self):
        """Per-task entry point: fill in rank/GPU info, init DDP, run main()."""
        self._setup_gpu_args()
        if self.args.world_size > 1:
            slurm_init_distributed_mode(self.args)
        if not self.args.eval_only:  # always auto resume if in training
            self.args.resume = True
        main(self.args)
    def checkpoint(self):  # called when a timeout or preemption signal is received
        """Return a DelayedSubmission that requeues this job and resumes."""
        import os
        import submitit
        # Recompute the rendezvous URL for the requeued job.
        self.args.dist_url = get_dist_url(
            ddp_comm_mode=self.args.slurm.ddp_comm_mode,
            share_root=self.args.slurm.share_root)
        self.args.resume = True
        print("Requeuing ", self.args)
        empty_trainer = type(self)(self.args)
        return submitit.helpers.DelayedSubmission(empty_trainer)
    def _setup_gpu_args(self):
        """Populate gpu/rank/world-size fields from the SLURM job environment."""
        import submitit
        job_env = submitit.JobEnvironment()
        # https://shomy.top/2022/01/05/torch-ddp-intro/
        # self.args.dist_url = f'tcp://{job_env.hostname}:{self.args.slurm.port}'
        # self.args.output_dir = self.args.slurm.job_dir
        self.args.gpu = job_env.local_rank
        self.args.rank = job_env.global_rank
        self.args.world_size = job_env.num_tasks
        self.args.machine_rank = job_env.node
        self.args.slurm.jobid = job_env.job_id  # kept in case of need, e.g. logging to wandb
        print(f"Process group: {job_env.num_tasks} tasks, rank: {job_env.global_rank}")
def main(args):
    """Launchable entry point: build the config, then evaluate or train.

    Loads the LazyConfig, applies command-line overrides, and optionally
    shortens the schedule for fast debugging. In eval-only mode it loads
    the checkpoint (optionally its EMA weights) before running the test
    loop; otherwise it dispatches to ``do_train``.
    """
    cfg = LazyConfig.load(args.config_file)
    cfg = LazyConfig.apply_overrides(cfg, args.opts)
    default_setup(cfg, args)
    # Enable fast debugging by running several iterations to check for any bugs.
    if cfg.train.fast_dev_run.enabled:
        cfg.train.max_iter = 20
        cfg.train.eval_period = 10
        cfg.train.log_period = 1
    if args.eval_only:
        model = instantiate(cfg.model)
        model.to(cfg.train.device)
        model = create_ddp_model(model)
        # Build the EMA shadow model so EMA weights stored in the
        # checkpoint can be loaded alongside the regular weights.
        ema.may_build_model_ema(cfg, model)
        DetectionCheckpointer(model, **ema.may_get_ema_checkpointer(cfg, model)).load(cfg.train.init_checkpoint)
        # Copy the EMA state into the model before evaluating, if configured.
        if cfg.train.model_ema.enabled and cfg.train.model_ema.use_ema_weights_for_eval_only:
            ema.apply_model_ema(model)
        print(do_test(cfg, model, eval_only=True))
    else:
        do_train(args, cfg)
def hydra_app(args: DictConfig):
    """Hydra entry point: normalize CLI args, then launch locally or via SLURM.

    Converts hydra overrides into detectron2-style ``args.opts``, resolves
    the config path relative to the original working directory, and either
    calls ``launch`` directly (no ``slurm`` config group) or submits a
    ``Trainer`` job through submitit.
    """
    # NOTE: enable writes to unknown fields of cfg,
    # hence it behaves like argparse.Namespace.
    # This is required as some args are determined at runtime.
    # https://stackoverflow.com/a/66296809
    OmegaConf.set_struct(args, False)
    # TODO: switch to hydra 1.3+, which naturally supports relative paths;
    # the following workaround is for hydra 1.1.2
    hydra_cfg = hydra.core.hydra_config.HydraConfig.get()
    # since hydra 1.1.2 will change PWD to run dir, get current work dir first
    args.config_file = osp.join(get_original_cwd(), args.config_file)
    # command line args starting with '+' are for overrides, except '+slurm=[cluster_id]'
    args.opts = [ x.replace('+', '') for x in hydra_cfg['overrides']['task'] if (x.startswith('+')
                 and not x.startswith('+slurm'))]
    # print(args.opts)
    hydra_run_dir = os.path.join(get_original_cwd(), hydra_cfg['run']['dir'])
    if args.auto_output_dir:
        # Reuse hydra's run directory as the training output directory.
        args.opts.append(f"train.output_dir={hydra_run_dir}")
    # print(args.opts)
    # test args
    # print(OmegaConf.to_yaml(args, resolve=True))
    if not hasattr(args, 'slurm'):  # run locally
        launch(
            main,
            args.num_gpus,
            num_machines=args.num_machines,
            machine_rank=args.machine_rank,
            dist_url=args.dist_url,
            args=(args,),
        )
    else:  # run with slurm
        if args.slurm.job_dir is None:  # use hydra run_dir as slurm output dir
            hydra_cfg = hydra.core.hydra_config.HydraConfig.get()
            args.slurm.job_dir = hydra_run_dir
        if args.slurm.master_port is None:  # automatically find free port for ddp communication
            args.slurm.master_port = _find_free_port()
        executor = submitit.AutoExecutor(folder=args.slurm.job_dir, slurm_max_num_timeout=30)
        ############## NOTE: this part is highly dependent on slurm version ##############
        kwargs = {}
        if args.slurm.comment:
            kwargs['slurm_comment'] = args.slurm.comment
        # NOTE: slurm of different versions may have different flags;
        # slurm_additional_parameters is flexible enough to cope with this scenario
        slurm_additional_parameters={'ntasks': args.slurm.nodes*args.slurm.ngpus,
                                     'gres': f'gpu:{args.slurm.ngpus}',
                                     'ntasks-per-node': args.slurm.ngpus}  # one task per GPU
        if args.slurm.exclude_node:
            slurm_additional_parameters['exclude'] = args.slurm.exclude_node
        if args.slurm.quotatype:
            slurm_additional_parameters['quotatype'] = args.slurm.quotatype
        ##################################################################################
        executor.update_parameters(
            ## original
            # mem_gb=40 * num_gpus_per_node,
            # gpus_per_node=num_gpus_per_node,
            # tasks_per_node=num_gpus_per_node, # one task per GPU
            # nodes=nodes,
            # timeout_min=timeout_min, # max is 60 * 72
            ## https://github.com/facebookincubator/submitit/issues/1639
            # mem_per_cpu=4000,
            # gpus_per_node=num_gpus_per_node,
            # cpus_per_task=4,
            cpus_per_task=args.slurm.cpus_per_task,
            nodes=args.slurm.nodes,
            slurm_additional_parameters=slurm_additional_parameters,
            timeout_min=args.slurm.timeout * 60,  # in minutes
            # Below are cluster dependent parameters
            slurm_partition=args.slurm.partition,
            slurm_signal_delay_s=120,
            **kwargs
        )
        executor.update_parameters(name=args.slurm.job_name)
        args.dist_url = get_dist_url(
            ddp_comm_mode=args.slurm.ddp_comm_mode,
            share_root=args.slurm.share_root)
        trainer = Trainer(args)
        job = executor.submit(trainer)
        print("Submitted job_id:", job.job_id)
155,953 | from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x22\x3e\
\x3c\
\x3f\x78\x6d\x6c\x20\x76\x65\x72\x73\x69\x6f\x6e\x3d\x22\x31\x2e\
\x30\x22\x20\x65\x6e\x63\x6f\x64\x69\x6e\x67\x3d\x22\x75\x74\x66\
\x2d\x38\x22\x3f\x3e\x0d\x0a\x3c\x21\x2d\x2d\x20\x47\x65\x6e\x65\
\x72\x61\x74\x6f\x72\x3a\x20\x41\x64\x6f\x62\x65\x20\x49\x6c\x6c\
\x75\x73\x74\x72\x61\x74\x6f\x72\x20\x31\x35\x2e\x30\x2e\x30\x2c\
\x20\x53\x56\x47\x20\x45\x78\x70\x6f\x72\x74\x20\x50\x6c\x75\x67\
\x2d\x49\x6e\x20\x2e\x20\x53\x56\x47\x20\x56\x65\x72\x73\x69\x6f\
\x6e\x3a\x20\x36\x2e\x30\x30\x20\x42\x75\x69\x6c\x64\x20\x30\x29\
\x20\x20\x2d\x2d\x3e\x0d\x0a\x3c\x21\x44\x4f\x43\x54\x59\x50\x45\
\x20\x73\x76\x67\x20\x50\x55\x42\x4c\x49\x43\x20\x22\x2d\x2f\x2f\
\x57\x33\x43\x2f\x2f\x44\x54\x44\x20\x53\x56\x47\x20\x31\x2e\x31\
\x2f\x2f\x45\x4e\x22\x20\x22\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\
\x77\x2e\x77\x33\x2e\x6f\x72\x67\x2f\x47\x72\x61\x70\x68\x69\x63\
\x73\x2f\x53\x56\x47\x2f\x31\x2e\x31\x2f\x44\x54\x44\x2f\x73\x76\
\x67\x31\x31\x2e\x64\x74\x64\x22\x3e\x0d\x0a\x3c\x73\x76\x67\x20\
\x76\x65\x72\x73\x69\x6f\x6e\x3d\x22\x31\x2e\x31\x22\x20\x69\x64\
\x3d\x22\xe5\x9b\xbe\xe5\xb1\x82\x5f\x31\x22\x20\x78\x6d\x6c\x6e\
\x73\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\x77\x2e\x77\x33\
\x2e\x6f\x72\x67\x2f\x32\x30\x30\x30\x2f\x73\x76\x67\x22\x20\x78\
\x6d\x6c\x6e\x73\x3a\x78\x6c\x69\x6e\x6b\x3d\x22\x68\x74\x74\x70\
\x3a\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\x67\x2f\x31\x39\
\x39\x39\x2f\x78\x6c\x69\x6e\x6b\x22\x20\x78\x3d\x22\x30\x70\x78\
\x22\x20\x79\x3d\x22\x30\x70\x78\x22\x0d\x0a\x09\x20\x77\x69\x64\
\x74\x68\x3d\x22\x31\x35\x38\x70\x78\x22\x20\x68\x65\x69\x67\x68\
\x74\x3d\x22\x33\x34\x70\x78\x22\x20\x76\x69\x65\x77\x42\x6f\x78\
\x3d\x22\x30\x20\x30\x20\x31\x35\x38\x20\x33\x34\x22\x20\x65\x6e\
\x61\x62\x6c\x65\x2d\x62\x61\x63\x6b\x67\x72\x6f\x75\x6e\x64\x3d\
\x22\x6e\x65\x77\x20\x30\x20\x30\x20\x31\x35\x38\x20\x33\x34\x22\
\x20\x78\x6d\x6c\x3a\x73\x70\x61\x63\x65\x3d\x22\x70\x72\x65\x73\
\x65\x72\x76\x65\x22\x3e\x0d\x0a\x3c\x70\x61\x74\x68\x20\x66\x69\
\x6c\x6c\x3d\x22\x23\x30\x30\x46\x41\x46\x30\x22\x20\x64\x3d\x22\
\x4d\x31\x31\x2e\x36\x34\x2c\x31\x33\x2e\x34\x33\x34\x76\x2d\x31\
\x2e\x33\x31\x31\x63\x2d\x30\x2e\x34\x35\x36\x2d\x30\x2e\x30\x35\
\x35\x2d\x30\x2e\x39\x31\x31\x2d\x30\x2e\x30\x39\x31\x2d\x31\x2e\
\x33\x38\x35\x2d\x30\x2e\x30\x39\x31\x63\x2d\x35\x2e\x36\x34\x37\
\x2c\x30\x2d\x31\x30\x2e\x32\x35\x36\x2c\x34\x2e\x35\x38\x38\x2d\
\x31\x30\x2e\x32\x35\x36\x2c\x31\x30\x2e\x32\x35\x31\x0d\x0a\x09\
\x63\x30\x2c\x33\x2e\x34\x35\x39\x2c\x31\x2e\x37\x33\x31\x2c\x36\
\x2e\x35\x33\x35\x2c\x34\x2e\x33\x37\x32\x2c\x38\x2e\x33\x39\x33\
\x63\x2d\x31\x2e\x37\x31\x32\x2d\x31\x2e\x38\x33\x39\x2d\x32\x2e\
\x37\x35\x2d\x34\x2e\x32\x37\x39\x2d\x32\x2e\x37\x35\x2d\x36\x2e\
\x39\x37\x33\x43\x31\x2e\x36\x33\x39\x2c\x31\x38\x2e\x31\x31\x33\
\x2c\x36\x2e\x31\x30\x32\x2c\x31\x33\x2e\x35\x36\x32\x2c\x31\x31\
\x2e\x36\x34\x2c\x31\x33\x2e\x34\x33\x34\x7a\x22\x2f\x3e\x0d\x0a\
\x3c\x70\x61\x74\x68\x20\x66\x69\x6c\x6c\x3d\x22\x23\x30\x30\x46\
\x41\x46\x30\x22\x20\x64\x3d\x22\x4d\x31\x31\x2e\x38\x37\x36\x2c\
\x32\x38\x2e\x33\x34\x36\x63\x32\x2e\x35\x31\x34\x2c\x30\x2c\x34\
\x2e\x35\x37\x32\x2d\x32\x2e\x30\x30\x34\x2c\x34\x2e\x36\x36\x34\
\x2d\x34\x2e\x34\x39\x38\x56\x31\x2e\x35\x34\x35\x68\x34\x2e\x30\
\x38\x31\x63\x2d\x30\x2e\x30\x39\x31\x2d\x30\x2e\x34\x35\x35\x2d\
\x30\x2e\x31\x32\x37\x2d\x30\x2e\x39\x32\x38\x2d\x30\x2e\x31\x32\
\x37\x2d\x31\x2e\x34\x32\x48\x31\x34\x2e\x39\x32\x0d\x0a\x09\x76\
\x32\x32\x2e\x33\x30\x33\x63\x2d\x30\x2e\x30\x39\x31\x2c\x32\x2e\
\x34\x39\x34\x2d\x32\x2e\x31\x35\x2c\x34\x2e\x34\x39\x38\x2d\x34\
\x2e\x36\x36\x34\x2c\x34\x2e\x34\x39\x38\x63\x2d\x30\x2e\x37\x38\
\x33\x2c\x30\x2d\x31\x2e\x35\x33\x2d\x30\x2e\x32\x30\x31\x2d\x32\
\x2e\x31\x36\x37\x2d\x30\x2e\x35\x34\x37\x43\x38\x2e\x39\x34\x34\
\x2c\x32\x37\x2e\x35\x38\x2c\x31\x30\x2e\x33\x32\x38\x2c\x32\x38\
\x2e\x33\x34\x36\x2c\x31\x31\x2e\x38\x37\x36\x2c\x32\x38\x2e\x33\
\x34\x36\x7a\x22\x2f\x3e\x0d\x0a\x3c\x70\x61\x74\x68\x20\x66\x69\
\x6c\x6c\x3d\x22\x23\x30\x30\x46\x41\x46\x30\x22\x20\x64\x3d\x22\
\x4d\x32\x38\x2e\x32\x35\x31\x2c\x39\x2e\x31\x31\x39\x56\x37\x2e\
\x38\x38\x31\x63\x2d\x31\x2e\x35\x34\x38\x2c\x30\x2d\x33\x2e\x30\
\x30\x36\x2d\x30\x2e\x34\x35\x35\x2d\x34\x2e\x32\x32\x36\x2d\x31\
\x2e\x32\x35\x36\x43\x32\x35\x2e\x31\x31\x38\x2c\x37\x2e\x38\x36\
\x33\x2c\x32\x36\x2e\x35\x39\x34\x2c\x38\x2e\x37\x35\x35\x2c\x32\
\x38\x2e\x32\x35\x31\x2c\x39\x2e\x31\x31\x39\x7a\x22\x2f\x3e\x0d\
\x0a\x3c\x70\x61\x74\x68\x20\x66\x69\x6c\x6c\x3d\x22\x23\x46\x46\
\x30\x30\x35\x30\x22\x20\x64\x3d\x22\x4d\x32\x34\x2e\x30\x34\x35\
\x2c\x36\x2e\x36\x32\x35\x63\x2d\x31\x2e\x31\x38\x34\x2d\x31\x2e\
\x33\x36\x35\x2d\x31\x2e\x39\x31\x33\x2d\x33\x2e\x31\x33\x31\x2d\
\x31\x2e\x39\x31\x33\x2d\x35\x2e\x30\x38\x68\x2d\x31\x2e\x34\x39\
\x34\x43\x32\x31\x2e\x30\x33\x39\x2c\x33\x2e\x36\x35\x38\x2c\x32\
\x32\x2e\x32\x39\x36\x2c\x35\x2e\x34\x37\x39\x2c\x32\x34\x2e\x30\
\x34\x35\x2c\x36\x2e\x36\x32\x35\x7a\x22\x2f\x3e\x0d\x0a\x3c\x70\
\x61\x74\x68\x20\x66\x69\x6c\x6c\x3d\x22\x23\x46\x46\x30\x30\x35\
\x30\x22\x20\x64\x3d\x22\x4d\x31\x30\x2e\x32\x35\x35\x2c\x31\x37\
\x2e\x35\x38\x34\x63\x2d\x32\x2e\x35\x38\x36\x2c\x30\x2d\x34\x2e\
\x36\x38\x32\x2c\x32\x2e\x30\x39\x34\x2d\x34\x2e\x36\x38\x32\x2c\
\x34\x2e\x36\x38\x63\x30\x2c\x31\x2e\x38\x30\x33\x2c\x31\x2e\x30\
\x32\x31\x2c\x33\x2e\x33\x35\x2c\x32\x2e\x35\x31\x34\x2c\x34\x2e\
\x31\x33\x33\x0d\x0a\x09\x63\x2d\x30\x2e\x35\x34\x36\x2d\x30\x2e\
\x37\x36\x35\x2d\x30\x2e\x38\x39\x33\x2d\x31\x2e\x37\x31\x31\x2d\
\x30\x2e\x38\x39\x33\x2d\x32\x2e\x37\x32\x39\x63\x30\x2d\x32\x2e\
\x35\x38\x36\x2c\x32\x2e\x30\x39\x35\x2d\x34\x2e\x36\x38\x31\x2c\
\x34\x2e\x36\x38\x32\x2d\x34\x2e\x36\x38\x31\x63\x30\x2e\x34\x37\
\x34\x2c\x30\x2c\x30\x2e\x39\x34\x37\x2c\x30\x2e\x30\x37\x34\x2c\
\x31\x2e\x33\x38\x34\x2c\x30\x2e\x32\x32\x76\x2d\x35\x2e\x36\x38\
\x0d\x0a\x09\x63\x2d\x30\x2e\x34\x35\x35\x2d\x30\x2e\x30\x35\x35\
\x2d\x30\x2e\x39\x31\x31\x2d\x30\x2e\x30\x39\x31\x2d\x31\x2e\x33\
\x38\x34\x2d\x30\x2e\x30\x39\x31\x63\x2d\x30\x2e\x30\x37\x33\x2c\
\x30\x2d\x30\x2e\x31\x36\x34\x2c\x30\x2d\x30\x2e\x32\x33\x37\x2c\
\x30\x76\x34\x2e\x33\x37\x43\x31\x31\x2e\x32\x30\x32\x2c\x31\x37\
\x2e\x36\x35\x38\x2c\x31\x30\x2e\x37\x34\x37\x2c\x31\x37\x2e\x35\
\x38\x34\x2c\x31\x30\x2e\x32\x35\x35\x2c\x31\x37\x2e\x35\x38\x34\
\x7a\x22\x2f\x3e\x0d\x0a\x3c\x70\x61\x74\x68\x20\x66\x69\x6c\x6c\
\x3d\x22\x23\x46\x46\x30\x30\x35\x30\x22\x20\x64\x3d\x22\x4d\x32\
\x38\x2e\x32\x35\x32\x2c\x39\x2e\x31\x31\x39\x76\x34\x2e\x33\x33\
\x33\x63\x2d\x32\x2e\x38\x39\x36\x2c\x30\x2d\x35\x2e\x35\x35\x36\
\x2d\x30\x2e\x39\x32\x38\x2d\x37\x2e\x37\x34\x32\x2d\x32\x2e\x34\
\x39\x34\x76\x31\x31\x2e\x33\x30\x36\x63\x30\x2c\x35\x2e\x36\x34\
\x36\x2d\x34\x2e\x35\x39\x2c\x31\x30\x2e\x32\x35\x2d\x31\x30\x2e\
\x32\x35\x35\x2c\x31\x30\x2e\x32\x35\x0d\x0a\x09\x63\x2d\x32\x2e\
\x31\x38\x36\x2c\x30\x2d\x34\x2e\x32\x30\x38\x2d\x30\x2e\x36\x39\
\x31\x2d\x35\x2e\x38\x36\x36\x2d\x31\x2e\x38\x35\x35\x63\x31\x2e\
\x38\x37\x36\x2c\x32\x2e\x30\x30\x33\x2c\x34\x2e\x35\x33\x36\x2c\
\x33\x2e\x32\x37\x37\x2c\x37\x2e\x35\x30\x35\x2c\x33\x2e\x32\x37\
\x37\x63\x35\x2e\x36\x34\x37\x2c\x30\x2c\x31\x30\x2e\x32\x35\x36\
\x2d\x34\x2e\x35\x38\x38\x2c\x31\x30\x2e\x32\x35\x36\x2d\x31\x30\
\x2e\x32\x35\x32\x56\x31\x32\x2e\x33\x37\x38\x0d\x0a\x09\x63\x32\
\x2e\x31\x38\x36\x2c\x31\x2e\x35\x36\x36\x2c\x34\x2e\x38\x36\x34\
\x2c\x32\x2e\x34\x39\x35\x2c\x37\x2e\x37\x34\x32\x2c\x32\x2e\x34\
\x39\x35\x56\x39\x2e\x33\x30\x31\x43\x32\x39\x2e\x33\x32\x37\x2c\
\x39\x2e\x32\x38\x33\x2c\x32\x38\x2e\x37\x38\x31\x2c\x39\x2e\x32\
\x32\x39\x2c\x32\x38\x2e\x32\x35\x32\x2c\x39\x2e\x31\x31\x39\x7a\
\x22\x2f\x3e\x0d\x0a\x3c\x70\x61\x74\x68\x20\x66\x69\x6c\x6c\x3d\
\x22\x23\x46\x46\x46\x46\x46\x46\x22\x20\x64\x3d\x22\x4d\x32\x30\
\x2e\x35\x30\x39\x2c\x32\x32\x2e\x32\x36\x36\x56\x31\x30\x2e\x39\
\x35\x38\x63\x32\x2e\x31\x38\x36\x2c\x31\x2e\x35\x36\x36\x2c\x34\
\x2e\x38\x36\x34\x2c\x32\x2e\x34\x39\x34\x2c\x37\x2e\x37\x34\x32\
\x2c\x32\x2e\x34\x39\x34\x56\x39\x2e\x31\x32\x63\x2d\x31\x2e\x36\
\x37\x36\x2d\x30\x2e\x33\x36\x34\x2d\x33\x2e\x31\x33\x33\x2d\x31\
\x2e\x32\x35\x36\x2d\x34\x2e\x32\x32\x37\x2d\x32\x2e\x34\x39\x34\
\x0d\x0a\x09\x63\x2d\x31\x2e\x37\x34\x39\x2d\x31\x2e\x31\x32\x39\
\x2d\x33\x2e\x30\x30\x35\x2d\x32\x2e\x39\x34\x39\x2d\x33\x2e\x33\
\x38\x38\x2d\x35\x2e\x30\x38\x68\x2d\x34\x2e\x30\x38\x56\x32\x33\
\x2e\x38\x35\x63\x2d\x30\x2e\x30\x39\x31\x2c\x32\x2e\x34\x39\x34\
\x2d\x32\x2e\x31\x35\x2c\x34\x2e\x34\x39\x36\x2d\x34\x2e\x36\x36\
\x34\x2c\x34\x2e\x34\x39\x36\x63\x2d\x31\x2e\x35\x36\x36\x2c\x30\
\x2d\x32\x2e\x39\x35\x31\x2d\x30\x2e\x37\x36\x35\x2d\x33\x2e\x37\
\x38\x39\x2d\x31\x2e\x39\x34\x37\x0d\x0a\x09\x63\x2d\x31\x2e\x34\
\x39\x34\x2d\x30\x2e\x37\x38\x33\x2d\x32\x2e\x35\x31\x34\x2d\x32\
\x2e\x33\x35\x2d\x32\x2e\x35\x31\x34\x2d\x34\x2e\x31\x33\x33\x63\
\x30\x2d\x32\x2e\x35\x38\x36\x2c\x32\x2e\x30\x39\x35\x2d\x34\x2e\
\x36\x38\x2c\x34\x2e\x36\x38\x32\x2d\x34\x2e\x36\x38\x63\x30\x2e\
\x34\x37\x34\x2c\x30\x2c\x30\x2e\x39\x34\x37\x2c\x30\x2e\x30\x37\
\x32\x2c\x31\x2e\x33\x38\x34\x2c\x30\x2e\x32\x31\x39\x76\x2d\x34\
\x2e\x33\x37\x0d\x0a\x09\x63\x2d\x35\x2e\x35\x33\x36\x2c\x30\x2e\
\x31\x32\x37\x2d\x31\x30\x2c\x34\x2e\x36\x37\x38\x2d\x31\x30\x2c\
\x31\x30\x2e\x32\x33\x31\x63\x30\x2c\x32\x2e\x36\x39\x35\x2c\x31\
\x2e\x30\x33\x39\x2c\x35\x2e\x31\x35\x32\x2c\x32\x2e\x37\x35\x31\
\x2c\x36\x2e\x39\x37\x35\x63\x31\x2e\x36\x35\x38\x2c\x31\x2e\x31\
\x36\x34\x2c\x33\x2e\x36\x39\x38\x2c\x31\x2e\x38\x35\x36\x2c\x35\
\x2e\x38\x36\x36\x2c\x31\x2e\x38\x35\x36\x0d\x0a\x09\x43\x31\x35\
\x2e\x39\x2c\x33\x32\x2e\x35\x31\x36\x2c\x32\x30\x2e\x35\x30\x39\
\x2c\x32\x37\x2e\x39\x30\x38\x2c\x32\x30\x2e\x35\x30\x39\x2c\x32\
\x32\x2e\x32\x36\x36\x7a\x22\x2f\x3e\x0d\x0a\x3c\x67\x3e\x0d\x0a\
\x09\x3c\x70\x61\x74\x68\x20\x66\x69\x6c\x6c\x3d\x22\x23\x46\x46\
\x46\x46\x46\x46\x22\x20\x64\x3d\x22\x4d\x34\x34\x2e\x35\x37\x36\
\x2c\x31\x35\x2e\x38\x37\x36\x63\x30\x2e\x33\x35\x33\x2d\x30\x2e\
\x30\x38\x2c\x30\x2e\x35\x32\x32\x2c\x30\x2e\x30\x36\x34\x2c\x30\
\x2e\x35\x30\x36\x2c\x30\x2e\x34\x33\x34\x6c\x2d\x30\x2e\x31\x32\
\x2c\x32\x2e\x32\x36\x35\x63\x2d\x30\x2e\x30\x31\x37\x2c\x30\x2e\
\x33\x37\x2d\x30\x2e\x32\x30\x31\x2c\x30\x2e\x35\x39\x35\x2d\x30\
\x2e\x35\x35\x34\x2c\x30\x2e\x36\x37\x35\x0d\x0a\x09\x09\x6c\x2d\
\x31\x2e\x36\x31\x34\x2c\x30\x2e\x34\x30\x39\x76\x35\x2e\x32\x30\
\x34\x63\x30\x2c\x30\x2e\x38\x32\x2d\x30\x2e\x30\x38\x39\x2c\x31\
\x2e\x34\x35\x35\x2d\x30\x2e\x32\x36\x35\x2c\x31\x2e\x39\x30\x34\
\x63\x2d\x30\x2e\x31\x36\x31\x2c\x30\x2e\x34\x34\x39\x2d\x30\x2e\
\x34\x36\x36\x2c\x30\x2e\x37\x39\x35\x2d\x30\x2e\x39\x31\x36\x2c\
\x31\x2e\x30\x33\x35\x63\x2d\x30\x2e\x37\x32\x33\x2c\x30\x2e\x34\
\x31\x38\x2d\x31\x2e\x37\x37\x35\x2c\x30\x2e\x36\x31\x39\x2d\x33\
\x2e\x31\x35\x37\x2c\x30\x2e\x36\x30\x34\x0d\x0a\x09\x09\x63\x2d\
\x30\x2e\x34\x39\x38\x2c\x30\x2e\x30\x31\x36\x2d\x30\x2e\x37\x36\
\x33\x2d\x30\x2e\x31\x36\x32\x2d\x30\x2e\x37\x39\x35\x2d\x30\x2e\
\x35\x33\x31\x6c\x2d\x30\x2e\x32\x34\x31\x2d\x32\x2e\x30\x34\x37\
\x63\x2d\x30\x2e\x30\x33\x32\x2d\x30\x2e\x33\x37\x2c\x30\x2e\x31\
\x33\x37\x2d\x30\x2e\x35\x35\x35\x2c\x30\x2e\x35\x30\x36\x2d\x30\
\x2e\x35\x35\x35\x68\x30\x2e\x39\x38\x38\x0d\x0a\x09\x09\x63\x30\
\x2e\x33\x30\x35\x2c\x30\x2e\x30\x31\x36\x2c\x30\x2e\x34\x35\x38\
\x2d\x30\x2e\x31\x32\x39\x2c\x30\x2e\x34\x35\x38\x2d\x30\x2e\x34\
\x33\x34\x76\x2d\x34\x2e\x33\x33\x38\x6c\x2d\x31\x2e\x36\x36\x32\
\x2c\x30\x2e\x34\x31\x63\x2d\x30\x2e\x33\x35\x34\x2c\x30\x2e\x30\
\x38\x2d\x30\x2e\x35\x33\x39\x2d\x30\x2e\x30\x36\x34\x2d\x30\x2e\
\x35\x35\x34\x2d\x30\x2e\x34\x33\x34\x6c\x2d\x30\x2e\x32\x34\x31\
\x2d\x32\x2e\x33\x33\x37\x0d\x0a\x09\x09\x63\x2d\x30\x2e\x30\x31\
\x36\x2d\x30\x2e\x33\x37\x2c\x30\x2e\x31\x36\x31\x2d\x30\x2e\x35\
\x39\x35\x2c\x30\x2e\x35\x33\x2d\x30\x2e\x36\x37\x35\x6c\x31\x2e\
\x39\x32\x37\x2d\x30\x2e\x33\x38\x36\x76\x2d\x33\x2e\x36\x36\x32\
\x68\x2d\x32\x2e\x31\x36\x38\x63\x2d\x30\x2e\x33\x37\x2c\x30\x2d\
\x30\x2e\x35\x32\x32\x2d\x30\x2e\x31\x37\x37\x2d\x30\x2e\x34\x35\
\x38\x2d\x30\x2e\x35\x33\x6c\x30\x2e\x34\x31\x2d\x32\x2e\x31\x36\
\x38\x0d\x0a\x09\x09\x63\x30\x2e\x30\x36\x34\x2d\x30\x2e\x33\x35\
\x33\x2c\x30\x2e\x32\x38\x31\x2d\x30\x2e\x35\x33\x2c\x30\x2e\x36\
\x35\x2d\x30\x2e\x35\x33\x68\x31\x2e\x35\x36\x36\x56\x36\x2e\x32\
\x38\x37\x63\x30\x2d\x30\x2e\x33\x37\x2c\x30\x2e\x31\x37\x37\x2d\
\x30\x2e\x35\x33\x2c\x30\x2e\x35\x33\x2d\x30\x2e\x34\x38\x32\x6c\
\x32\x2e\x33\x36\x31\x2c\x30\x2e\x34\x35\x38\x63\x30\x2e\x33\x35\
\x33\x2c\x30\x2e\x30\x34\x38\x2c\x30\x2e\x35\x33\x2c\x30\x2e\x32\
\x35\x37\x2c\x30\x2e\x35\x33\x2c\x30\x2e\x36\x32\x36\x76\x33\x2e\
\x33\x30\x31\x0d\x0a\x09\x09\x68\x32\x2e\x30\x39\x36\x63\x30\x2e\
\x33\x36\x39\x2c\x30\x2c\x30\x2e\x35\x32\x31\x2c\x30\x2e\x31\x37\
\x37\x2c\x30\x2e\x34\x35\x38\x2c\x30\x2e\x35\x33\x6c\x2d\x30\x2e\
\x34\x31\x2c\x32\x2e\x31\x36\x38\x63\x2d\x30\x2e\x30\x36\x34\x2c\
\x30\x2e\x33\x35\x34\x2d\x30\x2e\x32\x38\x31\x2c\x30\x2e\x35\x33\
\x2d\x30\x2e\x36\x35\x2c\x30\x2e\x35\x33\x68\x2d\x31\x2e\x34\x39\
\x34\x76\x32\x2e\x38\x39\x31\x4c\x34\x34\x2e\x35\x37\x36\x2c\x31\
\x35\x2e\x38\x37\x36\x7a\x20\x4d\x35\x31\x2e\x34\x39\x31\x2c\x31\
\x35\x2e\x32\x35\x0d\x0a\x09\x09\x63\x30\x2e\x32\x37\x33\x2c\x30\
\x2e\x32\x32\x35\x2c\x30\x2e\x32\x38\x39\x2c\x30\x2e\x34\x36\x36\
\x2c\x30\x2e\x30\x34\x38\x2c\x30\x2e\x37\x32\x33\x6c\x2d\x31\x2e\
\x35\x36\x35\x2c\x31\x2e\x36\x38\x38\x63\x2d\x30\x2e\x32\x34\x31\
\x2c\x30\x2e\x32\x35\x36\x2d\x30\x2e\x34\x37\x35\x2c\x30\x2e\x32\
\x38\x31\x2d\x30\x2e\x36\x39\x39\x2c\x30\x2e\x30\x37\x32\x63\x2d\
\x30\x2e\x33\x35\x34\x2d\x30\x2e\x34\x31\x38\x2d\x31\x2e\x33\x38\
\x32\x2d\x31\x2e\x31\x31\x37\x2d\x33\x2e\x30\x38\x34\x2d\x32\x2e\
\x30\x39\x37\x0d\x0a\x09\x09\x63\x2d\x30\x2e\x34\x31\x38\x2d\x30\
\x2e\x32\x35\x37\x2d\x30\x2e\x34\x39\x2d\x30\x2e\x34\x39\x38\x2d\
\x30\x2e\x32\x31\x37\x2d\x30\x2e\x37\x32\x33\x6c\x31\x2e\x36\x36\
\x32\x2d\x31\x2e\x34\x34\x36\x63\x30\x2e\x32\x37\x33\x2d\x30\x2e\
\x32\x32\x35\x2c\x30\x2e\x36\x31\x2d\x30\x2e\x32\x33\x32\x2c\x31\
\x2e\x30\x31\x33\x2d\x30\x2e\x30\x32\x34\x43\x34\x39\x2e\x38\x38\
\x35\x2c\x31\x34\x2e\x30\x35\x34\x2c\x35\x30\x2e\x38\x33\x32\x2c\
\x31\x34\x2e\x36\x35\x36\x2c\x35\x31\x2e\x34\x39\x31\x2c\x31\x35\
\x2e\x32\x35\x7a\x0d\x0a\x09\x09\x20\x4d\x35\x38\x2e\x39\x31\x32\
\x2c\x31\x38\x2e\x39\x36\x31\x76\x32\x2e\x32\x31\x37\x63\x30\x2c\
\x30\x2e\x33\x36\x39\x2d\x30\x2e\x31\x38\x35\x2c\x30\x2e\x35\x38\
\x36\x2d\x30\x2e\x35\x35\x34\x2c\x30\x2e\x36\x35\x6c\x2d\x31\x2e\
\x39\x32\x38\x2c\x30\x2e\x33\x33\x37\x76\x35\x2e\x38\x38\x63\x30\
\x2c\x30\x2e\x33\x36\x39\x2d\x30\x2e\x31\x37\x37\x2c\x30\x2e\x35\
\x32\x39\x2d\x30\x2e\x35\x33\x2c\x30\x2e\x34\x38\x6c\x2d\x32\x2e\
\x34\x30\x39\x2d\x30\x2e\x34\x35\x37\x0d\x0a\x09\x09\x63\x2d\x30\
\x2e\x33\x35\x34\x2d\x30\x2e\x30\x34\x39\x2d\x30\x2e\x35\x33\x2d\
\x30\x2e\x32\x35\x38\x2d\x30\x2e\x35\x33\x2d\x30\x2e\x36\x32\x37\
\x76\x2d\x34\x2e\x36\x39\x37\x6c\x2d\x36\x2e\x35\x30\x36\x2c\x31\
\x2e\x31\x33\x32\x63\x2d\x30\x2e\x33\x35\x34\x2c\x30\x2e\x30\x34\
\x38\x2d\x30\x2e\x35\x33\x2d\x30\x2e\x31\x31\x32\x2d\x30\x2e\x35\
\x33\x2d\x30\x2e\x34\x38\x31\x6c\x2d\x30\x2e\x30\x34\x38\x2d\x32\
\x2e\x33\x31\x33\x0d\x0a\x09\x09\x63\x30\x2d\x30\x2e\x33\x37\x2c\
\x30\x2e\x31\x38\x35\x2d\x30\x2e\x35\x37\x39\x2c\x30\x2e\x35\x35\
\x34\x2d\x30\x2e\x36\x32\x36\x6c\x36\x2e\x35\x32\x39\x2d\x31\x2e\
\x30\x38\x35\x56\x36\x2e\x32\x36\x33\x63\x30\x2d\x30\x2e\x33\x36\
\x39\x2c\x30\x2e\x31\x37\x37\x2d\x30\x2e\x35\x33\x2c\x30\x2e\x35\
\x33\x2d\x30\x2e\x34\x38\x32\x4c\x35\x35\x2e\x39\x2c\x36\x2e\x32\
\x33\x39\x63\x30\x2e\x33\x35\x34\x2c\x30\x2e\x30\x34\x38\x2c\x30\
\x2e\x35\x33\x2c\x30\x2e\x32\x35\x37\x2c\x30\x2e\x35\x33\x2c\x30\
\x2e\x36\x32\x36\x0d\x0a\x09\x09\x76\x31\x31\x2e\x39\x35\x31\x6c\
\x31\x2e\x39\x35\x32\x2d\x30\x2e\x33\x33\x38\x43\x35\x38\x2e\x37\
\x33\x35\x2c\x31\x38\x2e\x34\x33\x31\x2c\x35\x38\x2e\x39\x31\x32\
\x2c\x31\x38\x2e\x35\x39\x32\x2c\x35\x38\x2e\x39\x31\x32\x2c\x31\
\x38\x2e\x39\x36\x31\x7a\x20\x4d\x35\x32\x2e\x34\x35\x35\x2c\x39\
\x2e\x33\x39\x35\x63\x30\x2e\x32\x35\x37\x2c\x30\x2e\x32\x34\x31\
\x2c\x30\x2e\x32\x35\x37\x2c\x30\x2e\x34\x39\x2c\x30\x2c\x30\x2e\
\x37\x34\x37\x6c\x2d\x31\x2e\x35\x36\x36\x2c\x31\x2e\x34\x39\x34\
\x0d\x0a\x09\x09\x63\x2d\x30\x2e\x32\x35\x37\x2c\x30\x2e\x32\x35\
\x37\x2d\x30\x2e\x35\x30\x36\x2c\x30\x2e\x32\x37\x33\x2d\x30\x2e\
\x37\x34\x37\x2c\x30\x2e\x30\x34\x38\x63\x2d\x30\x2e\x33\x36\x39\
\x2d\x30\x2e\x34\x39\x38\x2d\x31\x2e\x33\x31\x36\x2d\x31\x2e\x32\
\x30\x35\x2d\x32\x2e\x38\x34\x33\x2d\x32\x2e\x31\x32\x63\x2d\x30\
\x2e\x33\x38\x35\x2d\x30\x2e\x32\x35\x37\x2d\x30\x2e\x34\x33\x34\
\x2d\x30\x2e\x34\x39\x38\x2d\x30\x2e\x31\x34\x35\x2d\x30\x2e\x37\
\x32\x33\x6c\x31\x2e\x36\x36\x32\x2d\x31\x2e\x33\x30\x31\x0d\x0a\
\x09\x09\x63\x30\x2e\x32\x39\x2d\x30\x2e\x32\x32\x35\x2c\x30\x2e\
\x36\x32\x37\x2d\x30\x2e\x32\x32\x35\x2c\x31\x2e\x30\x31\x33\x2c\
\x30\x43\x35\x31\x2e\x30\x31\x38\x2c\x38\x2e\x31\x36\x36\x2c\x35\
\x31\x2e\x38\x39\x33\x2c\x38\x2e\x37\x38\x35\x2c\x35\x32\x2e\x34\
\x35\x35\x2c\x39\x2e\x33\x39\x35\x7a\x22\x2f\x3e\x0d\x0a\x09\x3c\
\x70\x61\x74\x68\x20\x66\x69\x6c\x6c\x3d\x22\x23\x46\x46\x46\x46\
\x46\x46\x22\x20\x64\x3d\x22\x4d\x37\x38\x2e\x34\x37\x37\x2c\x31\
\x33\x2e\x32\x35\x68\x34\x2e\x31\x36\x39\x63\x30\x2e\x33\x36\x39\
\x2c\x30\x2c\x30\x2e\x35\x32\x31\x2c\x30\x2e\x31\x37\x37\x2c\x30\
\x2e\x34\x35\x38\x2c\x30\x2e\x35\x33\x6c\x2d\x30\x2e\x34\x31\x2c\
\x32\x2e\x31\x34\x35\x63\x2d\x30\x2e\x30\x36\x34\x2c\x30\x2e\x33\
\x35\x34\x2d\x30\x2e\x32\x38\x31\x2c\x30\x2e\x35\x33\x2d\x30\x2e\
\x36\x35\x2c\x30\x2e\x35\x33\x48\x36\x31\x2e\x32\x30\x31\x0d\x0a\
\x09\x09\x63\x2d\x30\x2e\x33\x37\x2c\x30\x2d\x30\x2e\x35\x32\x32\
\x2d\x30\x2e\x31\x37\x36\x2d\x30\x2e\x34\x35\x38\x2d\x30\x2e\x35\
\x33\x6c\x30\x2e\x34\x31\x2d\x32\x2e\x31\x34\x35\x63\x30\x2e\x30\
\x36\x33\x2d\x30\x2e\x33\x35\x33\x2c\x30\x2e\x32\x38\x2d\x30\x2e\
\x35\x33\x2c\x30\x2e\x36\x35\x2d\x30\x2e\x35\x33\x68\x33\x2e\x38\
\x37\x39\x6c\x2d\x30\x2e\x32\x31\x37\x2d\x30\x2e\x35\x35\x34\x0d\
\x0a\x09\x09\x63\x2d\x30\x2e\x33\x36\x39\x2d\x30\x2e\x39\x37\x39\
\x2d\x30\x2e\x30\x34\x2d\x31\x2e\x35\x35\x38\x2c\x30\x2e\x39\x38\
\x38\x2d\x31\x2e\x37\x33\x34\x6c\x30\x2e\x37\x32\x33\x2d\x30\x2e\
\x31\x34\x35\x68\x2d\x34\x2e\x36\x30\x33\x63\x2d\x30\x2e\x33\x36\
\x39\x2c\x30\x2d\x30\x2e\x35\x32\x31\x2d\x30\x2e\x31\x37\x36\x2d\
\x30\x2e\x34\x35\x37\x2d\x30\x2e\x35\x33\x6c\x30\x2e\x34\x30\x39\
\x2d\x32\x2e\x31\x34\x35\x0d\x0a\x09\x09\x63\x30\x2e\x30\x36\x34\
\x2d\x30\x2e\x33\x35\x33\x2c\x30\x2e\x32\x38\x31\x2d\x30\x2e\x35\
\x33\x2c\x30\x2e\x36\x35\x2d\x30\x2e\x35\x33\x68\x36\x2e\x39\x33\
\x39\x6c\x2d\x30\x2e\x34\x38\x32\x2d\x31\x2e\x31\x33\x33\x63\x2d\
\x30\x2e\x31\x32\x38\x2d\x30\x2e\x33\x33\x37\x2d\x30\x2e\x30\x30\
\x38\x2d\x30\x2e\x35\x30\x36\x2c\x30\x2e\x33\x36\x32\x2d\x30\x2e\
\x35\x30\x36\x6c\x32\x2e\x39\x36\x33\x2d\x30\x2e\x30\x32\x34\x0d\
\x0a\x09\x09\x63\x30\x2e\x33\x36\x39\x2c\x30\x2c\x30\x2e\x36\x32\
\x37\x2c\x30\x2e\x31\x37\x37\x2c\x30\x2e\x37\x37\x31\x2c\x30\x2e\
\x35\x33\x6c\x30\x2e\x35\x30\x36\x2c\x31\x2e\x31\x33\x33\x68\x37\
\x2e\x33\x32\x34\x63\x30\x2e\x33\x37\x2c\x30\x2c\x30\x2e\x35\x32\
\x32\x2c\x30\x2e\x31\x37\x37\x2c\x30\x2e\x34\x35\x38\x2c\x30\x2e\
\x35\x33\x6c\x2d\x30\x2e\x34\x30\x39\x2c\x32\x2e\x31\x34\x35\x63\
\x2d\x30\x2e\x30\x36\x34\x2c\x30\x2e\x33\x35\x34\x2d\x30\x2e\x32\
\x38\x31\x2c\x30\x2e\x35\x33\x2d\x30\x2e\x36\x35\x2c\x30\x2e\x35\
\x33\x0d\x0a\x09\x09\x48\x37\x37\x2e\x32\x6c\x30\x2e\x36\x37\x35\
\x2c\x30\x2e\x31\x34\x35\x63\x31\x2e\x30\x32\x37\x2c\x30\x2e\x31\
\x39\x32\x2c\x31\x2e\x33\x32\x35\x2c\x30\x2e\x37\x36\x33\x2c\x30\
\x2e\x38\x39\x31\x2c\x31\x2e\x37\x31\x4c\x37\x38\x2e\x34\x37\x37\
\x2c\x31\x33\x2e\x32\x35\x7a\x20\x4d\x36\x34\x2e\x38\x36\x33\x2c\
\x32\x38\x2e\x36\x32\x33\x63\x2d\x30\x2e\x38\x38\x34\x2c\x30\x2d\
\x31\x2e\x33\x32\x35\x2d\x30\x2e\x34\x34\x32\x2d\x31\x2e\x33\x32\
\x35\x2d\x31\x2e\x33\x32\x36\x76\x2d\x38\x2e\x33\x33\x36\x0d\x0a\
\x09\x09\x63\x30\x2d\x30\x2e\x38\x38\x34\x2c\x30\x2e\x34\x34\x31\
\x2d\x31\x2e\x33\x32\x36\x2c\x31\x2e\x33\x32\x35\x2d\x31\x2e\x33\
\x32\x36\x68\x31\x34\x2e\x34\x30\x39\x63\x30\x2e\x38\x38\x33\x2c\
\x30\x2c\x31\x2e\x33\x32\x35\x2c\x30\x2e\x34\x34\x32\x2c\x31\x2e\
\x33\x32\x35\x2c\x31\x2e\x33\x32\x36\x76\x38\x2e\x33\x33\x36\x63\
\x30\x2c\x30\x2e\x38\x38\x34\x2d\x30\x2e\x34\x34\x32\x2c\x31\x2e\
\x33\x32\x36\x2d\x31\x2e\x33\x32\x35\x2c\x31\x2e\x33\x32\x36\x48\
\x36\x34\x2e\x38\x36\x33\x7a\x0d\x0a\x09\x09\x20\x4d\x37\x36\x2e\
\x37\x36\x37\x2c\x32\x31\x2e\x32\x39\x38\x63\x30\x2d\x30\x2e\x34\
\x36\x36\x2d\x30\x2e\x32\x33\x33\x2d\x30\x2e\x36\x39\x38\x2d\x30\
\x2e\x36\x39\x39\x2d\x30\x2e\x36\x39\x38\x68\x2d\x38\x2e\x31\x39\
\x32\x63\x2d\x30\x2e\x34\x36\x36\x2c\x30\x2d\x30\x2e\x36\x39\x38\
\x2c\x30\x2e\x32\x33\x32\x2d\x30\x2e\x36\x39\x38\x2c\x30\x2e\x36\
\x39\x38\x76\x30\x2e\x34\x35\x38\x68\x39\x2e\x35\x39\x56\x32\x31\
\x2e\x32\x39\x38\x7a\x20\x4d\x36\x37\x2e\x31\x37\x37\x2c\x32\x34\
\x2e\x39\x38\x34\x0d\x0a\x09\x09\x63\x30\x2c\x30\x2e\x34\x36\x36\
\x2c\x30\x2e\x32\x33\x32\x2c\x30\x2e\x36\x39\x39\x2c\x30\x2e\x36\
\x39\x38\x2c\x30\x2e\x36\x39\x39\x68\x38\x2e\x31\x39\x32\x63\x30\
\x2e\x34\x36\x36\x2c\x30\x2c\x30\x2e\x36\x39\x39\x2d\x30\x2e\x32\
\x33\x33\x2c\x30\x2e\x36\x39\x39\x2d\x30\x2e\x36\x39\x39\x56\x32\
\x34\x2e\x34\x33\x68\x2d\x39\x2e\x35\x39\x56\x32\x34\x2e\x39\x38\
\x34\x7a\x20\x4d\x37\x35\x2e\x32\x34\x38\x2c\x31\x30\x2e\x38\x31\
\x36\x68\x2d\x36\x2e\x35\x37\x37\x6c\x30\x2e\x39\x38\x37\x2c\x32\
\x2e\x34\x33\x34\x0d\x0a\x09\x09\x68\x34\x2e\x38\x36\x37\x4c\x37\
\x35\x2e\x32\x34\x38\x2c\x31\x30\x2e\x38\x31\x36\x7a\x22\x2f\x3e\
\x0d\x0a\x09\x3c\x70\x61\x74\x68\x20\x66\x69\x6c\x6c\x3d\x22\x23\
\x46\x46\x46\x46\x46\x46\x22\x20\x64\x3d\x22\x4d\x31\x30\x37\x2e\
\x31\x30\x32\x2c\x31\x35\x2e\x36\x33\x36\x6c\x2d\x30\x2e\x34\x35\
\x38\x2c\x32\x2e\x34\x38\x31\x63\x2d\x30\x2e\x30\x34\x38\x2c\x30\
\x2e\x33\x35\x34\x2d\x30\x2e\x32\x35\x37\x2c\x30\x2e\x35\x33\x2d\
\x30\x2e\x36\x32\x36\x2c\x30\x2e\x35\x33\x48\x39\x35\x2e\x37\x35\
\x33\x0d\x0a\x09\x09\x63\x2d\x31\x2e\x31\x35\x36\x2c\x31\x2e\x38\
\x2d\x32\x2e\x33\x35\x34\x2c\x33\x2e\x33\x37\x33\x2d\x33\x2e\x35\
\x39\x2c\x34\x2e\x37\x32\x33\x63\x2d\x30\x2e\x33\x30\x36\x2c\x30\
\x2e\x33\x35\x33\x2d\x30\x2e\x32\x33\x33\x2c\x30\x2e\x35\x32\x32\
\x2c\x30\x2e\x32\x31\x37\x2c\x30\x2e\x35\x30\x36\x6c\x37\x2e\x38\
\x35\x34\x2d\x30\x2e\x34\x33\x34\x6c\x2d\x31\x2e\x38\x37\x39\x2d\
\x32\x2e\x36\x32\x36\x0d\x0a\x09\x09\x63\x2d\x30\x2e\x32\x30\x39\
\x2d\x30\x2e\x32\x38\x39\x2d\x30\x2e\x31\x33\x37\x2d\x30\x2e\x34\
\x37\x35\x2c\x30\x2e\x32\x31\x37\x2d\x30\x2e\x35\x35\x35\x6c\x32\
\x2e\x35\x35\x34\x2d\x30\x2e\x37\x32\x33\x63\x30\x2e\x33\x35\x34\
\x2d\x30\x2e\x30\x38\x2c\x30\x2e\x36\x34\x33\x2c\x30\x2e\x30\x31\
\x36\x2c\x30\x2e\x38\x36\x37\x2c\x30\x2e\x32\x38\x39\x63\x30\x2e\
\x35\x36\x33\x2c\x30\x2e\x36\x34\x33\x2c\x31\x2e\x33\x33\x33\x2c\
\x31\x2e\x36\x39\x35\x2c\x32\x2e\x33\x31\x33\x2c\x33\x2e\x31\x35\
\x36\x0d\x0a\x09\x09\x63\x30\x2e\x39\x37\x39\x2c\x31\x2e\x34\x34\
\x35\x2c\x31\x2e\x36\x36\x32\x2c\x32\x2e\x35\x38\x36\x2c\x32\x2e\
\x30\x34\x38\x2c\x33\x2e\x34\x32\x32\x63\x30\x2e\x31\x34\x35\x2c\
\x30\x2e\x32\x38\x39\x2c\x30\x2e\x30\x34\x38\x2c\x30\x2e\x34\x38\
\x39\x2d\x30\x2e\x32\x38\x39\x2c\x30\x2e\x36\x30\x32\x6c\x2d\x32\
\x2e\x36\x39\x38\x2c\x30\x2e\x38\x39\x32\x63\x2d\x30\x2e\x33\x33\
\x38\x2c\x30\x2e\x31\x31\x32\x2d\x30\x2e\x35\x39\x35\x2c\x30\x2d\
\x30\x2e\x37\x37\x31\x2d\x30\x2e\x33\x33\x37\x0d\x0a\x09\x09\x6c\
\x2d\x30\x2e\x34\x33\x34\x2d\x30\x2e\x39\x31\x36\x63\x2d\x38\x2e\
\x30\x31\x36\x2c\x30\x2e\x36\x31\x2d\x31\x32\x2e\x34\x39\x37\x2c\
\x30\x2e\x39\x38\x38\x2d\x31\x33\x2e\x34\x34\x34\x2c\x31\x2e\x31\
\x33\x33\x6c\x2d\x30\x2e\x39\x34\x2c\x30\x2e\x32\x34\x63\x2d\x30\
\x2e\x33\x35\x34\x2c\x30\x2e\x30\x38\x2d\x30\x2e\x35\x36\x33\x2d\
\x30\x2e\x30\x35\x36\x2d\x30\x2e\x36\x32\x36\x2d\x30\x2e\x34\x30\
\x39\x6c\x2d\x30\x2e\x35\x37\x38\x2d\x32\x2e\x35\x33\x0d\x0a\x09\
\x09\x63\x2d\x30\x2e\x30\x36\x34\x2d\x30\x2e\x33\x35\x33\x2c\x30\
\x2e\x30\x34\x2d\x30\x2e\x36\x32\x36\x2c\x30\x2e\x33\x31\x33\x2d\
\x30\x2e\x38\x31\x38\x63\x2d\x30\x2e\x30\x34\x38\x2c\x30\x2e\x31\
\x36\x2c\x30\x2e\x33\x30\x36\x2d\x30\x2e\x31\x36\x2c\x31\x2e\x30\
\x36\x31\x2d\x30\x2e\x39\x36\x35\x63\x30\x2e\x33\x33\x37\x2d\x30\
\x2e\x33\x33\x36\x2c\x30\x2e\x38\x33\x35\x2d\x30\x2e\x39\x34\x37\
\x2c\x31\x2e\x34\x39\x34\x2d\x31\x2e\x38\x33\x6c\x31\x2e\x38\x33\
\x31\x2d\x32\x2e\x38\x31\x39\x68\x2d\x35\x2e\x37\x38\x33\x0d\x0a\
\x09\x09\x63\x2d\x30\x2e\x33\x36\x39\x2c\x30\x2d\x30\x2e\x35\x32\
\x39\x2d\x30\x2e\x31\x37\x37\x2d\x30\x2e\x34\x38\x31\x2d\x30\x2e\
\x35\x33\x6c\x30\x2e\x34\x35\x38\x2d\x32\x2e\x34\x38\x31\x63\x30\
\x2e\x30\x34\x38\x2d\x30\x2e\x33\x35\x34\x2c\x30\x2e\x32\x35\x37\
\x2d\x30\x2e\x35\x33\x2c\x30\x2e\x36\x32\x36\x2d\x30\x2e\x35\x33\
\x68\x38\x2e\x30\x34\x38\x76\x2d\x32\x2e\x37\x39\x35\x68\x2d\x36\
\x2e\x37\x39\x35\x0d\x0a\x09\x09\x63\x2d\x30\x2e\x33\x36\x39\x2c\
\x30\x2d\x30\x2e\x35\x33\x2d\x30\x2e\x31\x37\x37\x2d\x30\x2e\x34\
\x38\x31\x2d\x30\x2e\x35\x33\x6c\x30\x2e\x34\x33\x34\x2d\x32\x2e\
\x34\x35\x38\x63\x30\x2e\x30\x34\x38\x2d\x30\x2e\x33\x35\x33\x2c\
\x30\x2e\x32\x36\x35\x2d\x30\x2e\x35\x33\x2c\x30\x2e\x36\x35\x2d\
\x30\x2e\x35\x33\x68\x36\x2e\x31\x39\x32\x76\x2d\x32\x2e\x35\x33\
\x63\x30\x2d\x30\x2e\x33\x36\x39\x2c\x30\x2e\x31\x37\x37\x2d\x30\
\x2e\x35\x33\x2c\x30\x2e\x35\x33\x2d\x30\x2e\x34\x38\x32\x0d\x0a\
\x09\x09\x6c\x32\x2e\x36\x37\x34\x2c\x30\x2e\x34\x35\x38\x63\x30\
\x2e\x33\x35\x34\x2c\x30\x2e\x30\x34\x38\x2c\x30\x2e\x35\x33\x2c\
\x30\x2e\x32\x35\x37\x2c\x30\x2e\x35\x33\x2c\x30\x2e\x36\x32\x36\
\x76\x31\x2e\x39\x32\x37\x68\x37\x2e\x30\x33\x36\x63\x30\x2e\x33\
\x36\x39\x2c\x30\x2c\x30\x2e\x35\x32\x39\x2c\x30\x2e\x31\x37\x37\
\x2c\x30\x2e\x34\x38\x31\x2c\x30\x2e\x35\x33\x6c\x2d\x30\x2e\x34\
\x35\x38\x2c\x32\x2e\x34\x35\x38\x0d\x0a\x09\x09\x63\x2d\x30\x2e\
\x30\x34\x38\x2c\x30\x2e\x33\x35\x34\x2d\x30\x2e\x32\x35\x37\x2c\
\x30\x2e\x35\x33\x2d\x30\x2e\x36\x32\x36\x2c\x30\x2e\x35\x33\x68\
\x2d\x36\x2e\x34\x33\x34\x76\x32\x2e\x37\x39\x35\x68\x38\x2e\x37\
\x34\x36\x43\x31\x30\x36\x2e\x39\x38\x39\x2c\x31\x35\x2e\x31\x30\
\x35\x2c\x31\x30\x37\x2e\x31\x34\x39\x2c\x31\x35\x2e\x32\x38\x32\
\x2c\x31\x30\x37\x2e\x31\x30\x32\x2c\x31\x35\x2e\x36\x33\x36\x7a\
\x22\x2f\x3e\x0d\x0a\x09\x3c\x70\x61\x74\x68\x20\x66\x69\x6c\x6c\
\x3d\x22\x23\x46\x46\x46\x46\x46\x46\x22\x20\x64\x3d\x22\x4d\x31\
\x31\x35\x2e\x31\x39\x37\x2c\x31\x31\x2e\x33\x37\x31\x63\x30\x2e\
\x33\x36\x39\x2c\x30\x2c\x30\x2e\x37\x30\x37\x2c\x30\x2e\x31\x30\
\x35\x2c\x31\x2e\x30\x31\x32\x2c\x30\x2e\x33\x31\x33\x6c\x31\x2e\
\x33\x30\x32\x2c\x30\x2e\x38\x39\x32\x63\x30\x2e\x33\x30\x35\x2c\
\x30\x2e\x32\x30\x39\x2c\x30\x2e\x34\x32\x35\x2c\x30\x2e\x35\x30\
\x36\x2c\x30\x2e\x33\x36\x31\x2c\x30\x2e\x38\x39\x31\x0d\x0a\x09\
\x09\x63\x2d\x30\x2e\x33\x37\x2c\x32\x2e\x35\x30\x36\x2d\x31\x2e\
\x31\x35\x36\x2c\x35\x2e\x30\x30\x34\x2d\x32\x2e\x33\x36\x31\x2c\
\x37\x2e\x34\x39\x34\x63\x2d\x31\x2e\x31\x38\x39\x2c\x32\x2e\x34\
\x37\x33\x2d\x32\x2e\x34\x39\x38\x2c\x34\x2e\x32\x35\x36\x2d\x33\
\x2e\x39\x32\x38\x2c\x35\x2e\x33\x34\x39\x63\x2d\x30\x2e\x32\x35\
\x37\x2c\x30\x2e\x32\x30\x38\x2d\x30\x2e\x34\x39\x38\x2c\x30\x2e\
\x31\x37\x37\x2d\x30\x2e\x37\x32\x33\x2d\x30\x2e\x30\x39\x37\x6c\
\x2d\x31\x2e\x37\x33\x34\x2d\x32\x2e\x31\x39\x32\x0d\x0a\x09\x09\
\x63\x2d\x30\x2e\x32\x32\x36\x2d\x30\x2e\x32\x37\x32\x2d\x30\x2e\
\x32\x30\x31\x2d\x30\x2e\x35\x32\x32\x2c\x30\x2e\x30\x37\x32\x2d\
\x30\x2e\x37\x34\x37\x63\x32\x2e\x30\x38\x38\x2d\x31\x2e\x34\x31\
\x33\x2c\x33\x2e\x35\x36\x35\x2d\x33\x2e\x38\x36\x33\x2c\x34\x2e\
\x34\x33\x33\x2d\x37\x2e\x33\x34\x39\x63\x30\x2e\x31\x36\x31\x2d\
\x30\x2e\x35\x39\x34\x2c\x30\x2e\x30\x30\x39\x2d\x30\x2e\x38\x39\
\x32\x2d\x30\x2e\x34\x35\x37\x2d\x30\x2e\x38\x39\x32\x68\x2d\x33\
\x2e\x33\x35\x0d\x0a\x09\x09\x63\x2d\x30\x2e\x33\x36\x39\x2c\x30\
\x2d\x30\x2e\x35\x33\x2d\x30\x2e\x31\x37\x37\x2d\x30\x2e\x34\x38\
\x31\x2d\x30\x2e\x35\x33\x6c\x30\x2e\x34\x35\x37\x2d\x32\x2e\x34\
\x38\x31\x63\x30\x2e\x30\x34\x39\x2d\x30\x2e\x33\x35\x33\x2c\x30\
\x2e\x32\x35\x37\x2d\x30\x2e\x35\x33\x2c\x30\x2e\x36\x32\x37\x2d\
\x30\x2e\x35\x33\x4c\x31\x31\x35\x2e\x31\x39\x37\x2c\x31\x31\x2e\
\x33\x37\x31\x7a\x20\x4d\x31\x32\x36\x2e\x38\x31\x31\x2c\x31\x30\
\x2e\x31\x39\x0d\x0a\x09\x09\x63\x30\x2e\x32\x30\x39\x2d\x30\x2e\
\x32\x37\x33\x2c\x30\x2e\x34\x34\x32\x2d\x30\x2e\x32\x38\x31\x2c\
\x30\x2e\x36\x39\x39\x2d\x30\x2e\x30\x32\x34\x6c\x31\x2e\x39\x37\
\x36\x2c\x32\x63\x30\x2e\x32\x35\x37\x2c\x30\x2e\x32\x35\x37\x2c\
\x30\x2e\x32\x34\x31\x2c\x30\x2e\x35\x33\x2d\x30\x2e\x30\x34\x38\
\x2c\x30\x2e\x38\x31\x39\x63\x2d\x31\x2e\x34\x33\x2c\x31\x2e\x33\
\x39\x37\x2d\x33\x2e\x30\x32\x31\x2c\x32\x2e\x37\x36\x33\x2d\x34\
\x2e\x37\x37\x31\x2c\x34\x2e\x30\x39\x36\x0d\x0a\x09\x09\x63\x31\
\x2e\x38\x36\x32\x2c\x32\x2e\x38\x34\x33\x2c\x34\x2e\x30\x38\x37\
\x2c\x34\x2e\x38\x32\x37\x2c\x36\x2e\x36\x37\x34\x2c\x35\x2e\x39\
\x35\x32\x63\x30\x2e\x33\x35\x33\x2c\x30\x2e\x31\x34\x35\x2c\x30\
\x2e\x34\x30\x31\x2c\x30\x2e\x33\x33\x37\x2c\x30\x2e\x31\x34\x35\
\x2c\x30\x2e\x35\x37\x38\x4c\x31\x32\x39\x2e\x31\x2c\x32\x35\x2e\
\x39\x63\x2d\x30\x2e\x32\x35\x37\x2c\x30\x2e\x32\x34\x2d\x30\x2e\
\x35\x33\x38\x2c\x30\x2e\x32\x36\x35\x2d\x30\x2e\x38\x34\x33\x2c\
\x30\x2e\x30\x37\x32\x0d\x0a\x09\x09\x63\x2d\x32\x2e\x32\x34\x39\
\x2d\x31\x2e\x31\x37\x33\x2d\x34\x2e\x32\x39\x37\x2d\x33\x2e\x34\
\x31\x34\x2d\x36\x2e\x31\x34\x35\x2d\x36\x2e\x37\x32\x33\x76\x35\
\x2e\x30\x31\x32\x63\x30\x2c\x30\x2e\x39\x39\x36\x2d\x30\x2e\x31\
\x30\x34\x2c\x31\x2e\x37\x36\x37\x2d\x30\x2e\x33\x31\x33\x2c\x32\
\x2e\x33\x31\x33\x63\x2d\x30\x2e\x32\x30\x38\x2c\x30\x2e\x35\x34\
\x36\x2d\x30\x2e\x35\x37\x38\x2c\x30\x2e\x39\x37\x32\x2d\x31\x2e\
\x31\x30\x37\x2c\x31\x2e\x32\x37\x37\x0d\x0a\x09\x09\x63\x2d\x30\
\x2e\x39\x33\x33\x2c\x30\x2e\x35\x32\x39\x2d\x32\x2e\x32\x35\x38\
\x2c\x30\x2e\x37\x38\x37\x2d\x33\x2e\x39\x37\x36\x2c\x30\x2e\x37\
\x37\x31\x63\x2d\x30\x2e\x34\x38\x32\x2c\x30\x2e\x30\x31\x36\x2d\
\x30\x2e\x37\x35\x36\x2d\x30\x2e\x31\x35\x33\x2d\x30\x2e\x38\x31\
\x39\x2d\x30\x2e\x35\x30\x36\x6c\x2d\x30\x2e\x36\x30\x33\x2d\x32\
\x2e\x38\x32\x63\x2d\x30\x2e\x30\x36\x34\x2d\x30\x2e\x33\x35\x33\
\x2c\x30\x2e\x30\x38\x2d\x30\x2e\x35\x32\x39\x2c\x30\x2e\x34\x33\
\x34\x2d\x30\x2e\x35\x32\x39\x0d\x0a\x09\x09\x63\x30\x2e\x31\x37\
\x37\x2c\x30\x2e\x30\x31\x36\x2c\x30\x2e\x38\x36\x37\x2c\x30\x2e\
\x30\x32\x33\x2c\x32\x2e\x30\x37\x32\x2c\x30\x2e\x30\x32\x33\x63\
\x30\x2e\x33\x38\x36\x2c\x30\x2c\x30\x2e\x35\x37\x38\x2d\x30\x2e\
\x31\x38\x34\x2c\x30\x2e\x35\x37\x38\x2d\x30\x2e\x35\x35\x34\x56\
\x36\x2e\x32\x38\x37\x63\x30\x2d\x30\x2e\x33\x37\x2c\x30\x2e\x31\
\x37\x37\x2d\x30\x2e\x35\x33\x2c\x30\x2e\x35\x33\x2d\x30\x2e\x34\
\x38\x32\x6c\x32\x2e\x36\x37\x34\x2c\x30\x2e\x34\x35\x38\x0d\x0a\
\x09\x09\x63\x30\x2e\x33\x35\x34\x2c\x30\x2e\x30\x34\x38\x2c\x30\
\x2e\x35\x33\x2c\x30\x2e\x32\x35\x37\x2c\x30\x2e\x35\x33\x2c\x30\
\x2e\x36\x32\x36\x76\x34\x2e\x37\x39\x35\x63\x30\x2e\x32\x34\x31\
\x2c\x30\x2e\x37\x35\x35\x2c\x30\x2e\x35\x37\x2c\x31\x2e\x36\x31\
\x34\x2c\x30\x2e\x39\x38\x38\x2c\x32\x2e\x35\x37\x38\x43\x31\x32\
\x34\x2e\x37\x33\x39\x2c\x31\x32\x2e\x37\x33\x36\x2c\x31\x32\x35\
\x2e\x39\x37\x36\x2c\x31\x31\x2e\x33\x37\x39\x2c\x31\x32\x36\x2e\
\x38\x31\x31\x2c\x31\x30\x2e\x31\x39\x7a\x22\x2f\x3e\x0d\x0a\x09\
\x3c\x70\x61\x74\x68\x20\x66\x69\x6c\x6c\x3d\x22\x23\x46\x46\x46\
\x46\x46\x46\x22\x20\x64\x3d\x22\x4d\x31\x34\x30\x2e\x38\x35\x38\
\x2c\x36\x2e\x35\x35\x32\x63\x30\x2e\x33\x30\x35\x2d\x30\x2e\x31\
\x34\x35\x2c\x30\x2e\x35\x36\x32\x2d\x30\x2e\x30\x37\x32\x2c\x30\
\x2e\x37\x37\x31\x2c\x30\x2e\x32\x31\x37\x6c\x31\x2e\x34\x39\x34\
\x2c\x32\x2e\x31\x39\x33\x63\x30\x2e\x32\x30\x39\x2c\x30\x2e\x32\
\x38\x39\x2c\x30\x2e\x31\x36\x2c\x30\x2e\x34\x38\x32\x2d\x30\x2e\
\x31\x34\x35\x2c\x30\x2e\x35\x37\x38\x0d\x0a\x09\x09\x63\x2d\x30\
\x2e\x39\x36\x34\x2c\x30\x2e\x33\x37\x2d\x32\x2e\x33\x36\x39\x2c\
\x30\x2e\x37\x36\x33\x2d\x34\x2e\x32\x31\x37\x2c\x31\x2e\x31\x38\
\x31\x63\x2d\x30\x2e\x36\x31\x2c\x30\x2e\x31\x32\x39\x2d\x30\x2e\
\x39\x31\x35\x2c\x30\x2e\x34\x32\x36\x2d\x30\x2e\x39\x31\x35\x2c\
\x30\x2e\x38\x39\x32\x76\x32\x2e\x30\x39\x36\x68\x34\x2e\x39\x31\
\x35\x63\x30\x2e\x33\x36\x39\x2c\x30\x2c\x30\x2e\x35\x32\x31\x2c\
\x30\x2e\x31\x37\x37\x2c\x30\x2e\x34\x35\x38\x2c\x30\x2e\x35\x33\
\x0d\x0a\x09\x09\x6c\x2d\x30\x2e\x34\x31\x2c\x32\x2e\x33\x31\x33\
\x63\x2d\x30\x2e\x30\x36\x34\x2c\x30\x2e\x33\x35\x34\x2d\x30\x2e\
\x32\x38\x31\x2c\x30\x2e\x35\x33\x2d\x30\x2e\x36\x35\x2c\x30\x2e\
\x35\x33\x68\x2d\x34\x2e\x33\x31\x33\x76\x33\x2e\x36\x31\x34\x63\
\x30\x2c\x30\x2e\x34\x36\x37\x2c\x30\x2e\x32\x32\x35\x2c\x30\x2e\
\x36\x35\x38\x2c\x30\x2e\x36\x37\x35\x2c\x30\x2e\x35\x37\x38\x6c\
\x34\x2e\x32\x34\x2d\x30\x2e\x37\x37\x31\x0d\x0a\x09\x09\x63\x30\
\x2e\x33\x35\x34\x2d\x30\x2e\x30\x36\x33\x2c\x30\x2e\x34\x39\x38\
\x2c\x30\x2e\x30\x38\x31\x2c\x30\x2e\x34\x33\x34\x2c\x30\x2e\x34\
\x33\x35\x6c\x2d\x30\x2e\x34\x38\x31\x2c\x32\x2e\x34\x33\x34\x63\
\x2d\x30\x2e\x30\x36\x34\x2c\x30\x2e\x33\x35\x33\x2d\x30\x2e\x33\
\x34\x36\x2c\x30\x2e\x35\x37\x38\x2d\x30\x2e\x38\x34\x34\x2c\x30\
\x2e\x36\x37\x35\x63\x2d\x32\x2e\x32\x39\x37\x2c\x30\x2e\x34\x38\
\x31\x2d\x34\x2e\x30\x32\x33\x2c\x30\x2e\x38\x37\x35\x2d\x35\x2e\
\x31\x38\x31\x2c\x31\x2e\x31\x38\x0d\x0a\x09\x09\x63\x2d\x31\x2e\
\x31\x35\x36\x2c\x30\x2e\x32\x38\x39\x2d\x31\x2e\x38\x35\x34\x2c\
\x30\x2e\x35\x30\x36\x2d\x32\x2e\x30\x39\x36\x2c\x30\x2e\x36\x35\
\x63\x2d\x30\x2e\x32\x32\x36\x2c\x30\x2e\x31\x31\x33\x2d\x30\x2e\
\x33\x37\x2d\x30\x2e\x30\x30\x38\x2d\x30\x2e\x34\x33\x34\x2d\x30\
\x2e\x33\x36\x31\x6c\x2d\x30\x2e\x35\x35\x35\x2d\x33\x2e\x30\x31\
\x32\x63\x2d\x30\x2e\x30\x36\x34\x2d\x30\x2e\x33\x35\x33\x2c\x30\
\x2e\x30\x34\x39\x2d\x30\x2e\x36\x35\x38\x2c\x30\x2e\x33\x33\x38\
\x2d\x30\x2e\x39\x31\x35\x0d\x0a\x09\x09\x63\x30\x2e\x32\x32\x35\
\x2c\x30\x2e\x30\x39\x37\x2c\x30\x2e\x33\x33\x37\x2d\x30\x2e\x32\
\x34\x31\x2c\x30\x2e\x33\x33\x37\x2d\x31\x2e\x30\x31\x33\x56\x39\
\x2e\x38\x35\x33\x63\x30\x2d\x30\x2e\x38\x33\x35\x2c\x30\x2e\x34\
\x37\x34\x2d\x31\x2e\x33\x35\x37\x2c\x31\x2e\x34\x32\x32\x2d\x31\
\x2e\x35\x36\x36\x43\x31\x33\x37\x2e\x34\x36\x39\x2c\x37\x2e\x39\
\x30\x31\x2c\x31\x33\x39\x2e\x31\x38\x38\x2c\x37\x2e\x33\x32\x33\
\x2c\x31\x34\x30\x2e\x38\x35\x38\x2c\x36\x2e\x35\x35\x32\x7a\x0d\
\x0a\x09\x09\x20\x4d\x31\x35\x33\x2e\x33\x33\x39\x2c\x37\x2e\x33\
\x34\x37\x63\x30\x2e\x38\x38\x34\x2c\x30\x2c\x31\x2e\x33\x32\x35\
\x2c\x30\x2e\x34\x34\x32\x2c\x31\x2e\x33\x32\x35\x2c\x31\x2e\x33\
\x32\x35\x56\x32\x31\x2e\x34\x39\x63\x30\x2c\x31\x2e\x36\x35\x34\
\x2d\x30\x2e\x34\x31\x38\x2c\x32\x2e\x37\x33\x39\x2d\x31\x2e\x32\
\x35\x33\x2c\x33\x2e\x32\x35\x33\x63\x2d\x30\x2e\x37\x37\x31\x2c\
\x30\x2e\x34\x38\x31\x2d\x31\x2e\x37\x37\x34\x2c\x30\x2e\x37\x32\
\x34\x2d\x33\x2e\x30\x31\x32\x2c\x30\x2e\x37\x32\x34\x0d\x0a\x09\
\x09\x63\x2d\x30\x2e\x34\x38\x31\x2c\x30\x2d\x30\x2e\x37\x36\x33\
\x2d\x30\x2e\x31\x37\x38\x2d\x30\x2e\x38\x34\x33\x2d\x30\x2e\x35\
\x33\x31\x6c\x2d\x30\x2e\x35\x37\x38\x2d\x32\x2e\x36\x32\x36\x63\
\x2d\x30\x2e\x30\x38\x31\x2d\x30\x2e\x33\x35\x33\x2c\x30\x2e\x30\
\x35\x36\x2d\x30\x2e\x35\x32\x31\x2c\x30\x2e\x34\x30\x39\x2d\x30\
\x2e\x35\x30\x36\x63\x2d\x30\x2e\x30\x38\x2c\x30\x2c\x30\x2e\x33\
\x35\x34\x2c\x30\x2c\x31\x2e\x33\x30\x31\x2c\x30\x0d\x0a\x09\x09\
\x63\x30\x2e\x32\x38\x39\x2c\x30\x2c\x30\x2e\x34\x33\x34\x2d\x30\
\x2e\x31\x34\x35\x2c\x30\x2e\x34\x33\x34\x2d\x30\x2e\x34\x33\x34\
\x76\x2d\x39\x2e\x38\x35\x34\x63\x30\x2d\x30\x2e\x34\x36\x36\x2d\
\x30\x2e\x32\x33\x32\x2d\x30\x2e\x36\x39\x39\x2d\x30\x2e\x36\x39\
\x38\x2d\x30\x2e\x36\x39\x39\x68\x2d\x31\x2e\x37\x38\x33\x63\x2d\
\x30\x2e\x34\x36\x36\x2c\x30\x2d\x30\x2e\x36\x39\x38\x2c\x30\x2e\
\x32\x33\x33\x2d\x30\x2e\x36\x39\x38\x2c\x30\x2e\x36\x39\x39\x56\
\x32\x38\x2e\x30\x32\x0d\x0a\x09\x09\x63\x30\x2c\x30\x2e\x33\x37\
\x2d\x30\x2e\x31\x37\x38\x2c\x30\x2e\x35\x33\x31\x2d\x30\x2e\x35\
\x33\x2c\x30\x2e\x34\x38\x32\x6c\x2d\x32\x2e\x34\x38\x31\x2d\x30\
\x2e\x34\x35\x37\x63\x2d\x30\x2e\x33\x35\x34\x2d\x30\x2e\x30\x34\
\x39\x2d\x30\x2e\x35\x33\x2d\x30\x2e\x32\x35\x38\x2d\x30\x2e\x35\
\x33\x2d\x30\x2e\x36\x32\x37\x56\x38\x2e\x36\x37\x32\x63\x30\x2d\
\x30\x2e\x38\x38\x33\x2c\x30\x2e\x34\x34\x31\x2d\x31\x2e\x33\x32\
\x35\x2c\x31\x2e\x33\x32\x35\x2d\x31\x2e\x33\x32\x35\x48\x31\x35\
\x33\x2e\x33\x33\x39\x7a\x22\x0d\x0a\x09\x09\x2f\x3e\x0d\x0a\x3c\
\x2f\x67\x3e\x0d\x0a\x3c\x2f\x73\x76\x67\x3e\x0d\x0a\
\x00\x00\x02\x17\
\x00\
\x00\x07\x01\x78\x9c\xe5\x55\xcd\x8a\xdb\x30\x10\x7e\x95\x41\xb9\
\xec\x82\x63\xcb\x3f\xb2\xa2\x10\x07\xb6\x21\x39\xa5\xf4\xd0\x5b\
\x2f\xc5\xbb\x56\x6c\xb3\x8e\x1d\x6c\x25\xce\x3e\x44\xe9\xa9\xb7\
\xbe\x5e\xa1\x8f\xb1\x23\x39\xbb\x4d\x42\x02\x66\x61\x0f\x4b\x65\
\xf4\x33\x9a\x6f\x46\xf3\x7d\x12\x78\xd2\xec\x52\xd8\xaf\x8b\xb2\
\x89\x48\xa6\xd4\x66\xec\x38\x6d\xdb\xda\xad\x6f\x57\x75\xea\x78\
\x94\x52\x07\x11\xa4\x83\x8c\xf7\x45\x5e\x3e\x5e\x02\xba\x42\x08\
\xc7\x78\x09\xb4\x79\xa2\xb2\x88\x08\x4a\x09\x64\x32\x4f\x33\x15\
\x91\x50\x1b\xbb\x5c\xb6\x9f\xaa\x7d\x44\x28\x50\x40\x37\x98\xdd\
\x55\x5e\x14\x11\x29\xab\x52\x92\xe9\x24\x91\xab\x66\x3a\xa9\xe5\
\x83\x82\x3c\x89\xc8\x26\x56\xd9\x77\xc4\xe8\x18\x02\x4f\x66\xbc\
\x9a\xde\x99\x4e\x9c\x2e\x3e\x85\x6a\x13\x3f\xe4\x0a\x03\x5c\x02\
\xaa\x8e\xcb\x66\x55\xd5\xeb\x88\x98\x65\x11\x2b\x79\x83\x25\xdc\
\x02\xd4\x95\xea\x8c\x80\x51\xf0\x29\xbd\x25\x87\xc3\xbb\xa2\x06\
\x0b\xd3\xc8\xdb\xd3\xf5\xae\x7c\x1d\x37\x8f\x86\xf2\x7d\x3a\xd4\
\xeb\x17\x5d\xda\x2c\x57\x5a\x98\x6d\x23\xc1\xe8\x3b\xce\x6a\xb9\
\xc2\xd2\x0e\xd2\x20\x67\x74\xe1\xa8\x83\x34\x73\x3d\x47\x64\x5b\
\x17\x37\x83\x43\x2a\xac\x62\x3a\xd1\x70\x93\xff\xef\xcf\xdf\x7f\
\x7e\xfc\xea\xd2\x0f\xeb\x6d\x21\x23\x22\x77\xb2\xac\x92\x84\x40\
\xa3\x9e\xb4\xad\x5d\xe3\xc1\x9c\xe9\xef\x2a\x5f\xef\x84\xb0\x4b\
\x19\x78\x42\x13\x3e\x96\x0a\x8f\xfb\x4c\x2d\x36\xa2\x4b\xcf\xfd\
\x37\xd3\x65\xd7\xd1\xfe\x06\x86\xfc\xdb\xee\xcb\x35\xd7\xd5\x23\
\x96\xd9\xfe\x88\xb9\x21\x13\x82\xfb\xbe\xe0\x34\x94\x43\xd7\x3f\
\xcb\x17\x08\x5b\x1c\x35\x8e\xd9\x6d\x7a\xd2\x98\x3e\xed\x55\xc6\
\x59\xb5\xbe\xcf\x4b\x99\xc0\xd7\x2c\xde\xc8\x3e\x72\x06\xf3\xbb\
\xf9\xa2\x27\xbb\x5e\xd5\x5c\x90\xda\xa3\x4b\x7c\x5d\x2f\x53\x38\
\x33\x86\x1d\x0a\x18\x09\x6e\xfb\xae\x45\x71\x11\xa0\xf8\xa1\x45\
\x67\x7a\x1f\x37\x0e\x00\x44\x2f\x75\xe0\xe1\x46\x5e\x79\x7e\xd9\
\xc5\x45\x0f\x76\x0b\x81\xdf\xdd\x15\x76\x1c\xf8\x59\xf5\xc1\x31\
\x5b\xdf\x66\xd8\x03\x71\xd2\xc2\x0b\xfc\xb8\x85\xd0\x19\xb7\x5c\
\x9b\x71\x60\x76\xe0\x5b\x26\x18\xb9\xe8\x1d\xc3\xc5\xb8\xa8\xc1\
\xe1\x03\x43\x08\x18\x17\x37\x38\x3e\x33\x41\x58\x4e\xe7\x32\xf9\
\xde\x83\xb0\xfb\xdf\x31\xf6\x3e\x02\x63\x27\x3d\xeb\xf8\x67\x9b\
\x3e\x03\xf1\x7a\xec\xab\
"
qt_resource_name = b"\
\x00\x03\
\x00\x00\x70\x37\
\x00\x69\
\x00\x6d\x00\x67\
\x00\x13\
\x0a\x25\xf2\x27\
\x00\x6c\
\x00\x6f\x00\x67\x00\x6f\x00\x2d\x00\x68\x00\x6f\x00\x72\x00\x69\x00\x7a\x00\x6f\x00\x6e\x00\x74\x00\x61\x00\x6c\x00\x2e\x00\x73\
\x00\x76\x00\x67\
\x00\x06\
\x03\x32\x4a\x27\
\x80\xcc\
\x66\x6f\x00\x2e\x00\x73\x00\x76\x00\x67\
"
def qInitResources():
QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data) | null |
155,954 | from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x22\x3e\
\x3c\
\x3f\x78\x6d\x6c\x20\x76\x65\x72\x73\x69\x6f\x6e\x3d\x22\x31\x2e\
\x30\x22\x20\x65\x6e\x63\x6f\x64\x69\x6e\x67\x3d\x22\x75\x74\x66\
\x2d\x38\x22\x3f\x3e\x0d\x0a\x3c\x21\x2d\x2d\x20\x47\x65\x6e\x65\
\x72\x61\x74\x6f\x72\x3a\x20\x41\x64\x6f\x62\x65\x20\x49\x6c\x6c\
\x75\x73\x74\x72\x61\x74\x6f\x72\x20\x31\x35\x2e\x30\x2e\x30\x2c\
\x20\x53\x56\x47\x20\x45\x78\x70\x6f\x72\x74\x20\x50\x6c\x75\x67\
\x2d\x49\x6e\x20\x2e\x20\x53\x56\x47\x20\x56\x65\x72\x73\x69\x6f\
\x6e\x3a\x20\x36\x2e\x30\x30\x20\x42\x75\x69\x6c\x64\x20\x30\x29\
\x20\x20\x2d\x2d\x3e\x0d\x0a\x3c\x21\x44\x4f\x43\x54\x59\x50\x45\
\x20\x73\x76\x67\x20\x50\x55\x42\x4c\x49\x43\x20\x22\x2d\x2f\x2f\
\x57\x33\x43\x2f\x2f\x44\x54\x44\x20\x53\x56\x47\x20\x31\x2e\x31\
\x2f\x2f\x45\x4e\x22\x20\x22\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\
\x77\x2e\x77\x33\x2e\x6f\x72\x67\x2f\x47\x72\x61\x70\x68\x69\x63\
\x73\x2f\x53\x56\x47\x2f\x31\x2e\x31\x2f\x44\x54\x44\x2f\x73\x76\
\x67\x31\x31\x2e\x64\x74\x64\x22\x3e\x0d\x0a\x3c\x73\x76\x67\x20\
\x76\x65\x72\x73\x69\x6f\x6e\x3d\x22\x31\x2e\x31\x22\x20\x69\x64\
\x3d\x22\xe5\x9b\xbe\xe5\xb1\x82\x5f\x31\x22\x20\x78\x6d\x6c\x6e\
\x73\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\x77\x2e\x77\x33\
\x2e\x6f\x72\x67\x2f\x32\x30\x30\x30\x2f\x73\x76\x67\x22\x20\x78\
\x6d\x6c\x6e\x73\x3a\x78\x6c\x69\x6e\x6b\x3d\x22\x68\x74\x74\x70\
\x3a\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\x67\x2f\x31\x39\
\x39\x39\x2f\x78\x6c\x69\x6e\x6b\x22\x20\x78\x3d\x22\x30\x70\x78\
\x22\x20\x79\x3d\x22\x30\x70\x78\x22\x0d\x0a\x09\x20\x77\x69\x64\
\x74\x68\x3d\x22\x31\x35\x38\x70\x78\x22\x20\x68\x65\x69\x67\x68\
\x74\x3d\x22\x33\x34\x70\x78\x22\x20\x76\x69\x65\x77\x42\x6f\x78\
\x3d\x22\x30\x20\x30\x20\x31\x35\x38\x20\x33\x34\x22\x20\x65\x6e\
\x61\x62\x6c\x65\x2d\x62\x61\x63\x6b\x67\x72\x6f\x75\x6e\x64\x3d\
\x22\x6e\x65\x77\x20\x30\x20\x30\x20\x31\x35\x38\x20\x33\x34\x22\
\x20\x78\x6d\x6c\x3a\x73\x70\x61\x63\x65\x3d\x22\x70\x72\x65\x73\
\x65\x72\x76\x65\x22\x3e\x0d\x0a\x3c\x70\x61\x74\x68\x20\x66\x69\
\x6c\x6c\x3d\x22\x23\x30\x30\x46\x41\x46\x30\x22\x20\x64\x3d\x22\
\x4d\x31\x31\x2e\x36\x34\x2c\x31\x33\x2e\x34\x33\x34\x76\x2d\x31\
\x2e\x33\x31\x31\x63\x2d\x30\x2e\x34\x35\x36\x2d\x30\x2e\x30\x35\
\x35\x2d\x30\x2e\x39\x31\x31\x2d\x30\x2e\x30\x39\x31\x2d\x31\x2e\
\x33\x38\x35\x2d\x30\x2e\x30\x39\x31\x63\x2d\x35\x2e\x36\x34\x37\
\x2c\x30\x2d\x31\x30\x2e\x32\x35\x36\x2c\x34\x2e\x35\x38\x38\x2d\
\x31\x30\x2e\x32\x35\x36\x2c\x31\x30\x2e\x32\x35\x31\x0d\x0a\x09\
\x63\x30\x2c\x33\x2e\x34\x35\x39\x2c\x31\x2e\x37\x33\x31\x2c\x36\
\x2e\x35\x33\x35\x2c\x34\x2e\x33\x37\x32\x2c\x38\x2e\x33\x39\x33\
\x63\x2d\x31\x2e\x37\x31\x32\x2d\x31\x2e\x38\x33\x39\x2d\x32\x2e\
\x37\x35\x2d\x34\x2e\x32\x37\x39\x2d\x32\x2e\x37\x35\x2d\x36\x2e\
\x39\x37\x33\x43\x31\x2e\x36\x33\x39\x2c\x31\x38\x2e\x31\x31\x33\
\x2c\x36\x2e\x31\x30\x32\x2c\x31\x33\x2e\x35\x36\x32\x2c\x31\x31\
\x2e\x36\x34\x2c\x31\x33\x2e\x34\x33\x34\x7a\x22\x2f\x3e\x0d\x0a\
\x3c\x70\x61\x74\x68\x20\x66\x69\x6c\x6c\x3d\x22\x23\x30\x30\x46\
\x41\x46\x30\x22\x20\x64\x3d\x22\x4d\x31\x31\x2e\x38\x37\x36\x2c\
\x32\x38\x2e\x33\x34\x36\x63\x32\x2e\x35\x31\x34\x2c\x30\x2c\x34\
\x2e\x35\x37\x32\x2d\x32\x2e\x30\x30\x34\x2c\x34\x2e\x36\x36\x34\
\x2d\x34\x2e\x34\x39\x38\x56\x31\x2e\x35\x34\x35\x68\x34\x2e\x30\
\x38\x31\x63\x2d\x30\x2e\x30\x39\x31\x2d\x30\x2e\x34\x35\x35\x2d\
\x30\x2e\x31\x32\x37\x2d\x30\x2e\x39\x32\x38\x2d\x30\x2e\x31\x32\
\x37\x2d\x31\x2e\x34\x32\x48\x31\x34\x2e\x39\x32\x0d\x0a\x09\x76\
\x32\x32\x2e\x33\x30\x33\x63\x2d\x30\x2e\x30\x39\x31\x2c\x32\x2e\
\x34\x39\x34\x2d\x32\x2e\x31\x35\x2c\x34\x2e\x34\x39\x38\x2d\x34\
\x2e\x36\x36\x34\x2c\x34\x2e\x34\x39\x38\x63\x2d\x30\x2e\x37\x38\
\x33\x2c\x30\x2d\x31\x2e\x35\x33\x2d\x30\x2e\x32\x30\x31\x2d\x32\
\x2e\x31\x36\x37\x2d\x30\x2e\x35\x34\x37\x43\x38\x2e\x39\x34\x34\
\x2c\x32\x37\x2e\x35\x38\x2c\x31\x30\x2e\x33\x32\x38\x2c\x32\x38\
\x2e\x33\x34\x36\x2c\x31\x31\x2e\x38\x37\x36\x2c\x32\x38\x2e\x33\
\x34\x36\x7a\x22\x2f\x3e\x0d\x0a\x3c\x70\x61\x74\x68\x20\x66\x69\
\x6c\x6c\x3d\x22\x23\x30\x30\x46\x41\x46\x30\x22\x20\x64\x3d\x22\
\x4d\x32\x38\x2e\x32\x35\x31\x2c\x39\x2e\x31\x31\x39\x56\x37\x2e\
\x38\x38\x31\x63\x2d\x31\x2e\x35\x34\x38\x2c\x30\x2d\x33\x2e\x30\
\x30\x36\x2d\x30\x2e\x34\x35\x35\x2d\x34\x2e\x32\x32\x36\x2d\x31\
\x2e\x32\x35\x36\x43\x32\x35\x2e\x31\x31\x38\x2c\x37\x2e\x38\x36\
\x33\x2c\x32\x36\x2e\x35\x39\x34\x2c\x38\x2e\x37\x35\x35\x2c\x32\
\x38\x2e\x32\x35\x31\x2c\x39\x2e\x31\x31\x39\x7a\x22\x2f\x3e\x0d\
\x0a\x3c\x70\x61\x74\x68\x20\x66\x69\x6c\x6c\x3d\x22\x23\x46\x46\
\x30\x30\x35\x30\x22\x20\x64\x3d\x22\x4d\x32\x34\x2e\x30\x34\x35\
\x2c\x36\x2e\x36\x32\x35\x63\x2d\x31\x2e\x31\x38\x34\x2d\x31\x2e\
\x33\x36\x35\x2d\x31\x2e\x39\x31\x33\x2d\x33\x2e\x31\x33\x31\x2d\
\x31\x2e\x39\x31\x33\x2d\x35\x2e\x30\x38\x68\x2d\x31\x2e\x34\x39\
\x34\x43\x32\x31\x2e\x30\x33\x39\x2c\x33\x2e\x36\x35\x38\x2c\x32\
\x32\x2e\x32\x39\x36\x2c\x35\x2e\x34\x37\x39\x2c\x32\x34\x2e\x30\
\x34\x35\x2c\x36\x2e\x36\x32\x35\x7a\x22\x2f\x3e\x0d\x0a\x3c\x70\
\x61\x74\x68\x20\x66\x69\x6c\x6c\x3d\x22\x23\x46\x46\x30\x30\x35\
\x30\x22\x20\x64\x3d\x22\x4d\x31\x30\x2e\x32\x35\x35\x2c\x31\x37\
\x2e\x35\x38\x34\x63\x2d\x32\x2e\x35\x38\x36\x2c\x30\x2d\x34\x2e\
\x36\x38\x32\x2c\x32\x2e\x30\x39\x34\x2d\x34\x2e\x36\x38\x32\x2c\
\x34\x2e\x36\x38\x63\x30\x2c\x31\x2e\x38\x30\x33\x2c\x31\x2e\x30\
\x32\x31\x2c\x33\x2e\x33\x35\x2c\x32\x2e\x35\x31\x34\x2c\x34\x2e\
\x31\x33\x33\x0d\x0a\x09\x63\x2d\x30\x2e\x35\x34\x36\x2d\x30\x2e\
\x37\x36\x35\x2d\x30\x2e\x38\x39\x33\x2d\x31\x2e\x37\x31\x31\x2d\
\x30\x2e\x38\x39\x33\x2d\x32\x2e\x37\x32\x39\x63\x30\x2d\x32\x2e\
\x35\x38\x36\x2c\x32\x2e\x30\x39\x35\x2d\x34\x2e\x36\x38\x31\x2c\
\x34\x2e\x36\x38\x32\x2d\x34\x2e\x36\x38\x31\x63\x30\x2e\x34\x37\
\x34\x2c\x30\x2c\x30\x2e\x39\x34\x37\x2c\x30\x2e\x30\x37\x34\x2c\
\x31\x2e\x33\x38\x34\x2c\x30\x2e\x32\x32\x76\x2d\x35\x2e\x36\x38\
\x0d\x0a\x09\x63\x2d\x30\x2e\x34\x35\x35\x2d\x30\x2e\x30\x35\x35\
\x2d\x30\x2e\x39\x31\x31\x2d\x30\x2e\x30\x39\x31\x2d\x31\x2e\x33\
\x38\x34\x2d\x30\x2e\x30\x39\x31\x63\x2d\x30\x2e\x30\x37\x33\x2c\
\x30\x2d\x30\x2e\x31\x36\x34\x2c\x30\x2d\x30\x2e\x32\x33\x37\x2c\
\x30\x76\x34\x2e\x33\x37\x43\x31\x31\x2e\x32\x30\x32\x2c\x31\x37\
\x2e\x36\x35\x38\x2c\x31\x30\x2e\x37\x34\x37\x2c\x31\x37\x2e\x35\
\x38\x34\x2c\x31\x30\x2e\x32\x35\x35\x2c\x31\x37\x2e\x35\x38\x34\
\x7a\x22\x2f\x3e\x0d\x0a\x3c\x70\x61\x74\x68\x20\x66\x69\x6c\x6c\
\x3d\x22\x23\x46\x46\x30\x30\x35\x30\x22\x20\x64\x3d\x22\x4d\x32\
\x38\x2e\x32\x35\x32\x2c\x39\x2e\x31\x31\x39\x76\x34\x2e\x33\x33\
\x33\x63\x2d\x32\x2e\x38\x39\x36\x2c\x30\x2d\x35\x2e\x35\x35\x36\
\x2d\x30\x2e\x39\x32\x38\x2d\x37\x2e\x37\x34\x32\x2d\x32\x2e\x34\
\x39\x34\x76\x31\x31\x2e\x33\x30\x36\x63\x30\x2c\x35\x2e\x36\x34\
\x36\x2d\x34\x2e\x35\x39\x2c\x31\x30\x2e\x32\x35\x2d\x31\x30\x2e\
\x32\x35\x35\x2c\x31\x30\x2e\x32\x35\x0d\x0a\x09\x63\x2d\x32\x2e\
\x31\x38\x36\x2c\x30\x2d\x34\x2e\x32\x30\x38\x2d\x30\x2e\x36\x39\
\x31\x2d\x35\x2e\x38\x36\x36\x2d\x31\x2e\x38\x35\x35\x63\x31\x2e\
\x38\x37\x36\x2c\x32\x2e\x30\x30\x33\x2c\x34\x2e\x35\x33\x36\x2c\
\x33\x2e\x32\x37\x37\x2c\x37\x2e\x35\x30\x35\x2c\x33\x2e\x32\x37\
\x37\x63\x35\x2e\x36\x34\x37\x2c\x30\x2c\x31\x30\x2e\x32\x35\x36\
\x2d\x34\x2e\x35\x38\x38\x2c\x31\x30\x2e\x32\x35\x36\x2d\x31\x30\
\x2e\x32\x35\x32\x56\x31\x32\x2e\x33\x37\x38\x0d\x0a\x09\x63\x32\
\x2e\x31\x38\x36\x2c\x31\x2e\x35\x36\x36\x2c\x34\x2e\x38\x36\x34\
\x2c\x32\x2e\x34\x39\x35\x2c\x37\x2e\x37\x34\x32\x2c\x32\x2e\x34\
\x39\x35\x56\x39\x2e\x33\x30\x31\x43\x32\x39\x2e\x33\x32\x37\x2c\
\x39\x2e\x32\x38\x33\x2c\x32\x38\x2e\x37\x38\x31\x2c\x39\x2e\x32\
\x32\x39\x2c\x32\x38\x2e\x32\x35\x32\x2c\x39\x2e\x31\x31\x39\x7a\
\x22\x2f\x3e\x0d\x0a\x3c\x70\x61\x74\x68\x20\x66\x69\x6c\x6c\x3d\
\x22\x23\x46\x46\x46\x46\x46\x46\x22\x20\x64\x3d\x22\x4d\x32\x30\
\x2e\x35\x30\x39\x2c\x32\x32\x2e\x32\x36\x36\x56\x31\x30\x2e\x39\
\x35\x38\x63\x32\x2e\x31\x38\x36\x2c\x31\x2e\x35\x36\x36\x2c\x34\
\x2e\x38\x36\x34\x2c\x32\x2e\x34\x39\x34\x2c\x37\x2e\x37\x34\x32\
\x2c\x32\x2e\x34\x39\x34\x56\x39\x2e\x31\x32\x63\x2d\x31\x2e\x36\
\x37\x36\x2d\x30\x2e\x33\x36\x34\x2d\x33\x2e\x31\x33\x33\x2d\x31\
\x2e\x32\x35\x36\x2d\x34\x2e\x32\x32\x37\x2d\x32\x2e\x34\x39\x34\
\x0d\x0a\x09\x63\x2d\x31\x2e\x37\x34\x39\x2d\x31\x2e\x31\x32\x39\
\x2d\x33\x2e\x30\x30\x35\x2d\x32\x2e\x39\x34\x39\x2d\x33\x2e\x33\
\x38\x38\x2d\x35\x2e\x30\x38\x68\x2d\x34\x2e\x30\x38\x56\x32\x33\
\x2e\x38\x35\x63\x2d\x30\x2e\x30\x39\x31\x2c\x32\x2e\x34\x39\x34\
\x2d\x32\x2e\x31\x35\x2c\x34\x2e\x34\x39\x36\x2d\x34\x2e\x36\x36\
\x34\x2c\x34\x2e\x34\x39\x36\x63\x2d\x31\x2e\x35\x36\x36\x2c\x30\
\x2d\x32\x2e\x39\x35\x31\x2d\x30\x2e\x37\x36\x35\x2d\x33\x2e\x37\
\x38\x39\x2d\x31\x2e\x39\x34\x37\x0d\x0a\x09\x63\x2d\x31\x2e\x34\
\x39\x34\x2d\x30\x2e\x37\x38\x33\x2d\x32\x2e\x35\x31\x34\x2d\x32\
\x2e\x33\x35\x2d\x32\x2e\x35\x31\x34\x2d\x34\x2e\x31\x33\x33\x63\
\x30\x2d\x32\x2e\x35\x38\x36\x2c\x32\x2e\x30\x39\x35\x2d\x34\x2e\
\x36\x38\x2c\x34\x2e\x36\x38\x32\x2d\x34\x2e\x36\x38\x63\x30\x2e\
\x34\x37\x34\x2c\x30\x2c\x30\x2e\x39\x34\x37\x2c\x30\x2e\x30\x37\
\x32\x2c\x31\x2e\x33\x38\x34\x2c\x30\x2e\x32\x31\x39\x76\x2d\x34\
\x2e\x33\x37\x0d\x0a\x09\x63\x2d\x35\x2e\x35\x33\x36\x2c\x30\x2e\
\x31\x32\x37\x2d\x31\x30\x2c\x34\x2e\x36\x37\x38\x2d\x31\x30\x2c\
\x31\x30\x2e\x32\x33\x31\x63\x30\x2c\x32\x2e\x36\x39\x35\x2c\x31\
\x2e\x30\x33\x39\x2c\x35\x2e\x31\x35\x32\x2c\x32\x2e\x37\x35\x31\
\x2c\x36\x2e\x39\x37\x35\x63\x31\x2e\x36\x35\x38\x2c\x31\x2e\x31\
\x36\x34\x2c\x33\x2e\x36\x39\x38\x2c\x31\x2e\x38\x35\x36\x2c\x35\
\x2e\x38\x36\x36\x2c\x31\x2e\x38\x35\x36\x0d\x0a\x09\x43\x31\x35\
\x2e\x39\x2c\x33\x32\x2e\x35\x31\x36\x2c\x32\x30\x2e\x35\x30\x39\
\x2c\x32\x37\x2e\x39\x30\x38\x2c\x32\x30\x2e\x35\x30\x39\x2c\x32\
\x32\x2e\x32\x36\x36\x7a\x22\x2f\x3e\x0d\x0a\x3c\x67\x3e\x0d\x0a\
\x09\x3c\x70\x61\x74\x68\x20\x66\x69\x6c\x6c\x3d\x22\x23\x46\x46\
\x46\x46\x46\x46\x22\x20\x64\x3d\x22\x4d\x34\x34\x2e\x35\x37\x36\
\x2c\x31\x35\x2e\x38\x37\x36\x63\x30\x2e\x33\x35\x33\x2d\x30\x2e\
\x30\x38\x2c\x30\x2e\x35\x32\x32\x2c\x30\x2e\x30\x36\x34\x2c\x30\
\x2e\x35\x30\x36\x2c\x30\x2e\x34\x33\x34\x6c\x2d\x30\x2e\x31\x32\
\x2c\x32\x2e\x32\x36\x35\x63\x2d\x30\x2e\x30\x31\x37\x2c\x30\x2e\
\x33\x37\x2d\x30\x2e\x32\x30\x31\x2c\x30\x2e\x35\x39\x35\x2d\x30\
\x2e\x35\x35\x34\x2c\x30\x2e\x36\x37\x35\x0d\x0a\x09\x09\x6c\x2d\
\x31\x2e\x36\x31\x34\x2c\x30\x2e\x34\x30\x39\x76\x35\x2e\x32\x30\
\x34\x63\x30\x2c\x30\x2e\x38\x32\x2d\x30\x2e\x30\x38\x39\x2c\x31\
\x2e\x34\x35\x35\x2d\x30\x2e\x32\x36\x35\x2c\x31\x2e\x39\x30\x34\
\x63\x2d\x30\x2e\x31\x36\x31\x2c\x30\x2e\x34\x34\x39\x2d\x30\x2e\
\x34\x36\x36\x2c\x30\x2e\x37\x39\x35\x2d\x30\x2e\x39\x31\x36\x2c\
\x31\x2e\x30\x33\x35\x63\x2d\x30\x2e\x37\x32\x33\x2c\x30\x2e\x34\
\x31\x38\x2d\x31\x2e\x37\x37\x35\x2c\x30\x2e\x36\x31\x39\x2d\x33\
\x2e\x31\x35\x37\x2c\x30\x2e\x36\x30\x34\x0d\x0a\x09\x09\x63\x2d\
\x30\x2e\x34\x39\x38\x2c\x30\x2e\x30\x31\x36\x2d\x30\x2e\x37\x36\
\x33\x2d\x30\x2e\x31\x36\x32\x2d\x30\x2e\x37\x39\x35\x2d\x30\x2e\
\x35\x33\x31\x6c\x2d\x30\x2e\x32\x34\x31\x2d\x32\x2e\x30\x34\x37\
\x63\x2d\x30\x2e\x30\x33\x32\x2d\x30\x2e\x33\x37\x2c\x30\x2e\x31\
\x33\x37\x2d\x30\x2e\x35\x35\x35\x2c\x30\x2e\x35\x30\x36\x2d\x30\
\x2e\x35\x35\x35\x68\x30\x2e\x39\x38\x38\x0d\x0a\x09\x09\x63\x30\
\x2e\x33\x30\x35\x2c\x30\x2e\x30\x31\x36\x2c\x30\x2e\x34\x35\x38\
\x2d\x30\x2e\x31\x32\x39\x2c\x30\x2e\x34\x35\x38\x2d\x30\x2e\x34\
\x33\x34\x76\x2d\x34\x2e\x33\x33\x38\x6c\x2d\x31\x2e\x36\x36\x32\
\x2c\x30\x2e\x34\x31\x63\x2d\x30\x2e\x33\x35\x34\x2c\x30\x2e\x30\
\x38\x2d\x30\x2e\x35\x33\x39\x2d\x30\x2e\x30\x36\x34\x2d\x30\x2e\
\x35\x35\x34\x2d\x30\x2e\x34\x33\x34\x6c\x2d\x30\x2e\x32\x34\x31\
\x2d\x32\x2e\x33\x33\x37\x0d\x0a\x09\x09\x63\x2d\x30\x2e\x30\x31\
\x36\x2d\x30\x2e\x33\x37\x2c\x30\x2e\x31\x36\x31\x2d\x30\x2e\x35\
\x39\x35\x2c\x30\x2e\x35\x33\x2d\x30\x2e\x36\x37\x35\x6c\x31\x2e\
\x39\x32\x37\x2d\x30\x2e\x33\x38\x36\x76\x2d\x33\x2e\x36\x36\x32\
\x68\x2d\x32\x2e\x31\x36\x38\x63\x2d\x30\x2e\x33\x37\x2c\x30\x2d\
\x30\x2e\x35\x32\x32\x2d\x30\x2e\x31\x37\x37\x2d\x30\x2e\x34\x35\
\x38\x2d\x30\x2e\x35\x33\x6c\x30\x2e\x34\x31\x2d\x32\x2e\x31\x36\
\x38\x0d\x0a\x09\x09\x63\x30\x2e\x30\x36\x34\x2d\x30\x2e\x33\x35\
\x33\x2c\x30\x2e\x32\x38\x31\x2d\x30\x2e\x35\x33\x2c\x30\x2e\x36\
\x35\x2d\x30\x2e\x35\x33\x68\x31\x2e\x35\x36\x36\x56\x36\x2e\x32\
\x38\x37\x63\x30\x2d\x30\x2e\x33\x37\x2c\x30\x2e\x31\x37\x37\x2d\
\x30\x2e\x35\x33\x2c\x30\x2e\x35\x33\x2d\x30\x2e\x34\x38\x32\x6c\
\x32\x2e\x33\x36\x31\x2c\x30\x2e\x34\x35\x38\x63\x30\x2e\x33\x35\
\x33\x2c\x30\x2e\x30\x34\x38\x2c\x30\x2e\x35\x33\x2c\x30\x2e\x32\
\x35\x37\x2c\x30\x2e\x35\x33\x2c\x30\x2e\x36\x32\x36\x76\x33\x2e\
\x33\x30\x31\x0d\x0a\x09\x09\x68\x32\x2e\x30\x39\x36\x63\x30\x2e\
\x33\x36\x39\x2c\x30\x2c\x30\x2e\x35\x32\x31\x2c\x30\x2e\x31\x37\
\x37\x2c\x30\x2e\x34\x35\x38\x2c\x30\x2e\x35\x33\x6c\x2d\x30\x2e\
\x34\x31\x2c\x32\x2e\x31\x36\x38\x63\x2d\x30\x2e\x30\x36\x34\x2c\
\x30\x2e\x33\x35\x34\x2d\x30\x2e\x32\x38\x31\x2c\x30\x2e\x35\x33\
\x2d\x30\x2e\x36\x35\x2c\x30\x2e\x35\x33\x68\x2d\x31\x2e\x34\x39\
\x34\x76\x32\x2e\x38\x39\x31\x4c\x34\x34\x2e\x35\x37\x36\x2c\x31\
\x35\x2e\x38\x37\x36\x7a\x20\x4d\x35\x31\x2e\x34\x39\x31\x2c\x31\
\x35\x2e\x32\x35\x0d\x0a\x09\x09\x63\x30\x2e\x32\x37\x33\x2c\x30\
\x2e\x32\x32\x35\x2c\x30\x2e\x32\x38\x39\x2c\x30\x2e\x34\x36\x36\
\x2c\x30\x2e\x30\x34\x38\x2c\x30\x2e\x37\x32\x33\x6c\x2d\x31\x2e\
\x35\x36\x35\x2c\x31\x2e\x36\x38\x38\x63\x2d\x30\x2e\x32\x34\x31\
\x2c\x30\x2e\x32\x35\x36\x2d\x30\x2e\x34\x37\x35\x2c\x30\x2e\x32\
\x38\x31\x2d\x30\x2e\x36\x39\x39\x2c\x30\x2e\x30\x37\x32\x63\x2d\
\x30\x2e\x33\x35\x34\x2d\x30\x2e\x34\x31\x38\x2d\x31\x2e\x33\x38\
\x32\x2d\x31\x2e\x31\x31\x37\x2d\x33\x2e\x30\x38\x34\x2d\x32\x2e\
\x30\x39\x37\x0d\x0a\x09\x09\x63\x2d\x30\x2e\x34\x31\x38\x2d\x30\
\x2e\x32\x35\x37\x2d\x30\x2e\x34\x39\x2d\x30\x2e\x34\x39\x38\x2d\
\x30\x2e\x32\x31\x37\x2d\x30\x2e\x37\x32\x33\x6c\x31\x2e\x36\x36\
\x32\x2d\x31\x2e\x34\x34\x36\x63\x30\x2e\x32\x37\x33\x2d\x30\x2e\
\x32\x32\x35\x2c\x30\x2e\x36\x31\x2d\x30\x2e\x32\x33\x32\x2c\x31\
\x2e\x30\x31\x33\x2d\x30\x2e\x30\x32\x34\x43\x34\x39\x2e\x38\x38\
\x35\x2c\x31\x34\x2e\x30\x35\x34\x2c\x35\x30\x2e\x38\x33\x32\x2c\
\x31\x34\x2e\x36\x35\x36\x2c\x35\x31\x2e\x34\x39\x31\x2c\x31\x35\
\x2e\x32\x35\x7a\x0d\x0a\x09\x09\x20\x4d\x35\x38\x2e\x39\x31\x32\
\x2c\x31\x38\x2e\x39\x36\x31\x76\x32\x2e\x32\x31\x37\x63\x30\x2c\
\x30\x2e\x33\x36\x39\x2d\x30\x2e\x31\x38\x35\x2c\x30\x2e\x35\x38\
\x36\x2d\x30\x2e\x35\x35\x34\x2c\x30\x2e\x36\x35\x6c\x2d\x31\x2e\
\x39\x32\x38\x2c\x30\x2e\x33\x33\x37\x76\x35\x2e\x38\x38\x63\x30\
\x2c\x30\x2e\x33\x36\x39\x2d\x30\x2e\x31\x37\x37\x2c\x30\x2e\x35\
\x32\x39\x2d\x30\x2e\x35\x33\x2c\x30\x2e\x34\x38\x6c\x2d\x32\x2e\
\x34\x30\x39\x2d\x30\x2e\x34\x35\x37\x0d\x0a\x09\x09\x63\x2d\x30\
\x2e\x33\x35\x34\x2d\x30\x2e\x30\x34\x39\x2d\x30\x2e\x35\x33\x2d\
\x30\x2e\x32\x35\x38\x2d\x30\x2e\x35\x33\x2d\x30\x2e\x36\x32\x37\
\x76\x2d\x34\x2e\x36\x39\x37\x6c\x2d\x36\x2e\x35\x30\x36\x2c\x31\
\x2e\x31\x33\x32\x63\x2d\x30\x2e\x33\x35\x34\x2c\x30\x2e\x30\x34\
\x38\x2d\x30\x2e\x35\x33\x2d\x30\x2e\x31\x31\x32\x2d\x30\x2e\x35\
\x33\x2d\x30\x2e\x34\x38\x31\x6c\x2d\x30\x2e\x30\x34\x38\x2d\x32\
\x2e\x33\x31\x33\x0d\x0a\x09\x09\x63\x30\x2d\x30\x2e\x33\x37\x2c\
\x30\x2e\x31\x38\x35\x2d\x30\x2e\x35\x37\x39\x2c\x30\x2e\x35\x35\
\x34\x2d\x30\x2e\x36\x32\x36\x6c\x36\x2e\x35\x32\x39\x2d\x31\x2e\
\x30\x38\x35\x56\x36\x2e\x32\x36\x33\x63\x30\x2d\x30\x2e\x33\x36\
\x39\x2c\x30\x2e\x31\x37\x37\x2d\x30\x2e\x35\x33\x2c\x30\x2e\x35\
\x33\x2d\x30\x2e\x34\x38\x32\x4c\x35\x35\x2e\x39\x2c\x36\x2e\x32\
\x33\x39\x63\x30\x2e\x33\x35\x34\x2c\x30\x2e\x30\x34\x38\x2c\x30\
\x2e\x35\x33\x2c\x30\x2e\x32\x35\x37\x2c\x30\x2e\x35\x33\x2c\x30\
\x2e\x36\x32\x36\x0d\x0a\x09\x09\x76\x31\x31\x2e\x39\x35\x31\x6c\
\x31\x2e\x39\x35\x32\x2d\x30\x2e\x33\x33\x38\x43\x35\x38\x2e\x37\
\x33\x35\x2c\x31\x38\x2e\x34\x33\x31\x2c\x35\x38\x2e\x39\x31\x32\
\x2c\x31\x38\x2e\x35\x39\x32\x2c\x35\x38\x2e\x39\x31\x32\x2c\x31\
\x38\x2e\x39\x36\x31\x7a\x20\x4d\x35\x32\x2e\x34\x35\x35\x2c\x39\
\x2e\x33\x39\x35\x63\x30\x2e\x32\x35\x37\x2c\x30\x2e\x32\x34\x31\
\x2c\x30\x2e\x32\x35\x37\x2c\x30\x2e\x34\x39\x2c\x30\x2c\x30\x2e\
\x37\x34\x37\x6c\x2d\x31\x2e\x35\x36\x36\x2c\x31\x2e\x34\x39\x34\
\x0d\x0a\x09\x09\x63\x2d\x30\x2e\x32\x35\x37\x2c\x30\x2e\x32\x35\
\x37\x2d\x30\x2e\x35\x30\x36\x2c\x30\x2e\x32\x37\x33\x2d\x30\x2e\
\x37\x34\x37\x2c\x30\x2e\x30\x34\x38\x63\x2d\x30\x2e\x33\x36\x39\
\x2d\x30\x2e\x34\x39\x38\x2d\x31\x2e\x33\x31\x36\x2d\x31\x2e\x32\
\x30\x35\x2d\x32\x2e\x38\x34\x33\x2d\x32\x2e\x31\x32\x63\x2d\x30\
\x2e\x33\x38\x35\x2d\x30\x2e\x32\x35\x37\x2d\x30\x2e\x34\x33\x34\
\x2d\x30\x2e\x34\x39\x38\x2d\x30\x2e\x31\x34\x35\x2d\x30\x2e\x37\
\x32\x33\x6c\x31\x2e\x36\x36\x32\x2d\x31\x2e\x33\x30\x31\x0d\x0a\
\x09\x09\x63\x30\x2e\x32\x39\x2d\x30\x2e\x32\x32\x35\x2c\x30\x2e\
\x36\x32\x37\x2d\x30\x2e\x32\x32\x35\x2c\x31\x2e\x30\x31\x33\x2c\
\x30\x43\x35\x31\x2e\x30\x31\x38\x2c\x38\x2e\x31\x36\x36\x2c\x35\
\x31\x2e\x38\x39\x33\x2c\x38\x2e\x37\x38\x35\x2c\x35\x32\x2e\x34\
\x35\x35\x2c\x39\x2e\x33\x39\x35\x7a\x22\x2f\x3e\x0d\x0a\x09\x3c\
\x70\x61\x74\x68\x20\x66\x69\x6c\x6c\x3d\x22\x23\x46\x46\x46\x46\
\x46\x46\x22\x20\x64\x3d\x22\x4d\x37\x38\x2e\x34\x37\x37\x2c\x31\
\x33\x2e\x32\x35\x68\x34\x2e\x31\x36\x39\x63\x30\x2e\x33\x36\x39\
\x2c\x30\x2c\x30\x2e\x35\x32\x31\x2c\x30\x2e\x31\x37\x37\x2c\x30\
\x2e\x34\x35\x38\x2c\x30\x2e\x35\x33\x6c\x2d\x30\x2e\x34\x31\x2c\
\x32\x2e\x31\x34\x35\x63\x2d\x30\x2e\x30\x36\x34\x2c\x30\x2e\x33\
\x35\x34\x2d\x30\x2e\x32\x38\x31\x2c\x30\x2e\x35\x33\x2d\x30\x2e\
\x36\x35\x2c\x30\x2e\x35\x33\x48\x36\x31\x2e\x32\x30\x31\x0d\x0a\
\x09\x09\x63\x2d\x30\x2e\x33\x37\x2c\x30\x2d\x30\x2e\x35\x32\x32\
\x2d\x30\x2e\x31\x37\x36\x2d\x30\x2e\x34\x35\x38\x2d\x30\x2e\x35\
\x33\x6c\x30\x2e\x34\x31\x2d\x32\x2e\x31\x34\x35\x63\x30\x2e\x30\
\x36\x33\x2d\x30\x2e\x33\x35\x33\x2c\x30\x2e\x32\x38\x2d\x30\x2e\
\x35\x33\x2c\x30\x2e\x36\x35\x2d\x30\x2e\x35\x33\x68\x33\x2e\x38\
\x37\x39\x6c\x2d\x30\x2e\x32\x31\x37\x2d\x30\x2e\x35\x35\x34\x0d\
\x0a\x09\x09\x63\x2d\x30\x2e\x33\x36\x39\x2d\x30\x2e\x39\x37\x39\
\x2d\x30\x2e\x30\x34\x2d\x31\x2e\x35\x35\x38\x2c\x30\x2e\x39\x38\
\x38\x2d\x31\x2e\x37\x33\x34\x6c\x30\x2e\x37\x32\x33\x2d\x30\x2e\
\x31\x34\x35\x68\x2d\x34\x2e\x36\x30\x33\x63\x2d\x30\x2e\x33\x36\
\x39\x2c\x30\x2d\x30\x2e\x35\x32\x31\x2d\x30\x2e\x31\x37\x36\x2d\
\x30\x2e\x34\x35\x37\x2d\x30\x2e\x35\x33\x6c\x30\x2e\x34\x30\x39\
\x2d\x32\x2e\x31\x34\x35\x0d\x0a\x09\x09\x63\x30\x2e\x30\x36\x34\
\x2d\x30\x2e\x33\x35\x33\x2c\x30\x2e\x32\x38\x31\x2d\x30\x2e\x35\
\x33\x2c\x30\x2e\x36\x35\x2d\x30\x2e\x35\x33\x68\x36\x2e\x39\x33\
\x39\x6c\x2d\x30\x2e\x34\x38\x32\x2d\x31\x2e\x31\x33\x33\x63\x2d\
\x30\x2e\x31\x32\x38\x2d\x30\x2e\x33\x33\x37\x2d\x30\x2e\x30\x30\
\x38\x2d\x30\x2e\x35\x30\x36\x2c\x30\x2e\x33\x36\x32\x2d\x30\x2e\
\x35\x30\x36\x6c\x32\x2e\x39\x36\x33\x2d\x30\x2e\x30\x32\x34\x0d\
\x0a\x09\x09\x63\x30\x2e\x33\x36\x39\x2c\x30\x2c\x30\x2e\x36\x32\
\x37\x2c\x30\x2e\x31\x37\x37\x2c\x30\x2e\x37\x37\x31\x2c\x30\x2e\
\x35\x33\x6c\x30\x2e\x35\x30\x36\x2c\x31\x2e\x31\x33\x33\x68\x37\
\x2e\x33\x32\x34\x63\x30\x2e\x33\x37\x2c\x30\x2c\x30\x2e\x35\x32\
\x32\x2c\x30\x2e\x31\x37\x37\x2c\x30\x2e\x34\x35\x38\x2c\x30\x2e\
\x35\x33\x6c\x2d\x30\x2e\x34\x30\x39\x2c\x32\x2e\x31\x34\x35\x63\
\x2d\x30\x2e\x30\x36\x34\x2c\x30\x2e\x33\x35\x34\x2d\x30\x2e\x32\
\x38\x31\x2c\x30\x2e\x35\x33\x2d\x30\x2e\x36\x35\x2c\x30\x2e\x35\
\x33\x0d\x0a\x09\x09\x48\x37\x37\x2e\x32\x6c\x30\x2e\x36\x37\x35\
\x2c\x30\x2e\x31\x34\x35\x63\x31\x2e\x30\x32\x37\x2c\x30\x2e\x31\
\x39\x32\x2c\x31\x2e\x33\x32\x35\x2c\x30\x2e\x37\x36\x33\x2c\x30\
\x2e\x38\x39\x31\x2c\x31\x2e\x37\x31\x4c\x37\x38\x2e\x34\x37\x37\
\x2c\x31\x33\x2e\x32\x35\x7a\x20\x4d\x36\x34\x2e\x38\x36\x33\x2c\
\x32\x38\x2e\x36\x32\x33\x63\x2d\x30\x2e\x38\x38\x34\x2c\x30\x2d\
\x31\x2e\x33\x32\x35\x2d\x30\x2e\x34\x34\x32\x2d\x31\x2e\x33\x32\
\x35\x2d\x31\x2e\x33\x32\x36\x76\x2d\x38\x2e\x33\x33\x36\x0d\x0a\
\x09\x09\x63\x30\x2d\x30\x2e\x38\x38\x34\x2c\x30\x2e\x34\x34\x31\
\x2d\x31\x2e\x33\x32\x36\x2c\x31\x2e\x33\x32\x35\x2d\x31\x2e\x33\
\x32\x36\x68\x31\x34\x2e\x34\x30\x39\x63\x30\x2e\x38\x38\x33\x2c\
\x30\x2c\x31\x2e\x33\x32\x35\x2c\x30\x2e\x34\x34\x32\x2c\x31\x2e\
\x33\x32\x35\x2c\x31\x2e\x33\x32\x36\x76\x38\x2e\x33\x33\x36\x63\
\x30\x2c\x30\x2e\x38\x38\x34\x2d\x30\x2e\x34\x34\x32\x2c\x31\x2e\
\x33\x32\x36\x2d\x31\x2e\x33\x32\x35\x2c\x31\x2e\x33\x32\x36\x48\
\x36\x34\x2e\x38\x36\x33\x7a\x0d\x0a\x09\x09\x20\x4d\x37\x36\x2e\
\x37\x36\x37\x2c\x32\x31\x2e\x32\x39\x38\x63\x30\x2d\x30\x2e\x34\
\x36\x36\x2d\x30\x2e\x32\x33\x33\x2d\x30\x2e\x36\x39\x38\x2d\x30\
\x2e\x36\x39\x39\x2d\x30\x2e\x36\x39\x38\x68\x2d\x38\x2e\x31\x39\
\x32\x63\x2d\x30\x2e\x34\x36\x36\x2c\x30\x2d\x30\x2e\x36\x39\x38\
\x2c\x30\x2e\x32\x33\x32\x2d\x30\x2e\x36\x39\x38\x2c\x30\x2e\x36\
\x39\x38\x76\x30\x2e\x34\x35\x38\x68\x39\x2e\x35\x39\x56\x32\x31\
\x2e\x32\x39\x38\x7a\x20\x4d\x36\x37\x2e\x31\x37\x37\x2c\x32\x34\
\x2e\x39\x38\x34\x0d\x0a\x09\x09\x63\x30\x2c\x30\x2e\x34\x36\x36\
\x2c\x30\x2e\x32\x33\x32\x2c\x30\x2e\x36\x39\x39\x2c\x30\x2e\x36\
\x39\x38\x2c\x30\x2e\x36\x39\x39\x68\x38\x2e\x31\x39\x32\x63\x30\
\x2e\x34\x36\x36\x2c\x30\x2c\x30\x2e\x36\x39\x39\x2d\x30\x2e\x32\
\x33\x33\x2c\x30\x2e\x36\x39\x39\x2d\x30\x2e\x36\x39\x39\x56\x32\
\x34\x2e\x34\x33\x68\x2d\x39\x2e\x35\x39\x56\x32\x34\x2e\x39\x38\
\x34\x7a\x20\x4d\x37\x35\x2e\x32\x34\x38\x2c\x31\x30\x2e\x38\x31\
\x36\x68\x2d\x36\x2e\x35\x37\x37\x6c\x30\x2e\x39\x38\x37\x2c\x32\
\x2e\x34\x33\x34\x0d\x0a\x09\x09\x68\x34\x2e\x38\x36\x37\x4c\x37\
\x35\x2e\x32\x34\x38\x2c\x31\x30\x2e\x38\x31\x36\x7a\x22\x2f\x3e\
\x0d\x0a\x09\x3c\x70\x61\x74\x68\x20\x66\x69\x6c\x6c\x3d\x22\x23\
\x46\x46\x46\x46\x46\x46\x22\x20\x64\x3d\x22\x4d\x31\x30\x37\x2e\
\x31\x30\x32\x2c\x31\x35\x2e\x36\x33\x36\x6c\x2d\x30\x2e\x34\x35\
\x38\x2c\x32\x2e\x34\x38\x31\x63\x2d\x30\x2e\x30\x34\x38\x2c\x30\
\x2e\x33\x35\x34\x2d\x30\x2e\x32\x35\x37\x2c\x30\x2e\x35\x33\x2d\
\x30\x2e\x36\x32\x36\x2c\x30\x2e\x35\x33\x48\x39\x35\x2e\x37\x35\
\x33\x0d\x0a\x09\x09\x63\x2d\x31\x2e\x31\x35\x36\x2c\x31\x2e\x38\
\x2d\x32\x2e\x33\x35\x34\x2c\x33\x2e\x33\x37\x33\x2d\x33\x2e\x35\
\x39\x2c\x34\x2e\x37\x32\x33\x63\x2d\x30\x2e\x33\x30\x36\x2c\x30\
\x2e\x33\x35\x33\x2d\x30\x2e\x32\x33\x33\x2c\x30\x2e\x35\x32\x32\
\x2c\x30\x2e\x32\x31\x37\x2c\x30\x2e\x35\x30\x36\x6c\x37\x2e\x38\
\x35\x34\x2d\x30\x2e\x34\x33\x34\x6c\x2d\x31\x2e\x38\x37\x39\x2d\
\x32\x2e\x36\x32\x36\x0d\x0a\x09\x09\x63\x2d\x30\x2e\x32\x30\x39\
\x2d\x30\x2e\x32\x38\x39\x2d\x30\x2e\x31\x33\x37\x2d\x30\x2e\x34\
\x37\x35\x2c\x30\x2e\x32\x31\x37\x2d\x30\x2e\x35\x35\x35\x6c\x32\
\x2e\x35\x35\x34\x2d\x30\x2e\x37\x32\x33\x63\x30\x2e\x33\x35\x34\
\x2d\x30\x2e\x30\x38\x2c\x30\x2e\x36\x34\x33\x2c\x30\x2e\x30\x31\
\x36\x2c\x30\x2e\x38\x36\x37\x2c\x30\x2e\x32\x38\x39\x63\x30\x2e\
\x35\x36\x33\x2c\x30\x2e\x36\x34\x33\x2c\x31\x2e\x33\x33\x33\x2c\
\x31\x2e\x36\x39\x35\x2c\x32\x2e\x33\x31\x33\x2c\x33\x2e\x31\x35\
\x36\x0d\x0a\x09\x09\x63\x30\x2e\x39\x37\x39\x2c\x31\x2e\x34\x34\
\x35\x2c\x31\x2e\x36\x36\x32\x2c\x32\x2e\x35\x38\x36\x2c\x32\x2e\
\x30\x34\x38\x2c\x33\x2e\x34\x32\x32\x63\x30\x2e\x31\x34\x35\x2c\
\x30\x2e\x32\x38\x39\x2c\x30\x2e\x30\x34\x38\x2c\x30\x2e\x34\x38\
\x39\x2d\x30\x2e\x32\x38\x39\x2c\x30\x2e\x36\x30\x32\x6c\x2d\x32\
\x2e\x36\x39\x38\x2c\x30\x2e\x38\x39\x32\x63\x2d\x30\x2e\x33\x33\
\x38\x2c\x30\x2e\x31\x31\x32\x2d\x30\x2e\x35\x39\x35\x2c\x30\x2d\
\x30\x2e\x37\x37\x31\x2d\x30\x2e\x33\x33\x37\x0d\x0a\x09\x09\x6c\
\x2d\x30\x2e\x34\x33\x34\x2d\x30\x2e\x39\x31\x36\x63\x2d\x38\x2e\
\x30\x31\x36\x2c\x30\x2e\x36\x31\x2d\x31\x32\x2e\x34\x39\x37\x2c\
\x30\x2e\x39\x38\x38\x2d\x31\x33\x2e\x34\x34\x34\x2c\x31\x2e\x31\
\x33\x33\x6c\x2d\x30\x2e\x39\x34\x2c\x30\x2e\x32\x34\x63\x2d\x30\
\x2e\x33\x35\x34\x2c\x30\x2e\x30\x38\x2d\x30\x2e\x35\x36\x33\x2d\
\x30\x2e\x30\x35\x36\x2d\x30\x2e\x36\x32\x36\x2d\x30\x2e\x34\x30\
\x39\x6c\x2d\x30\x2e\x35\x37\x38\x2d\x32\x2e\x35\x33\x0d\x0a\x09\
\x09\x63\x2d\x30\x2e\x30\x36\x34\x2d\x30\x2e\x33\x35\x33\x2c\x30\
\x2e\x30\x34\x2d\x30\x2e\x36\x32\x36\x2c\x30\x2e\x33\x31\x33\x2d\
\x30\x2e\x38\x31\x38\x63\x2d\x30\x2e\x30\x34\x38\x2c\x30\x2e\x31\
\x36\x2c\x30\x2e\x33\x30\x36\x2d\x30\x2e\x31\x36\x2c\x31\x2e\x30\
\x36\x31\x2d\x30\x2e\x39\x36\x35\x63\x30\x2e\x33\x33\x37\x2d\x30\
\x2e\x33\x33\x36\x2c\x30\x2e\x38\x33\x35\x2d\x30\x2e\x39\x34\x37\
\x2c\x31\x2e\x34\x39\x34\x2d\x31\x2e\x38\x33\x6c\x31\x2e\x38\x33\
\x31\x2d\x32\x2e\x38\x31\x39\x68\x2d\x35\x2e\x37\x38\x33\x0d\x0a\
\x09\x09\x63\x2d\x30\x2e\x33\x36\x39\x2c\x30\x2d\x30\x2e\x35\x32\
\x39\x2d\x30\x2e\x31\x37\x37\x2d\x30\x2e\x34\x38\x31\x2d\x30\x2e\
\x35\x33\x6c\x30\x2e\x34\x35\x38\x2d\x32\x2e\x34\x38\x31\x63\x30\
\x2e\x30\x34\x38\x2d\x30\x2e\x33\x35\x34\x2c\x30\x2e\x32\x35\x37\
\x2d\x30\x2e\x35\x33\x2c\x30\x2e\x36\x32\x36\x2d\x30\x2e\x35\x33\
\x68\x38\x2e\x30\x34\x38\x76\x2d\x32\x2e\x37\x39\x35\x68\x2d\x36\
\x2e\x37\x39\x35\x0d\x0a\x09\x09\x63\x2d\x30\x2e\x33\x36\x39\x2c\
\x30\x2d\x30\x2e\x35\x33\x2d\x30\x2e\x31\x37\x37\x2d\x30\x2e\x34\
\x38\x31\x2d\x30\x2e\x35\x33\x6c\x30\x2e\x34\x33\x34\x2d\x32\x2e\
\x34\x35\x38\x63\x30\x2e\x30\x34\x38\x2d\x30\x2e\x33\x35\x33\x2c\
\x30\x2e\x32\x36\x35\x2d\x30\x2e\x35\x33\x2c\x30\x2e\x36\x35\x2d\
\x30\x2e\x35\x33\x68\x36\x2e\x31\x39\x32\x76\x2d\x32\x2e\x35\x33\
\x63\x30\x2d\x30\x2e\x33\x36\x39\x2c\x30\x2e\x31\x37\x37\x2d\x30\
\x2e\x35\x33\x2c\x30\x2e\x35\x33\x2d\x30\x2e\x34\x38\x32\x0d\x0a\
\x09\x09\x6c\x32\x2e\x36\x37\x34\x2c\x30\x2e\x34\x35\x38\x63\x30\
\x2e\x33\x35\x34\x2c\x30\x2e\x30\x34\x38\x2c\x30\x2e\x35\x33\x2c\
\x30\x2e\x32\x35\x37\x2c\x30\x2e\x35\x33\x2c\x30\x2e\x36\x32\x36\
\x76\x31\x2e\x39\x32\x37\x68\x37\x2e\x30\x33\x36\x63\x30\x2e\x33\
\x36\x39\x2c\x30\x2c\x30\x2e\x35\x32\x39\x2c\x30\x2e\x31\x37\x37\
\x2c\x30\x2e\x34\x38\x31\x2c\x30\x2e\x35\x33\x6c\x2d\x30\x2e\x34\
\x35\x38\x2c\x32\x2e\x34\x35\x38\x0d\x0a\x09\x09\x63\x2d\x30\x2e\
\x30\x34\x38\x2c\x30\x2e\x33\x35\x34\x2d\x30\x2e\x32\x35\x37\x2c\
\x30\x2e\x35\x33\x2d\x30\x2e\x36\x32\x36\x2c\x30\x2e\x35\x33\x68\
\x2d\x36\x2e\x34\x33\x34\x76\x32\x2e\x37\x39\x35\x68\x38\x2e\x37\
\x34\x36\x43\x31\x30\x36\x2e\x39\x38\x39\x2c\x31\x35\x2e\x31\x30\
\x35\x2c\x31\x30\x37\x2e\x31\x34\x39\x2c\x31\x35\x2e\x32\x38\x32\
\x2c\x31\x30\x37\x2e\x31\x30\x32\x2c\x31\x35\x2e\x36\x33\x36\x7a\
\x22\x2f\x3e\x0d\x0a\x09\x3c\x70\x61\x74\x68\x20\x66\x69\x6c\x6c\
\x3d\x22\x23\x46\x46\x46\x46\x46\x46\x22\x20\x64\x3d\x22\x4d\x31\
\x31\x35\x2e\x31\x39\x37\x2c\x31\x31\x2e\x33\x37\x31\x63\x30\x2e\
\x33\x36\x39\x2c\x30\x2c\x30\x2e\x37\x30\x37\x2c\x30\x2e\x31\x30\
\x35\x2c\x31\x2e\x30\x31\x32\x2c\x30\x2e\x33\x31\x33\x6c\x31\x2e\
\x33\x30\x32\x2c\x30\x2e\x38\x39\x32\x63\x30\x2e\x33\x30\x35\x2c\
\x30\x2e\x32\x30\x39\x2c\x30\x2e\x34\x32\x35\x2c\x30\x2e\x35\x30\
\x36\x2c\x30\x2e\x33\x36\x31\x2c\x30\x2e\x38\x39\x31\x0d\x0a\x09\
\x09\x63\x2d\x30\x2e\x33\x37\x2c\x32\x2e\x35\x30\x36\x2d\x31\x2e\
\x31\x35\x36\x2c\x35\x2e\x30\x30\x34\x2d\x32\x2e\x33\x36\x31\x2c\
\x37\x2e\x34\x39\x34\x63\x2d\x31\x2e\x31\x38\x39\x2c\x32\x2e\x34\
\x37\x33\x2d\x32\x2e\x34\x39\x38\x2c\x34\x2e\x32\x35\x36\x2d\x33\
\x2e\x39\x32\x38\x2c\x35\x2e\x33\x34\x39\x63\x2d\x30\x2e\x32\x35\
\x37\x2c\x30\x2e\x32\x30\x38\x2d\x30\x2e\x34\x39\x38\x2c\x30\x2e\
\x31\x37\x37\x2d\x30\x2e\x37\x32\x33\x2d\x30\x2e\x30\x39\x37\x6c\
\x2d\x31\x2e\x37\x33\x34\x2d\x32\x2e\x31\x39\x32\x0d\x0a\x09\x09\
\x63\x2d\x30\x2e\x32\x32\x36\x2d\x30\x2e\x32\x37\x32\x2d\x30\x2e\
\x32\x30\x31\x2d\x30\x2e\x35\x32\x32\x2c\x30\x2e\x30\x37\x32\x2d\
\x30\x2e\x37\x34\x37\x63\x32\x2e\x30\x38\x38\x2d\x31\x2e\x34\x31\
\x33\x2c\x33\x2e\x35\x36\x35\x2d\x33\x2e\x38\x36\x33\x2c\x34\x2e\
\x34\x33\x33\x2d\x37\x2e\x33\x34\x39\x63\x30\x2e\x31\x36\x31\x2d\
\x30\x2e\x35\x39\x34\x2c\x30\x2e\x30\x30\x39\x2d\x30\x2e\x38\x39\
\x32\x2d\x30\x2e\x34\x35\x37\x2d\x30\x2e\x38\x39\x32\x68\x2d\x33\
\x2e\x33\x35\x0d\x0a\x09\x09\x63\x2d\x30\x2e\x33\x36\x39\x2c\x30\
\x2d\x30\x2e\x35\x33\x2d\x30\x2e\x31\x37\x37\x2d\x30\x2e\x34\x38\
\x31\x2d\x30\x2e\x35\x33\x6c\x30\x2e\x34\x35\x37\x2d\x32\x2e\x34\
\x38\x31\x63\x30\x2e\x30\x34\x39\x2d\x30\x2e\x33\x35\x33\x2c\x30\
\x2e\x32\x35\x37\x2d\x30\x2e\x35\x33\x2c\x30\x2e\x36\x32\x37\x2d\
\x30\x2e\x35\x33\x4c\x31\x31\x35\x2e\x31\x39\x37\x2c\x31\x31\x2e\
\x33\x37\x31\x7a\x20\x4d\x31\x32\x36\x2e\x38\x31\x31\x2c\x31\x30\
\x2e\x31\x39\x0d\x0a\x09\x09\x63\x30\x2e\x32\x30\x39\x2d\x30\x2e\
\x32\x37\x33\x2c\x30\x2e\x34\x34\x32\x2d\x30\x2e\x32\x38\x31\x2c\
\x30\x2e\x36\x39\x39\x2d\x30\x2e\x30\x32\x34\x6c\x31\x2e\x39\x37\
\x36\x2c\x32\x63\x30\x2e\x32\x35\x37\x2c\x30\x2e\x32\x35\x37\x2c\
\x30\x2e\x32\x34\x31\x2c\x30\x2e\x35\x33\x2d\x30\x2e\x30\x34\x38\
\x2c\x30\x2e\x38\x31\x39\x63\x2d\x31\x2e\x34\x33\x2c\x31\x2e\x33\
\x39\x37\x2d\x33\x2e\x30\x32\x31\x2c\x32\x2e\x37\x36\x33\x2d\x34\
\x2e\x37\x37\x31\x2c\x34\x2e\x30\x39\x36\x0d\x0a\x09\x09\x63\x31\
\x2e\x38\x36\x32\x2c\x32\x2e\x38\x34\x33\x2c\x34\x2e\x30\x38\x37\
\x2c\x34\x2e\x38\x32\x37\x2c\x36\x2e\x36\x37\x34\x2c\x35\x2e\x39\
\x35\x32\x63\x30\x2e\x33\x35\x33\x2c\x30\x2e\x31\x34\x35\x2c\x30\
\x2e\x34\x30\x31\x2c\x30\x2e\x33\x33\x37\x2c\x30\x2e\x31\x34\x35\
\x2c\x30\x2e\x35\x37\x38\x4c\x31\x32\x39\x2e\x31\x2c\x32\x35\x2e\
\x39\x63\x2d\x30\x2e\x32\x35\x37\x2c\x30\x2e\x32\x34\x2d\x30\x2e\
\x35\x33\x38\x2c\x30\x2e\x32\x36\x35\x2d\x30\x2e\x38\x34\x33\x2c\
\x30\x2e\x30\x37\x32\x0d\x0a\x09\x09\x63\x2d\x32\x2e\x32\x34\x39\
\x2d\x31\x2e\x31\x37\x33\x2d\x34\x2e\x32\x39\x37\x2d\x33\x2e\x34\
\x31\x34\x2d\x36\x2e\x31\x34\x35\x2d\x36\x2e\x37\x32\x33\x76\x35\
\x2e\x30\x31\x32\x63\x30\x2c\x30\x2e\x39\x39\x36\x2d\x30\x2e\x31\
\x30\x34\x2c\x31\x2e\x37\x36\x37\x2d\x30\x2e\x33\x31\x33\x2c\x32\
\x2e\x33\x31\x33\x63\x2d\x30\x2e\x32\x30\x38\x2c\x30\x2e\x35\x34\
\x36\x2d\x30\x2e\x35\x37\x38\x2c\x30\x2e\x39\x37\x32\x2d\x31\x2e\
\x31\x30\x37\x2c\x31\x2e\x32\x37\x37\x0d\x0a\x09\x09\x63\x2d\x30\
\x2e\x39\x33\x33\x2c\x30\x2e\x35\x32\x39\x2d\x32\x2e\x32\x35\x38\
\x2c\x30\x2e\x37\x38\x37\x2d\x33\x2e\x39\x37\x36\x2c\x30\x2e\x37\
\x37\x31\x63\x2d\x30\x2e\x34\x38\x32\x2c\x30\x2e\x30\x31\x36\x2d\
\x30\x2e\x37\x35\x36\x2d\x30\x2e\x31\x35\x33\x2d\x30\x2e\x38\x31\
\x39\x2d\x30\x2e\x35\x30\x36\x6c\x2d\x30\x2e\x36\x30\x33\x2d\x32\
\x2e\x38\x32\x63\x2d\x30\x2e\x30\x36\x34\x2d\x30\x2e\x33\x35\x33\
\x2c\x30\x2e\x30\x38\x2d\x30\x2e\x35\x32\x39\x2c\x30\x2e\x34\x33\
\x34\x2d\x30\x2e\x35\x32\x39\x0d\x0a\x09\x09\x63\x30\x2e\x31\x37\
\x37\x2c\x30\x2e\x30\x31\x36\x2c\x30\x2e\x38\x36\x37\x2c\x30\x2e\
\x30\x32\x33\x2c\x32\x2e\x30\x37\x32\x2c\x30\x2e\x30\x32\x33\x63\
\x30\x2e\x33\x38\x36\x2c\x30\x2c\x30\x2e\x35\x37\x38\x2d\x30\x2e\
\x31\x38\x34\x2c\x30\x2e\x35\x37\x38\x2d\x30\x2e\x35\x35\x34\x56\
\x36\x2e\x32\x38\x37\x63\x30\x2d\x30\x2e\x33\x37\x2c\x30\x2e\x31\
\x37\x37\x2d\x30\x2e\x35\x33\x2c\x30\x2e\x35\x33\x2d\x30\x2e\x34\
\x38\x32\x6c\x32\x2e\x36\x37\x34\x2c\x30\x2e\x34\x35\x38\x0d\x0a\
\x09\x09\x63\x30\x2e\x33\x35\x34\x2c\x30\x2e\x30\x34\x38\x2c\x30\
\x2e\x35\x33\x2c\x30\x2e\x32\x35\x37\x2c\x30\x2e\x35\x33\x2c\x30\
\x2e\x36\x32\x36\x76\x34\x2e\x37\x39\x35\x63\x30\x2e\x32\x34\x31\
\x2c\x30\x2e\x37\x35\x35\x2c\x30\x2e\x35\x37\x2c\x31\x2e\x36\x31\
\x34\x2c\x30\x2e\x39\x38\x38\x2c\x32\x2e\x35\x37\x38\x43\x31\x32\
\x34\x2e\x37\x33\x39\x2c\x31\x32\x2e\x37\x33\x36\x2c\x31\x32\x35\
\x2e\x39\x37\x36\x2c\x31\x31\x2e\x33\x37\x39\x2c\x31\x32\x36\x2e\
\x38\x31\x31\x2c\x31\x30\x2e\x31\x39\x7a\x22\x2f\x3e\x0d\x0a\x09\
\x3c\x70\x61\x74\x68\x20\x66\x69\x6c\x6c\x3d\x22\x23\x46\x46\x46\
\x46\x46\x46\x22\x20\x64\x3d\x22\x4d\x31\x34\x30\x2e\x38\x35\x38\
\x2c\x36\x2e\x35\x35\x32\x63\x30\x2e\x33\x30\x35\x2d\x30\x2e\x31\
\x34\x35\x2c\x30\x2e\x35\x36\x32\x2d\x30\x2e\x30\x37\x32\x2c\x30\
\x2e\x37\x37\x31\x2c\x30\x2e\x32\x31\x37\x6c\x31\x2e\x34\x39\x34\
\x2c\x32\x2e\x31\x39\x33\x63\x30\x2e\x32\x30\x39\x2c\x30\x2e\x32\
\x38\x39\x2c\x30\x2e\x31\x36\x2c\x30\x2e\x34\x38\x32\x2d\x30\x2e\
\x31\x34\x35\x2c\x30\x2e\x35\x37\x38\x0d\x0a\x09\x09\x63\x2d\x30\
\x2e\x39\x36\x34\x2c\x30\x2e\x33\x37\x2d\x32\x2e\x33\x36\x39\x2c\
\x30\x2e\x37\x36\x33\x2d\x34\x2e\x32\x31\x37\x2c\x31\x2e\x31\x38\
\x31\x63\x2d\x30\x2e\x36\x31\x2c\x30\x2e\x31\x32\x39\x2d\x30\x2e\
\x39\x31\x35\x2c\x30\x2e\x34\x32\x36\x2d\x30\x2e\x39\x31\x35\x2c\
\x30\x2e\x38\x39\x32\x76\x32\x2e\x30\x39\x36\x68\x34\x2e\x39\x31\
\x35\x63\x30\x2e\x33\x36\x39\x2c\x30\x2c\x30\x2e\x35\x32\x31\x2c\
\x30\x2e\x31\x37\x37\x2c\x30\x2e\x34\x35\x38\x2c\x30\x2e\x35\x33\
\x0d\x0a\x09\x09\x6c\x2d\x30\x2e\x34\x31\x2c\x32\x2e\x33\x31\x33\
\x63\x2d\x30\x2e\x30\x36\x34\x2c\x30\x2e\x33\x35\x34\x2d\x30\x2e\
\x32\x38\x31\x2c\x30\x2e\x35\x33\x2d\x30\x2e\x36\x35\x2c\x30\x2e\
\x35\x33\x68\x2d\x34\x2e\x33\x31\x33\x76\x33\x2e\x36\x31\x34\x63\
\x30\x2c\x30\x2e\x34\x36\x37\x2c\x30\x2e\x32\x32\x35\x2c\x30\x2e\
\x36\x35\x38\x2c\x30\x2e\x36\x37\x35\x2c\x30\x2e\x35\x37\x38\x6c\
\x34\x2e\x32\x34\x2d\x30\x2e\x37\x37\x31\x0d\x0a\x09\x09\x63\x30\
\x2e\x33\x35\x34\x2d\x30\x2e\x30\x36\x33\x2c\x30\x2e\x34\x39\x38\
\x2c\x30\x2e\x30\x38\x31\x2c\x30\x2e\x34\x33\x34\x2c\x30\x2e\x34\
\x33\x35\x6c\x2d\x30\x2e\x34\x38\x31\x2c\x32\x2e\x34\x33\x34\x63\
\x2d\x30\x2e\x30\x36\x34\x2c\x30\x2e\x33\x35\x33\x2d\x30\x2e\x33\
\x34\x36\x2c\x30\x2e\x35\x37\x38\x2d\x30\x2e\x38\x34\x34\x2c\x30\
\x2e\x36\x37\x35\x63\x2d\x32\x2e\x32\x39\x37\x2c\x30\x2e\x34\x38\
\x31\x2d\x34\x2e\x30\x32\x33\x2c\x30\x2e\x38\x37\x35\x2d\x35\x2e\
\x31\x38\x31\x2c\x31\x2e\x31\x38\x0d\x0a\x09\x09\x63\x2d\x31\x2e\
\x31\x35\x36\x2c\x30\x2e\x32\x38\x39\x2d\x31\x2e\x38\x35\x34\x2c\
\x30\x2e\x35\x30\x36\x2d\x32\x2e\x30\x39\x36\x2c\x30\x2e\x36\x35\
\x63\x2d\x30\x2e\x32\x32\x36\x2c\x30\x2e\x31\x31\x33\x2d\x30\x2e\
\x33\x37\x2d\x30\x2e\x30\x30\x38\x2d\x30\x2e\x34\x33\x34\x2d\x30\
\x2e\x33\x36\x31\x6c\x2d\x30\x2e\x35\x35\x35\x2d\x33\x2e\x30\x31\
\x32\x63\x2d\x30\x2e\x30\x36\x34\x2d\x30\x2e\x33\x35\x33\x2c\x30\
\x2e\x30\x34\x39\x2d\x30\x2e\x36\x35\x38\x2c\x30\x2e\x33\x33\x38\
\x2d\x30\x2e\x39\x31\x35\x0d\x0a\x09\x09\x63\x30\x2e\x32\x32\x35\
\x2c\x30\x2e\x30\x39\x37\x2c\x30\x2e\x33\x33\x37\x2d\x30\x2e\x32\
\x34\x31\x2c\x30\x2e\x33\x33\x37\x2d\x31\x2e\x30\x31\x33\x56\x39\
\x2e\x38\x35\x33\x63\x30\x2d\x30\x2e\x38\x33\x35\x2c\x30\x2e\x34\
\x37\x34\x2d\x31\x2e\x33\x35\x37\x2c\x31\x2e\x34\x32\x32\x2d\x31\
\x2e\x35\x36\x36\x43\x31\x33\x37\x2e\x34\x36\x39\x2c\x37\x2e\x39\
\x30\x31\x2c\x31\x33\x39\x2e\x31\x38\x38\x2c\x37\x2e\x33\x32\x33\
\x2c\x31\x34\x30\x2e\x38\x35\x38\x2c\x36\x2e\x35\x35\x32\x7a\x0d\
\x0a\x09\x09\x20\x4d\x31\x35\x33\x2e\x33\x33\x39\x2c\x37\x2e\x33\
\x34\x37\x63\x30\x2e\x38\x38\x34\x2c\x30\x2c\x31\x2e\x33\x32\x35\
\x2c\x30\x2e\x34\x34\x32\x2c\x31\x2e\x33\x32\x35\x2c\x31\x2e\x33\
\x32\x35\x56\x32\x31\x2e\x34\x39\x63\x30\x2c\x31\x2e\x36\x35\x34\
\x2d\x30\x2e\x34\x31\x38\x2c\x32\x2e\x37\x33\x39\x2d\x31\x2e\x32\
\x35\x33\x2c\x33\x2e\x32\x35\x33\x63\x2d\x30\x2e\x37\x37\x31\x2c\
\x30\x2e\x34\x38\x31\x2d\x31\x2e\x37\x37\x34\x2c\x30\x2e\x37\x32\
\x34\x2d\x33\x2e\x30\x31\x32\x2c\x30\x2e\x37\x32\x34\x0d\x0a\x09\
\x09\x63\x2d\x30\x2e\x34\x38\x31\x2c\x30\x2d\x30\x2e\x37\x36\x33\
\x2d\x30\x2e\x31\x37\x38\x2d\x30\x2e\x38\x34\x33\x2d\x30\x2e\x35\
\x33\x31\x6c\x2d\x30\x2e\x35\x37\x38\x2d\x32\x2e\x36\x32\x36\x63\
\x2d\x30\x2e\x30\x38\x31\x2d\x30\x2e\x33\x35\x33\x2c\x30\x2e\x30\
\x35\x36\x2d\x30\x2e\x35\x32\x31\x2c\x30\x2e\x34\x30\x39\x2d\x30\
\x2e\x35\x30\x36\x63\x2d\x30\x2e\x30\x38\x2c\x30\x2c\x30\x2e\x33\
\x35\x34\x2c\x30\x2c\x31\x2e\x33\x30\x31\x2c\x30\x0d\x0a\x09\x09\
\x63\x30\x2e\x32\x38\x39\x2c\x30\x2c\x30\x2e\x34\x33\x34\x2d\x30\
\x2e\x31\x34\x35\x2c\x30\x2e\x34\x33\x34\x2d\x30\x2e\x34\x33\x34\
\x76\x2d\x39\x2e\x38\x35\x34\x63\x30\x2d\x30\x2e\x34\x36\x36\x2d\
\x30\x2e\x32\x33\x32\x2d\x30\x2e\x36\x39\x39\x2d\x30\x2e\x36\x39\
\x38\x2d\x30\x2e\x36\x39\x39\x68\x2d\x31\x2e\x37\x38\x33\x63\x2d\
\x30\x2e\x34\x36\x36\x2c\x30\x2d\x30\x2e\x36\x39\x38\x2c\x30\x2e\
\x32\x33\x33\x2d\x30\x2e\x36\x39\x38\x2c\x30\x2e\x36\x39\x39\x56\
\x32\x38\x2e\x30\x32\x0d\x0a\x09\x09\x63\x30\x2c\x30\x2e\x33\x37\
\x2d\x30\x2e\x31\x37\x38\x2c\x30\x2e\x35\x33\x31\x2d\x30\x2e\x35\
\x33\x2c\x30\x2e\x34\x38\x32\x6c\x2d\x32\x2e\x34\x38\x31\x2d\x30\
\x2e\x34\x35\x37\x63\x2d\x30\x2e\x33\x35\x34\x2d\x30\x2e\x30\x34\
\x39\x2d\x30\x2e\x35\x33\x2d\x30\x2e\x32\x35\x38\x2d\x30\x2e\x35\
\x33\x2d\x30\x2e\x36\x32\x37\x56\x38\x2e\x36\x37\x32\x63\x30\x2d\
\x30\x2e\x38\x38\x33\x2c\x30\x2e\x34\x34\x31\x2d\x31\x2e\x33\x32\
\x35\x2c\x31\x2e\x33\x32\x35\x2d\x31\x2e\x33\x32\x35\x48\x31\x35\
\x33\x2e\x33\x33\x39\x7a\x22\x0d\x0a\x09\x09\x2f\x3e\x0d\x0a\x3c\
\x2f\x67\x3e\x0d\x0a\x3c\x2f\x73\x76\x67\x3e\x0d\x0a\
\x00\x00\x02\x17\
\x00\
\x00\x07\x01\x78\x9c\xe5\x55\xcd\x8a\xdb\x30\x10\x7e\x95\x41\xb9\
\xec\x82\x63\xcb\x3f\xb2\xa2\x10\x07\xb6\x21\x39\xa5\xf4\xd0\x5b\
\x2f\xc5\xbb\x56\x6c\xb3\x8e\x1d\x6c\x25\xce\x3e\x44\xe9\xa9\xb7\
\xbe\x5e\xa1\x8f\xb1\x23\x39\xbb\x4d\x42\x02\x66\x61\x0f\x4b\x65\
\xf4\x33\x9a\x6f\x46\xf3\x7d\x12\x78\xd2\xec\x52\xd8\xaf\x8b\xb2\
\x89\x48\xa6\xd4\x66\xec\x38\x6d\xdb\xda\xad\x6f\x57\x75\xea\x78\
\x94\x52\x07\x11\xa4\x83\x8c\xf7\x45\x5e\x3e\x5e\x02\xba\x42\x08\
\xc7\x78\x09\xb4\x79\xa2\xb2\x88\x08\x4a\x09\x64\x32\x4f\x33\x15\
\x91\x50\x1b\xbb\x5c\xb6\x9f\xaa\x7d\x44\x28\x50\x40\x37\x98\xdd\
\x55\x5e\x14\x11\x29\xab\x52\x92\xe9\x24\x91\xab\x66\x3a\xa9\xe5\
\x83\x82\x3c\x89\xc8\x26\x56\xd9\x77\xc4\xe8\x18\x02\x4f\x66\xbc\
\x9a\xde\x99\x4e\x9c\x2e\x3e\x85\x6a\x13\x3f\xe4\x0a\x03\x5c\x02\
\xaa\x8e\xcb\x66\x55\xd5\xeb\x88\x98\x65\x11\x2b\x79\x83\x25\xdc\
\x02\xd4\x95\xea\x8c\x80\x51\xf0\x29\xbd\x25\x87\xc3\xbb\xa2\x06\
\x0b\xd3\xc8\xdb\xd3\xf5\xae\x7c\x1d\x37\x8f\x86\xf2\x7d\x3a\xd4\
\xeb\x17\x5d\xda\x2c\x57\x5a\x98\x6d\x23\xc1\xe8\x3b\xce\x6a\xb9\
\xc2\xd2\x0e\xd2\x20\x67\x74\xe1\xa8\x83\x34\x73\x3d\x47\x64\x5b\
\x17\x37\x83\x43\x2a\xac\x62\x3a\xd1\x70\x93\xff\xef\xcf\xdf\x7f\
\x7e\xfc\xea\xd2\x0f\xeb\x6d\x21\x23\x22\x77\xb2\xac\x92\x84\x40\
\xa3\x9e\xb4\xad\x5d\xe3\xc1\x9c\xe9\xef\x2a\x5f\xef\x84\xb0\x4b\
\x19\x78\x42\x13\x3e\x96\x0a\x8f\xfb\x4c\x2d\x36\xa2\x4b\xcf\xfd\
\x37\xd3\x65\xd7\xd1\xfe\x06\x86\xfc\xdb\xee\xcb\x35\xd7\xd5\x23\
\x96\xd9\xfe\x88\xb9\x21\x13\x82\xfb\xbe\xe0\x34\x94\x43\xd7\x3f\
\xcb\x17\x08\x5b\x1c\x35\x8e\xd9\x6d\x7a\xd2\x98\x3e\xed\x55\xc6\
\x59\xb5\xbe\xcf\x4b\x99\xc0\xd7\x2c\xde\xc8\x3e\x72\x06\xf3\xbb\
\xf9\xa2\x27\xbb\x5e\xd5\x5c\x90\xda\xa3\x4b\x7c\x5d\x2f\x53\x38\
\x33\x86\x1d\x0a\x18\x09\x6e\xfb\xae\x45\x71\x11\xa0\xf8\xa1\x45\
\x67\x7a\x1f\x37\x0e\x00\x44\x2f\x75\xe0\xe1\x46\x5e\x79\x7e\xd9\
\xc5\x45\x0f\x76\x0b\x81\xdf\xdd\x15\x76\x1c\xf8\x59\xf5\xc1\x31\
\x5b\xdf\x66\xd8\x03\x71\xd2\xc2\x0b\xfc\xb8\x85\xd0\x19\xb7\x5c\
\x9b\x71\x60\x76\xe0\x5b\x26\x18\xb9\xe8\x1d\xc3\xc5\xb8\xa8\xc1\
\xe1\x03\x43\x08\x18\x17\x37\x38\x3e\x33\x41\x58\x4e\xe7\x32\xf9\
\xde\x83\xb0\xfb\xdf\x31\xf6\x3e\x02\x63\x27\x3d\xeb\xf8\x67\x9b\
\x3e\x03\xf1\x7a\xec\xab\
"
# Auto-generated Qt resource name table (resource-compiler output — see the
# qUnregisterResourceData call below). Encodes the resource tree names in
# UTF-16-BE with length/hash prefixes: "img", "logo-horizontal.svg" and a
# second .svg entry. Do not edit by hand; regenerate from the .qrc file.
qt_resource_name = b"\
\x00\x03\
\x00\x00\x70\x37\
\x00\x69\
\x00\x6d\x00\x67\
\x00\x13\
\x0a\x25\xf2\x27\
\x00\x6c\
\x00\x6f\x00\x67\x00\x6f\x00\x2d\x00\x68\x00\x6f\x00\x72\x00\x69\x00\x7a\x00\x6f\x00\x6e\x00\x74\x00\x61\x00\x6c\x00\x2e\x00\x73\
\x00\x76\x00\x67\
\x00\x06\
\x03\x32\x4a\x27\
\x80\xcc\
\x66\x6f\x00\x2e\x00\x73\x00\x76\x00\x67\
"
def qCleanupResources():
    """Unregister this module's embedded Qt resources from QtCore.

    Counterpart of the register call emitted by the Qt resource compiler;
    uses the module-level rcc_version / qt_resource_* blobs (the struct and
    version are defined earlier in this module, outside this excerpt).
    """
    QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
155,955 | import time
import random
def base36_encode(number):
    """Convert a non-negative integer to a base36 string.

    Args:
        number: non-negative int to encode.

    Returns:
        Base36 representation using digits 0-9 and lowercase a-z.
        ``"0"`` for an input of 0 (the original returned ``""``, which is
        not a valid base36 numeral).

    Raises:
        ValueError: if ``number`` is negative (the original looped forever
            because ``divmod`` with floor division never reaches 0).
    """
    if number < 0:
        raise ValueError("number must be non-negative")
    alphabet = '0123456789abcdefghijklmnopqrstuvwxyz'
    if number == 0:
        return alphabet[0]
    digits = []
    while number:
        number, rem = divmod(number, 36)
        digits.append(alphabet[rem])
    return ''.join(reversed(digits))
def create_s_v_web_id():
    """Build a ``verify_<ts36>_<uuid-like>`` verification token.

    Mirrors the upstream JS generator: a millisecond timestamp encoded in
    base36, followed by a 36-character tail shaped like a v4 UUID with '_'
    in place of '-' — fixed '_' at indices 8/13/18/23, the literal '4' at
    index 14, and the index-19 slot restricted via ``3 & o | 8``.
    """
    charset = list("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz")
    size = len(charset)
    ts36 = base36_encode(int(time.time() * 1000))  # ms timestamp, base36
    cells = [''] * 36
    cells[8] = cells[13] = cells[18] = cells[23] = "_"
    cells[14] = "4"
    for idx, cell in enumerate(cells):
        if cell:
            continue  # already fixed above
        pick = int(random.random() * size)
        cells[idx] = charset[(3 & pick | 8) if idx == 19 else pick]
    return "verify_" + ts36 + "_" + "".join(cells)
155,956 | import time
import execjs
import requests
from flask import Flask
from flask import request
from flask import jsonify
from urllib.parse import urlencode
from urllib.parse import unquote
from urllib.parse import parse_qsl
def index():
    """Root endpoint: return service status, current time and route hints."""
    now = time.localtime()
    tips = {
        "status_code": "-1",
        "time": {
            "strftime": time.strftime("%Y-%m-%d %H:%M:%S", now),
            "timestamp": int(round(time.time() * 1000))
        },
        "path": {
            0: "/xg/path/?url=",
            2: "/x-tt-params/path"
        }
    }
    print(tips)  # server-side trace of what was returned
    return jsonify(tips)
155,957 | import time
import execjs
import requests
from flask import Flask
from flask import request
from flask import jsonify
from urllib.parse import urlencode
from urllib.parse import unquote
from urllib.parse import parse_qsl
def xgpath():
    """X-Gorgon endpoint.

    Reads the ``url`` query parameter, normalizes it through a
    parse/re-encode round trip, and delegates signing to ``server.getXG``.
    When ``url`` is missing or empty, returns bilingual usage instructions
    instead.
    """
    path = request.args.get('url', '')
    if not path:
        # No target query string supplied — explain the expected format.
        tips = {
            "status_code": "-3",
            "time": {
                "strftime": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
                "timestamp": int(round(time.time() * 1000))
            },
            "message": {
                0: "The key url cannot be empty and the need for url encoding, The '&' sign needs to be escaped to '%26', Use urllib.parse.quote(url) to escape. Example:/xg/path/?url=aid=6383%26sec_user_id=xxx%26max_cursor=0%26count=10",
                1: "url参数不能为空,且需要注意传入值中的“&”需要转义成“%26”,使用urllib.parse.quote(url)转义. 例如:/xg/path/?url=aid=6383%26sec_user_id=xxx%26max_cursor=0%26count=10"
            }
        }
        print(tips)
        return jsonify(tips)
    # Query string -> dict, then back to a query string (keeping '=' unescaped).
    params = dict(parse_qsl(path))
    url_path = urlencode(params, safe="=")
    return server.getXG(url_path, params)
155,958 | import time
import execjs
import requests
from flask import Flask
from flask import request
from flask import jsonify
from urllib.parse import urlencode
from urllib.parse import unquote
from urllib.parse import parse_qsl
def xttppath():
    """x-tt-params endpoint.

    Expects a raw-JSON dict in the request body and delegates to
    ``server.getxttparams``. For a missing or invalid body, returns
    bilingual usage instructions.

    Fix: the original used ``try: path = request.json; except: pass`` —
    if parsing raised, ``path`` was never bound and the following
    ``if not path:`` crashed with NameError. ``path`` is now pre-initialized
    and the bare ``except`` narrowed to ``Exception``.
    """
    path = None
    try:
        path = request.json
    except Exception:
        # Body absent or not valid JSON; fall through to the tips reply.
        pass
    if not path:
        tips = {
            "status_code": "-5",
            "time": {
                "strftime": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
                "timestamp": int(round(time.time() * 1000))
            },
            "message": {
                0: "Body uses raw JSON format to pass dictionary parameters, such as %s" % '{"aid": 1988,"app_name": "tiktok_web","channel": "tiktok_web".........}',
                1: "body中使用raw json格式传递字典参数,如%s" % '{"aid": 1988,"app_name": "tiktok_web","channel": "tiktok_web".........}'
            }
        }
        print(tips)
        return jsonify(tips)
    return server.getxttparams(path)
155,959 | import time
import execjs
import requests
from flask import Flask
from flask import request
from flask import jsonify
from urllib.parse import urlencode
from urllib.parse import unquote
from urllib.parse import parse_qsl
def ttwid():
    """Generate and return a fresh ttwid value (delegates to server.gen_ttwid)."""
    return server.gen_ttwid()
155,972 | import json
from tqdm import tqdm
from utils.models_utils import save_tuned_G
from datasets.image_list_dataset import ImageListDataset
from training.coaches.coach import Coach
from utils.data_utils import make_dataset
import os
import click
import numpy as np
import torch
import wandb
from PIL import Image
from torchvision import transforms
from configs import paths_config, global_config, hyperparameters
from utils.alignment import crop_faces, calc_alignment_coefficients
def save_image(image: Image.Image, output_folder, image_name, image_index, ext='jpg'):
    """Write ``image`` to ``<output_folder>/<image_name>/<image_index>.<ext>``.

    JPEG has no alpha channel, so the image is converted to RGB first when a
    jpeg/jpg extension is requested. The per-image subfolder is created on
    demand.
    """
    if ext in ('jpeg', 'jpg'):
        image = image.convert('RGB')
    target_dir = os.path.join(output_folder, image_name)
    os.makedirs(target_dir, exist_ok=True)
    image.save(os.path.join(target_dir, f'{image_index}.{ext}'))
def paste_image(coeffs, img, orig_image):
    """Perspective-warp ``img`` by ``coeffs`` onto a copy of ``orig_image``.

    The warped layer is pasted with its own alpha as mask, so transparent
    regions keep the original pixels. Returns a new RGBA image; neither
    input is modified.
    """
    base = orig_image.copy().convert('RGBA')
    warped = img.convert('RGBA').transform(
        orig_image.size, Image.PERSPECTIVE, coeffs, Image.BILINEAR)
    base.paste(warped, (0, 0), mask=warped)
    return base
def to_pil_image(tensor: torch.Tensor) -> Image.Image:
    """Convert the first item of a [-1, 1] NCHW batch tensor to a PIL image.

    Moves channels last, rescales [-1, 1] -> [0, 255], then rounds and
    clamps to uint8 before wrapping with PIL.
    """
    arr = tensor[0].permute(1, 2, 0)
    arr = (arr + 1) * 255 / 2
    arr = np.rint(arr.detach().cpu().numpy()).clip(0, 255).astype(np.uint8)
    return Image.fromarray(arr)
def save_tuned_G(generator, pivots, quads, run_id):
    """Checkpoint a fine-tuned generator together with its pivots and quads.

    Deep-copies the generator and pivots onto the CPU first (so the saved
    file does not pin GPU tensors), then writes a single dict to
    ``<checkpoints_dir>/model_<run_id>.pt``.
    """
    cpu_generator = copy.deepcopy(generator).cpu()
    cpu_pivots = copy.deepcopy(pivots).cpu()
    payload = {'generator': cpu_generator, 'pivots': cpu_pivots, 'quads': quads}
    torch.save(payload, f'{paths_config.checkpoints_dir}/model_{run_id}.pt')
class ImageListDataset(Dataset):
    """Dataset over in-memory images, yielding ``(name, image)`` pairs.

    Args:
        images: sequence of image objects exposing ``.convert('RGB')``
            (e.g. PIL images).
        source_transform: optional callable applied to each RGB image.
        names: optional per-image names; defaults to the string index.
    """

    def __init__(self, images, source_transform=None, names=None):
        self.images = images
        self.source_transform = source_transform
        self.names = names

    def __len__(self):
        return len(self.images)

    def __getitem__(self, index):
        if self.names is None:
            name = str(index)
        else:
            name = self.names[index]
        sample = self.images[index].convert('RGB')
        if self.source_transform:
            sample = self.source_transform(sample)
        return name, sample
class Coach:
    """PTI-style fine-tuning of a StyleGAN generator around per-image pivot latents.

    First inverts every dataset image into W(+) space (e4e encoder or direct
    latent optimization), then fine-tunes the generator so each pivot
    reconstructs its image (L2 + LPIPS, optional locality regularization).

    NOTE(review): several names used below (`initialize_e4e_wplus`, `LPIPS`,
    `load_old_G`, `SpaceRegularizer`, `w_projector`, `trange`, `defaultdict`,
    `log_images_from_w`) are not imported in this chunk — presumably provided
    by the original module's imports; confirm.
    """
    def __init__(self, dataset, use_wandb):
        # dataset yields (image_name, image_tensor) pairs; use_wandb toggles logging.
        self.use_wandb = use_wandb
        self.dataset = dataset
        # e4e encoder is only needed for the encoder-based first inversion.
        if hyperparameters.first_inv_type == 'e4e':
            self.e4e_inversion_net = initialize_e4e_wplus()
        self.e4e_image_transform = transforms.Resize((256, 256))
        # Initialize loss
        self.lpips_loss = LPIPS(net=hyperparameters.lpips_type).to(global_config.device).eval()
        self.restart_training()
        # Initialize checkpoint dir
        self.checkpoint_dir = paths_config.checkpoints_dir
        os.makedirs(self.checkpoint_dir, exist_ok=True)
    def restart_training(self):
        """Reload a fresh generator pair (trainable + frozen reference) and a new optimizer."""
        # Initialize networks
        self.G = load_old_G()
        self.G.requires_grad_(True)
        # Frozen copy used as the locality-regularization reference.
        self.original_G = load_old_G()
        self.space_regularizer = SpaceRegularizer(self.original_G, self.lpips_loss)
        self.optimizer = self.configure_optimizers()
    def get_inversion(self, image_name, image):
        """Invert one image to a pivot latent (e4e encoder or direct projection)."""
        if hyperparameters.first_inv_type == 'e4e':
            w_pivot = self.get_e4e_inversion(image)
        else:
            # Projection expects a [0, 255] image without a batch dimension.
            id_image = torch.squeeze((image.to(global_config.device) + 1) / 2) * 255
            w_pivot = w_projector.project(self.G, id_image, device=torch.device(global_config.device),
                                          w_avg_samples=600,
                                          num_steps=hyperparameters.first_inv_steps, w_name=image_name,
                                          use_wandb=self.use_wandb)
        w_pivot = w_pivot.to(global_config.device)
        return w_pivot
    def configure_optimizers(self):
        """Adam over all generator parameters with the PTI learning rate and beta1."""
        optimizer = torch.optim.Adam(self.G.parameters(), betas=(hyperparameters.pti_adam_beta1, 0.999),
                                     lr=hyperparameters.pti_learning_rate)
        return optimizer
    def calc_loss(self, generated_images, real_images, log_name, new_G, use_ball_holder, w_batch):
        """Weighted L2 + LPIPS reconstruction loss, plus optional locality regularization.

        Returns (total_loss, l2_loss_val, loss_lpips).
        NOTE(review): l2_loss_val / loss_lpips are unbound if the corresponding
        lambda is 0 — callers appear to rely on both lambdas being positive.
        """
        loss = 0.0
        if hyperparameters.pt_l2_lambda > 0:
            l2_loss_val = l2_loss.l2_loss(generated_images, real_images)
            if self.use_wandb:
                wandb.log({f'losses/MSE_loss_val_{log_name}': l2_loss_val.detach().cpu()}, commit=False)
            loss += l2_loss_val * hyperparameters.pt_l2_lambda
        if hyperparameters.pt_lpips_lambda > 0:
            loss_lpips = self.lpips_loss(generated_images, real_images)
            loss_lpips = torch.squeeze(loss_lpips)
            if self.use_wandb:
                wandb.log({f'losses/LPIPS_loss_val_{log_name}': loss_lpips.detach().cpu()}, commit=False)
            loss += loss_lpips * hyperparameters.pt_lpips_lambda
        if use_ball_holder and hyperparameters.use_locality_regularization:
            ball_holder_loss_val = self.space_regularizer.space_regularizer_loss(new_G, w_batch, log_name,
                                                                                 use_wandb=self.use_wandb)
            loss += ball_holder_loss_val
        return loss, l2_loss_val, loss_lpips
    def forward(self, w):
        """Synthesize images from latent w with the current (trainable) generator."""
        generated_images = self.G.synthesis(w, noise_mode='const', force_fp32=True)
        return generated_images
    def get_e4e_inversion(self, image):
        """Encode a single image to W+ with the e4e inversion network."""
        new_image = self.e4e_image_transform(image).to(global_config.device)
        _, w = self.e4e_inversion_net(new_image.unsqueeze(0), randomize_noise=False, return_latents=True, resize=False,
                                      input_code=False)
        if self.use_wandb:
            log_image_from_w(w, self.G, 'First e4e inversion')
        return w
    def train(self):
        """Invert every image, then fine-tune the generator on all pivots.

        Returns the concatenated pivot latents (one row per image).
        """
        self.G.synthesis.train()
        self.G.mapping.train()
        use_ball_holder = True
        w_pivots = []
        images = []
        print('Calculating initial inversions')
        for fname, image in tqdm(self.dataset):
            image_name = fname
            w_pivot = self.get_inversion(image_name, image)
            w_pivots.append(w_pivot)
            images.append((image_name, image))
        self.G = self.G.to(global_config.device)
        print('Fine tuning generator')
        for step in trange(hyperparameters.max_pti_steps):
            step_loss_dict = defaultdict(list)
            t = (step + 1) / hyperparameters.max_pti_steps
            # Optional cosine ramp-up/ramp-down learning-rate schedule.
            if hyperparameters.use_lr_ramp:
                lr_ramp = min(1.0, (1.0 - t) / hyperparameters.lr_rampdown_length)
                lr_ramp = 0.5 - 0.5 * np.cos(lr_ramp * np.pi)
                lr_ramp = lr_ramp * min(1.0, t / hyperparameters.lr_rampup_length)
                lr = hyperparameters.pti_learning_rate * lr_ramp
                for param_group in self.optimizer.param_groups:
                    param_group['lr'] = lr
            for data, w_pivot in zip(images, w_pivots):
                image_name, image = data
                image = image.unsqueeze(0)
                real_images_batch = image.to(global_config.device)
                generated_images = self.forward(w_pivot)
                loss, l2_loss_val, loss_lpips = self.calc_loss(generated_images, real_images_batch, image_name,
                                                               self.G, use_ball_holder, w_pivot)
                step_loss_dict['loss'].append(loss.item())
                step_loss_dict['l2_loss'].append(l2_loss_val.item())
                step_loss_dict['loss_lpips'].append(loss_lpips.item())
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
                # Locality regularization is applied only every N-th global step.
                use_ball_holder = global_config.training_step % hyperparameters.locality_regularization_interval == 0
                global_config.training_step += 1
            log_dict = {}
            for key, losses in step_loss_dict.items():
                loss_mean = sum(losses) / len(losses)
                loss_max = max(losses)
                log_dict[f'losses_agg/{key}_mean'] = loss_mean
                log_dict[f'losses_agg/{key}_max'] = loss_max
            if self.use_wandb:
                wandb.log(log_dict)
        print('Finished training')
        if self.use_wandb:
            log_images_from_w(w_pivots, self.G, [image[0] for image in images])
        w_pivots = torch.cat(w_pivots)
        return w_pivots
def make_dataset(dir):
    """Collect (basename-without-extension, full path) pairs for the image files in `dir`, sorted by filename."""
    images = []
    assert os.path.isdir(dir), '%s is not a valid directory' % dir
    for entry in sorted(os.listdir(dir)):
        if not is_image_file(entry):
            continue
        full_path = os.path.join(dir, entry)
        stem = entry.split('.')[0]
        images.append((stem, full_path))
    return images
def crop_faces(IMAGE_SIZE, files, scale, center_sigma=0.0, xy_sigma=0.0, use_fa=False):
    """Detect a face per frame, temporally smooth the crop boxes, and crop all frames.

    Args:
        IMAGE_SIZE: output crop resolution.
        files: (name, path) pairs, in temporal order.
        scale: crop-box scale factor forwarded to compute_transform.
        center_sigma / xy_sigma: Gaussian sigma (in frames) for smoothing crop
            centers and half-axes over the sequence; 0 disables smoothing.
        use_fa: use the face_alignment package for landmarks instead of dlib.

    Returns:
        (crops, orig_images, quads) — cropped PIL images, the originals, and
        the per-frame crop quadrilaterals.

    NOTE(review): `compute_transform` and `gaussian_filter1d` are not imported
    in this chunk — presumably imported in the original module; confirm.
    """
    if use_fa:
        fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, flip_input=True)
        predictor = None
        detector = None
    else:
        fa = None
        predictor = dlib.shape_predictor(paths_config.shape_predictor_path)
        detector = dlib.get_frontal_face_detector()
    # c: crop center, x/y: half-axis vectors of the (possibly rotated) crop box.
    cs, xs, ys = [], [], []
    for _, path in tqdm(files):
        c, x, y = compute_transform(path, predictor, detector=detector,
                                    scale=scale, fa=fa)
        cs.append(c)
        xs.append(x)
        ys.append(y)
    cs = np.stack(cs)
    xs = np.stack(xs)
    ys = np.stack(ys)
    # Smooth crop parameters across frames to avoid jitter in the output video.
    if center_sigma != 0:
        cs = gaussian_filter1d(cs, sigma=center_sigma, axis=0)
    if xy_sigma != 0:
        xs = gaussian_filter1d(xs, sigma=xy_sigma, axis=0)
        ys = gaussian_filter1d(ys, sigma=xy_sigma, axis=0)
    # Four crop corners per frame, derived from center +- half-axes.
    quads = np.stack([cs - xs - ys, cs - xs + ys, cs + xs + ys, cs + xs - ys], axis=1)
    quads = list(quads)
    crops, orig_images = crop_faces_by_quads(IMAGE_SIZE, files, quads)
    return crops, orig_images, quads
def calc_alignment_coefficients(pa, pb):
    """Solve for the 8 perspective-transform coefficients mapping points `pa` to `pb`.

    Builds the standard homography normal equations from four point
    correspondences and returns (a, b, c, d, e, f, g, h) such that
    x' = (a*x + b*y + c) / (g*x + h*y + 1) and
    y' = (d*x + e*y + f) / (g*x + h*y + 1) — the coefficient layout expected
    by PIL's Image.transform(..., Image.PERSPECTIVE, ...).

    Args:
        pa: four (x, y) source points.
        pb: four (x, y) destination points.

    Returns:
        numpy array of shape (8,) with the coefficients.
    """
    matrix = []
    for p1, p2 in zip(pa, pb):
        matrix.append([p1[0], p1[1], 1, 0, 0, 0, -p2[0] * p1[0], -p2[0] * p1[1]])
        matrix.append([0, 0, 0, p1[0], p1[1], 1, -p2[1] * p1[0], -p2[1] * p1[1]])
    a = np.asarray(matrix, dtype=float)
    b = np.asarray(pb, dtype=float).reshape(8)
    # Normal equations (A^T A) x = A^T b — same computation as the previous
    # np.matrix/explicit-inverse formulation, but np.matrix is deprecated and
    # np.linalg.solve is numerically more stable than forming the inverse.
    res = np.linalg.solve(a.T @ a, a.T @ b)
    return res.reshape(8)
def _main(input_folder, output_folder, start_frame, end_frame, run_name,
          scale, num_pti_steps, l2_lambda, center_sigma, xy_sigma,
          use_fa, use_locality_reg, use_wandb, config, pti_learning_rate, pti_adam_beta1):
    """End-to-end PTI training: align faces, invert, fine-tune, and dump results.

    Crops/aligns frames from `input_folder`, trains a Coach on them, saves the
    tuned generator + pivots under `run_name`, and writes pivot/inversion images
    (both cropped and pasted back into the original frames) to `output_folder`.

    NOTE(review): in the original module this is presumably a click command —
    the option decorators appear to have been stripped from this dump.
    """
    # Push CLI options into the global/hyperparameter configuration modules.
    global_config.run_name = run_name
    hyperparameters.max_pti_steps = num_pti_steps
    hyperparameters.pt_l2_lambda = l2_lambda
    hyperparameters.use_locality_regularization = use_locality_reg
    hyperparameters.pti_learning_rate = pti_learning_rate
    hyperparameters.pti_adam_beta1 = pti_adam_beta1
    if use_wandb:
        wandb.init(project=paths_config.pti_results_keyword, reinit=True, name=global_config.run_name, config=config)
    files = make_dataset(input_folder)
    files = files[start_frame:end_frame]
    print(f'Number of images: {len(files)}')
    image_size = 1024
    print('Aligning images')
    crops, orig_images, quads = crop_faces(image_size, files, scale,
                                           center_sigma=center_sigma, xy_sigma=xy_sigma, use_fa=use_fa)
    print('Aligning completed')
    ds = ImageListDataset(crops, transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]))
    coach = Coach(ds, use_wandb)
    ws = coach.train()
    save_tuned_G(coach.G, ws, quads, global_config.run_name)
    # Inverse perspective transforms for pasting crops back into the originals.
    inverse_transforms = [
        calc_alignment_coefficients(quad + 0.5, [[0, 0], [0, image_size], [image_size, image_size], [image_size, 0]])
        for quad in quads]
    gen = coach.G.requires_grad_(False).eval()
    os.makedirs(output_folder, exist_ok=True)
    with open(os.path.join(output_folder, 'opts.json'), 'w') as f:
        json.dump(config, f)
    for i, (coeffs, crop, orig_image, w) in tqdm(
            enumerate(zip(inverse_transforms, crops, orig_images, ws)), total=len(ws)):
        w = w[None]
        pasted_image = paste_image(coeffs, crop, orig_image)
        save_image(pasted_image, output_folder, 'projected', i)
        with torch.no_grad():
            # Same pivot rendered by the tuned and the original generator.
            inversion = gen.synthesis(w, noise_mode='const', force_fp32=True)
            pivot = coach.original_G.synthesis(w, noise_mode='const', force_fp32=True)
        inversion = to_pil_image(inversion)
        pivot = to_pil_image(pivot)
        save_image(pivot, output_folder, 'pivot', i)
        save_image(inversion, output_folder, 'inversion', i)
        save_image(paste_image(coeffs, pivot, orig_image), output_folder, 'pivot_projected', i)
        save_image(paste_image(coeffs, inversion, orig_image), output_folder, 'inversion_projected', i)
import json
import os
from collections import defaultdict
import click
import imageio
import torch
import torchvision.transforms.functional
from torch.nn import functional as F
from torchvision import transforms
from torchvision.transforms.functional import to_tensor
from tqdm import tqdm
import models.seg_model_2
from configs import paths_config
from editings.latent_editor import LatentEditor
from utils.alignment import crop_faces_by_quads, calc_alignment_coefficients
from utils.data_utils import make_dataset
from utils.edit_utils import add_texts_to_image_vertical, paste_image_mask, paste_image
from utils.image_utils import concat_images_horizontally, tensor2pil
from utils.models_utils import load_generators
def calc_mask(inversion, segmentation_model):
    """Segment `inversion` and return a soft 1024x1024 foreground mask.

    Face-parsing classes 0, 16 and 18 are treated as background; everything
    else is foreground.
    """
    background_classes = [0, 18, 16]
    resized = torch.cat([F.interpolate(inversion, (512, 512), mode='nearest')])
    # Map from [-1, 1] to [0, 1], then apply ImageNet normalization for the parser.
    normalized = transforms.functional.normalize(resized.clip(-1, 1).add(1).div(2),
                                                 [0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    labels = segmentation_model(normalized)[0].argmax(dim=1, keepdim=True)
    foreground_votes = [labels != cls for cls in background_classes]
    is_foreground = torch.stack(foreground_votes, dim=0).all(dim=0)
    return F.interpolate(is_foreground.float(), (1024, 1024), mode='bilinear', align_corners=True)
type=click.Choice(['interfacegan', 'styleclip_global'], case_sensitive=False),
default='interfacegan')
def save_image(image, file):
    """Write `image` to `file` as high-quality RGB."""
    image.convert('RGB').save(file, quality=95)
class LatentEditor:
    """Produces edited latent codes via InterFaceGAN directions or StyleCLIP global directions.

    Edit getters return (edits, is_style_input): when is_style_input is True the
    edits are per-layer style vectors rather than W+ latents.

    NOTE(review): `interfacegan_folder` is not defined in this chunk — presumably
    a module-level path constant in the original file; confirm.
    """
    def __init__(self):
        # Load every .npy InterFaceGAN direction in the folder, keyed by filename stem.
        interfacegan_directions = {
            os.path.splitext(file)[0]: np.load(os.path.join(interfacegan_folder, file), allow_pickle=True)
            for file in os.listdir(interfacegan_folder) if file.endswith('.npy')}
        self.interfacegan_directions_tensors = {name: torch.from_numpy(arr).cuda()[0, None]
                                                for name, arr in interfacegan_directions.items()}
    def get_interfacegan_edits(self, orig_w, edit_names, edit_range):
        """Apply each requested InterFaceGAN direction over np.linspace(*edit_range).

        Returns ([(w_edit, edit_name, factor), ...], False) — W+ latents.
        """
        edits = []
        for edit_name, direction in self.interfacegan_directions_tensors.items():
            if edit_name not in edit_names:
                continue
            for factor in np.linspace(*edit_range):
                # Factors are halved before application (direction strength convention).
                w_edit = self._apply_interfacegan(orig_w, direction, factor / 2)
                edits.append((w_edit, edit_name, factor))
        return edits, False
    # NOTE(review): no `self` parameter — this looks like a @staticmethod whose
    # decorator was lost in extraction; as written, calling it through an
    # instance would pass the instance as `orig_w`. Confirm against the original.
    def get_styleclip_global_edits(orig_w, neutral_class, target_class, beta, edit_range, generator, edit_name, use_stylespace_std=False):
        """Build StyleCLIP 'global directions' edits in style space.

        Returns ([(edited_styles, direction, 'factor_beta'), ...], True) — style vectors.
        """
        affine_layers = get_affine_layers(generator.synthesis)
        edit_directions = styleclip_global_utils.get_direction(neutral_class, target_class, beta)
        edit_disentanglement = beta
        if use_stylespace_std:
            # Scale the direction by the per-channel style standard deviation.
            s_std = load_stylespace_std()
            edit_directions = to_styles(edit_directions, affine_layers)
            edit = [s * std for s, std in zip(edit_directions, s_std)]
        else:
            edit = to_styles(edit_directions, affine_layers)
        direction = edit_name[0]
        factors = np.linspace(*edit_range)
        styles = w_to_styles(orig_w, affine_layers)
        final_edits = []
        for factor in factors:
            edited_styles = [style + factor * edit_direction for style, edit_direction in zip(styles, edit)]
            final_edits.append((edited_styles, direction, f'{factor}_{edit_disentanglement}'))
        return final_edits, True
    # NOTE(review): also missing `self` — presumably a stripped @staticmethod; confirm.
    def _apply_interfacegan(latent, direction, factor=1, factor_range=None):
        """Shift `latent` along `direction` by a single factor or a range of integer factors."""
        edit_latents = []
        if factor_range is not None:  # Apply a range of editing factors. for example, (-5, 5)
            for f in range(*factor_range):
                edit_latent = latent + f * direction
                edit_latents.append(edit_latent)
            edit_latents = torch.cat(edit_latents)
        else:
            edit_latents = latent + factor * direction
        return edit_latents
def crop_faces_by_quads(IMAGE_SIZE, files, quads):
    """Crop each file to its quad at IMAGE_SIZE; returns (crops, original_images)."""
    originals = []
    cropped = []
    for quad, (_, path) in tqdm(zip(quads, files), total=len(quads)):
        cropped.append(crop_image(path, IMAGE_SIZE, quad.copy()))
        originals.append(Image.open(path))
    return cropped, originals
def calc_alignment_coefficients(pa, pb):
    """Solve for the 8 perspective-transform coefficients mapping points `pa` to `pb`.

    Returns (a, b, c, d, e, f, g, h), shape (8,), in the layout expected by
    PIL's Image.transform(..., Image.PERSPECTIVE, ...):
    x' = (a*x + b*y + c) / (g*x + h*y + 1), y' = (d*x + e*y + f) / (g*x + h*y + 1).
    """
    matrix = []
    for p1, p2 in zip(pa, pb):
        matrix.append([p1[0], p1[1], 1, 0, 0, 0, -p2[0] * p1[0], -p2[0] * p1[1]])
        matrix.append([0, 0, 0, p1[0], p1[1], 1, -p2[1] * p1[0], -p2[1] * p1[1]])
    a = np.asarray(matrix, dtype=float)
    b = np.asarray(pb, dtype=float).reshape(8)
    # Normal equations (A^T A) x = A^T b — replaces the deprecated np.matrix /
    # explicit-inverse formulation with the numerically safer np.linalg.solve.
    res = np.linalg.solve(a.T @ a, a.T @ b)
    return res.reshape(8)
def make_dataset(dir):
    """Return sorted (name_without_extension, full_path) tuples for image files in `dir`."""
    assert os.path.isdir(dir), '%s is not a valid directory' % dir
    return [(fname.split('.')[0], os.path.join(dir, fname))
            for fname in sorted(os.listdir(dir)) if is_image_file(fname)]
def add_texts_to_image_vertical(texts, pivot_images):
    """Stack a white caption strip (one centered label per column) on top of `pivot_images`."""
    images_height = pivot_images.height
    images_width = pivot_images.width
    # Pad the strip so the combined height is a multiple of 16 (video-codec friendly).
    text_height = 256 + 16 - images_height % 16
    column_width = images_width // len(texts)
    strip = Image.new('RGB', (images_width, text_height), (255, 255, 255))
    draw = ImageDraw.Draw(strip)
    font_size = int(math.ceil(24 * column_width / 256))
    try:
        font = ImageFont.truetype("truetype/freefont/FreeSans.ttf", font_size)
    except OSError:
        # FreeSans not installed — fall back to PIL's built-in bitmap font.
        font = ImageFont.load_default()
    for column, caption in enumerate(texts):
        x = column_width // 2 + column * column_width
        draw.text((x, text_height // 2), caption, fill='black', anchor='ms', font=font)
    combined = Image.new('RGB', (pivot_images.width, pivot_images.height + strip.height))
    combined.paste(strip, (0, 0))
    combined.paste(pivot_images, (0, strip.height))
    return combined
def paste_image_mask(inverse_transform, image, dst_image, mask, radius=0, sigma=0.0):
    """Paste `image` into `dst_image` through a perspective transform, alpha-masked.

    With radius > 0 the mask is eroded by `radius` pixels and Gaussian-blurred
    (sigma) to feather the seam before compositing.

    NOTE(review): `cv2`, `np` and `Image` are not imported in this chunk —
    presumably imported in the original module (utils/edit_utils.py); confirm.
    """
    image_masked = image.copy().convert('RGBA')
    pasted_image = dst_image.copy().convert('RGBA')
    if radius != 0:
        mask_np = np.array(mask)
        kernel_size = (radius * 2 + 1, radius * 2 + 1)
        kernel = np.ones(kernel_size)
        # Erode so the blur feathers inward and never reaches outside the mask.
        eroded = cv2.erode(mask_np, kernel, borderType=cv2.BORDER_CONSTANT, borderValue=0)
        blurred_mask = cv2.GaussianBlur(eroded, kernel_size, sigmaX=sigma)
        blurred_mask = Image.fromarray(blurred_mask)
        image_masked.putalpha(blurred_mask)
    else:
        image_masked.putalpha(mask)
    projected = image_masked.transform(dst_image.size, Image.PERSPECTIVE, inverse_transform,
                                       Image.BILINEAR)
    pasted_image.alpha_composite(projected)
    return pasted_image
def paste_image(inverse_transform, img, orig_image):
    """Project `img` through the inverse perspective transform and composite it onto `orig_image`."""
    base = orig_image.copy().convert('RGBA')
    overlay = img.convert('RGBA')
    overlay = overlay.transform(orig_image.size, Image.PERSPECTIVE, inverse_transform, Image.BILINEAR)
    base.paste(overlay, (0, 0), mask=overlay)
    return base
def concat_images_horizontally(*images: Image.Image):
    """Concatenate equal-height images left to right into one new image."""
    reference = images[0]
    assert all(image.height == reference.height for image in images)
    combined_width = sum(image.width for image in images)
    canvas = Image.new(reference.mode, (combined_width, reference.height))
    offset = 0
    for image in images:
        canvas.paste(image, (offset, 0))
        offset += image.width
    return canvas
def tensor2pil(tensor: torch.Tensor) -> Image.Image:
    """Map a [-1, 1]-ranged image tensor (NCHW or CHW) to a uint8 PIL image."""
    hwc = tensor.squeeze(0).permute(1, 2, 0)
    scaled = ((hwc + 1) * 255 / 2).squeeze()
    array = np.rint(scaled.detach().cpu().numpy()).clip(0, 255).astype(np.uint8)
    return Image.fromarray(array)
def load_generators(run_id):
    """Load the tuned and original generators for `run_id`.

    Returns (tuned_generator, original_generator, pivots, quads); the raw
    pickled models are dropped once wrapped.
    """
    tuned_G, pivots, quads = load_tuned_G(run_id=run_id)
    original_G = load_old_G()
    wrapped_tuned = load_from_pkl_model(tuned_G)
    wrapped_original = load_from_pkl_model(original_G)
    del tuned_G, original_G
    return wrapped_tuned, wrapped_original, pivots, quads
def _main(input_folder, output_folder, start_frame, end_frame, run_name,
          edit_range, edit_type, edit_name, use_mask, freeze_fine_layers, neutral_class, target_class, beta,
          output_frames, feathering_radius, config):
    """Render edited videos from a previously tuned generator run.

    For every frame: synthesize the inversion and the edited image from the
    stored pivots, optionally mask to the segmented foreground, paste back into
    the original frame, and write per-edit mp4s (and optionally per-frame jpegs).

    NOTE(review): presumably a click command — option decorators appear stripped
    from this dump (stray `type=click.Choice(...)` fragments precede this file's
    functions).
    """
    orig_files = make_dataset(input_folder)
    orig_files = orig_files[start_frame:end_frame]
    image_size = 1024
    segmentation_model = models.seg_model_2.BiSeNet(19).eval().cuda().requires_grad_(False)
    segmentation_model.load_state_dict(torch.load(paths_config.segmentation_model_path))
    gen, orig_gen, pivots, quads = load_generators(run_name)
    crops, orig_images = crop_faces_by_quads(image_size, orig_files, quads)
    # Inverse perspective transforms for pasting crops back into the originals.
    inverse_transforms = [
        calc_alignment_coefficients(quad + 0.5, [[0, 0], [0, image_size], [image_size, image_size], [image_size, 0]])
        for quad in quads]
    if freeze_fine_layers is not None:
        # Replace the fine (later) W layers with their temporal mean to reduce flicker.
        pivots_mean = torch.mean(pivots, dim=0, keepdim=True).expand_as(pivots)
        pivots = torch.cat([pivots[:, :freeze_fine_layers], pivots_mean[:, freeze_fine_layers:]], dim=1)
    os.makedirs(output_folder, exist_ok=True)
    with open(os.path.join(output_folder, 'opts.json'), 'w') as f:
        json.dump(config, f)
    latent_editor = LatentEditor()
    if edit_type == 'styleclip_global':
        edits, is_style_input = latent_editor.get_styleclip_global_edits(
            pivots, neutral_class, target_class, beta, edit_range, gen, edit_name
        )
    else:
        edits, is_style_input = latent_editor.get_interfacegan_edits(pivots, edit_name, edit_range)
    for edits_list, direction, factor in edits:
        video_frames = defaultdict(list)
        for i, (orig_image, crop, quad, inverse_transform) in \
                tqdm(enumerate(zip(orig_images, crops, quads, inverse_transforms)), total=len(orig_images)):
            w_pivot = pivots[i][None]
            # Style-space edits are lists of per-layer vectors; W+ edits are single tensors.
            if is_style_input:
                w_edit = [style[i][None] for style in edits_list]
            else:
                w_edit = edits_list[i][None]
            edited_tensor = gen.synthesis.forward(w_edit, noise_mode='const', force_fp32=False,
                                                  style_input=is_style_input)
            mask = None
            if use_mask:
                crop_tensor = to_tensor(crop).mul(2).sub(1)[None].cuda()
                inversion = gen.synthesis(w_pivot, noise_mode='const', force_fp32=False)
                # Mask is computed on the real crop, not on the inversion.
                mask = calc_mask(crop_tensor, segmentation_model)
                mask = tensor2pil(mask.mul(2).sub(1))
            else:
                inversion = gen.synthesis(w_pivot, noise_mode='const', force_fp32=False)
            inversion = tensor2pil(inversion)
            edited_image = tensor2pil(edited_tensor)
            if mask is not None:
                inversion_projection = paste_image_mask(inverse_transform, inversion, orig_image, mask,
                                                        radius=feathering_radius)
                edit_projection = paste_image_mask(inverse_transform, edited_image, orig_image, mask,
                                                   radius=feathering_radius)
            else:
                inversion_projection = paste_image(inverse_transform, inversion, orig_image)
                edit_projection = paste_image(inverse_transform, edited_image, orig_image)
            folder_name = f'{direction}/{factor}'
            if output_frames:
                frames_dir = os.path.join(output_folder, 'frames', folder_name)
                os.makedirs(frames_dir, exist_ok=True)
                save_image(inversion_projection, os.path.join(frames_dir, f'inversion_{i:04d}.jpeg'))
                save_image(orig_image, os.path.join(frames_dir, f'source_{i:04d}.jpeg'))
                save_image(edit_projection, os.path.join(frames_dir, f'edit_{i:04d}.jpeg'))
            video_frame = concat_images_horizontally(orig_image, inversion_projection, edit_projection)
            video_frame = add_texts_to_image_vertical(['original', 'inversion', 'edit'], video_frame)
            video_frames[folder_name].append(video_frame)
        for folder_name, frames in video_frames.items():
            folder_path = os.path.join(output_folder, folder_name)
            os.makedirs(folder_path, exist_ok=True)
            imageio.mimwrite(os.path.join(folder_path, 'out.mp4'), frames, fps=25, output_params=['-vf', 'fps=25'])
import numpy as np
import torch
from torch_utils import misc
from torch_utils import persistence
from torch_utils.ops import conv2d_resample
from torch_utils.ops import upfirdn2d
from torch_utils.ops import bias_act
from torch_utils.ops import fma
#
def conv2d_resample(x, w, f=None, up=1, down=1, padding=0, groups=1, flip_weight=True, flip_filter=False):
def fma(a, b, c):
def modulated_conv2d(
    x,                          # Input tensor of shape [batch_size, in_channels, in_height, in_width].
    weight,                     # Weight tensor of shape [out_channels, in_channels, kernel_height, kernel_width].
    styles,                     # Modulation coefficients of shape [batch_size, in_channels].
    noise           = None,     # Optional noise tensor to add to the output activations.
    up              = 1,        # Integer upsampling factor.
    down            = 1,        # Integer downsampling factor.
    padding         = 0,        # Padding with respect to the upsampled image.
    resample_filter = None,     # Low-pass filter to apply when resampling activations. Must be prepared beforehand by calling upfirdn2d.setup_filter().
    demodulate      = True,     # Apply weight demodulation?
    flip_weight     = True,     # False = convolution, True = correlation (matches torch.nn.functional.conv2d).
    fused_modconv   = True,     # Perform modulation, convolution, and demodulation as a single fused operation?
):
    """StyleGAN2 modulated convolution: per-sample style modulation, conv, demodulation."""
    batch_size = x.shape[0]
    out_channels, in_channels, kh, kw = weight.shape
    misc.assert_shape(weight, [out_channels, in_channels, kh, kw]) # [OIkk]
    misc.assert_shape(x, [batch_size, in_channels, None, None]) # [NIHW]
    misc.assert_shape(styles, [batch_size, in_channels]) # [NI]
    # Pre-normalize inputs to avoid FP16 overflow.
    if x.dtype == torch.float16 and demodulate:
        weight = weight * (1 / np.sqrt(in_channels * kh * kw) / weight.norm(float('inf'), dim=[1,2,3], keepdim=True)) # max_Ikk
        styles = styles / styles.norm(float('inf'), dim=1, keepdim=True) # max_I
    # Calculate per-sample weights and demodulation coefficients.
    w = None
    dcoefs = None
    if demodulate or fused_modconv:
        w = weight.unsqueeze(0) # [NOIkk]
        w = w * styles.reshape(batch_size, 1, -1, 1, 1) # [NOIkk]
    if demodulate:
        dcoefs = (w.square().sum(dim=[2,3,4]) + 1e-8).rsqrt() # [NO]
    if demodulate and fused_modconv:
        w = w * dcoefs.reshape(batch_size, -1, 1, 1, 1) # [NOIkk]
    # Execute by scaling the activations before and after the convolution.
    if not fused_modconv:
        x = x * styles.to(x.dtype).reshape(batch_size, -1, 1, 1)
        x = conv2d_resample.conv2d_resample(x=x, w=weight.to(x.dtype), f=resample_filter, up=up, down=down, padding=padding, flip_weight=flip_weight)
        if demodulate and noise is not None:
            # Fused multiply-add keeps the demodulation + noise in one kernel.
            x = fma.fma(x, dcoefs.to(x.dtype).reshape(batch_size, -1, 1, 1), noise.to(x.dtype))
        elif demodulate:
            x = x * dcoefs.to(x.dtype).reshape(batch_size, -1, 1, 1)
        elif noise is not None:
            x = x.add_(noise.to(x.dtype))
        return x
    # Execute as one fused op using grouped convolution.
    with misc.suppress_tracer_warnings(): # this value will be treated as a constant
        batch_size = int(batch_size)
    misc.assert_shape(x, [batch_size, in_channels, None, None])
    # Fold the batch into the channel dimension and run one grouped conv per sample.
    x = x.reshape(1, -1, *x.shape[2:])
    w = w.reshape(-1, in_channels, kh, kw)
    x = conv2d_resample.conv2d_resample(x=x, w=w.to(x.dtype), f=resample_filter, up=up, down=down, padding=padding, groups=batch_size, flip_weight=flip_weight)
    x = x.reshape(batch_size, -1, *x.shape[2:])
    if noise is not None:
        x = x.add_(noise)
    return x
import copy
import wandb
import numpy as np
import torch
import torch.nn.functional as F
from tqdm import tqdm
from configs import global_config, hyperparameters
from utils import log_utils
import dnnlib
def project(
        G,
        target: torch.Tensor,  # [C,H,W] and dynamic range [0,255], W & H must match G output resolution
        *,
        num_steps=1000,
        w_avg_samples=10000,
        initial_learning_rate=0.01,
        initial_noise_factor=0.05,
        lr_rampdown_length=0.25,
        lr_rampup_length=0.05,
        noise_ramp_length=0.75,
        regularize_noise_weight=1e5,
        verbose=False,
        device: torch.device,
        use_wandb=False,
        initial_w=None,
        image_log_step=global_config.image_rec_result_log_snapshot,
        w_name: str
):
    """Project `target` into the generator's W space by latent + noise optimization.

    Starts from the W average (or `initial_w`), optimizes a single W vector and
    the synthesis noise buffers against a VGG16/LPIPS feature distance with
    noise regularization, and returns the result broadcast over all W layers.

    Args:
        G: StyleGAN generator (deep-copied; the caller's copy is untouched).
        target: [C, H, W] image in [0, 255], matching G's output resolution.
        num_steps: optimization iterations.
        w_avg_samples: number of z samples used to estimate the W mean/stddev.
        device: torch device to run on (keyword-only, required).
        w_name: tag used for logging (keyword-only, required).

    Returns:
        Tensor of shape [1, num_ws, w_dim] — the optimized latent repeated for
        every synthesis layer.
    """
    assert target.shape == (G.img_channels, G.img_resolution, G.img_resolution)

    def logprint(*args):
        if verbose:
            print(*args)

    G = copy.deepcopy(G).eval().requires_grad_(False).to(device).float()  # type: ignore
    # Fix: capture the layer count now — G is deleted before the final return,
    # and the previous hard-coded `18` was only valid for 1024x1024 generators.
    num_ws = G.mapping.num_ws
    # Compute w stats.
    logprint(f'Computing W midpoint and stddev using {w_avg_samples} samples...')
    z_samples = np.random.RandomState(123).randn(w_avg_samples, G.z_dim)
    w_samples = G.mapping(torch.from_numpy(z_samples).to(device), None)  # [N, L, C]
    w_samples = w_samples[:, :1, :].cpu().numpy().astype(np.float32)  # [N, 1, C]
    w_avg = np.mean(w_samples, axis=0, keepdims=True)  # [1, 1, C]
    w_avg_tensor = torch.from_numpy(w_avg).to(global_config.device)
    w_std = (np.sum((w_samples - w_avg) ** 2) / w_avg_samples) ** 0.5
    start_w = initial_w if initial_w is not None else w_avg
    # Setup noise inputs.
    noise_bufs = {name: buf for (name, buf) in G.synthesis.named_buffers() if 'noise_const' in name}
    # Load VGG16 feature detector.
    url = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/vgg16.pt'
    with dnnlib.util.open_url(url) as f:
        vgg16 = torch.jit.load(f).eval().to(device)
    # Features for target image.
    target_images = target.unsqueeze(0).to(device).to(torch.float32)
    if target_images.shape[2] > 256:
        target_images = F.interpolate(target_images, size=(256, 256), mode='area')
    target_features = vgg16(target_images, resize_images=False, return_lpips=True)
    w_opt = torch.tensor(start_w, dtype=torch.float32, device=device,
                         requires_grad=True)  # pylint: disable=not-callable
    optimizer = torch.optim.Adam([w_opt] + list(noise_bufs.values()), betas=(0.9, 0.999),
                                 lr=hyperparameters.first_inv_lr)
    # Init noise.
    for buf in noise_bufs.values():
        buf[:] = torch.randn_like(buf)
        buf.requires_grad = True
    for step in tqdm(range(num_steps)):
        # Learning rate schedule.
        t = step / num_steps
        # Latent jitter decays quadratically over the first noise_ramp_length of training.
        w_noise_scale = w_std * initial_noise_factor * max(0.0, 1.0 - t / noise_ramp_length) ** 2
        lr_ramp = min(1.0, (1.0 - t) / lr_rampdown_length)
        lr_ramp = 0.5 - 0.5 * np.cos(lr_ramp * np.pi)
        lr_ramp = lr_ramp * min(1.0, t / lr_rampup_length)
        lr = initial_learning_rate * lr_ramp
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr
        # Synth images from opt_w.
        w_noise = torch.randn_like(w_opt) * w_noise_scale
        ws = (w_opt + w_noise).repeat([1, G.mapping.num_ws, 1])
        synth_images = G.synthesis(ws, noise_mode='const', force_fp32=True)
        # Downsample image to 256x256 if it's larger than that. VGG was built for 224x224 images.
        synth_images = (synth_images + 1) * (255 / 2)
        if synth_images.shape[2] > 256:
            synth_images = F.interpolate(synth_images, size=(256, 256), mode='area')
        # Features for synth images.
        synth_features = vgg16(synth_images, resize_images=False, return_lpips=True)
        dist = (target_features - synth_features).square().sum()
        # Noise regularization: penalize spatial autocorrelation at multiple scales.
        reg_loss = 0.0
        for v in noise_bufs.values():
            noise = v[None, None, :, :]  # must be [1,1,H,W] for F.avg_pool2d()
            while True:
                reg_loss += (noise * torch.roll(noise, shifts=1, dims=3)).mean() ** 2
                reg_loss += (noise * torch.roll(noise, shifts=1, dims=2)).mean() ** 2
                if noise.shape[2] <= 8:
                    break
                noise = F.avg_pool2d(noise, kernel_size=2)
        loss = dist + reg_loss * regularize_noise_weight
        if step % image_log_step == 0:
            with torch.no_grad():
                if use_wandb:
                    global_config.training_step += 1
                    wandb.log({f'first projection _{w_name}': loss.detach().cpu()})
                    log_utils.log_image_from_w(w_opt.repeat([1, G.mapping.num_ws, 1]), G, w_name)
        # Step
        optimizer.zero_grad(set_to_none=True)
        loss.backward()
        optimizer.step()
        logprint(f'step {step + 1:>4d}/{num_steps}: dist {dist:<4.2f} loss {float(loss):<5.2f}')
        # Normalize noise.
        with torch.no_grad():
            for buf in noise_bufs.values():
                buf -= buf.mean()
                buf *= buf.square().mean().rsqrt()
    del G
    # Broadcast the single optimized W over all synthesis layers (was hard-coded 18).
    return w_opt.repeat([1, num_ws, 1])
import numpy as np
import os
import click
import torch
from tqdm import tqdm
from configs import paths_config
try:
import clip
except ImportError:
print('Warning: clip is not installed, styleclip edits will not work')
pass
# CLIP prompt-ensembling templates (the 80 prompts from CLIP's zero-shot
# ImageNet evaluation). Each '{}' is filled with a class name and the resulting
# text embeddings are averaged in zeroshot_classifier() below.
imagenet_templates = [
    'a bad photo of a {}.',
    'a photo of many {}.',
    'a sculpture of a {}.',
    'a photo of the hard to see {}.',
    'a low resolution photo of the {}.',
    'a rendering of a {}.',
    'graffiti of a {}.',
    'a bad photo of the {}.',
    'a cropped photo of the {}.',
    'a tattoo of a {}.',
    'the embroidered {}.',
    'a photo of a hard to see {}.',
    'a bright photo of a {}.',
    'a photo of a clean {}.',
    'a photo of a dirty {}.',
    'a dark photo of the {}.',
    'a drawing of a {}.',
    'a photo of my {}.',
    'the plastic {}.',
    'a photo of the cool {}.',
    'a close-up photo of a {}.',
    'a black and white photo of the {}.',
    'a painting of the {}.',
    'a painting of a {}.',
    'a pixelated photo of the {}.',
    'a sculpture of the {}.',
    'a bright photo of the {}.',
    'a cropped photo of a {}.',
    'a plastic {}.',
    'a photo of the dirty {}.',
    'a jpeg corrupted photo of a {}.',
    'a blurry photo of the {}.',
    'a photo of the {}.',
    'a good photo of the {}.',
    'a rendering of the {}.',
    'a {} in a video game.',
    'a photo of one {}.',
    'a doodle of a {}.',
    'a close-up photo of the {}.',
    'a photo of a {}.',
    'the origami {}.',
    'the {} in a video game.',
    'a sketch of a {}.',
    'a doodle of the {}.',
    'a origami {}.',
    'a low resolution photo of a {}.',
    'the toy {}.',
    'a rendition of the {}.',
    'a photo of the clean {}.',
    'a photo of a large {}.',
    'a rendition of a {}.',
    'a photo of a nice {}.',
    'a photo of a weird {}.',
    'a blurry photo of a {}.',
    'a cartoon {}.',
    'art of a {}.',
    'a sketch of the {}.',
    'a embroidered {}.',
    'a pixelated photo of a {}.',
    'itap of the {}.',
    'a jpeg corrupted photo of the {}.',
    'a good photo of a {}.',
    'a plushie {}.',
    'a photo of the nice {}.',
    'a photo of the small {}.',
    'a photo of the weird {}.',
    'the cartoon {}.',
    'art of the {}.',
    'a drawing of the {}.',
    'a photo of the large {}.',
    'a black and white photo of a {}.',
    'the plushie {}.',
    'a dark photo of a {}.',
    'itap of a {}.',
    'graffiti of the {}.',
    'a toy {}.',
    'itap of my {}.',
    'a photo of a cool {}.',
    'a photo of a small {}.',
    'a tattoo of the {}.',
]
def zeroshot_classifier(model, classnames, templates):
    """Build a (embed_dim, num_classes) matrix of prompt-ensembled CLIP text embeddings.

    For each class name, every template is filled in, encoded, L2-normalized,
    averaged, and re-normalized; the per-class vectors become the columns.
    """
    def _class_embedding(name):
        prompts = [template.format(name) for template in templates]  # format with class
        tokens = clip.tokenize(prompts).cuda()  # tokenize
        embeddings = model.encode_text(tokens)  # embed with text encoder
        embeddings /= embeddings.norm(dim=-1, keepdim=True)
        averaged = embeddings.mean(dim=0)
        averaged /= averaged.norm()
        return averaged

    with torch.no_grad():
        columns = [_class_embedding(name) for name in tqdm(classnames)]
        return torch.stack(columns, dim=1).cuda()
def get_direction(neutral_class, target_class, beta, model=None, di=None):
    """Compute a StyleCLIP 'global directions' edit vector in style space.

    Args:
        neutral_class / target_class: text descriptions of the source and target.
        beta: disentanglement threshold — style channels with |relevance| <= beta
            are zeroed out.
        model: optional preloaded CLIP model (ViT-B/32 is loaded if None).
        di: optional precomputed channel-relevance matrix; loaded from
            paths_config.styleclip_fs3 if None.

    Returns:
        Per-channel edit direction, max-normalized into [-1, 1].

    Raises:
        ValueError: if beta zeroes out every channel.
    """
    if di is None:
        di = torch.from_numpy(np.load(paths_config.styleclip_fs3)).cuda()
    if model is None:
        model, _ = clip.load("ViT-B/32")
    class_names = [neutral_class, target_class]
    class_weights = zeroshot_classifier(model, class_names, imagenet_templates)
    # Text direction from neutral to target, unit-normalized.
    dt = class_weights[:, 1] - class_weights[:, 0]
    dt = dt / dt.norm()
    # Relevance of each style channel to the text direction.
    relevance = di @ dt
    mask = relevance.abs() > beta
    direction = relevance * mask
    direction_max = direction.abs().max()
    if direction_max > 0:
        direction = direction / direction_max
    else:
        raise ValueError(f'Beta value {beta} is too high for mapping from {neutral_class} to {target_class},'
                         f' try setting it to a lower value')
    return direction
import torch
# Shared mean-reduction MSE criterion, constructed once at import time.
l2_criterion = torch.nn.MSELoss(reduction='mean')


def l2_loss(real_images, generated_images):
    """Mean squared error between two image batches (scalar tensor)."""
    return l2_criterion(real_images, generated_images)
import copy
import io
import json
import os
from collections import defaultdict
import click
import imageio
import torch
import torchvision.transforms.functional
from PIL import Image, ImageChops
from torch.nn import functional as F
from torchvision import transforms
from torchvision.transforms.functional import to_tensor
from tqdm import tqdm, trange
import models.seg_model_2
from configs import hyperparameters, paths_config
from edit_video import save_image
from editings.latent_editor import LatentEditor
from utils.alignment import crop_faces_by_quads, calc_alignment_coefficients
from utils.data_utils import make_dataset
from utils.edit_utils import add_texts_to_image_vertical, paste_image, paste_image_mask
from utils.image_utils import concat_images_horizontally, tensor2pil
from utils.models_utils import load_generators
from utils.morphology import dilation
# Module-level debug switch; not toggled anywhere in this chunk — presumably
# flipped manually during development.
debug = False
def calc_masks(inversion, segmentation_model, border_pixels, inner_mask_dilation, outer_mask_dilation,
               whole_image_border):
    """Segment the face in ``inversion`` and build content/border/full masks at 1024x1024.

    NOTE(review): classes 0, 18, 16 are treated as background -- presumably
    background/hat/cloth in a 19-class face-parsing label set; confirm against
    the segmentation model's label map.
    """
    background_classes = [0, 18, 16]
    # The segmentation model expects 512x512, ImageNet-normalized input in [0, 1];
    # the generator output is in [-1, 1], hence clip/add/div below.
    inversion_resized = torch.cat([F.interpolate(inversion, (512, 512), mode='nearest')])
    inversion_normalized = transforms.functional.normalize(inversion_resized.clip(-1, 1).add(1).div(2),
                                                           [0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    segmentation = segmentation_model(inversion_normalized)[0].argmax(dim=1, keepdim=True)
    # Foreground = pixels belonging to none of the background classes.
    is_foreground = logical_and_reduce(*[segmentation != cls for cls in background_classes])
    foreground_mask = is_foreground.float()
    # Masks are computed at 512 while the radii are specified for 1024, so all
    # dilation radii are halved here...
    content_mask, border_mask, full_mask = create_masks(border_pixels // 2, foreground_mask, inner_mask_dilation // 2,
                                                        outer_mask_dilation // 2, whole_image_border)
    # ...then upsampled to the generator's 1024x1024 output resolution.
    content_mask = F.interpolate(content_mask, (1024, 1024), mode='bilinear', align_corners=True)
    border_mask = F.interpolate(border_mask, (1024, 1024), mode='bilinear', align_corners=True)
    full_mask = F.interpolate(full_mask, (1024, 1024), mode='bilinear', align_corners=True)
    return content_mask, border_mask, full_mask
type=click.Choice(['styleclip_global', 'interfacegan'], case_sensitive=False),
default='interfacegan')
def create_dump_file(border_mask_image, crop, full_mask_image, inner_mask_image, inverse_transform, optimized_image,
                     orig_image, quad, edited_image):
    """Serialize one frame's images (as PNG-compressed bytes) and metadata into a dict."""
    def to_png_bytes(img):
        # In-memory PNG encoding; `optimize=False` keeps it fast.
        buffer = io.BytesIO()
        img.save(buffer, format='png', optimize=False)
        return buffer.getvalue()

    return {
        'inverse_transform': inverse_transform,
        'orig_image': to_png_bytes(orig_image),
        'quad': quad,
        'optimized_image': to_png_bytes(optimized_image),
        'crop': to_png_bytes(crop),
        'inner_mask_image': to_png_bytes(inner_mask_image),
        'full_mask_image': to_png_bytes(full_mask_image),
        'border_mask_image': to_png_bytes(border_mask_image),
        'edited_image': to_png_bytes(edited_image),
    }
def optimize_border(G: torch.nn.Module, border_image, content_image, w: torch.Tensor, border_mask, content_mask,
                    optimize_generator=False, optimize_wplus=False, num_steps=100, loss_l2=True, is_style_input=False,
                    content_loss_lambda=0.01, border_loss_threshold=0.0):
    """Stitching tuning: optimize a copy of generator ``G`` (and/or the latent ``w``)
    so its output matches ``border_image`` under ``border_mask`` while staying close
    to ``content_image`` under ``content_mask`` (weighted by ``content_loss_lambda``).

    Stops early once the border loss drops below ``border_loss_threshold``.
    Returns the final synthesized image tensor (detached). The caller's ``G`` is
    never modified -- all updates happen on a deep copy.
    """
    assert optimize_generator or optimize_wplus
    # Work on a throwaway copy so the caller's generator stays untouched.
    G = copy.deepcopy(G).train(optimize_generator).requires_grad_(optimize_generator).float()
    if not is_style_input:
        latent = torch.nn.Parameter(w, requires_grad=optimize_wplus)
    else:
        # Style-space input is a list of per-layer styles; it cannot be wrapped
        # in a single Parameter, so w+ optimization is unsupported here.
        latent = w
        assert not optimize_wplus
    parameters = []
    if optimize_generator:
        parameters += list(G.parameters())
    if optimize_wplus:
        parameters += [latent]
    optimizer = torch.optim.Adam(parameters, hyperparameters.stitching_tuning_lr)
    for step in trange(num_steps, leave=False):
        generated_image = G.synthesis(latent, style_input=is_style_input, noise_mode='const', force_fp32=True)
        border_loss = masked_l2(generated_image, border_image, border_mask, loss_l2)
        loss = border_loss + content_loss_lambda * masked_l2(generated_image, content_image, content_mask, loss_l2)
        # Early exit before the optimizer step once the border already matches.
        if border_loss < border_loss_threshold:
            break
        optimizer.zero_grad()
        # wandb.log({f'border_loss_{frame_id}': border_loss.item()})
        loss.backward()
        optimizer.step()
    # Re-synthesize with the final parameters/latent before discarding the copy.
    generated_image = G.synthesis(latent, style_input=is_style_input, noise_mode='const', force_fp32=True)
    del G
    return generated_image.detach()
def save_image(image, file):
    """Save ``image`` to ``file`` as high-quality RGB (drops any alpha channel)."""
    rgb_image = image.convert('RGB')
    rgb_image.save(file, quality=95)
class LatentEditor:
    """Applies latent-space edits (InterFaceGAN directions) to W+ pivots."""

    def __init__(self):
        # Load every .npy InterFaceGAN direction found in `interfacegan_folder`
        # (a module-level path defined elsewhere in this file), keyed by filename stem.
        interfacegan_directions = {
            os.path.splitext(file)[0]: np.load(os.path.join(interfacegan_folder, file), allow_pickle=True)
            for file in os.listdir(interfacegan_folder) if file.endswith('.npy')}
        # Keep one (1, ...) CUDA tensor per direction, ready to broadcast over latents.
        self.interfacegan_directions_tensors = {name: torch.from_numpy(arr).cuda()[0, None]
                                                for name, arr in interfacegan_directions.items()}

    def get_interfacegan_edits(self, orig_w, edit_names, edit_range):
        """Return (edits, False): one (edited_w, name, factor) tuple per requested
        direction and per factor in np.linspace(*edit_range). The False flag
        marks W-space (not style-space) input."""
        edits = []
        for edit_name, direction in self.interfacegan_directions_tensors.items():
            if edit_name not in edit_names:
                continue
            for factor in np.linspace(*edit_range):
                # Factors are halved here; presumably the stored directions are
                # scaled for double steps -- TODO confirm.
                w_edit = self._apply_interfacegan(orig_w, direction, factor / 2)
                edits.append((w_edit, edit_name, factor))
        return edits, False
def get_styleclip_global_edits(orig_w, neutral_class, target_class, beta, edit_range, generator, edit_name, use_stylespace_std=False):
    """Build StyleCLIP global-direction edits in StyleSpace.

    Converts the CLIP text direction from ``neutral_class`` to ``target_class``
    into per-layer style offsets and applies them at every factor in
    ``edit_range`` (np.linspace arguments).

    Returns:
        (final_edits, True) where final_edits is a list of
        (edited_styles, direction_name, '<factor>_<beta>') tuples; the True
        flag marks style-space input.

    NOTE(review): defined without ``self`` but invoked as a LatentEditor method
    elsewhere in this file -- likely an artifact of how this source was
    flattened; confirm against the original class definition.
    """
    affine_layers = get_affine_layers(generator.synthesis)
    edit_directions = styleclip_global_utils.get_direction(neutral_class, target_class, beta)
    edit_disentanglement = beta
    if use_stylespace_std:
        # Scale each channel by the precomputed StyleSpace std before applying.
        s_std = load_stylespace_std()
        edit_directions = to_styles(edit_directions, affine_layers)
        edit = [s * std for s, std in zip(edit_directions, s_std)]
    else:
        edit = to_styles(edit_directions, affine_layers)
    direction = edit_name[0]
    factors = np.linspace(*edit_range)
    styles = w_to_styles(orig_w, affine_layers)
    final_edits = []
    for factor in factors:
        edited_styles = [style + factor * edit_direction for style, edit_direction in zip(styles, edit)]
        final_edits.append((edited_styles, direction, f'{factor}_{edit_disentanglement}'))
    return final_edits, True
def _apply_interfacegan(latent, direction, factor=1, factor_range=None):
edit_latents = []
if factor_range is not None: # Apply a range of editing factors. for example, (-5, 5)
for f in range(*factor_range):
edit_latent = latent + f * direction
edit_latents.append(edit_latent)
edit_latents = torch.cat(edit_latents)
else:
edit_latents = latent + factor * direction
return edit_latents
def crop_faces_by_quads(IMAGE_SIZE, files, quads):
    """Crop every frame to its alignment quad.

    Returns:
        (crops, orig_images): the aligned crops and the untouched originals,
        in the same order as ``files``.
    """
    originals = []
    cropped = []
    for quad, (_, path) in tqdm(zip(quads, files), total=len(quads)):
        # quad.copy(): crop_image mutates the quad it receives.
        cropped.append(crop_image(path, IMAGE_SIZE, quad.copy()))
        originals.append(Image.open(path))
    return cropped, originals
def calc_alignment_coefficients(pa, pb):
    """Solve for the 8 perspective-transform coefficients mapping points ``pa`` to ``pb``.

    Builds the standard homography linear system (two equations per point pair,
    with h22 fixed to 1) and solves it by least squares. With the usual four
    corner pairs the system is 8x8 and the solution exact.

    The previous implementation used the deprecated ``np.matrix`` type and the
    explicit normal equations ``inv(A^T A) A^T b``; ``np.linalg.lstsq`` computes
    the same least-squares solution more stably.

    Args:
        pa: Source points, iterable of (x, y) pairs.
        pb: Destination points, same length as ``pa``.

    Returns:
        1-D numpy array of 8 coefficients (a, b, c, d, e, f, g, h), suitable
        for PIL's PERSPECTIVE transform.
    """
    rows = []
    for (x, y), (u, v) in zip(pa, pb):
        rows.append([x, y, 1, 0, 0, 0, -u * x, -u * y])
        rows.append([0, 0, 0, x, y, 1, -v * x, -v * y])
    a = np.asarray(rows, dtype=float)
    b = np.asarray(pb, dtype=float).reshape(-1)
    res, *_ = np.linalg.lstsq(a, b, rcond=None)
    return np.asarray(res).reshape(-1)
def make_dataset(dir):
    """Collect image files from ``dir``, sorted by filename.

    Returns:
        List of (basename-without-extension, full-path) tuples for every file
        in ``dir`` that ``is_image_file`` accepts.

    Raises:
        NotADirectoryError: if ``dir`` is not an existing directory. (Replaces
        the previous bare ``assert``, which would be stripped under ``python -O``.)
    """
    if not os.path.isdir(dir):
        raise NotADirectoryError('%s is not a valid directory' % dir)
    images = []
    for fname in sorted(os.listdir(dir)):
        if is_image_file(fname):
            path = os.path.join(dir, fname)
            # NOTE: split('.')[0] truncates at the FIRST dot, so 'a.b.png' -> 'a';
            # preserved for backward compatibility with existing run outputs.
            stem = fname.split('.')[0]
            images.append((stem, path))
    return images
def add_texts_to_image_vertical(texts, pivot_images):
    """Stack a white caption strip above ``pivot_images``, writing one text
    centered over each of the ``len(texts)`` equal-width image columns.

    The strip height is padded so the final image height is a multiple of 16
    (encoder-friendly). Returns a new RGB PIL image.
    """
    images_height = pivot_images.height
    images_width = pivot_images.width
    text_height = 256 + 16 - images_height % 16
    num_images = len(texts)
    image_width = images_width // num_images
    text_image = Image.new('RGB', (images_width, text_height), (255, 255, 255))
    draw = ImageDraw.Draw(text_image)
    # Scale the font with the column width (24pt at 256px-wide columns).
    font_size = int(math.ceil(24 * image_width / 256))
    try:
        font = ImageFont.truetype("truetype/freefont/FreeSans.ttf", font_size)
    except OSError:
        # FreeSans not installed; fall back to PIL's built-in bitmap font.
        font = ImageFont.load_default()
    for i, text in enumerate(texts):
        # anchor='ms': horizontally centered, baseline at the given y.
        draw.text((image_width // 2 + i * image_width, text_height // 2), text, fill='black', anchor='ms', font=font)
    out_image = Image.new('RGB', (pivot_images.width, pivot_images.height + text_image.height))
    out_image.paste(text_image, (0, 0))
    out_image.paste(pivot_images, (0, text_image.height))
    return out_image
def paste_image_mask(inverse_transform, image, dst_image, mask, radius=0, sigma=0.0):
    """Paste ``image`` into ``dst_image`` through ``mask``, warped back to the
    destination frame by the inverse perspective transform.

    With ``radius`` > 0 the mask is first eroded by ``radius`` pixels and then
    Gaussian-blurred (feathering), so the seam fades instead of cutting hard.
    Returns an RGBA PIL image.
    """
    image_masked = image.copy().convert('RGBA')
    pasted_image = dst_image.copy().convert('RGBA')
    if radius != 0:
        mask_np = np.array(mask)
        kernel_size = (radius * 2 + 1, radius * 2 + 1)
        kernel = np.ones(kernel_size)
        # Erode first so the subsequent blur never bleeds outside the original mask.
        eroded = cv2.erode(mask_np, kernel, borderType=cv2.BORDER_CONSTANT, borderValue=0)
        blurred_mask = cv2.GaussianBlur(eroded, kernel_size, sigmaX=sigma)
        blurred_mask = Image.fromarray(blurred_mask)
        image_masked.putalpha(blurred_mask)
    else:
        image_masked.putalpha(mask)
    # Warp the masked crop back onto the full-frame coordinate system.
    projected = image_masked.transform(dst_image.size, Image.PERSPECTIVE, inverse_transform,
                                       Image.BILINEAR)
    pasted_image.alpha_composite(projected)
    return pasted_image
def paste_image(inverse_transform, img, orig_image):
    """Warp ``img`` back into ``orig_image``'s frame via the inverse perspective
    transform and paste it using its own alpha channel as the mask."""
    result = orig_image.copy().convert('RGBA')
    warped = img.convert('RGBA').transform(orig_image.size, Image.PERSPECTIVE, inverse_transform, Image.BILINEAR)
    result.paste(warped, (0, 0), mask=warped)
    return result
def concat_images_horizontally(*images: Image.Image):
    """Concatenate equally-tall images side by side into one new image."""
    assert all(image.height == images[0].height for image in images)
    combined_width = sum(img.width for img in images)
    combined = Image.new(images[0].mode, (combined_width, images[0].height))
    offset = 0
    for img in images:
        combined.paste(img, (offset, 0))
        offset += img.width
    return combined
def tensor2pil(tensor: torch.Tensor) -> Image.Image:
    """Convert a single [-1, 1] image tensor (leading batch dim of 1) to a PIL image."""
    scaled = tensor.squeeze(0).permute(1, 2, 0).add(1).mul(255).div(2).squeeze()
    pixels = np.rint(scaled.detach().cpu().numpy()).clip(0, 255).astype(np.uint8)
    return Image.fromarray(pixels)
def load_generators(run_id):
    """Load the tuned and original generators for ``run_id``, plus its saved pivots and quads."""
    tuned_pkl, pivots, quads = load_tuned_G(run_id=run_id)
    original_pkl = load_old_G()
    tuned_gen = load_from_pkl_model(tuned_pkl)
    original_gen = load_from_pkl_model(original_pkl)
    # Drop the raw pickled models once wrapped.
    del tuned_pkl, original_pkl
    return tuned_gen, original_gen, pivots, quads
def _main(input_folder, output_folder, start_frame, end_frame, run_name,
          edit_range, edit_type, edit_name, inner_mask_dilation,
          outer_mask_dilation, whole_image_border,
          freeze_fine_layers, l2, output_frames, num_steps, neutral_class, target_class,
          beta, config, content_loss_lambda, border_loss_threshold):
    """Edit a video with stitching tuning.

    For every frame in [start_frame, end_frame): crop to the saved alignment
    quad, apply the requested latent edit (InterFaceGAN or StyleCLIP global),
    run per-frame stitching-tuning optimization, paste the result back into the
    original frame, and write comparison videos (and optionally per-frame
    images / debug dumps) under ``output_folder``.
    """
    # --- inputs, models and per-run data -----------------------------------
    orig_files = make_dataset(input_folder)
    orig_files = orig_files[start_frame:end_frame]
    segmentation_model = models.seg_model_2.BiSeNet(19).eval().cuda().requires_grad_(False)
    segmentation_model.load_state_dict(torch.load(paths_config.segmentation_model_path))
    gen, orig_gen, pivots, quads = load_generators(run_name)
    image_size = 1024
    crops, orig_images = crop_faces_by_quads(image_size, orig_files, quads)
    # Inverse transform: aligned-crop corners back to original-frame coordinates.
    inverse_transforms = [
        calc_alignment_coefficients(quad + 0.5, [[0, 0], [0, image_size], [image_size, image_size], [image_size, 0]])
        for quad in quads]
    if freeze_fine_layers is not None:
        # Replace the fine (later) W+ layers of every pivot with their mean
        # across frames to suppress high-frequency flicker.
        pivots_mean = torch.mean(pivots, dim=0, keepdim=True).expand_as(pivots)
        pivots = torch.cat([pivots[:, :freeze_fine_layers], pivots_mean[:, freeze_fine_layers:]], dim=1)
    os.makedirs(output_folder, exist_ok=True)
    with open(os.path.join(output_folder, 'opts.json'), 'w') as f:
        json.dump(config, f)
    # --- build the requested edits -----------------------------------------
    latent_editor = LatentEditor()
    if edit_type == 'styleclip_global':
        edits, is_style_input = latent_editor.get_styleclip_global_edits(
            pivots, neutral_class, target_class, beta, edit_range, gen, edit_name
        )
    else:
        edits, is_style_input = latent_editor.get_interfacegan_edits(pivots, edit_name, edit_range)
    for edits_list, direction, factor in edits:
        video_frames = defaultdict(list)
        # --- per-frame processing ------------------------------------------
        for i, (orig_image, crop, quad, inverse_transform) in \
                tqdm(enumerate(zip(orig_images, crops, quads, inverse_transforms)), total=len(orig_images)):
            w_interp = pivots[i][None]
            if is_style_input:
                # Style-space edit: one per-layer style list per frame.
                w_edit_interp = [style[i][None] for style in edits_list]
            else:
                w_edit_interp = edits_list[i][None]
            edited_tensor = gen.synthesis.forward(w_edit_interp, style_input=is_style_input, noise_mode='const',
                                                  force_fp32=True)
            inversion = gen.synthesis(w_interp, noise_mode='const', force_fp32=True)
            border_pixels = outer_mask_dilation
            crop_tensor = to_tensor(crop)[None].mul(2).sub(1).cuda()
            content_mask, border_mask, full_mask = calc_masks(crop_tensor, segmentation_model, border_pixels,
                                                              inner_mask_dilation, outer_mask_dilation,
                                                              whole_image_border)
            inversion = tensor2pil(inversion)
            inversion_projection = paste_image(inverse_transform, inversion, orig_image)
            # Stitching tuning: match the real crop at the border, the edited
            # image in the content region.
            optimized_tensor = optimize_border(gen, crop_tensor, edited_tensor,
                                               w_edit_interp, border_mask=border_mask, content_mask=content_mask,
                                               optimize_generator=True, num_steps=num_steps, loss_l2=l2,
                                               is_style_input=is_style_input, content_loss_lambda=content_loss_lambda,
                                               border_loss_threshold=border_loss_threshold)
            video_frames[f'optimized_edits/{direction}/{factor}'].append(
                tensor2pil(optimized_tensor)
            )
            optimized_image = tensor2pil(optimized_tensor)
            edited_image = tensor2pil(edited_tensor)
            full_mask_image = tensor2pil(full_mask.mul(2).sub(1))
            # Paste crops back into the original frame through the full mask.
            edit_projection = paste_image_mask(inverse_transform, edited_image, orig_image, full_mask_image, radius=0)
            optimized_projection = paste_image_mask(inverse_transform, optimized_image, orig_image, full_mask_image,
                                                    radius=0)
            optimized_projection_feathered = paste_image_mask(inverse_transform, optimized_image, orig_image,
                                                              full_mask_image,
                                                              radius=outer_mask_dilation // 2)
            folder_name = f'{direction}/{factor}'
            video_frame = concat_images_horizontally(orig_image, edit_projection, optimized_projection)
            video_frame = add_texts_to_image_vertical(['original', 'mask', 'stitching tuning'], video_frame)
            video_frames[folder_name].append(video_frame)
            video_frame = concat_images_horizontally(orig_image, edit_projection, optimized_projection_feathered)
            video_frame = add_texts_to_image_vertical(['original', 'mask', 'stitching tuning'], video_frame)
            video_frames[f'{folder_name}/feathering'].append(video_frame)
            if output_frames:
                frames_dir = os.path.join(output_folder, 'frames', folder_name)
                os.makedirs(frames_dir, exist_ok=True)
                save_image(inversion_projection, os.path.join(frames_dir, f'pti_{i:04d}.jpeg'))
                save_image(orig_image, os.path.join(frames_dir, f'source_{i:04d}.jpeg'))
                save_image(edit_projection, os.path.join(frames_dir, f'edit_{i:04d}.jpeg'))
                save_image(optimized_projection, os.path.join(frames_dir, f'optimized_{i:04d}.jpeg'))
                save_image(optimized_projection_feathered,
                           os.path.join(frames_dir, f'optimized_feathering_{i:04d}.jpeg'))
            if debug:
                # Debug output: mask visualizations, masked composites and a
                # per-frame dump file with everything needed to re-composite.
                border_mask_image = tensor2pil(border_mask.mul(2).sub(1))
                inner_mask_image = tensor2pil(content_mask.mul(2).sub(1))
                video_frames[f'masks/{direction}/{factor}'].append(
                    concat_images_horizontally(
                        border_mask_image,
                        inner_mask_image,
                        full_mask_image
                    ))
                inner_image = optimized_projection.copy()
                outer_mask_image = ImageChops.invert(inner_mask_image)
                full_mask_projection = full_mask_image.transform(orig_image.size, Image.PERSPECTIVE, inverse_transform,
                                                                 Image.BILINEAR)
                outer_mask_projection = outer_mask_image.transform(orig_image.size, Image.PERSPECTIVE,
                                                                   inverse_transform,
                                                                   Image.BILINEAR)
                inner_image.putalpha(full_mask_projection)
                outer_image = optimized_projection.copy()
                outer_image.putalpha(outer_mask_projection)
                masked = concat_images_horizontally(inner_image, outer_image)
                video_frames[f'masked/{folder_name}'].append(masked)
                frame_data = create_dump_file(border_mask_image, crop, full_mask_image, inner_mask_image,
                                              inverse_transform, optimized_image, orig_image, quad, edited_image)
                os.makedirs(os.path.join(output_folder, 'dumps', folder_name), exist_ok=True)
                torch.save(frame_data, os.path.join(output_folder, 'dumps', folder_name, f'{i}.pt'))
        # --- encode every collected frame sequence to a video --------------
        for folder_name, frames in video_frames.items():
            folder_path = os.path.join(output_folder, folder_name)
            os.makedirs(folder_path, exist_ok=True)
            imageio.mimwrite(os.path.join(folder_path, 'out.mp4'), frames, fps=25, output_params=['-vf', 'fps=25'])
155,984 | import warnings
import torch
def _should_use_custom_op():
    """Decide whether to route grid_sample through the custom autograd op.

    NOTE(review): ``enabled`` is a module-level flag defined elsewhere in this
    file.
    """
    if not enabled:
        return False
    # The custom op is only known to work on these PyTorch versions.
    if any(torch.__version__.startswith(x) for x in ['1.7.', '1.8.', '1.9', '1.10']):
        return True
    warnings.warn(f'grid_sample_gradfix not supported on PyTorch {torch.__version__}. Falling back to torch.nn.functional.grid_sample().')
    return False
class _GridSample2dForward(torch.autograd.Function):
    """Custom autograd wrapper around F.grid_sample (bilinear, zero padding,
    align_corners=False).

    NOTE(review): torch.autograd.Function methods are conventionally decorated
    with @staticmethod; the decorators appear to have been lost in this copy --
    confirm against the original grid_sample_gradfix source.
    """
    def forward(ctx, input, grid):
        assert input.ndim == 4  # NCHW feature map
        assert grid.ndim == 4   # N,H,W,2 sampling grid
        output = torch.nn.functional.grid_sample(input=input, grid=grid, mode='bilinear', padding_mode='zeros', align_corners=False)
        ctx.save_for_backward(input, grid)
        return output

    def backward(ctx, grad_output):
        input, grid = ctx.saved_tensors
        # The backward pass is itself a custom differentiable op, so
        # second-order gradients remain available.
        grad_input, grad_grid = _GridSample2dBackward.apply(grad_output, input, grid)
        return grad_input, grad_grid
def grid_sample(input, grid):
    """Bilinear grid sample with zero padding; routes through the custom
    gradfix op when supported, otherwise the stock PyTorch implementation."""
    if not _should_use_custom_op():
        return torch.nn.functional.grid_sample(input=input, grid=grid, mode='bilinear', padding_mode='zeros', align_corners=False)
    return _GridSample2dForward.apply(input, grid)
155,998 | from configs import paths_config
import torch
from torch import nn
from models.e4e.encoders import psp_encoders
from models.e4e.stylegan2.model import Generator
def get_keys(d, name):
    """Extract the sub-state-dict for module ``name``.

    Unwraps a checkpoint's ``'state_dict'`` entry when present, keeps only the
    keys prefixed with ``name``, and strips the prefix plus one separator
    character from each key.
    """
    state = d.get('state_dict', d)
    prefix_len = len(name) + 1
    return {key[prefix_len:]: value for key, value in state.items() if key.startswith(name)}
156,009 | import math
import random
import torch
from torch import nn
from torch.nn import functional as F
from models.StyleCLIP.models.stylegan2.op import FusedLeakyReLU, fused_leaky_relu, upfirdn2d
def make_kernel(k):
    """Build a normalized (sum == 1) float32 filter kernel.

    A 1-D input is expanded to its 2-D separable outer product first.
    """
    kernel = torch.tensor(k, dtype=torch.float32)
    if kernel.ndim == 1:
        kernel = kernel[None, :] * kernel[:, None]
    return kernel / kernel.sum()
156,011 | import PIL
import PIL.Image
import dlib
import face_alignment
import numpy as np
import scipy
import scipy.ndimage
import skimage.io as io
from PIL import Image
from scipy.ndimage import gaussian_filter1d
from tqdm import tqdm
from configs import paths_config
def crop_image(filepath, output_size, quad, enable_padding=False):
    """Crop (and optionally pad/blur-extend) an image around an oriented quad,
    then warp the quad to an axis-aligned ``output_size`` square.

    Args:
        filepath: Path to an image, or an already-open PIL Image.
        output_size: Side length of the returned square image.
        quad: 4x2 array of quad corners in image coordinates; mutated in place
            by the shrink/crop/pad adjustments below.
        enable_padding: When True, reflect-pad and blur-blend quad regions that
            fall outside the image instead of clipping them.

    Returns:
        The cropped, aligned PIL image.

    Note: ``PIL.Image.ANTIALIAS`` (deprecated, removed in Pillow 10) has been
    replaced with its long-standing equivalent ``PIL.Image.LANCZOS``.
    """
    x = (quad[3] - quad[1]) / 2
    qsize = np.hypot(*x) * 2
    # read image
    if isinstance(filepath, PIL.Image.Image):
        img = filepath
    else:
        img = PIL.Image.open(filepath)
    transform_size = output_size
    # Shrink: pre-downscale very large sources so later resampling stays cheap.
    shrink = int(np.floor(qsize / output_size * 0.5))
    if shrink > 1:
        rsize = (int(np.rint(float(img.size[0]) / shrink)), int(np.rint(float(img.size[1]) / shrink)))
        img = img.resize(rsize, PIL.Image.LANCZOS)
        quad /= shrink
        qsize /= shrink
    # Crop to the quad's bounding box plus a safety border.
    border = max(int(np.rint(qsize * 0.1)), 3)
    crop = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))),
            int(np.ceil(max(quad[:, 1]))))
    crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, img.size[0]),
            min(crop[3] + border, img.size[1]))
    if (crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]):
        img = img.crop(crop)
        quad -= crop[0:2]
    # Pad: extend the image where the quad sticks out past its edges.
    pad = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))),
           int(np.ceil(max(quad[:, 1]))))
    pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - img.size[0] + border, 0),
           max(pad[3] - img.size[1] + border, 0))
    if enable_padding and max(pad) > border - 4:
        pad = np.maximum(pad, int(np.rint(qsize * 0.3)))
        img = np.pad(np.float32(img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect')
        h, w, _ = img.shape
        y, x, _ = np.ogrid[:h, :w, :1]
        # Blend the reflected padding toward a blurred/median background so the
        # synthetic border does not show hard reflection artifacts.
        mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0], np.float32(w - 1 - x) / pad[2]),
                          1.0 - np.minimum(np.float32(y) / pad[1], np.float32(h - 1 - y) / pad[3]))
        blur = qsize * 0.02
        img += (scipy.ndimage.gaussian_filter(img, [blur, blur, 0]) - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)
        img += (np.median(img, axis=(0, 1)) - img) * np.clip(mask, 0.0, 1.0)
        img = PIL.Image.fromarray(np.uint8(np.clip(np.rint(img), 0, 255)), 'RGB')
        quad += pad[:2]
    # Transform: warp the (possibly adjusted) quad to an axis-aligned square.
    img = img.transform((transform_size, transform_size), PIL.Image.QUAD, (quad + 0.5).flatten(), PIL.Image.BILINEAR)
    if output_size < transform_size:
        img = img.resize((output_size, output_size), PIL.Image.LANCZOS)
    return img
def compute_transform(filepath, predictor, detector=None, scale=1.0, fa=None):
    """Compute the oriented crop rectangle for face alignment from 68-point
    facial landmarks.

    Returns:
        (c, x, y): rectangle center and half-axis vectors, in image coordinates.

    Raises:
        Exception: when no face is detected in the image.
    """
    lm = get_landmark(filepath, predictor, detector, fa)
    if lm is None:
        raise Exception(f'Did not detect any faces in image: {filepath}')
    # 68-point landmark layout: 0-16 chin, 17-26 eyebrows, 27-35 nose,
    # 36-41 left eye, 42-47 right eye, 48-59 outer mouth, 60-67 inner mouth.
    # Only the eyes and the outer mouth are needed below; the other slices
    # were unused locals and have been removed.
    lm_eye_left = lm[36: 42]  # left-clockwise
    lm_eye_right = lm[42: 48]  # left-clockwise
    lm_mouth_outer = lm[48: 60]  # left-clockwise
    # Calculate auxiliary vectors.
    eye_left = np.mean(lm_eye_left, axis=0)
    eye_right = np.mean(lm_eye_right, axis=0)
    eye_avg = (eye_left + eye_right) * 0.5
    eye_to_eye = eye_right - eye_left
    mouth_left = lm_mouth_outer[0]
    mouth_right = lm_mouth_outer[6]
    mouth_avg = (mouth_left + mouth_right) * 0.5
    eye_to_mouth = mouth_avg - eye_avg
    # Choose oriented crop rectangle: x follows the eye line counter-rotated by
    # the eye-to-mouth direction; its length is set by the larger facial span.
    x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1]
    x /= np.hypot(*x)
    x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8)
    x *= scale
    y = np.flipud(x) * [-1, 1]
    c = eye_avg + eye_to_mouth * 0.1
    return c, x, y
The provided code snippet includes the necessary dependencies for implementing the `align_face` function. Write a Python function `def align_face(filepath_or_image, predictor, output_size, detector=None, enable_padding=False, scale=1.0)` to solve the following problem:
Align a face in an image. Accepts a file path (or an already-loaded PIL Image) together with a landmark predictor, and returns the aligned face as a PIL Image.
Here is the function:
def align_face(filepath_or_image, predictor, output_size, detector=None,
               enable_padding=False, scale=1.0):
    """Align a face image.

    :param filepath_or_image: image path (str) or PIL Image
    :return: aligned PIL Image
    """
    center, x_vec, y_vec = compute_transform(filepath_or_image, predictor, detector=detector,
                                             scale=scale)
    # Corners of the oriented crop rectangle, counter-clockwise from top-left.
    corners = np.stack([center - x_vec - y_vec, center - x_vec + y_vec,
                        center + x_vec + y_vec, center + x_vec - y_vec])
    return crop_image(filepath_or_image, output_size, corners, enable_padding=enable_padding)
156,012 | from typing import List, Optional
import torch
import torch.nn.functional as F
def dilation(
    tensor: torch.Tensor,
    kernel: torch.Tensor,
    structuring_element: Optional[torch.Tensor] = None,
    origin: Optional[List[int]] = None,
    border_type: str = 'geodesic',
    border_value: float = 0.0,
    max_val: float = 1e4,
    engine: str = 'unfold',
) -> torch.Tensor:
    r"""Return the dilated image applying the same kernel in each channel.

    The kernel must have 2 dimensions.

    Args:
        tensor: Image with shape :math:`(B, C, H, W)`.
        kernel: Positions of non-infinite elements of a flat structuring element. Non-zero values give
            the set of neighbors of the center over which the operation is applied. Its shape is
            :math:`(k_x, k_y)`. For full structural elements use ``torch.ones_like(structural_element)``.
        structuring_element: Structuring element used for the grayscale dilation. It may be a non-flat
            structuring element.
        origin: Origin of the structuring element. Default: ``None`` and uses the center of
            the structuring element as origin (rounding towards zero).
        border_type: It determines how the image borders are handled, where ``border_value`` is the value
            when ``border_type`` is equal to ``constant``. Default: ``geodesic`` which ignores the values
            that are outside the image when applying the operation.
        border_value: Value to fill past edges of input if ``border_type`` is ``constant``.
        max_val: The value of the infinite elements in the kernel.
        engine: ``convolution`` is faster and less memory hungry, and ``unfold`` is more stable numerically.

    Returns:
        Dilated image with shape :math:`(B, C, H, W)`.

    Example:
        >>> tensor = torch.rand(1, 3, 5, 5)
        >>> kernel = torch.ones(3, 3)
        >>> dilated_img = dilation(tensor, kernel)
    """
    if not isinstance(tensor, torch.Tensor):
        raise TypeError(f"Input type is not a torch.Tensor. Got {type(tensor)}")
    if len(tensor.shape) != 4:
        raise ValueError(f"Input size must have 4 dimensions. Got {tensor.dim()}")
    if not isinstance(kernel, torch.Tensor):
        raise TypeError(f"Kernel type is not a torch.Tensor. Got {type(kernel)}")
    if len(kernel.shape) != 2:
        raise ValueError(f"Kernel size must have 2 dimensions. Got {kernel.dim()}")
    # origin: default to the structuring element's center (rounding towards zero).
    se_h, se_w = kernel.shape
    if origin is None:
        origin = [se_h // 2, se_w // 2]
    # pad: [left, right, top, bottom] so every output pixel sees a full window.
    pad_e: List[int] = [origin[1], se_w - origin[1] - 1, origin[0], se_h - origin[0] - 1]
    if border_type == 'geodesic':
        # Geodesic border: pad with -max_val (an approximation of -inf) so the
        # padded pixels can never win the max reduction below.
        border_value = -max_val
        border_type = 'constant'
    output: torch.Tensor = F.pad(tensor, pad_e, mode=border_type, value=border_value)
    # computation: positions outside the structuring element get -max_val so
    # they are effectively ignored by the max.
    if structuring_element is None:
        neighborhood = torch.zeros_like(kernel)
        neighborhood[kernel == 0] = -max_val
    else:
        neighborhood = structuring_element.clone()
        neighborhood[kernel == 0] = -max_val
    if engine == 'unfold':
        # Gather every (se_h, se_w) window, add the (flipped) structuring
        # element, then take the max over the two window dimensions.
        output = output.unfold(2, se_h, 1).unfold(3, se_w, 1)
        output, _ = torch.max(output + neighborhood.flip((0, 1)), 4)
        output, _ = torch.max(output, 4)
    elif engine == 'convolution':
        # One conv output channel per kernel position; the bias injects the
        # structuring-element offsets, then max over the channel dimension.
        B, C, H, W = tensor.size()
        h_pad, w_pad = output.shape[-2:]
        reshape_kernel = _neight2channels_like_kernel(kernel)
        output, _ = F.conv2d(
            output.view(B * C, 1, h_pad, w_pad), reshape_kernel, padding=0, bias=neighborhood.view(-1).flip(0)
        ).max(dim=1)
        output = output.view(B, C, H, W)
    else:
        raise NotImplementedError(f"engine {engine} is unknown, use 'convolution' or 'unfold'")
    return output.view_as(tensor)
def erosion(
    tensor: torch.Tensor,
    kernel: torch.Tensor,
    structuring_element: Optional[torch.Tensor] = None,
    origin: Optional[List[int]] = None,
    border_type: str = 'geodesic',
    border_value: float = 0.0,
    max_val: float = 1e4,
    engine: str = 'unfold',
) -> torch.Tensor:
    r"""Return the eroded image applying the same kernel in each channel.

    The kernel must have 2 dimensions.

    Args:
        tensor: Image with shape :math:`(B, C, H, W)`.
        kernel: Positions of non-infinite elements of a flat structuring element. Non-zero values give
            the set of neighbors of the center over which the operation is applied. Its shape is
            :math:`(k_x, k_y)`. For full structural elements use ``torch.ones_like(structural_element)``.
        structuring_element: Structuring element used for the grayscale dilation.
            It may be a non-flat structuring element.
        origin: Origin of the structuring element. Default: ``None`` and uses the center of
            the structuring element as origin (rounding towards zero).
        border_type: It determines how the image borders are handled, where ``border_value`` is the value
            when ``border_type`` is equal to ``constant``. Default: ``geodesic`` which ignores the values
            that are outside the image when applying the operation.
        border_value: Value to fill past edges of input if border_type is ``constant``.
        max_val: The value of the infinite elements in the kernel.
        engine: ``convolution`` is faster and less memory hungry, and ``unfold`` is more stable numerically.

    Returns:
        Eroded image with shape :math:`(B, C, H, W)`.

    Example:
        >>> tensor = torch.rand(1, 3, 5, 5)
        >>> kernel = torch.ones(5, 5)
        >>> output = erosion(tensor, kernel)
    """
    if not isinstance(tensor, torch.Tensor):
        raise TypeError(f"Input type is not a torch.Tensor. Got {type(tensor)}")
    if len(tensor.shape) != 4:
        raise ValueError(f"Input size must have 4 dimensions. Got {tensor.dim()}")
    if not isinstance(kernel, torch.Tensor):
        raise TypeError(f"Kernel type is not a torch.Tensor. Got {type(kernel)}")
    if len(kernel.shape) != 2:
        raise ValueError(f"Kernel size must have 2 dimensions. Got {kernel.dim()}")
    # origin: default to the structuring element's center (rounding towards zero).
    se_h, se_w = kernel.shape
    if origin is None:
        origin = [se_h // 2, se_w // 2]
    # pad: [left, right, top, bottom] so every output pixel sees a full window.
    pad_e: List[int] = [origin[1], se_w - origin[1] - 1, origin[0], se_h - origin[0] - 1]
    if border_type == 'geodesic':
        # Geodesic border: pad with +max_val so padded pixels never win the min.
        border_value = max_val
        border_type = 'constant'
    output: torch.Tensor = F.pad(tensor, pad_e, mode=border_type, value=border_value)
    # computation: positions outside the structuring element get -max_val, which
    # is SUBTRACTED below, pushing them to +huge so the min ignores them.
    if structuring_element is None:
        neighborhood = torch.zeros_like(kernel)
        neighborhood[kernel == 0] = -max_val
    else:
        neighborhood = structuring_element.clone()
        neighborhood[kernel == 0] = -max_val
    if engine == 'unfold':
        # Gather every (se_h, se_w) window, subtract the structuring element,
        # then take the min over the two window dimensions.
        output = output.unfold(2, se_h, 1).unfold(3, se_w, 1)
        output, _ = torch.min(output - neighborhood, 4)
        output, _ = torch.min(output, 4)
    elif engine == 'convolution':
        # One conv output channel per kernel position; the (negated) bias
        # applies the structuring-element offsets, then min over channels.
        B, C, H, W = tensor.size()
        Hpad, Wpad = output.shape[-2:]
        reshape_kernel = _neight2channels_like_kernel(kernel)
        output, _ = F.conv2d(output.view(B * C, 1, Hpad, Wpad),
                             reshape_kernel,
                             padding=0,
                             bias=-neighborhood.view(-1)).min(dim=1)
        output = output.view(B, C, H, W)
    else:
        raise NotImplementedError(f"engine {engine} is unknown, use 'convolution' or 'unfold'")
    return output
The provided code snippet includes necessary dependencies for implementing the `gradient` function. Write a Python function `def gradient( tensor: torch.Tensor, kernel: torch.Tensor, structuring_element: Optional[torch.Tensor] = None, origin: Optional[List[int]] = None, border_type: str = 'geodesic', border_value: float = 0.0, max_val: float = 1e4, engine: str = 'unfold', ) -> torch.Tensor` to solve the following problem:
r"""Return the morphological gradient of an image. .. image:: _static/img/gradient.png That means, (dilation - erosion) applying the same kernel in each channel. The kernel must have 2 dimensions. Args: tensor: Image with shape :math:`(B, C, H, W)`. kernel: Positions of non-infinite elements of a flat structuring element. Non-zero values give the set of neighbors of the center over which the operation is applied. Its shape is :math:`(k_x, k_y)`. For full structural elements use torch.ones_like(structural_element). structuring_element: Structuring element used for the grayscale dilation. It may be a non-flat structuring element. origin: Origin of the structuring element. Default is None and uses the center of the structuring element as origin (rounding towards zero). border_type: It determines how the image borders are handled, where ``border_value`` is the value when ``border_type`` is equal to ``constant``. Default: ``geodesic`` which ignores the values that are outside the image when applying the operation. border_value: Value to fill past edges of input if ``border_type`` is ``constant``. max_val: The value of the infinite elements in the kernel. engine: convolution is faster and less memory hungry, and unfold is more stable numerically Returns: Gradient image with shape :math:`(B, C, H, W)`. .. note:: See a working example `here <https://kornia-tutorials.readthedocs.io/en/latest/ morphology_101.html>`__. Example: >>> tensor = torch.rand(1, 3, 5, 5) >>> kernel = torch.ones(3, 3) >>> gradient_img = gradient(tensor, kernel)
Here is the function:
def gradient(
    tensor: torch.Tensor,
    kernel: torch.Tensor,
    structuring_element: Optional[torch.Tensor] = None,
    origin: Optional[List[int]] = None,
    border_type: str = 'geodesic',
    border_value: float = 0.0,
    max_val: float = 1e4,
    engine: str = 'unfold',
) -> torch.Tensor:
    r"""Return the morphological gradient of an image.

    The gradient is dilation minus erosion, applying the same kernel in each
    channel. The kernel must have 2 dimensions. All keyword arguments are
    forwarded unchanged to both :func:`dilation` and :func:`erosion`; see those
    functions for their exact semantics.

    Args:
        tensor: Image with shape :math:`(B, C, H, W)`.
        kernel: Positions of non-infinite elements of a flat structuring
            element, shape :math:`(k_x, k_y)`.
        structuring_element: Optional non-flat structuring element.
        origin: Origin of the structuring element (defaults to its center).
        border_type: Border handling mode; ``geodesic`` ignores out-of-image values.
        border_value: Fill value when ``border_type`` is ``constant``.
        max_val: The value used for the infinite elements in the kernel.
        engine: ``convolution`` or ``unfold``.

    Returns:
        Gradient image with shape :math:`(B, C, H, W)`.

    Example:
        >>> tensor = torch.rand(1, 3, 5, 5)
        >>> kernel = torch.ones(3, 3)
        >>> gradient_img = gradient(tensor, kernel)
    """
    morph_kwargs = dict(
        kernel=kernel,
        structuring_element=structuring_element,
        origin=origin,
        border_type=border_type,
        border_value=border_value,
        max_val=max_val,
        engine=engine,
    )
    dilated = dilation(tensor, **morph_kwargs)
    eroded = erosion(tensor, **morph_kwargs)
    return dilated - eroded
156,013 | from typing import List, Optional
import torch
import torch.nn.functional as F
def opening(
    tensor: torch.Tensor,
    kernel: torch.Tensor,
    structuring_element: Optional[torch.Tensor] = None,
    origin: Optional[List[int]] = None,
    border_type: str = 'geodesic',
    border_value: float = 0.0,
    max_val: float = 1e4,
    engine: str = 'unfold',
) -> torch.Tensor:
    r"""Return the opened image, (that means, dilation after an erosion) applying the same kernel in each channel.

    The kernel must have 2 dimensions.

    Args:
        tensor: Image with shape :math:`(B, C, H, W)`.
        kernel: Positions of non-infinite elements of a flat structuring element. Non-zero values give
            the set of neighbors of the center over which the operation is applied. Its shape is :math:`(k_x, k_y)`.
            For full structural elements use torch.ones_like(structural_element).
        structuring_element: Structuring element used for the grayscale dilation. It may be a
            non-flat structuring element.
        origin: Origin of the structuring element. Default: ``None`` and uses the center of
            the structuring element as origin (rounding towards zero).
        border_type: It determines how the image borders are handled, where ``border_value`` is the value
            when ``border_type`` is equal to ``constant``. Default: ``geodesic`` which ignores the values that are
            outside the image when applying the operation.
        border_value: Value to fill past edges of input if ``border_type`` is ``constant``.
        max_val: The value of the infinite elements in the kernel.
        engine: convolution is faster and less memory hungry, and unfold is more stable numerically

    Returns:
        torch.Tensor: Opened image with shape :math:`(B, C, H, W)`.

    Example:
        >>> tensor = torch.rand(1, 3, 5, 5)
        >>> kernel = torch.ones(3, 3)
        >>> opened_img = opening(tensor, kernel)
    """
    # Validate user-facing arguments up front so failures point at this call.
    if not isinstance(tensor, torch.Tensor):
        raise TypeError(f"Input type is not a torch.Tensor. Got {type(tensor)}")
    if len(tensor.shape) != 4:
        raise ValueError(f"Input size must have 4 dimensions. Got {tensor.dim()}")
    if not isinstance(kernel, torch.Tensor):
        raise TypeError(f"Kernel type is not a torch.Tensor. Got {type(kernel)}")
    if len(kernel.shape) != 2:
        raise ValueError(f"Kernel size must have 2 dimensions. Got {kernel.dim()}")
    # Opening = erosion followed by dilation with the same structuring element.
    # Fix: forward ``engine`` to the inner erosion too; previously the caller's
    # documented backend choice only applied to the outer dilation.
    return dilation(
        erosion(
            tensor,
            kernel=kernel,
            structuring_element=structuring_element,
            origin=origin,
            border_type=border_type,
            border_value=border_value,
            max_val=max_val,
            engine=engine,
        ),
        kernel=kernel,
        structuring_element=structuring_element,
        origin=origin,
        border_type=border_type,
        border_value=border_value,
        max_val=max_val,
        engine=engine,
    )
The provided code snippet includes necessary dependencies for implementing the `top_hat` function. Write a Python function `def top_hat( tensor: torch.Tensor, kernel: torch.Tensor, structuring_element: Optional[torch.Tensor] = None, origin: Optional[List[int]] = None, border_type: str = 'geodesic', border_value: float = 0.0, max_val: float = 1e4, engine: str = 'unfold', ) -> torch.Tensor` to solve the following problem:
r"""Return the top hat transformation of an image. .. image:: _static/img/top_hat.png That means, (image - opened_image) applying the same kernel in each channel. The kernel must have 2 dimensions. See :func:`~kornia.morphology.opening` for details. Args: tensor: Image with shape :math:`(B, C, H, W)`. kernel: Positions of non-infinite elements of a flat structuring element. Non-zero values give the set of neighbors of the center over which the operation is applied. Its shape is :math:`(k_x, k_y)`. For full structural elements use torch.ones_like(structural_element). structuring_element: Structuring element used for the grayscale dilation. It may be a non-flat structuring element. origin: Origin of the structuring element. Default: ``None`` and uses the center of the structuring element as origin (rounding towards zero). border_type: It determines how the image borders are handled, where ``border_value`` is the value when ``border_type`` is equal to ``constant``. Default: ``geodesic`` which ignores the values that are outside the image when applying the operation. border_value: Value to fill past edges of input if ``border_type`` is ``constant``. max_val: The value of the infinite elements in the kernel. engine: convolution is faster and less memory hungry, and unfold is more stable numerically Returns: Top hat transformed image with shape :math:`(B, C, H, W)`. .. note:: See a working example `here <https://kornia-tutorials.readthedocs.io/en/latest/ morphology_101.html>`__. Example: >>> tensor = torch.rand(1, 3, 5, 5) >>> kernel = torch.ones(3, 3) >>> top_hat_img = top_hat(tensor, kernel)
Here is the function:
def top_hat(
    tensor: torch.Tensor,
    kernel: torch.Tensor,
    structuring_element: Optional[torch.Tensor] = None,
    origin: Optional[List[int]] = None,
    border_type: str = 'geodesic',
    border_value: float = 0.0,
    max_val: float = 1e4,
    engine: str = 'unfold',
) -> torch.Tensor:
    r"""Return the top hat transformation of an image.

    Computes ``image - opening(image)`` with the same kernel in each channel,
    extracting the small bright details that the opening removes. The kernel
    must have 2 dimensions. See :func:`~kornia.morphology.opening` for details
    on every parameter.

    Args:
        tensor: Image with shape :math:`(B, C, H, W)`.
        kernel: Positions of non-infinite elements of a flat structuring element,
            with shape :math:`(k_x, k_y)`. For full structural elements use
            torch.ones_like(structural_element).
        structuring_element: Optional non-flat structuring element.
        origin: Origin of the structuring element; ``None`` uses its center
            (rounding towards zero).
        border_type: How image borders are handled; ``border_value`` applies when
            it is ``constant``. Default ``geodesic`` ignores values outside the image.
        border_value: Fill value past the edges for ``constant`` borders.
        max_val: Value of the infinite elements in the kernel.
        engine: convolution is faster and less memory hungry, unfold is more
            stable numerically.

    Returns:
        Top hat transformed image with shape :math:`(B, C, H, W)`.

    Example:
        >>> tensor = torch.rand(1, 3, 5, 5)
        >>> kernel = torch.ones(3, 3)
        >>> top_hat_img = top_hat(tensor, kernel)
    """
    if not isinstance(tensor, torch.Tensor):
        raise TypeError(f"Input type is not a torch.Tensor. Got {type(tensor)}")
    if tensor.dim() != 4:
        raise ValueError(f"Input size must have 4 dimensions. Got {tensor.dim()}")
    if not isinstance(kernel, torch.Tensor):
        raise TypeError(f"Kernel type is not a torch.Tensor. Got {type(kernel)}")
    if kernel.dim() != 2:
        raise ValueError(f"Kernel size must have 2 dimensions. Got {kernel.dim()}")
    opened = opening(
        tensor,
        kernel=kernel,
        structuring_element=structuring_element,
        origin=origin,
        border_type=border_type,
        border_value=border_value,
        max_val=max_val,
        engine=engine,
    )
    return tensor - opened
156,014 | from typing import List, Optional
import torch
import torch.nn.functional as F
def closing(
    tensor: torch.Tensor,
    kernel: torch.Tensor,
    structuring_element: Optional[torch.Tensor] = None,
    origin: Optional[List[int]] = None,
    border_type: str = 'geodesic',
    border_value: float = 0.0,
    max_val: float = 1e4,
    engine: str = 'unfold',
) -> torch.Tensor:
    r"""Return the closed image, (that means, erosion after a dilation) applying the same kernel in each channel.

    The kernel must have 2 dimensions.

    Args:
        tensor: Image with shape :math:`(B, C, H, W)`.
        kernel: Positions of non-infinite elements of a flat structuring element. Non-zero values give
            the set of neighbors of the center over which the operation is applied. Its shape is :math:`(k_x, k_y)`.
            For full structural elements use torch.ones_like(structural_element).
        structuring_element: Structuring element used for the grayscale dilation. It may be a
            non-flat structuring element.
        origin: Origin of the structuring element. Default is None and uses the center of
            the structuring element as origin (rounding towards zero).
        border_type: It determines how the image borders are handled, where ``border_value`` is the value
            when ``border_type`` is equal to ``constant``. Default: ``geodesic`` which ignores the values that are
            outside the image when applying the operation.
        border_value: Value to fill past edges of input if ``border_type`` is ``constant``.
        max_val: The value of the infinite elements in the kernel.
        engine: convolution is faster and less memory hungry, and unfold is more stable numerically

    Returns:
        Closed image with shape :math:`(B, C, H, W)`.

    Example:
        >>> tensor = torch.rand(1, 3, 5, 5)
        >>> kernel = torch.ones(3, 3)
        >>> closed_img = closing(tensor, kernel)
    """
    # Validate user-facing arguments up front so failures point at this call.
    if not isinstance(tensor, torch.Tensor):
        raise TypeError(f"Input type is not a torch.Tensor. Got {type(tensor)}")
    if len(tensor.shape) != 4:
        raise ValueError(f"Input size must have 4 dimensions. Got {tensor.dim()}")
    if not isinstance(kernel, torch.Tensor):
        raise TypeError(f"Kernel type is not a torch.Tensor. Got {type(kernel)}")
    if len(kernel.shape) != 2:
        raise ValueError(f"Kernel size must have 2 dimensions. Got {kernel.dim()}")
    # Closing = dilation followed by erosion with the same structuring element.
    # Fix: forward ``engine`` to the inner dilation too; previously the caller's
    # documented backend choice only applied to the outer erosion.
    return erosion(
        dilation(
            tensor,
            kernel=kernel,
            structuring_element=structuring_element,
            origin=origin,
            border_type=border_type,
            border_value=border_value,
            max_val=max_val,
            engine=engine,
        ),
        kernel=kernel,
        structuring_element=structuring_element,
        origin=origin,
        border_type=border_type,
        border_value=border_value,
        max_val=max_val,
        engine=engine,
    )
The provided code snippet includes necessary dependencies for implementing the `bottom_hat` function. Write a Python function `def bottom_hat( tensor: torch.Tensor, kernel: torch.Tensor, structuring_element: Optional[torch.Tensor] = None, origin: Optional[List[int]] = None, border_type: str = 'geodesic', border_value: float = 0.0, max_val: float = 1e4, engine: str = 'unfold', ) -> torch.Tensor` to solve the following problem:
r"""Return the bottom hat transformation of an image. .. image:: _static/img/bottom_hat.png That means, (closed_image - image) applying the same kernel in each channel. The kernel must have 2 dimensions. See :func:`~kornia.morphology.closing` for details. Args: tensor: Image with shape :math:`(B, C, H, W)`. kernel: Positions of non-infinite elements of a flat structuring element. Non-zero values give the set of neighbors of the center over which the operation is applied. Its shape is :math:`(k_x, k_y)`. For full structural elements use torch.ones_like(structural_element). structuring_element: Structuring element used for the grayscale dilation. It may be a non-flat structuring element. origin: Origin of the structuring element. Default: ``None`` and uses the center of the structuring element as origin (rounding towards zero). border_type: It determines how the image borders are handled, where ``border_value`` is the value when ``border_type`` is equal to ``constant``. Default: ``geodesic`` which ignores the values that are outside the image when applying the operation. border_value: Value to fill past edges of input if ``border_type`` is ``constant``. max_val: The value of the infinite elements in the kernel. engine: convolution is faster and less memory hungry, and unfold is more stable numerically Returns: Top hat transformed image with shape :math:`(B, C, H, W)`. .. note:: See a working example `here <https://kornia-tutorials.readthedocs.io/en/latest/ morphology_101.html>`__. Example: >>> tensor = torch.rand(1, 3, 5, 5) >>> kernel = torch.ones(3, 3) >>> bottom_hat_img = bottom_hat(tensor, kernel)
Here is the function:
def bottom_hat(
    tensor: torch.Tensor,
    kernel: torch.Tensor,
    structuring_element: Optional[torch.Tensor] = None,
    origin: Optional[List[int]] = None,
    border_type: str = 'geodesic',
    border_value: float = 0.0,
    max_val: float = 1e4,
    engine: str = 'unfold',
) -> torch.Tensor:
    r"""Return the bottom hat transformation of an image.

    Computes ``closing(image) - image`` with the same kernel in each channel,
    extracting the small dark details that the closing fills in. The kernel
    must have 2 dimensions. See :func:`~kornia.morphology.closing` for details
    on every parameter.

    Args:
        tensor: Image with shape :math:`(B, C, H, W)`.
        kernel: Positions of non-infinite elements of a flat structuring element,
            with shape :math:`(k_x, k_y)`. For full structural elements use
            torch.ones_like(structural_element).
        structuring_element: Optional non-flat structuring element.
        origin: Origin of the structuring element; ``None`` uses its center
            (rounding towards zero).
        border_type: How image borders are handled; ``border_value`` applies when
            it is ``constant``. Default ``geodesic`` ignores values outside the image.
        border_value: Fill value past the edges for ``constant`` borders.
        max_val: Value of the infinite elements in the kernel.
        engine: convolution is faster and less memory hungry, and unfold is more
            stable numerically.

    Returns:
        Bottom hat transformed image with shape :math:`(B, C, H, W)`.

    Example:
        >>> tensor = torch.rand(1, 3, 5, 5)
        >>> kernel = torch.ones(3, 3)
        >>> bottom_hat_img = bottom_hat(tensor, kernel)
    """
    if not isinstance(tensor, torch.Tensor):
        raise TypeError(f"Input type is not a torch.Tensor. Got {type(tensor)}")
    if len(tensor.shape) != 4:
        raise ValueError(f"Input size must have 4 dimensions. Got {tensor.dim()}")
    if not isinstance(kernel, torch.Tensor):
        raise TypeError(f"Kernel type is not a torch.Tensor. Got {type(kernel)}")
    if len(kernel.shape) != 2:
        raise ValueError(f"Kernel size must have 2 dimensions. Got {kernel.dim()}")
    closed = closing(
        tensor,
        kernel=kernel,
        structuring_element=structuring_element,
        origin=origin,
        border_type=border_type,
        border_value=border_value,
        max_val=max_val,
        engine=engine,
    )
    return closed - tensor
156,015 | import argparse
import math
import os
import pickle
from typing import List
import cv2
import numpy as np
import torch
from PIL import Image, ImageDraw, ImageFont
import configs.paths_config
from configs import paths_config
from training.networks import SynthesisBlock
#
class SynthesisBlock(torch.nn.Module):
    """One StyleGAN2 synthesis block (signature-only excerpt).

    NOTE(review): both method bodies are elided in this excerpt; the stub
    docstrings below are added only so the class parses. Confirm behavior
    against the full implementation in ``training.networks``.
    """
    def __init__(self,
        in_channels, # Number of input channels, 0 = first block.
        out_channels, # Number of output channels.
        w_dim, # Intermediate latent (W) dimensionality.
        resolution, # Resolution of this block.
        img_channels, # Number of output color channels.
        is_last, # Is this the last block?
        architecture = 'skip', # Architecture: 'orig', 'skip', 'resnet'.
        resample_filter = [1,3,3,1], # Low-pass filter to apply when resampling activations. NOTE(review): mutable default shared across instances -- presumably read-only; confirm.
        conv_clamp = None, # Clamp the output of convolution layers to +-X, None = disable clamping.
        use_fp16 = False, # Use FP16 for this block?
        fp16_channels_last = False, # Use channels-last memory format with FP16?
        **layer_kwargs, # Arguments for SynthesisLayer.
    ):
        """Body elided in this excerpt."""
    def forward(self, x, img, ws, style_input=False, force_fp32=False, fused_modconv=None, noise_bufs=None, **layer_kwargs):
        """Body elided in this excerpt."""
def get_affine_layers(synthesis):
    """Collect the style-modulation affine layers of a synthesis network.

    Walks the blocks in resolution order and returns ``(affine_module, is_conv)``
    pairs: ``True`` for conv-layer affines, ``False`` for the toRGB affine.
    """
    pairs = []
    for res in synthesis.block_resolutions:
        block = getattr(synthesis, f'b{res}')
        # The lowest-resolution block has no conv0.
        if hasattr(block, 'conv0'):
            pairs.append((block.conv0.affine, True))
        pairs.append((block.conv1.affine, True))
        pairs.append((block.torgb.affine, False))
    return pairs
156,016 | import argparse
import math
import os
import pickle
from typing import List
import cv2
import numpy as np
import torch
from PIL import Image, ImageDraw, ImageFont
import configs.paths_config
from configs import paths_config
from training.networks import SynthesisBlock
def load_stylespace_std():
    """Load the per-layer StyleSpace std arrays and return them as CUDA tensors.

    The pickle at ``paths_config.stylespace_mean_std`` holds a (mean, std)
    pair of numpy-array lists; only the std entries are used.
    """
    # Trusted local file produced by this project; pickle is acceptable here.
    with open(paths_config.stylespace_mean_std, 'rb') as handle:
        _mean, std_arrays = pickle.load(handle)
    return [torch.from_numpy(arr).cuda() for arr in std_arrays]
156,017 | import argparse
import math
import os
import pickle
from typing import List
import cv2
import numpy as np
import torch
from PIL import Image, ImageDraw, ImageFont
import configs.paths_config
from configs import paths_config
from training.networks import SynthesisBlock
def to_styles(edit: torch.Tensor, affine_layers):
    """Unpack a flat edit vector into per-layer style offsets.

    Consecutive slices of ``edit`` (sized by each affine layer's output width)
    are assigned to conv layers; toRGB layers receive zero tensors so they are
    left untouched.
    """
    styles = []
    offset = 0
    for layer, is_conv in affine_layers:
        width = layer.weight.shape[0]
        if is_conv:
            styles.append(edit[offset:offset + width].clone())
            offset += width
        else:
            styles.append(torch.zeros(width, device=edit.device, dtype=edit.dtype))
    return styles
156,018 | import argparse
import math
import os
import pickle
from typing import List
import cv2
import numpy as np
import torch
from PIL import Image, ImageDraw, ImageFont
import configs.paths_config
from configs import paths_config
from training.networks import SynthesisBlock
def w_to_styles(w, affine_layers):
    """Map a W+ latent to per-layer style vectors via the affine layers.

    Conv layers consume successive W+ rows; a toRGB layer reuses the current
    row without advancing.
    """
    styles = []
    row = 0
    for affine, is_conv in affine_layers:
        styles.append(affine(w[:, row]))
        if is_conv:
            row += 1
    return styles
156,019 | import numpy as np
from PIL import Image
import wandb
from configs import global_config
import torch
def log_image_from_w(w, G, name):
    """Synthesize the image for latent ``w`` with ``G`` and log it to wandb under ``name``."""
    frame = Image.fromarray(get_image_from_w(w, G))
    wandb.log(
        {f"{name}": [
            wandb.Image(frame, caption=f"current inversion {name}")]}
    )
def log_images_from_w(ws, G, names):
    """Log one wandb image per (name, latent) pair, moving each latent to the configured device."""
    for name, latent in zip(names, ws):
        log_image_from_w(latent.to(global_config.device), G, name)
156,020 | import numpy as np
from PIL import Image
import wandb
from configs import global_config
import torch
def save_image(name, method_type, results_dir, image, run_id):
    """Save ``image`` as ``<results_dir>/<method_type>_<name>_<run_id>.jpg``."""
    target_path = f'{results_dir}/{method_type}_{name}_{run_id}.jpg'
    image.save(target_path)
def get_image_from_w(w, G):
    """Decode latent ``w`` with generator ``G`` into an (H, W, C) uint8 numpy image."""
    if w.dim() <= 2:
        # Add the batch dimension expected by the synthesis network.
        w = w.unsqueeze(0)
    with torch.no_grad():
        raw = G.synthesis(w, noise_mode='const')
        # [-1, 1] float -> [0, 255] uint8, NCHW -> NHWC.
        raw = (raw.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8).detach().cpu().numpy()
    return raw[0]
def save_w(w, G, name, method_type, results_dir):
    """Render latent ``w`` with ``G`` and save the result via :func:`save_image`."""
    frame = Image.fromarray(get_image_from_w(w, G), mode='RGB')
    save_image(name, method_type, results_dir, frame)
156,021 | import numpy as np
from PIL import Image
import wandb
from configs import global_config
import torch
def create_alongside_images(images):
    """Concatenate images horizontally and return the montage as an RGB PIL image."""
    montage = np.concatenate([np.array(frame) for frame in images], axis=1)
    return Image.fromarray(montage, mode='RGB')
def get_image_from_w(w, G):
    """Synthesize latent ``w`` with ``G`` and return a uint8 numpy array (H, W, C)."""
    batched = w.unsqueeze(0) if len(w.size()) <= 2 else w
    with torch.no_grad():
        out = G.synthesis(batched, noise_mode='const')
        # [-1, 1] float NCHW -> [0, 255] uint8 NHWC on the CPU.
        out = (out.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8).detach().cpu().numpy()
    return out[0]
def save_concat_image(base_dir, image_latents, new_inv_image_latent, new_G,
                      old_G,
                      file_name,
                      extra_image=None):
    """Write a side-by-side comparison JPEG.

    Panels, left to right: the optional ``extra_image``, the old generator's
    renderings of ``image_latents``, then the new generator's rendering of
    ``new_inv_image_latent``. Saved to ``<base_dir>/<file_name>.jpg``.
    """
    panels = [] if extra_image is None else [extra_image]
    panels.extend(get_image_from_w(latent, old_G) for latent in image_latents)
    panels.append(get_image_from_w(new_inv_image_latent, new_G))
    create_alongside_images(panels).save(f'{base_dir}/{file_name}.jpg')
156,022 | import numpy as np
from PIL import Image
import wandb
from configs import global_config
import torch
def get_image_from_w(w, G):
    """Run generator ``G`` on latent ``w``; return the first image as uint8 HWC numpy."""
    if len(w.size()) <= 2:
        w = w.unsqueeze(0)  # synthesis expects a batch dimension
    with torch.no_grad():
        rendered = G.synthesis(w, noise_mode='const')
        rendered = (rendered.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8).detach().cpu().numpy()
    return rendered[0]
def save_single_image(base_dir, image_latent, G, file_name):
    """Render ``image_latent`` with ``G`` and save it as ``<base_dir>/<file_name>.jpg``."""
    frame = Image.fromarray(get_image_from_w(image_latent, G), mode='RGB')
    frame.save(f'{base_dir}/{file_name}.jpg')
156,023 | import copy
import pickle
from argparse import Namespace
import torch
from configs import paths_config, global_config
from models.e4e.psp import pSp
from training.networks import Generator
class pSp(nn.Module):
    """e4e inversion network wrapped in the pSp framework.

    Couples an image-to-latent encoder with a StyleGAN ``Generator`` decoder so
    an input face image can be embedded into latent space and reconstructed.
    """

    def __init__(self, opts):
        """Build the encoder/decoder from ``opts`` and load pretrained weights.

        Args:
            opts: Options namespace; this class reads ``encoder_type``,
                ``stylegan_size``, ``checkpoint_path``, ``stylegan_weights``,
                ``start_from_latent_avg`` and ``device`` from it.
        """
        super(pSp, self).__init__()
        self.opts = opts
        # Define architecture
        self.encoder = self.set_encoder()
        self.decoder = Generator(opts.stylegan_size, 512, 8, channel_multiplier=2)
        # Decoder outputs are pooled down to 256x256 before being returned
        # (only when ``resize=True`` in ``forward``).
        self.face_pool = torch.nn.AdaptiveAvgPool2d((256, 256))
        # Load weights if needed
        self.load_weights()

    def set_encoder(self):
        """Instantiate the encoder selected by ``opts.encoder_type``."""
        if self.opts.encoder_type == 'GradualStyleEncoder':
            encoder = psp_encoders.GradualStyleEncoder(50, 'ir_se', self.opts)
        elif self.opts.encoder_type == 'Encoder4Editing':
            encoder = psp_encoders.Encoder4Editing(50, 'ir_se', self.opts)
        else:
            raise Exception('{} is not a valid encoders'.format(self.opts.encoder_type))
        return encoder

    def load_weights(self):
        """Load pretrained weights.

        With ``opts.checkpoint_path`` set, restore a full e4e checkpoint
        (encoder + decoder + latent average). Otherwise initialize the encoder
        from IR-SE50 weights and the decoder from a pretrained StyleGAN's
        ``g_ema`` state dict.
        """
        if self.opts.checkpoint_path is not None:
            print('Loading e4e over the pSp framework from checkpoint: {}'.format(self.opts.checkpoint_path))
            ckpt = torch.load(self.opts.checkpoint_path, map_location='cpu')
            # ``get_keys`` is defined elsewhere in the project -- presumably it
            # extracts the sub-state-dict for the given prefix; confirm there.
            self.encoder.load_state_dict(get_keys(ckpt, 'encoder'), strict=True)
            self.decoder.load_state_dict(get_keys(ckpt, 'decoder'), strict=True)
            self.__load_latent_avg(ckpt)
        else:
            print('Loading encoders weights from irse50!')
            encoder_ckpt = torch.load(paths_config.ir_se50)
            self.encoder.load_state_dict(encoder_ckpt, strict=False)
            print('Loading decoder weights from pretrained!')
            ckpt = torch.load(self.opts.stylegan_weights)
            self.decoder.load_state_dict(ckpt['g_ema'], strict=False)
            self.__load_latent_avg(ckpt, repeat=self.encoder.style_count)

    def forward(self, x, resize=True, latent_mask=None, input_code=False, randomize_noise=True,
                inject_latent=None, return_latents=False, alpha=None):
        """Encode ``x`` (or use it directly as latent codes) and decode to images.

        Args:
            x: Input images, or latent codes when ``input_code`` is True.
            resize: Pool the decoder output to 256x256 before returning.
            latent_mask: Iterable of latent-layer indices to overwrite/mix.
            input_code: Skip the encoder and treat ``x`` as latent codes.
            randomize_noise: Forwarded to the StyleGAN decoder.
            inject_latent: Latent whose masked layers replace (or blend into) the codes.
            return_latents: Also return the resulting latents from the decoder.
            alpha: Blend factor for ``inject_latent``; ``None`` means full replacement.
        """
        if input_code:
            codes = x
        else:
            codes = self.encoder(x)
            # normalize with respect to the center of an average face
            if self.opts.start_from_latent_avg:
                if codes.ndim == 2:
                    # Single style vector per image: offset by one row of the average.
                    codes = codes + self.latent_avg.repeat(codes.shape[0], 1, 1)[:, 0, :]
                else:
                    codes = codes + self.latent_avg.repeat(codes.shape[0], 1, 1)
        # Optional style mixing: overwrite the masked layers with the injected
        # latent (blended by ``alpha`` when given), or zero them out.
        if latent_mask is not None:
            for i in latent_mask:
                if inject_latent is not None:
                    if alpha is not None:
                        codes[:, i] = alpha * inject_latent[:, i] + (1 - alpha) * codes[:, i]
                    else:
                        codes[:, i] = inject_latent[:, i]
                else:
                    codes[:, i] = 0
        input_is_latent = not input_code
        images, result_latent = self.decoder([codes],
                                             input_is_latent=input_is_latent,
                                             randomize_noise=randomize_noise,
                                             return_latents=return_latents)
        if resize:
            images = self.face_pool(images)
        if return_latents:
            return images, result_latent
        else:
            return images

    def __load_latent_avg(self, ckpt, repeat=None):
        """Cache the checkpoint's average latent on ``self`` (``None`` if absent).

        ``repeat`` tiles the single average vector across that many latent rows.
        """
        if 'latent_avg' in ckpt:
            self.latent_avg = ckpt['latent_avg'].to(self.opts.device)
            if repeat is not None:
                self.latent_avg = self.latent_avg.repeat(repeat, 1)
        else:
            self.latent_avg = None
def initialize_e4e_wplus():
    """Load the pretrained e4e inversion network, frozen and in eval mode on the configured device."""
    ckpt = torch.load(paths_config.e4e, map_location='cpu')
    opts = ckpt['opts']
    # Point the network at the same checkpoint so pSp restores its weights.
    opts['checkpoint_path'] = paths_config.e4e
    net = pSp(Namespace(**opts))
    return net.eval().to(global_config.device).requires_grad_(False)
156,024 | from typing import (
Any,
Dict,
List,
Match,
Tuple,
Union,
Literal,
Callable,
Optional,
overload,
)
from nonebot.typing import T_State
from nonebot.matcher import Matcher
from nonebot.internal.params import Arg as Arg
from nonebot.internal.params import ArgStr as ArgStr
from nonebot.internal.params import Depends as Depends
from nonebot.internal.params import ArgParam as ArgParam
from nonebot.internal.params import BotParam as BotParam
from nonebot.adapters import Event, Message, MessageSegment
from nonebot.internal.params import EventParam as EventParam
from nonebot.internal.params import StateParam as StateParam
from nonebot.internal.params import DependParam as DependParam
from nonebot.internal.params import ArgPlainText as ArgPlainText
from nonebot.internal.params import DefaultParam as DefaultParam
from nonebot.internal.params import MatcherParam as MatcherParam
from nonebot.internal.params import ExceptionParam as ExceptionParam
from nonebot.consts import (
CMD_KEY,
PREFIX_KEY,
SHELL_ARGS,
SHELL_ARGV,
CMD_ARG_KEY,
KEYWORD_KEY,
RAW_CMD_KEY,
ENDSWITH_KEY,
CMD_START_KEY,
FULLMATCH_KEY,
REGEX_MATCHED,
STARTSWITH_KEY,
CMD_WHITESPACE_KEY,
)
async def _event_type(event: Event) -> str:
    """Dependency resolver returning the current event's type string."""
    return event.get_type()
def Depends(
    dependency: Optional[T_Handler] = None,
    *,
    use_cache: bool = True,
    validate: Union[bool, PydanticFieldInfo] = False,
) -> Any:
    """Sub-dependency decorator.

    Args:
        dependency: The dependency function. Defaults to the parameter's type annotation.
        use_cache: Whether to cache the dependency's result. Defaults to `True`.
        validate: Whether to apply Pydantic type validation. Defaults to `False`.

    Usage:
        ```python
        def depend_func() -> Any:
            return ...

        def depend_gen_func():
            try:
                yield ...
            finally:
                ...

        async def handler(
            param_name: Any = Depends(depend_func),
            gen: Any = Depends(depend_gen_func),
        ):
            ...
        ```
    """
    marker = DependsInner(dependency, use_cache=use_cache, validate=validate)
    return marker
The provided code snippet includes necessary dependencies for implementing the `EventType` function. Write a Python function `def EventType() -> str` to solve the following problem:
{ref}`nonebot.adapters.Event` 类型参数
Here is the function:
def EventType() -> str:
    """{ref}`nonebot.adapters.Event` type-name parameter."""
    return Depends(_event_type)
156,025 | from typing import (
Any,
Dict,
List,
Match,
Tuple,
Union,
Literal,
Callable,
Optional,
overload,
)
from nonebot.typing import T_State
from nonebot.matcher import Matcher
from nonebot.internal.params import Arg as Arg
from nonebot.internal.params import ArgStr as ArgStr
from nonebot.internal.params import Depends as Depends
from nonebot.internal.params import ArgParam as ArgParam
from nonebot.internal.params import BotParam as BotParam
from nonebot.adapters import Event, Message, MessageSegment
from nonebot.internal.params import EventParam as EventParam
from nonebot.internal.params import StateParam as StateParam
from nonebot.internal.params import DependParam as DependParam
from nonebot.internal.params import ArgPlainText as ArgPlainText
from nonebot.internal.params import DefaultParam as DefaultParam
from nonebot.internal.params import MatcherParam as MatcherParam
from nonebot.internal.params import ExceptionParam as ExceptionParam
from nonebot.consts import (
CMD_KEY,
PREFIX_KEY,
SHELL_ARGS,
SHELL_ARGV,
CMD_ARG_KEY,
KEYWORD_KEY,
RAW_CMD_KEY,
ENDSWITH_KEY,
CMD_START_KEY,
FULLMATCH_KEY,
REGEX_MATCHED,
STARTSWITH_KEY,
CMD_WHITESPACE_KEY,
)
async def _event_message(event: Event) -> Message:
    """Dependency resolver returning the current event's message object."""
    return event.get_message()
def Depends(
    dependency: Optional[T_Handler] = None,
    *,
    use_cache: bool = True,
    validate: Union[bool, PydanticFieldInfo] = False,
) -> Any:
    """Sub-dependency decorator.

    Args:
        dependency: The dependency function. Defaults to the parameter's type annotation.
        use_cache: Whether to cache the dependency's result. Defaults to `True`.
        validate: Whether to apply Pydantic type validation. Defaults to `False`.

    Usage:
        ```python
        def depend_func() -> Any:
            return ...

        def depend_gen_func():
            try:
                yield ...
            finally:
                ...

        async def handler(
            param_name: Any = Depends(depend_func),
            gen: Any = Depends(depend_gen_func),
        ):
            ...
        ```
    """
    marker = DependsInner(dependency, use_cache=use_cache, validate=validate)
    return marker
The provided code snippet includes necessary dependencies for implementing the `EventMessage` function. Write a Python function `def EventMessage() -> Any` to solve the following problem:
{ref}`nonebot.adapters.Event` 消息参数
Here is the function:
def EventMessage() -> Any:
    """{ref}`nonebot.adapters.Event` message parameter."""
    return Depends(_event_message)
156,026 | from typing import (
Any,
Dict,
List,
Match,
Tuple,
Union,
Literal,
Callable,
Optional,
overload,
)
from nonebot.typing import T_State
from nonebot.matcher import Matcher
from nonebot.internal.params import Arg as Arg
from nonebot.internal.params import ArgStr as ArgStr
from nonebot.internal.params import Depends as Depends
from nonebot.internal.params import ArgParam as ArgParam
from nonebot.internal.params import BotParam as BotParam
from nonebot.adapters import Event, Message, MessageSegment
from nonebot.internal.params import EventParam as EventParam
from nonebot.internal.params import StateParam as StateParam
from nonebot.internal.params import DependParam as DependParam
from nonebot.internal.params import ArgPlainText as ArgPlainText
from nonebot.internal.params import DefaultParam as DefaultParam
from nonebot.internal.params import MatcherParam as MatcherParam
from nonebot.internal.params import ExceptionParam as ExceptionParam
from nonebot.consts import (
CMD_KEY,
PREFIX_KEY,
SHELL_ARGS,
SHELL_ARGV,
CMD_ARG_KEY,
KEYWORD_KEY,
RAW_CMD_KEY,
ENDSWITH_KEY,
CMD_START_KEY,
FULLMATCH_KEY,
REGEX_MATCHED,
STARTSWITH_KEY,
CMD_WHITESPACE_KEY,
)
async def _event_plain_text(event: Event) -> str:
return event.get_plaintext()
def Depends(
dependency: Optional[T_Handler] = None,
*,
use_cache: bool = True,
validate: Union[bool, PydanticFieldInfo] = False,
) -> Any:
"""子依赖装饰器
参数:
dependency: 依赖函数。默认为参数的类型注释。
use_cache: 是否使用缓存。默认为 `True`。
validate: 是否使用 Pydantic 类型校验。默认为 `False`。
用法:
```python
def depend_func() -> Any:
return ...
def depend_gen_func():
try:
yield ...
finally:
...
async def handler(
param_name: Any = Depends(depend_func),
gen: Any = Depends(depend_gen_func),
):
...
```
"""
return DependsInner(dependency, use_cache=use_cache, validate=validate)
The provided code snippet includes necessary dependencies for implementing the `EventPlainText` function. Write a Python function `def EventPlainText() -> str` to solve the following problem:
{ref}`nonebot.adapters.Event` 纯文本消息参数
Here is the function:
def EventPlainText() -> str:
"""{ref}`nonebot.adapters.Event` 纯文本消息参数"""
return Depends(_event_plain_text) | {ref}`nonebot.adapters.Event` 纯文本消息参数 |
156,027 | from typing import (
Any,
Dict,
List,
Match,
Tuple,
Union,
Literal,
Callable,
Optional,
overload,
)
from nonebot.typing import T_State
from nonebot.matcher import Matcher
from nonebot.internal.params import Arg as Arg
from nonebot.internal.params import ArgStr as ArgStr
from nonebot.internal.params import Depends as Depends
from nonebot.internal.params import ArgParam as ArgParam
from nonebot.internal.params import BotParam as BotParam
from nonebot.adapters import Event, Message, MessageSegment
from nonebot.internal.params import EventParam as EventParam
from nonebot.internal.params import StateParam as StateParam
from nonebot.internal.params import DependParam as DependParam
from nonebot.internal.params import ArgPlainText as ArgPlainText
from nonebot.internal.params import DefaultParam as DefaultParam
from nonebot.internal.params import MatcherParam as MatcherParam
from nonebot.internal.params import ExceptionParam as ExceptionParam
from nonebot.consts import (
CMD_KEY,
PREFIX_KEY,
SHELL_ARGS,
SHELL_ARGV,
CMD_ARG_KEY,
KEYWORD_KEY,
RAW_CMD_KEY,
ENDSWITH_KEY,
CMD_START_KEY,
FULLMATCH_KEY,
REGEX_MATCHED,
STARTSWITH_KEY,
CMD_WHITESPACE_KEY,
)
async def _event_to_me(event: Event) -> bool:
return event.is_tome()
def Depends(
dependency: Optional[T_Handler] = None,
*,
use_cache: bool = True,
validate: Union[bool, PydanticFieldInfo] = False,
) -> Any:
"""子依赖装饰器
参数:
dependency: 依赖函数。默认为参数的类型注释。
use_cache: 是否使用缓存。默认为 `True`。
validate: 是否使用 Pydantic 类型校验。默认为 `False`。
用法:
```python
def depend_func() -> Any:
return ...
def depend_gen_func():
try:
yield ...
finally:
...
async def handler(
param_name: Any = Depends(depend_func),
gen: Any = Depends(depend_gen_func),
):
...
```
"""
return DependsInner(dependency, use_cache=use_cache, validate=validate)
The provided code snippet includes necessary dependencies for implementing the `EventToMe` function. Write a Python function `def EventToMe() -> bool` to solve the following problem:
{ref}`nonebot.adapters.Event` `to_me` 参数
Here is the function:
def EventToMe() -> bool:
"""{ref}`nonebot.adapters.Event` `to_me` 参数"""
return Depends(_event_to_me) | {ref}`nonebot.adapters.Event` `to_me` 参数 |
156,028 | from typing import (
Any,
Dict,
List,
Match,
Tuple,
Union,
Literal,
Callable,
Optional,
overload,
)
from nonebot.typing import T_State
from nonebot.matcher import Matcher
from nonebot.internal.params import Arg as Arg
from nonebot.internal.params import ArgStr as ArgStr
from nonebot.internal.params import Depends as Depends
from nonebot.internal.params import ArgParam as ArgParam
from nonebot.internal.params import BotParam as BotParam
from nonebot.adapters import Event, Message, MessageSegment
from nonebot.internal.params import EventParam as EventParam
from nonebot.internal.params import StateParam as StateParam
from nonebot.internal.params import DependParam as DependParam
from nonebot.internal.params import ArgPlainText as ArgPlainText
from nonebot.internal.params import DefaultParam as DefaultParam
from nonebot.internal.params import MatcherParam as MatcherParam
from nonebot.internal.params import ExceptionParam as ExceptionParam
from nonebot.consts import (
CMD_KEY,
PREFIX_KEY,
SHELL_ARGS,
SHELL_ARGV,
CMD_ARG_KEY,
KEYWORD_KEY,
RAW_CMD_KEY,
ENDSWITH_KEY,
CMD_START_KEY,
FULLMATCH_KEY,
REGEX_MATCHED,
STARTSWITH_KEY,
CMD_WHITESPACE_KEY,
)
def _command(state: T_State) -> Message:
return state[PREFIX_KEY][CMD_KEY]
def Depends(
dependency: Optional[T_Handler] = None,
*,
use_cache: bool = True,
validate: Union[bool, PydanticFieldInfo] = False,
) -> Any:
"""子依赖装饰器
参数:
dependency: 依赖函数。默认为参数的类型注释。
use_cache: 是否使用缓存。默认为 `True`。
validate: 是否使用 Pydantic 类型校验。默认为 `False`。
用法:
```python
def depend_func() -> Any:
return ...
def depend_gen_func():
try:
yield ...
finally:
...
async def handler(
param_name: Any = Depends(depend_func),
gen: Any = Depends(depend_gen_func),
):
...
```
"""
return DependsInner(dependency, use_cache=use_cache, validate=validate)
The provided code snippet includes necessary dependencies for implementing the `Command` function. Write a Python function `def Command() -> Tuple[str, ...]` to solve the following problem:
消息命令元组
Here is the function:
def Command() -> Tuple[str, ...]:
"""消息命令元组"""
return Depends(_command) | 消息命令元组 |
156,029 | from typing import (
Any,
Dict,
List,
Match,
Tuple,
Union,
Literal,
Callable,
Optional,
overload,
)
from nonebot.typing import T_State
from nonebot.matcher import Matcher
from nonebot.internal.params import Arg as Arg
from nonebot.internal.params import ArgStr as ArgStr
from nonebot.internal.params import Depends as Depends
from nonebot.internal.params import ArgParam as ArgParam
from nonebot.internal.params import BotParam as BotParam
from nonebot.adapters import Event, Message, MessageSegment
from nonebot.internal.params import EventParam as EventParam
from nonebot.internal.params import StateParam as StateParam
from nonebot.internal.params import DependParam as DependParam
from nonebot.internal.params import ArgPlainText as ArgPlainText
from nonebot.internal.params import DefaultParam as DefaultParam
from nonebot.internal.params import MatcherParam as MatcherParam
from nonebot.internal.params import ExceptionParam as ExceptionParam
from nonebot.consts import (
CMD_KEY,
PREFIX_KEY,
SHELL_ARGS,
SHELL_ARGV,
CMD_ARG_KEY,
KEYWORD_KEY,
RAW_CMD_KEY,
ENDSWITH_KEY,
CMD_START_KEY,
FULLMATCH_KEY,
REGEX_MATCHED,
STARTSWITH_KEY,
CMD_WHITESPACE_KEY,
)
def _raw_command(state: T_State) -> Message:
return state[PREFIX_KEY][RAW_CMD_KEY]
def Depends(
dependency: Optional[T_Handler] = None,
*,
use_cache: bool = True,
validate: Union[bool, PydanticFieldInfo] = False,
) -> Any:
"""子依赖装饰器
参数:
dependency: 依赖函数。默认为参数的类型注释。
use_cache: 是否使用缓存。默认为 `True`。
validate: 是否使用 Pydantic 类型校验。默认为 `False`。
用法:
```python
def depend_func() -> Any:
return ...
def depend_gen_func():
try:
yield ...
finally:
...
async def handler(
param_name: Any = Depends(depend_func),
gen: Any = Depends(depend_gen_func),
):
...
```
"""
return DependsInner(dependency, use_cache=use_cache, validate=validate)
The provided code snippet includes necessary dependencies for implementing the `RawCommand` function. Write a Python function `def RawCommand() -> str` to solve the following problem:
消息命令文本
Here is the function:
def RawCommand() -> str:
"""消息命令文本"""
return Depends(_raw_command) | 消息命令文本 |
156,030 | from typing import (
Any,
Dict,
List,
Match,
Tuple,
Union,
Literal,
Callable,
Optional,
overload,
)
from nonebot.typing import T_State
from nonebot.matcher import Matcher
from nonebot.internal.params import Arg as Arg
from nonebot.internal.params import ArgStr as ArgStr
from nonebot.internal.params import Depends as Depends
from nonebot.internal.params import ArgParam as ArgParam
from nonebot.internal.params import BotParam as BotParam
from nonebot.adapters import Event, Message, MessageSegment
from nonebot.internal.params import EventParam as EventParam
from nonebot.internal.params import StateParam as StateParam
from nonebot.internal.params import DependParam as DependParam
from nonebot.internal.params import ArgPlainText as ArgPlainText
from nonebot.internal.params import DefaultParam as DefaultParam
from nonebot.internal.params import MatcherParam as MatcherParam
from nonebot.internal.params import ExceptionParam as ExceptionParam
from nonebot.consts import (
CMD_KEY,
PREFIX_KEY,
SHELL_ARGS,
SHELL_ARGV,
CMD_ARG_KEY,
KEYWORD_KEY,
RAW_CMD_KEY,
ENDSWITH_KEY,
CMD_START_KEY,
FULLMATCH_KEY,
REGEX_MATCHED,
STARTSWITH_KEY,
CMD_WHITESPACE_KEY,
)
def _command_start(state: T_State) -> str:
return state[PREFIX_KEY][CMD_START_KEY]
def Depends(
dependency: Optional[T_Handler] = None,
*,
use_cache: bool = True,
validate: Union[bool, PydanticFieldInfo] = False,
) -> Any:
"""子依赖装饰器
参数:
dependency: 依赖函数。默认为参数的类型注释。
use_cache: 是否使用缓存。默认为 `True`。
validate: 是否使用 Pydantic 类型校验。默认为 `False`。
用法:
```python
def depend_func() -> Any:
return ...
def depend_gen_func():
try:
yield ...
finally:
...
async def handler(
param_name: Any = Depends(depend_func),
gen: Any = Depends(depend_gen_func),
):
...
```
"""
return DependsInner(dependency, use_cache=use_cache, validate=validate)
The provided code snippet includes necessary dependencies for implementing the `CommandStart` function. Write a Python function `def CommandStart() -> str` to solve the following problem:
消息命令开头
Here is the function:
def CommandStart() -> str:
"""消息命令开头"""
return Depends(_command_start) | 消息命令开头 |
156,031 | from typing import (
Any,
Dict,
List,
Match,
Tuple,
Union,
Literal,
Callable,
Optional,
overload,
)
from nonebot.typing import T_State
from nonebot.matcher import Matcher
from nonebot.internal.params import Arg as Arg
from nonebot.internal.params import ArgStr as ArgStr
from nonebot.internal.params import Depends as Depends
from nonebot.internal.params import ArgParam as ArgParam
from nonebot.internal.params import BotParam as BotParam
from nonebot.adapters import Event, Message, MessageSegment
from nonebot.internal.params import EventParam as EventParam
from nonebot.internal.params import StateParam as StateParam
from nonebot.internal.params import DependParam as DependParam
from nonebot.internal.params import ArgPlainText as ArgPlainText
from nonebot.internal.params import DefaultParam as DefaultParam
from nonebot.internal.params import MatcherParam as MatcherParam
from nonebot.internal.params import ExceptionParam as ExceptionParam
from nonebot.consts import (
CMD_KEY,
PREFIX_KEY,
SHELL_ARGS,
SHELL_ARGV,
CMD_ARG_KEY,
KEYWORD_KEY,
RAW_CMD_KEY,
ENDSWITH_KEY,
CMD_START_KEY,
FULLMATCH_KEY,
REGEX_MATCHED,
STARTSWITH_KEY,
CMD_WHITESPACE_KEY,
)
def _command_whitespace(state: T_State) -> str:
return state[PREFIX_KEY][CMD_WHITESPACE_KEY]
def Depends(
dependency: Optional[T_Handler] = None,
*,
use_cache: bool = True,
validate: Union[bool, PydanticFieldInfo] = False,
) -> Any:
"""子依赖装饰器
参数:
dependency: 依赖函数。默认为参数的类型注释。
use_cache: 是否使用缓存。默认为 `True`。
validate: 是否使用 Pydantic 类型校验。默认为 `False`。
用法:
```python
def depend_func() -> Any:
return ...
def depend_gen_func():
try:
yield ...
finally:
...
async def handler(
param_name: Any = Depends(depend_func),
gen: Any = Depends(depend_gen_func),
):
...
```
"""
return DependsInner(dependency, use_cache=use_cache, validate=validate)
The provided code snippet includes necessary dependencies for implementing the `CommandWhitespace` function. Write a Python function `def CommandWhitespace() -> str` to solve the following problem:
消息命令与参数之间的空白
Here is the function:
def CommandWhitespace() -> str:
"""消息命令与参数之间的空白"""
return Depends(_command_whitespace) | 消息命令与参数之间的空白 |
156,032 | from typing import (
Any,
Dict,
List,
Match,
Tuple,
Union,
Literal,
Callable,
Optional,
overload,
)
from nonebot.typing import T_State
from nonebot.matcher import Matcher
from nonebot.internal.params import Arg as Arg
from nonebot.internal.params import ArgStr as ArgStr
from nonebot.internal.params import Depends as Depends
from nonebot.internal.params import ArgParam as ArgParam
from nonebot.internal.params import BotParam as BotParam
from nonebot.adapters import Event, Message, MessageSegment
from nonebot.internal.params import EventParam as EventParam
from nonebot.internal.params import StateParam as StateParam
from nonebot.internal.params import DependParam as DependParam
from nonebot.internal.params import ArgPlainText as ArgPlainText
from nonebot.internal.params import DefaultParam as DefaultParam
from nonebot.internal.params import MatcherParam as MatcherParam
from nonebot.internal.params import ExceptionParam as ExceptionParam
from nonebot.consts import (
CMD_KEY,
PREFIX_KEY,
SHELL_ARGS,
SHELL_ARGV,
CMD_ARG_KEY,
KEYWORD_KEY,
RAW_CMD_KEY,
ENDSWITH_KEY,
CMD_START_KEY,
FULLMATCH_KEY,
REGEX_MATCHED,
STARTSWITH_KEY,
CMD_WHITESPACE_KEY,
)
def _shell_command_args(state: T_State) -> Any:
return state[SHELL_ARGS] # Namespace or ParserExit
def Depends(
dependency: Optional[T_Handler] = None,
*,
use_cache: bool = True,
validate: Union[bool, PydanticFieldInfo] = False,
) -> Any:
"""子依赖装饰器
参数:
dependency: 依赖函数。默认为参数的类型注释。
use_cache: 是否使用缓存。默认为 `True`。
validate: 是否使用 Pydantic 类型校验。默认为 `False`。
用法:
```python
def depend_func() -> Any:
return ...
def depend_gen_func():
try:
yield ...
finally:
...
async def handler(
param_name: Any = Depends(depend_func),
gen: Any = Depends(depend_gen_func),
):
...
```
"""
return DependsInner(dependency, use_cache=use_cache, validate=validate)
The provided code snippet includes necessary dependencies for implementing the `ShellCommandArgs` function. Write a Python function `def ShellCommandArgs() -> Any` to solve the following problem:
shell 命令解析后的参数字典
Here is the function:
def ShellCommandArgs() -> Any:
"""shell 命令解析后的参数字典"""
return Depends(_shell_command_args, use_cache=False) | shell 命令解析后的参数字典 |
156,033 | from typing import (
Any,
Dict,
List,
Match,
Tuple,
Union,
Literal,
Callable,
Optional,
overload,
)
from nonebot.typing import T_State
from nonebot.matcher import Matcher
from nonebot.internal.params import Arg as Arg
from nonebot.internal.params import ArgStr as ArgStr
from nonebot.internal.params import Depends as Depends
from nonebot.internal.params import ArgParam as ArgParam
from nonebot.internal.params import BotParam as BotParam
from nonebot.adapters import Event, Message, MessageSegment
from nonebot.internal.params import EventParam as EventParam
from nonebot.internal.params import StateParam as StateParam
from nonebot.internal.params import DependParam as DependParam
from nonebot.internal.params import ArgPlainText as ArgPlainText
from nonebot.internal.params import DefaultParam as DefaultParam
from nonebot.internal.params import MatcherParam as MatcherParam
from nonebot.internal.params import ExceptionParam as ExceptionParam
from nonebot.consts import (
CMD_KEY,
PREFIX_KEY,
SHELL_ARGS,
SHELL_ARGV,
CMD_ARG_KEY,
KEYWORD_KEY,
RAW_CMD_KEY,
ENDSWITH_KEY,
CMD_START_KEY,
FULLMATCH_KEY,
REGEX_MATCHED,
STARTSWITH_KEY,
CMD_WHITESPACE_KEY,
)
def _shell_command_argv(state: T_State) -> List[Union[str, MessageSegment]]:
return state[SHELL_ARGV]
def Depends(
dependency: Optional[T_Handler] = None,
*,
use_cache: bool = True,
validate: Union[bool, PydanticFieldInfo] = False,
) -> Any:
"""子依赖装饰器
参数:
dependency: 依赖函数。默认为参数的类型注释。
use_cache: 是否使用缓存。默认为 `True`。
validate: 是否使用 Pydantic 类型校验。默认为 `False`。
用法:
```python
def depend_func() -> Any:
return ...
def depend_gen_func():
try:
yield ...
finally:
...
async def handler(
param_name: Any = Depends(depend_func),
gen: Any = Depends(depend_gen_func),
):
...
```
"""
return DependsInner(dependency, use_cache=use_cache, validate=validate)
The provided code snippet includes necessary dependencies for implementing the `ShellCommandArgv` function. Write a Python function `def ShellCommandArgv() -> Any` to solve the following problem:
shell 命令原始参数列表
Here is the function:
def ShellCommandArgv() -> Any:
"""shell 命令原始参数列表"""
return Depends(_shell_command_argv, use_cache=False) | shell 命令原始参数列表 |
156,034 | from typing import (
Any,
Dict,
List,
Match,
Tuple,
Union,
Literal,
Callable,
Optional,
overload,
)
from nonebot.typing import T_State
from nonebot.matcher import Matcher
from nonebot.internal.params import Arg as Arg
from nonebot.internal.params import ArgStr as ArgStr
from nonebot.internal.params import Depends as Depends
from nonebot.internal.params import ArgParam as ArgParam
from nonebot.internal.params import BotParam as BotParam
from nonebot.adapters import Event, Message, MessageSegment
from nonebot.internal.params import EventParam as EventParam
from nonebot.internal.params import StateParam as StateParam
from nonebot.internal.params import DependParam as DependParam
from nonebot.internal.params import ArgPlainText as ArgPlainText
from nonebot.internal.params import DefaultParam as DefaultParam
from nonebot.internal.params import MatcherParam as MatcherParam
from nonebot.internal.params import ExceptionParam as ExceptionParam
from nonebot.consts import (
CMD_KEY,
PREFIX_KEY,
SHELL_ARGS,
SHELL_ARGV,
CMD_ARG_KEY,
KEYWORD_KEY,
RAW_CMD_KEY,
ENDSWITH_KEY,
CMD_START_KEY,
FULLMATCH_KEY,
REGEX_MATCHED,
STARTSWITH_KEY,
CMD_WHITESPACE_KEY,
)
def _regex_matched(state: T_State) -> Match[str]:
return state[REGEX_MATCHED]
def Depends(
dependency: Optional[T_Handler] = None,
*,
use_cache: bool = True,
validate: Union[bool, PydanticFieldInfo] = False,
) -> Any:
"""子依赖装饰器
参数:
dependency: 依赖函数。默认为参数的类型注释。
use_cache: 是否使用缓存。默认为 `True`。
validate: 是否使用 Pydantic 类型校验。默认为 `False`。
用法:
```python
def depend_func() -> Any:
return ...
def depend_gen_func():
try:
yield ...
finally:
...
async def handler(
param_name: Any = Depends(depend_func),
gen: Any = Depends(depend_gen_func),
):
...
```
"""
return DependsInner(dependency, use_cache=use_cache, validate=validate)
The provided code snippet includes necessary dependencies for implementing the `RegexMatched` function. Write a Python function `def RegexMatched() -> Match[str]` to solve the following problem:
正则匹配结果
Here is the function:
def RegexMatched() -> Match[str]:
"""正则匹配结果"""
return Depends(_regex_matched, use_cache=False) | 正则匹配结果 |
156,035 | from typing import (
Any,
Dict,
List,
Match,
Tuple,
Union,
Literal,
Callable,
Optional,
overload,
)
from nonebot.typing import T_State
from nonebot.matcher import Matcher
from nonebot.internal.params import Arg as Arg
from nonebot.internal.params import ArgStr as ArgStr
from nonebot.internal.params import Depends as Depends
from nonebot.internal.params import ArgParam as ArgParam
from nonebot.internal.params import BotParam as BotParam
from nonebot.adapters import Event, Message, MessageSegment
from nonebot.internal.params import EventParam as EventParam
from nonebot.internal.params import StateParam as StateParam
from nonebot.internal.params import DependParam as DependParam
from nonebot.internal.params import ArgPlainText as ArgPlainText
from nonebot.internal.params import DefaultParam as DefaultParam
from nonebot.internal.params import MatcherParam as MatcherParam
from nonebot.internal.params import ExceptionParam as ExceptionParam
from nonebot.consts import (
CMD_KEY,
PREFIX_KEY,
SHELL_ARGS,
SHELL_ARGV,
CMD_ARG_KEY,
KEYWORD_KEY,
RAW_CMD_KEY,
ENDSWITH_KEY,
CMD_START_KEY,
FULLMATCH_KEY,
REGEX_MATCHED,
STARTSWITH_KEY,
CMD_WHITESPACE_KEY,
)
def RegexStr(__group: Literal[0] = 0) -> str: ... | null |
156,036 | from typing import (
Any,
Dict,
List,
Match,
Tuple,
Union,
Literal,
Callable,
Optional,
overload,
)
from nonebot.typing import T_State
from nonebot.matcher import Matcher
from nonebot.internal.params import Arg as Arg
from nonebot.internal.params import ArgStr as ArgStr
from nonebot.internal.params import Depends as Depends
from nonebot.internal.params import ArgParam as ArgParam
from nonebot.internal.params import BotParam as BotParam
from nonebot.adapters import Event, Message, MessageSegment
from nonebot.internal.params import EventParam as EventParam
from nonebot.internal.params import StateParam as StateParam
from nonebot.internal.params import DependParam as DependParam
from nonebot.internal.params import ArgPlainText as ArgPlainText
from nonebot.internal.params import DefaultParam as DefaultParam
from nonebot.internal.params import MatcherParam as MatcherParam
from nonebot.internal.params import ExceptionParam as ExceptionParam
from nonebot.consts import (
CMD_KEY,
PREFIX_KEY,
SHELL_ARGS,
SHELL_ARGV,
CMD_ARG_KEY,
KEYWORD_KEY,
RAW_CMD_KEY,
ENDSWITH_KEY,
CMD_START_KEY,
FULLMATCH_KEY,
REGEX_MATCHED,
STARTSWITH_KEY,
CMD_WHITESPACE_KEY,
)
def RegexStr(__group: Union[str, int]) -> Union[str, Any]: ... | null |
156,037 | from typing import (
Any,
Dict,
List,
Match,
Tuple,
Union,
Literal,
Callable,
Optional,
overload,
)
from nonebot.typing import T_State
from nonebot.matcher import Matcher
from nonebot.internal.params import Arg as Arg
from nonebot.internal.params import ArgStr as ArgStr
from nonebot.internal.params import Depends as Depends
from nonebot.internal.params import ArgParam as ArgParam
from nonebot.internal.params import BotParam as BotParam
from nonebot.adapters import Event, Message, MessageSegment
from nonebot.internal.params import EventParam as EventParam
from nonebot.internal.params import StateParam as StateParam
from nonebot.internal.params import DependParam as DependParam
from nonebot.internal.params import ArgPlainText as ArgPlainText
from nonebot.internal.params import DefaultParam as DefaultParam
from nonebot.internal.params import MatcherParam as MatcherParam
from nonebot.internal.params import ExceptionParam as ExceptionParam
from nonebot.consts import (
CMD_KEY,
PREFIX_KEY,
SHELL_ARGS,
SHELL_ARGV,
CMD_ARG_KEY,
KEYWORD_KEY,
RAW_CMD_KEY,
ENDSWITH_KEY,
CMD_START_KEY,
FULLMATCH_KEY,
REGEX_MATCHED,
STARTSWITH_KEY,
CMD_WHITESPACE_KEY,
)
def RegexStr(
__group1: Union[str, int], __group2: Union[str, int], *groups: Union[str, int]
) -> Tuple[Union[str, Any], ...]: ... | null |
156,038 | from typing import (
Any,
Dict,
List,
Match,
Tuple,
Union,
Literal,
Callable,
Optional,
overload,
)
from nonebot.typing import T_State
from nonebot.matcher import Matcher
from nonebot.internal.params import Arg as Arg
from nonebot.internal.params import ArgStr as ArgStr
from nonebot.internal.params import Depends as Depends
from nonebot.internal.params import ArgParam as ArgParam
from nonebot.internal.params import BotParam as BotParam
from nonebot.adapters import Event, Message, MessageSegment
from nonebot.internal.params import EventParam as EventParam
from nonebot.internal.params import StateParam as StateParam
from nonebot.internal.params import DependParam as DependParam
from nonebot.internal.params import ArgPlainText as ArgPlainText
from nonebot.internal.params import DefaultParam as DefaultParam
from nonebot.internal.params import MatcherParam as MatcherParam
from nonebot.internal.params import ExceptionParam as ExceptionParam
from nonebot.consts import (
CMD_KEY,
PREFIX_KEY,
SHELL_ARGS,
SHELL_ARGV,
CMD_ARG_KEY,
KEYWORD_KEY,
RAW_CMD_KEY,
ENDSWITH_KEY,
CMD_START_KEY,
FULLMATCH_KEY,
REGEX_MATCHED,
STARTSWITH_KEY,
CMD_WHITESPACE_KEY,
)
def _regex_str(
groups: Tuple[Union[str, int], ...]
) -> Callable[[T_State], Union[str, Tuple[Union[str, Any], ...], Any]]:
def _regex_str_dependency(
state: T_State,
) -> Union[str, Tuple[Union[str, Any], ...], Any]:
return _regex_matched(state).group(*groups)
return _regex_str_dependency
def Depends(
dependency: Optional[T_Handler] = None,
*,
use_cache: bool = True,
validate: Union[bool, PydanticFieldInfo] = False,
) -> Any:
"""子依赖装饰器
参数:
dependency: 依赖函数。默认为参数的类型注释。
use_cache: 是否使用缓存。默认为 `True`。
validate: 是否使用 Pydantic 类型校验。默认为 `False`。
用法:
```python
def depend_func() -> Any:
return ...
def depend_gen_func():
try:
yield ...
finally:
...
async def handler(
param_name: Any = Depends(depend_func),
gen: Any = Depends(depend_gen_func),
):
...
```
"""
return DependsInner(dependency, use_cache=use_cache, validate=validate)
The provided code snippet includes necessary dependencies for implementing the `RegexStr` function. Write a Python function `def RegexStr(*groups: Union[str, int]) -> Union[str, Tuple[Union[str, Any], ...], Any]` to solve the following problem:
正则匹配结果文本
Here is the function:
def RegexStr(*groups: Union[str, int]) -> Union[str, Tuple[Union[str, Any], ...], Any]:
"""正则匹配结果文本"""
return Depends(_regex_str(groups), use_cache=False) | 正则匹配结果文本 |
156,039 | from typing import (
Any,
Dict,
List,
Match,
Tuple,
Union,
Literal,
Callable,
Optional,
overload,
)
from nonebot.typing import T_State
from nonebot.matcher import Matcher
from nonebot.internal.params import Arg as Arg
from nonebot.internal.params import ArgStr as ArgStr
from nonebot.internal.params import Depends as Depends
from nonebot.internal.params import ArgParam as ArgParam
from nonebot.internal.params import BotParam as BotParam
from nonebot.adapters import Event, Message, MessageSegment
from nonebot.internal.params import EventParam as EventParam
from nonebot.internal.params import StateParam as StateParam
from nonebot.internal.params import DependParam as DependParam
from nonebot.internal.params import ArgPlainText as ArgPlainText
from nonebot.internal.params import DefaultParam as DefaultParam
from nonebot.internal.params import MatcherParam as MatcherParam
from nonebot.internal.params import ExceptionParam as ExceptionParam
from nonebot.consts import (
CMD_KEY,
PREFIX_KEY,
SHELL_ARGS,
SHELL_ARGV,
CMD_ARG_KEY,
KEYWORD_KEY,
RAW_CMD_KEY,
ENDSWITH_KEY,
CMD_START_KEY,
FULLMATCH_KEY,
REGEX_MATCHED,
STARTSWITH_KEY,
CMD_WHITESPACE_KEY,
)
def _regex_group(state: T_State) -> Tuple[Any, ...]:
return _regex_matched(state).groups()
def Depends(
dependency: Optional[T_Handler] = None,
*,
use_cache: bool = True,
validate: Union[bool, PydanticFieldInfo] = False,
) -> Any:
"""子依赖装饰器
参数:
dependency: 依赖函数。默认为参数的类型注释。
use_cache: 是否使用缓存。默认为 `True`。
validate: 是否使用 Pydantic 类型校验。默认为 `False`。
用法:
```python
def depend_func() -> Any:
return ...
def depend_gen_func():
try:
yield ...
finally:
...
async def handler(
param_name: Any = Depends(depend_func),
gen: Any = Depends(depend_gen_func),
):
...
```
"""
return DependsInner(dependency, use_cache=use_cache, validate=validate)
The provided code snippet includes necessary dependencies for implementing the `RegexGroup` function. Write a Python function `def RegexGroup() -> Tuple[Any, ...]` to solve the following problem:
正则匹配结果 group 元组
Here is the function:
def RegexGroup() -> Tuple[Any, ...]:
"""正则匹配结果 group 元组"""
return Depends(_regex_group, use_cache=False) | 正则匹配结果 group 元组 |
156,040 | from typing import (
Any,
Dict,
List,
Match,
Tuple,
Union,
Literal,
Callable,
Optional,
overload,
)
from nonebot.typing import T_State
from nonebot.matcher import Matcher
from nonebot.internal.params import Arg as Arg
from nonebot.internal.params import ArgStr as ArgStr
from nonebot.internal.params import Depends as Depends
from nonebot.internal.params import ArgParam as ArgParam
from nonebot.internal.params import BotParam as BotParam
from nonebot.adapters import Event, Message, MessageSegment
from nonebot.internal.params import EventParam as EventParam
from nonebot.internal.params import StateParam as StateParam
from nonebot.internal.params import DependParam as DependParam
from nonebot.internal.params import ArgPlainText as ArgPlainText
from nonebot.internal.params import DefaultParam as DefaultParam
from nonebot.internal.params import MatcherParam as MatcherParam
from nonebot.internal.params import ExceptionParam as ExceptionParam
from nonebot.consts import (
CMD_KEY,
PREFIX_KEY,
SHELL_ARGS,
SHELL_ARGV,
CMD_ARG_KEY,
KEYWORD_KEY,
RAW_CMD_KEY,
ENDSWITH_KEY,
CMD_START_KEY,
FULLMATCH_KEY,
REGEX_MATCHED,
STARTSWITH_KEY,
CMD_WHITESPACE_KEY,
)
def _regex_dict(state: T_State) -> Dict[str, Any]:
return _regex_matched(state).groupdict()
def Depends(
dependency: Optional[T_Handler] = None,
*,
use_cache: bool = True,
validate: Union[bool, PydanticFieldInfo] = False,
) -> Any:
"""子依赖装饰器
参数:
dependency: 依赖函数。默认为参数的类型注释。
use_cache: 是否使用缓存。默认为 `True`。
validate: 是否使用 Pydantic 类型校验。默认为 `False`。
用法:
```python
def depend_func() -> Any:
return ...
def depend_gen_func():
try:
yield ...
finally:
...
async def handler(
param_name: Any = Depends(depend_func),
gen: Any = Depends(depend_gen_func),
):
...
```
"""
return DependsInner(dependency, use_cache=use_cache, validate=validate)
The provided code snippet includes necessary dependencies for implementing the `RegexDict` function. Write a Python function `def RegexDict() -> Dict[str, Any]` to solve the following problem:
正则匹配结果 group 字典
Here is the function:
def RegexDict() -> Dict[str, Any]:
"""正则匹配结果 group 字典"""
return Depends(_regex_dict, use_cache=False) | 正则匹配结果 group 字典 |
156,041 | from typing import (
Any,
Dict,
List,
Match,
Tuple,
Union,
Literal,
Callable,
Optional,
overload,
)
from nonebot.typing import T_State
from nonebot.matcher import Matcher
from nonebot.internal.params import Arg as Arg
from nonebot.internal.params import ArgStr as ArgStr
from nonebot.internal.params import Depends as Depends
from nonebot.internal.params import ArgParam as ArgParam
from nonebot.internal.params import BotParam as BotParam
from nonebot.adapters import Event, Message, MessageSegment
from nonebot.internal.params import EventParam as EventParam
from nonebot.internal.params import StateParam as StateParam
from nonebot.internal.params import DependParam as DependParam
from nonebot.internal.params import ArgPlainText as ArgPlainText
from nonebot.internal.params import DefaultParam as DefaultParam
from nonebot.internal.params import MatcherParam as MatcherParam
from nonebot.internal.params import ExceptionParam as ExceptionParam
from nonebot.consts import (
CMD_KEY,
PREFIX_KEY,
SHELL_ARGS,
SHELL_ARGV,
CMD_ARG_KEY,
KEYWORD_KEY,
RAW_CMD_KEY,
ENDSWITH_KEY,
CMD_START_KEY,
FULLMATCH_KEY,
REGEX_MATCHED,
STARTSWITH_KEY,
CMD_WHITESPACE_KEY,
)
def _startswith(state: T_State) -> str:
return state[STARTSWITH_KEY]
def Depends(
dependency: Optional[T_Handler] = None,
*,
use_cache: bool = True,
validate: Union[bool, PydanticFieldInfo] = False,
) -> Any:
"""子依赖装饰器
参数:
dependency: 依赖函数。默认为参数的类型注释。
use_cache: 是否使用缓存。默认为 `True`。
validate: 是否使用 Pydantic 类型校验。默认为 `False`。
用法:
```python
def depend_func() -> Any:
return ...
def depend_gen_func():
try:
yield ...
finally:
...
async def handler(
param_name: Any = Depends(depend_func),
gen: Any = Depends(depend_gen_func),
):
...
```
"""
return DependsInner(dependency, use_cache=use_cache, validate=validate)
The provided code snippet includes necessary dependencies for implementing the `Startswith` function. Write a Python function `def Startswith() -> str` to solve the following problem:
响应触发前缀
Here is the function:
def Startswith() -> str:
"""响应触发前缀"""
return Depends(_startswith, use_cache=False) | 响应触发前缀 |
156,042 | from typing import (
Any,
Dict,
List,
Match,
Tuple,
Union,
Literal,
Callable,
Optional,
overload,
)
from nonebot.typing import T_State
from nonebot.matcher import Matcher
from nonebot.internal.params import Arg as Arg
from nonebot.internal.params import ArgStr as ArgStr
from nonebot.internal.params import Depends as Depends
from nonebot.internal.params import ArgParam as ArgParam
from nonebot.internal.params import BotParam as BotParam
from nonebot.adapters import Event, Message, MessageSegment
from nonebot.internal.params import EventParam as EventParam
from nonebot.internal.params import StateParam as StateParam
from nonebot.internal.params import DependParam as DependParam
from nonebot.internal.params import ArgPlainText as ArgPlainText
from nonebot.internal.params import DefaultParam as DefaultParam
from nonebot.internal.params import MatcherParam as MatcherParam
from nonebot.internal.params import ExceptionParam as ExceptionParam
from nonebot.consts import (
CMD_KEY,
PREFIX_KEY,
SHELL_ARGS,
SHELL_ARGV,
CMD_ARG_KEY,
KEYWORD_KEY,
RAW_CMD_KEY,
ENDSWITH_KEY,
CMD_START_KEY,
FULLMATCH_KEY,
REGEX_MATCHED,
STARTSWITH_KEY,
CMD_WHITESPACE_KEY,
)
def _endswith(state: T_State) -> str:
return state[ENDSWITH_KEY]
def Depends(
dependency: Optional[T_Handler] = None,
*,
use_cache: bool = True,
validate: Union[bool, PydanticFieldInfo] = False,
) -> Any:
"""子依赖装饰器
参数:
dependency: 依赖函数。默认为参数的类型注释。
use_cache: 是否使用缓存。默认为 `True`。
validate: 是否使用 Pydantic 类型校验。默认为 `False`。
用法:
```python
def depend_func() -> Any:
return ...
def depend_gen_func():
try:
yield ...
finally:
...
async def handler(
param_name: Any = Depends(depend_func),
gen: Any = Depends(depend_gen_func),
):
...
```
"""
return DependsInner(dependency, use_cache=use_cache, validate=validate)
The provided code snippet includes necessary dependencies for implementing the `Endswith` function. Write a Python function `def Endswith() -> str` to solve the following problem:
响应触发后缀
Here is the function:
def Endswith() -> str:
"""响应触发后缀"""
return Depends(_endswith, use_cache=False) | 响应触发后缀 |
156,043 | from typing import (
Any,
Dict,
List,
Match,
Tuple,
Union,
Literal,
Callable,
Optional,
overload,
)
from nonebot.typing import T_State
from nonebot.matcher import Matcher
from nonebot.internal.params import Arg as Arg
from nonebot.internal.params import ArgStr as ArgStr
from nonebot.internal.params import Depends as Depends
from nonebot.internal.params import ArgParam as ArgParam
from nonebot.internal.params import BotParam as BotParam
from nonebot.adapters import Event, Message, MessageSegment
from nonebot.internal.params import EventParam as EventParam
from nonebot.internal.params import StateParam as StateParam
from nonebot.internal.params import DependParam as DependParam
from nonebot.internal.params import ArgPlainText as ArgPlainText
from nonebot.internal.params import DefaultParam as DefaultParam
from nonebot.internal.params import MatcherParam as MatcherParam
from nonebot.internal.params import ExceptionParam as ExceptionParam
from nonebot.consts import (
CMD_KEY,
PREFIX_KEY,
SHELL_ARGS,
SHELL_ARGV,
CMD_ARG_KEY,
KEYWORD_KEY,
RAW_CMD_KEY,
ENDSWITH_KEY,
CMD_START_KEY,
FULLMATCH_KEY,
REGEX_MATCHED,
STARTSWITH_KEY,
CMD_WHITESPACE_KEY,
)
def _fullmatch(state: T_State) -> str:
return state[FULLMATCH_KEY]
def Depends(
dependency: Optional[T_Handler] = None,
*,
use_cache: bool = True,
validate: Union[bool, PydanticFieldInfo] = False,
) -> Any:
"""子依赖装饰器
参数:
dependency: 依赖函数。默认为参数的类型注释。
use_cache: 是否使用缓存。默认为 `True`。
validate: 是否使用 Pydantic 类型校验。默认为 `False`。
用法:
```python
def depend_func() -> Any:
return ...
def depend_gen_func():
try:
yield ...
finally:
...
async def handler(
param_name: Any = Depends(depend_func),
gen: Any = Depends(depend_gen_func),
):
...
```
"""
return DependsInner(dependency, use_cache=use_cache, validate=validate)
The provided code snippet includes necessary dependencies for implementing the `Fullmatch` function. Write a Python function `def Fullmatch() -> str` to solve the following problem:
响应触发完整消息
Here is the function:
def Fullmatch() -> str:
"""响应触发完整消息"""
return Depends(_fullmatch, use_cache=False) | 响应触发完整消息 |
156,044 | from typing import (
Any,
Dict,
List,
Match,
Tuple,
Union,
Literal,
Callable,
Optional,
overload,
)
from nonebot.typing import T_State
from nonebot.matcher import Matcher
from nonebot.internal.params import Arg as Arg
from nonebot.internal.params import ArgStr as ArgStr
from nonebot.internal.params import Depends as Depends
from nonebot.internal.params import ArgParam as ArgParam
from nonebot.internal.params import BotParam as BotParam
from nonebot.adapters import Event, Message, MessageSegment
from nonebot.internal.params import EventParam as EventParam
from nonebot.internal.params import StateParam as StateParam
from nonebot.internal.params import DependParam as DependParam
from nonebot.internal.params import ArgPlainText as ArgPlainText
from nonebot.internal.params import DefaultParam as DefaultParam
from nonebot.internal.params import MatcherParam as MatcherParam
from nonebot.internal.params import ExceptionParam as ExceptionParam
from nonebot.consts import (
CMD_KEY,
PREFIX_KEY,
SHELL_ARGS,
SHELL_ARGV,
CMD_ARG_KEY,
KEYWORD_KEY,
RAW_CMD_KEY,
ENDSWITH_KEY,
CMD_START_KEY,
FULLMATCH_KEY,
REGEX_MATCHED,
STARTSWITH_KEY,
CMD_WHITESPACE_KEY,
)
def _keyword(state: T_State) -> str:
return state[KEYWORD_KEY]
def Depends(
dependency: Optional[T_Handler] = None,
*,
use_cache: bool = True,
validate: Union[bool, PydanticFieldInfo] = False,
) -> Any:
"""子依赖装饰器
参数:
dependency: 依赖函数。默认为参数的类型注释。
use_cache: 是否使用缓存。默认为 `True`。
validate: 是否使用 Pydantic 类型校验。默认为 `False`。
用法:
```python
def depend_func() -> Any:
return ...
def depend_gen_func():
try:
yield ...
finally:
...
async def handler(
param_name: Any = Depends(depend_func),
gen: Any = Depends(depend_gen_func),
):
...
```
"""
return DependsInner(dependency, use_cache=use_cache, validate=validate)
The provided code snippet includes necessary dependencies for implementing the `Keyword` function. Write a Python function `def Keyword() -> str` to solve the following problem:
响应触发关键字
Here is the function:
def Keyword() -> str:
"""响应触发关键字"""
return Depends(_keyword, use_cache=False) | 响应触发关键字 |
156,045 | from typing import (
Any,
Dict,
List,
Match,
Tuple,
Union,
Literal,
Callable,
Optional,
overload,
)
from nonebot.typing import T_State
from nonebot.matcher import Matcher
from nonebot.internal.params import Arg as Arg
from nonebot.internal.params import ArgStr as ArgStr
from nonebot.internal.params import Depends as Depends
from nonebot.internal.params import ArgParam as ArgParam
from nonebot.internal.params import BotParam as BotParam
from nonebot.adapters import Event, Message, MessageSegment
from nonebot.internal.params import EventParam as EventParam
from nonebot.internal.params import StateParam as StateParam
from nonebot.internal.params import DependParam as DependParam
from nonebot.internal.params import ArgPlainText as ArgPlainText
from nonebot.internal.params import DefaultParam as DefaultParam
from nonebot.internal.params import MatcherParam as MatcherParam
from nonebot.internal.params import ExceptionParam as ExceptionParam
from nonebot.consts import (
CMD_KEY,
PREFIX_KEY,
SHELL_ARGS,
SHELL_ARGV,
CMD_ARG_KEY,
KEYWORD_KEY,
RAW_CMD_KEY,
ENDSWITH_KEY,
CMD_START_KEY,
FULLMATCH_KEY,
REGEX_MATCHED,
STARTSWITH_KEY,
CMD_WHITESPACE_KEY,
)
def Depends(
dependency: Optional[T_Handler] = None,
*,
use_cache: bool = True,
validate: Union[bool, PydanticFieldInfo] = False,
) -> Any:
"""子依赖装饰器
参数:
dependency: 依赖函数。默认为参数的类型注释。
use_cache: 是否使用缓存。默认为 `True`。
validate: 是否使用 Pydantic 类型校验。默认为 `False`。
用法:
```python
def depend_func() -> Any:
return ...
def depend_gen_func():
try:
yield ...
finally:
...
async def handler(
param_name: Any = Depends(depend_func),
gen: Any = Depends(depend_gen_func),
):
...
```
"""
return DependsInner(dependency, use_cache=use_cache, validate=validate)
The provided code snippet includes necessary dependencies for implementing the `Received` function. Write a Python function `def Received(id: Optional[str] = None, default: Any = None) -> Any` to solve the following problem:
`receive` 事件参数
Here is the function:
def Received(id: Optional[str] = None, default: Any = None) -> Any:
"""`receive` 事件参数"""
def _received(matcher: "Matcher") -> Any:
return matcher.get_receive(id or "", default)
return Depends(_received, use_cache=False) | `receive` 事件参数 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.