id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
156,467 | import torch
def get_shape(x):
    """Return the concrete shape of an AIT tensor as a plain list of ints.

    Each entry of ``x._attrs["shape"]`` is an AIT dimension object whose
    ``value()`` yields the resolved integer extent.
    """
    return [dim.value() for dim in x._attrs["shape"]]
156,468 | import json
from pathlib import Path
The provided code snippet includes necessary dependencies for implementing the `get_unet_in_channels` function. Write a Python function `def get_unet_in_channels(directory: Path)` to solve the following problem:
Get the number of input channels for the UNet model.
Here is the function:
def get_unet_in_channels(directory: Path) -> int:
    """Get the number of input channels for the UNet model.

    Reads ``config.json`` inside *directory*; when the file is missing,
    falls back to 4 (the standard Stable Diffusion latent channel count).
    """
    config_file = directory / "config.json"
    try:
        data = json.loads(config_file.read_text())
    except FileNotFoundError:
        # No dumped config -> assume a regular (non-inpainting) model.
        return 4
    return int(data["unet_in_channels"])
156,469 | import gc
import json
import logging
import os
import shutil
import time
from pathlib import Path
from typing import Tuple, Union
import torch
from aitemplate.testing import detect_target
from api import websocket_manager
from api.websockets import Data, Notification
from core.config import config
from core.inference.functions import load_pytorch_pipeline
from .src.compile_lib.clip import compile_clip
from .src.compile_lib.unet import compile_unet
from .src.compile_lib.vae import compile_vae
logger = logging.getLogger(__name__)
websocket_manager = WebSocketManager()
config = load_config()
def load_pytorch_pipeline(
    model_id_or_path: str,
    device: Union[str, torch.device] = "cuda",
    optimize: bool = True,
    is_for_aitemplate: bool = False,
) -> StableDiffusionPipeline:
    "Load the model from HuggingFace"
    # Loads either a single-file checkpoint (.ckpt/.safetensors) or a
    # diffusers directory/repo, then patches the text encoder(s) for
    # clip-skip and optional bitsandbytes quantization.
    logger.info(f"Loading {model_id_or_path} with {config.api.data_type}")
    if ".ckpt" in model_id_or_path or ".safetensors" in model_id_or_path:
        # Single-file checkpoint path
        use_safetensors = ".safetensors" in model_id_or_path
        if use_safetensors:
            logger.info("Loading model as safetensors")
        else:
            logger.info("Loading model as checkpoint")
        # This function does not inherit the channels so we need to hack it like this
        # (inpainting models use 9 input channels: 4 latent + 4 masked-image + 1 mask)
        in_channels = 9 if "inpaint" in model_id_or_path.casefold() else 4
        # NOTE(review): `type` shadows the builtin here; it is a tuple whose
        # second element names the architecture (e.g. "SDXL").
        type = determine_model_type(get_full_model_path(model_id_or_path))
        cl = StableDiffusionXLPipeline if type[1] == "SDXL" else StableDiffusionPipeline
        # I never knew this existed, but this is pretty handy :)
        # cl.__init__ = partialmethod(cl.__init__, low_cpu_mem_usage=True)  # type: ignore
        try:
            pipe = cl.from_single_file(
                str(get_full_model_path(model_id_or_path)),
                load_safety_checker=False,
                torch_dtype=config.api.load_dtype,
                resume_download=True,
                num_in_channels=in_channels,
                extract_ema=True,
            )
        except KeyError:
            # Some checkpoints have no EMA weights; retry without extraction.
            pipe = cl.from_single_file(
                str(get_full_model_path(model_id_or_path)),
                load_safety_checker=False,
                torch_dtype=config.api.load_dtype,
                resume_download=True,
                num_in_channels=in_channels,
            )
    else:
        # Diffusers folder / HuggingFace repo id
        pipe = DiffusionPipeline.from_pretrained(
            pretrained_model_name_or_path=get_full_model_path(model_id_or_path),
            torch_dtype=config.api.load_dtype,
            safety_checker=None,
            feature_extractor=None,
            low_cpu_mem_usage=True,
        )
    logger.debug(f"Loaded {model_id_or_path} with {config.api.data_type}")
    # Patch every text encoder attribute of the pipeline (SDXL has two).
    for name, text_encoder in [x for x in vars(pipe).items() if "text_encoder" in x[0]]:
        if text_encoder is not None:

            # Replacement encoder forward that always requests hidden states
            # and trims the last `clip_skip` layers ("clip skip" feature).
            def new_forward(
                inputs_embeds,
                attention_mask: Optional[torch.Tensor] = None,
                causal_attention_mask: Optional[torch.Tensor] = None,
                output_attentions: Optional[bool] = None,
                output_hidden_states: Optional[bool] = None,
                return_dict: Optional[bool] = None,
                bober=None,
            ):
                output_hidden_states = True
                original = bober.old_forward(  # type: ignore
                    inputs_embeds,
                    attention_mask=attention_mask,
                    causal_attention_mask=causal_attention_mask,
                    output_attentions=output_attentions,
                    output_hidden_states=output_hidden_states,
                    return_dict=return_dict,
                )
                # Drop the final `clip_skip` hidden states so the "last"
                # layer callers see is an earlier one.
                hidden_states = (_ := original[1])[: len(_) - config.api.clip_skip]
                last_hidden_state = hidden_states[-1]
                attentions = original[2] if output_attentions else None
                if not return_dict:
                    return last_hidden_state, hidden_states, attentions
                return BaseModelOutput(
                    last_hidden_state=last_hidden_state,
                    hidden_states=hidden_states,
                    attentions=attentions,
                )

            if config.api.clip_quantization != "full":
                # Optional int8/int4 quantization of the text encoder via
                # bitsandbytes (transformers helpers).
                from transformers import BitsAndBytesConfig
                from transformers.utils.bitsandbytes import (
                    get_keys_to_not_convert,
                    replace_with_bnb_linear,
                    set_module_quantized_tensor_to_device,
                )

                state_dict = text_encoder.state_dict()  # type: ignore
                bnbconfig = BitsAndBytesConfig(
                    load_in_8bit=config.api.clip_quantization == "int8",
                    load_in_4bit=config.api.clip_quantization == "int4",
                )
                dont_convert = get_keys_to_not_convert(text_encoder)
                text_encoder.is_loaded_in_8bit = True  # type: ignore
                text_encoder.is_quantized = True  # type: ignore
                nt = replace_with_bnb_linear(
                    pipe.text_encoder.to(config.api.device, config.api.load_dtype),  # type: ignore
                    dont_convert,
                    quantization_config=bnbconfig,
                )
                # This shouldn't even be needed, but diffusers likes meta tensors a bit too much
                # Not that I don't see their purpose, it's just less general
                for k, v in state_dict.items():
                    set_module_quantized_tensor_to_device(nt, k, config.api.device, v)
                setattr(pipe, name, nt)
                del state_dict, dont_convert
            # Keep the original forward and install the clip-skip wrapper.
            text_encoder.text_model.encoder.old_forward = text_encoder.text_model.encoder.forward  # type: ignore
            # Bind the encoder instance through the `bober` default so the
            # closure does not capture the loop variable late.
            text_encoder.text_model.encoder.forward = partial(new_forward, bober=text_encoder.text_model.encoder)  # type: ignore
            logger.debug(f"Overwritten {name}s final_layer_norm.")
    if optimize:
        from core.optimizations import optimize_model

        optimize_model(
            pipe=pipe,  # type: ignore
            device=device,
            is_for_aitemplate=is_for_aitemplate,
        )
        if config.api.sfast_compile:
            # Optional stable-fast compilation on top of the optimizations
            pipe = compile_sfast(pipe)
    else:
        pipe.to(device, config.api.load_dtype)
    return pipe  # type: ignore
def compile_clip(
    pt_mod,
    batch_size=(1, 8),
    seqlen=64,
    dim=768,
    num_heads=12,
    depth=12,
    use_fp16_acc=False,
    convert_conv_to_gemm=False,
    act_layer="gelu",
    constants=True,
    model_name="CLIPTextModel",
    work_dir="./tmp",
):
    """Compile a CLIP text encoder into an AITemplate module.

    Builds the AIT graph mirroring the PyTorch module, maps the PyTorch
    weights onto it and invokes the AIT compiler, writing artifacts under
    ``work_dir/model_name``.
    """
    # Build the AIT-side transformer with a causal mask and no extra masking.
    ait_mod = ait_CLIPTextTransformer(
        num_hidden_layers=depth,
        hidden_size=dim,
        num_attention_heads=num_heads,
        batch_size=batch_size,
        seq_len=seqlen,
        causal=True,
        mask_seq=0,
        act_layer=act_layer,
    )
    ait_mod.name_parameter_tensor()

    # Extract weights from the (eval-mode) PyTorch module.
    params_ait = map_clip(pt_mod.eval())

    # A static batch uses a plain int; otherwise a dynamic IntVar range.
    if batch_size[0] == batch_size[1]:
        batch_dim = batch_size[0]
    else:
        batch_dim = IntVar(values=list(batch_size), name="batch_size")

    input_ids_ait = Tensor(
        [batch_dim, seqlen], name="input0", dtype="int64", is_input=True  # type: ignore
    )
    position_ids_ait = Tensor(
        [batch_dim, seqlen], name="input1", dtype="int64", is_input=True  # type: ignore
    )
    output = ait_mod(input_ids=input_ids_ait, position_ids=position_ids_ait)
    mark_output(output)

    compile_model(
        output,
        detect_target(
            use_fp16_acc=use_fp16_acc, convert_conv_to_gemm=convert_conv_to_gemm
        ),
        work_dir,
        model_name,
        constants=params_ait if constants else None,
    )
def compile_unet(
    pt_mod,
    batch_size=(1, 8),
    height=(64, 2048),
    width=(64, 2048),
    clip_chunks=1,
    work_dir="./tmp",
    dim=320,
    hidden_dim=1024,
    use_fp16_acc=False,
    convert_conv_to_gemm=False,
    controlnet=False,
    attention_head_dim=[5, 10, 20, 20],  # noqa: B006
    model_name="UNet2DConditionModel",
    use_linear_projection=False,
    constants=True,
    block_out_channels=(320, 640, 1280, 1280),
    down_block_types=(
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    ),
    up_block_types=(
        "UpBlock2D",
        "CrossAttnUpBlock2D",
        "CrossAttnUpBlock2D",
        "CrossAttnUpBlock2D",
    ),
    in_channels=4,
    out_channels=4,
    sample_size=64,
    class_embed_type=None,
    num_class_embeds=None,
    only_cross_attention=[True, True, True, False],
    down_factor=8,
    time_embedding_dim=None,
    conv_in_kernel: int = 3,
    projection_class_embeddings_input_dim=None,
    addition_embed_type=None,
    addition_time_embed_dim=None,
    transformer_layers_per_block=1,
    dtype="float16",
):
    """Compile a diffusers UNet2DConditionModel into an AITemplate module.

    All (min, max) tuple parameters describe dynamic ranges; when min == max
    the graph is built with static shapes.  With ``controlnet=True`` extra
    residual inputs are declared so a ControlNet can feed the UNet.
    Artifacts are written under ``work_dir/model_name``.
    """
    # SDXL is detected by the presence of the projection embedding dim.
    xl = False
    if projection_class_embeddings_input_dim is not None:
        xl = True
    # Normalize scalar per-block settings into per-block sequences.
    if isinstance(only_cross_attention, bool):
        only_cross_attention = [only_cross_attention] * len(block_out_channels)
    if isinstance(transformer_layers_per_block, int):
        transformer_layers_per_block = [transformer_layers_per_block] * len(
            down_block_types
        )
    if isinstance(attention_head_dim, int):
        attention_head_dim = (attention_head_dim,) * len(down_block_types)
    ait_mod = ait_UNet2DConditionModel(
        sample_size=sample_size,
        cross_attention_dim=hidden_dim,
        attention_head_dim=attention_head_dim,  # type: ignore
        use_linear_projection=use_linear_projection,
        up_block_types=up_block_types,
        down_block_types=down_block_types,
        block_out_channels=block_out_channels,
        in_channels=in_channels,
        out_channels=out_channels,
        class_embed_type=class_embed_type,
        num_class_embeds=num_class_embeds,
        only_cross_attention=only_cross_attention,
        time_embedding_dim=time_embedding_dim,
        conv_in_kernel=conv_in_kernel,
        projection_class_embeddings_input_dim=projection_class_embeddings_input_dim,
        addition_embed_type=addition_embed_type,
        addition_time_embed_dim=addition_time_embed_dim,
        transformer_layers_per_block=transformer_layers_per_block,  # type: ignore
        dtype=dtype,
    )
    ait_mod.name_parameter_tensor()
    # set AIT parameters from the eval-mode PyTorch module
    pt_mod = pt_mod.eval()
    params_ait = map_unet(
        pt_mod,
        dim=dim,
        in_channels=in_channels,
        conv_in_key="conv_in_weight",
        dtype=dtype,
    )
    static_shape = (
        width[0] == width[1]
        and height[0] == height[1]
        and batch_size[0] == batch_size[1]
    )
    # Precompute latent-space dims at full, 1/2, 1/4 and 1/8 resolution —
    # these match the UNet's down blocks (and ControlNet residual shapes).
    if static_shape:
        batch_size = batch_size[0] * 2  # double batch size for unet
        height = height[0] // down_factor
        width = width[0] // down_factor
        height_d = height
        width_d = width
        height_1_d = height
        width_1_d = width
        height_2 = height // 2
        width_2 = width // 2
        height_4 = height // 4
        width_4 = width // 4
        height_8 = height // 8
        width_8 = width // 8
        height_2_d = height_2
        width_2_d = width_2
        height_4_d = height_4
        width_4_d = width_4
        height_8_d = height_8
        width_8_d = width_8
    else:
        batch_size = batch_size[0], batch_size[1] * 2  # double batch size for unet
        batch_size = IntVar(values=list(batch_size), name="batch_size")
        height = height[0] // down_factor, height[1] // down_factor
        width = width[0] // down_factor, width[1] // down_factor
        height_d = IntVar(values=list(height), name="height_d")
        width_d = IntVar(values=list(width), name="width_d")
        height_1_d = IntVar(values=list(height), name="height_1_d")
        width_1_d = IntVar(values=list(width), name="width_1_d")
        height_2 = height[0] // 2, height[1] // 2
        width_2 = width[0] // 2, width[1] // 2
        height_4 = height[0] // 4, height[1] // 4
        width_4 = width[0] // 4, width[1] // 4
        height_8 = height[0] // 8, height[1] // 8
        width_8 = width[0] // 8, width[1] // 8
        height_2_d = IntVar(values=list(height_2), name="height_2_d")
        width_2_d = IntVar(values=list(width_2), name="width_2_d")
        height_4_d = IntVar(values=list(height_4), name="height_4_d")
        width_4_d = IntVar(values=list(width_4), name="width_4_d")
        height_8_d = IntVar(values=list(height_8), name="height_8_d")
        width_8_d = IntVar(values=list(width_8), name="width_8_d")
    # Text embedding length: 77 tokens, optionally chunked when dynamic.
    if static_shape:
        embedding_size = 77
    else:
        clip_chunks = 77, 77 * clip_chunks
        embedding_size = IntVar(values=list(clip_chunks), name="embedding_size")
    # Note channels-last layout: [batch, height, width, channels].
    latent_model_input_ait = Tensor(
        [batch_size, height_d, width_d, in_channels], name="input0", is_input=True, dtype=dtype  # type: ignore
    )
    timesteps_ait = Tensor([batch_size], name="input1", is_input=True, dtype=dtype)
    text_embeddings_pt_ait = Tensor(
        [batch_size, embedding_size, hidden_dim], name="input2", is_input=True, dtype=dtype  # type: ignore
    )
    class_labels = None
    # NOTE(review): in_channels == 7 appears to mark a class-conditioned
    # variant (e.g. upscalers) — confirm against the model configs used.
    if in_channels == 7:
        class_labels = Tensor([batch_size], name="input3", dtype="int64", is_input=True)
    add_embeds = None
    if xl:
        add_embeds = Tensor(
            [batch_size, projection_class_embeddings_input_dim], name="add_embeds", is_input=True, dtype=dtype  # type: ignore
        )
    # ControlNet residual inputs (12 down-block residuals + 1 mid-block).
    down_block_residual_0 = None
    down_block_residual_1 = None
    down_block_residual_2 = None
    down_block_residual_3 = None
    down_block_residual_4 = None
    down_block_residual_5 = None
    down_block_residual_6 = None
    down_block_residual_7 = None
    down_block_residual_8 = None
    down_block_residual_9 = None
    down_block_residual_10 = None
    down_block_residual_11 = None
    mid_block_residual = None
    if controlnet:
        down_block_residual_0 = Tensor(
            [batch_size, height_1_d, width_1_d, block_out_channels[0]],
            name="down_block_residual_0",
            is_input=True,
        )
        down_block_residual_1 = Tensor(
            [batch_size, height_1_d, width_1_d, block_out_channels[0]],
            name="down_block_residual_1",
            is_input=True,
        )
        down_block_residual_2 = Tensor(
            [batch_size, height_1_d, width_1_d, block_out_channels[0]],
            name="down_block_residual_2",
            is_input=True,
        )
        down_block_residual_3 = Tensor(
            [batch_size, height_2_d, width_2_d, block_out_channels[0]],
            name="down_block_residual_3",
            is_input=True,
        )
        down_block_residual_4 = Tensor(
            [batch_size, height_2_d, width_2_d, block_out_channels[1]],
            name="down_block_residual_4",
            is_input=True,
        )
        down_block_residual_5 = Tensor(
            [batch_size, height_2_d, width_2_d, block_out_channels[1]],
            name="down_block_residual_5",
            is_input=True,
        )
        down_block_residual_6 = Tensor(
            [batch_size, height_4_d, width_4_d, block_out_channels[1]],
            name="down_block_residual_6",
            is_input=True,
        )
        down_block_residual_7 = Tensor(
            [batch_size, height_4_d, width_4_d, block_out_channels[2]],
            name="down_block_residual_7",
            is_input=True,
        )
        down_block_residual_8 = Tensor(
            [batch_size, height_4_d, width_4_d, block_out_channels[2]],
            name="down_block_residual_8",
            is_input=True,
        )
        down_block_residual_9 = Tensor(
            [batch_size, height_8_d, width_8_d, block_out_channels[2]],
            name="down_block_residual_9",
            is_input=True,
        )
        down_block_residual_10 = Tensor(
            [batch_size, height_8_d, width_8_d, block_out_channels[3]],
            name="down_block_residual_10",
            is_input=True,
        )
        down_block_residual_11 = Tensor(
            [batch_size, height_8_d, width_8_d, block_out_channels[3]],
            name="down_block_residual_11",
            is_input=True,
        )
        mid_block_residual = Tensor(
            [batch_size, height_8_d, width_8_d, block_out_channels[3]],
            name="mid_block_residual",
            is_input=True,
        )
    Y = ait_mod(
        sample=latent_model_input_ait,
        timesteps=timesteps_ait,
        encoder_hidden_states=text_embeddings_pt_ait,
        down_block_residual_0=down_block_residual_0,
        down_block_residual_1=down_block_residual_1,
        down_block_residual_2=down_block_residual_2,
        down_block_residual_3=down_block_residual_3,
        down_block_residual_4=down_block_residual_4,
        down_block_residual_5=down_block_residual_5,
        down_block_residual_6=down_block_residual_6,
        down_block_residual_7=down_block_residual_7,
        down_block_residual_8=down_block_residual_8,
        down_block_residual_9=down_block_residual_9,
        down_block_residual_10=down_block_residual_10,
        down_block_residual_11=down_block_residual_11,
        mid_block_residual=mid_block_residual,
        class_labels=class_labels,
        add_embeds=add_embeds,
    )
    mark_output(Y)
    target = detect_target(
        use_fp16_acc=use_fp16_acc, convert_conv_to_gemm=convert_conv_to_gemm
    )
    compile_model(
        Y,
        target,
        work_dir,
        model_name,
        constants=params_ait if constants else None,
        # Graph optimization is skipped for SDXL
        do_optimize_graph=False if xl else True,
    )
def compile_vae(
    pt_mod,
    batch_size=(1, 8),
    height=(64, 2048),
    width=(64, 2048),
    use_fp16_acc=False,
    convert_conv_to_gemm=False,
    model_name="AutoencoderKL",
    constants=True,
    block_out_channels=[128, 256, 512, 512],
    layers_per_block=2,
    act_fn="silu",
    latent_channels=4,
    sample_size=512,
    in_channels=3,
    out_channels=3,
    down_block_types=[
        "DownEncoderBlock2D",
        "DownEncoderBlock2D",
        "DownEncoderBlock2D",
        "DownEncoderBlock2D",
    ],
    up_block_types=[
        "UpDecoderBlock2D",
        "UpDecoderBlock2D",
        "UpDecoderBlock2D",
        "UpDecoderBlock2D",
    ],
    input_size=(64, 64),
    down_factor=8,
    dtype="float16",
    work_dir="./tmp",
    vae_encode=False,
):
    """Compile a diffusers AutoencoderKL (VAE) into an AITemplate module.

    Compiles either the decoder (default) or, with ``vae_encode=True``, the
    encoder.  The (min, max) tuples describe dynamic ranges; equal bounds
    produce a static graph.  Artifacts go under ``work_dir/model_name``.
    """
    ait_vae = ait_AutoencoderKL(
        batch_size[0],
        input_size[0],
        input_size[1],
        in_channels=in_channels,
        out_channels=out_channels,
        down_block_types=down_block_types,
        up_block_types=up_block_types,
        block_out_channels=block_out_channels,
        layers_per_block=layers_per_block,
        act_fn=act_fn,
        latent_channels=latent_channels,
        sample_size=sample_size,
        dtype=dtype,
    )
    static_batch = batch_size[0] == batch_size[1]
    static_shape = height[0] == height[1] and width[0] == width[1]
    # The decoder consumes latents, so its spatial dims are the pixel dims
    # divided by down_factor; the encoder consumes full-resolution images.
    if not vae_encode:
        height = height[0] // down_factor, height[1] // down_factor
        width = width[0] // down_factor, width[1] // down_factor
    if static_batch:
        batch_size = batch_size[0]
    else:
        batch_size = IntVar(values=list(batch_size), name="batch_size")
    if static_shape:
        height_d = height[0]
        width_d = width[0]
    else:
        height_d = IntVar(values=list(height), name="height")
        width_d = IntVar(values=list(width), name="width")
    # Channels-last input: RGB (3) for encode, latent channels for decode.
    ait_input = Tensor(
        shape=[batch_size, height_d, width_d, 3 if vae_encode else latent_channels],  # type: ignore
        name="vae_input",
        is_input=True,
        dtype=dtype,
    )
    sample = None
    if vae_encode:
        # Noise sample used for the reparameterization during encoding.
        sample = Tensor(
            shape=[batch_size, height_d, width_d, latent_channels],  # type: ignore
            name="vae_sample",
            is_input=True,
            dtype=dtype,
        )
    ait_vae.name_parameter_tensor()
    pt_mod = pt_mod.eval()
    params_ait = map_vae(pt_mod, dtype=dtype, encoder=vae_encode)
    if vae_encode:
        Y = ait_vae.encode(ait_input, sample)  # type: ignore
    else:
        Y = ait_vae.decode(ait_input)
    mark_output(Y)
    target = detect_target(
        use_fp16_acc=use_fp16_acc, convert_conv_to_gemm=convert_conv_to_gemm
    )
    compile_model(
        Y,
        target,
        work_dir,
        model_name,
        constants=params_ait if constants else None,
    )
The provided code snippet includes necessary dependencies for implementing the `compile_diffusers` function. Write a Python function `def compile_diffusers( local_dir_or_id: str, width: Union[int, Tuple[int, int]] = 512, height: Union[int, Tuple[int, int]] = 512, batch_size: Union[int, Tuple[int, int]] = 1, clip_chunks: int = 6, convert_conv_to_gemm=True, invalidate_cache=False, device: str = "cuda", )` to solve the following problem:
Compile Stable Diffusion Pipeline to AITemplate format
Here is the function:
def compile_diffusers(
    local_dir_or_id: str,
    width: Union[int, Tuple[int, int]] = 512,
    height: Union[int, Tuple[int, int]] = 512,
    batch_size: Union[int, Tuple[int, int]] = 1,
    clip_chunks: int = 6,
    convert_conv_to_gemm=True,
    invalidate_cache=False,
    device: str = "cuda",
):
    """Compile Stable Diffusion Pipeline to AITemplate format.

    Loads the PyTorch pipeline, then compiles CLIP, UNet, a ControlNet
    variant of the UNet and the VAE in sequence, broadcasting progress over
    the websocket manager.  Dimensions may be ints (static) or (min, max)
    tuples (dynamic) and must be multiples of 64.  Errors in one stage are
    reported and do not abort the remaining stages.
    """
    # Wipe out the AIT profiler cache.
    # FIX: os.path.exists() does NOT expand "~", so the old literal path
    # "~/.aitemplate/cuda.db" never matched and the cache was never wiped.
    cuda_db = os.path.expanduser("~/.aitemplate/cuda.db")
    if os.path.exists(cuda_db):
        logger.info("Wiping out cache...")
        os.remove(cuda_db)
        logger.info("Cache wiped out")
    use_fp16_acc = config.api.data_type != "float32"
    start_time = time.time()
    # Fixed seed keeps repeated compilations deterministic.
    torch.manual_seed(4896)
    if detect_target().name() == "rocm":
        # conv->gemm conversion is unsupported on ROCm
        convert_conv_to_gemm = False
    pipe = load_pytorch_pipeline(
        model_id_or_path=local_dir_or_id,
        device=device,
    )
    # Normalize scalar dims to (min, max) ranges.
    if isinstance(width, int):
        width = (width, width)
    if isinstance(height, int):
        height = (height, height)
    if isinstance(batch_size, int):
        batch_size = (batch_size, batch_size)
    assert (
        height[0] % 64 == 0
        and height[1] % 64 == 0
        and width[0] % 64 == 0
        and width[1] % 64 == 0
    ), f"Height and Width must be multiples of 64, otherwise, the compilation process will fail. Got {height=} {width=}"
    # Output directory encodes model name and the compiled dynamic ranges.
    dump_dir = os.path.join(
        "data",
        "aitemplate",
        local_dir_or_id.replace("/", "--")
        + f"__{width[0]}-{width[1]}x{height[0]}-{height[1]}x{batch_size[0]}-{batch_size[1]}",
    )
    websocket_manager.broadcast_sync(
        Notification(
            severity="info",
            title="AITemplate",
            message=f"Compiling {local_dir_or_id} to AITemplate format",
        )
    )
    os.environ["NUM_BUILDERS"] = str(config.aitemplate.num_threads)
    websocket_manager.broadcast_sync(
        Data(
            data_type="aitemplate_compile",
            data={
                "clip": "wait",
                "unet": "wait",
                "controlnet_unet": "wait",
                "vae": "wait",
                "cleanup": "wait",
            },
        )
    )
    # CLIP
    websocket_manager.broadcast_sync(
        Data(data_type="aitemplate_compile", data={"clip": "process"})
    )
    try:
        if (
            invalidate_cache
            or not Path(dump_dir).joinpath("CLIPTextModel/test.so").exists()
        ):
            compile_clip(
                pipe.text_encoder,  # type: ignore
                batch_size=batch_size,
                seqlen=pipe.text_encoder.config.max_position_embeddings,
                use_fp16_acc=use_fp16_acc,
                constants=True,
                convert_conv_to_gemm=convert_conv_to_gemm,
                depth=pipe.text_encoder.config.num_hidden_layers,  # type: ignore
                num_heads=pipe.text_encoder.config.num_attention_heads,  # type: ignore
                dim=pipe.text_encoder.config.hidden_size,  # type: ignore
                act_layer=pipe.text_encoder.config.hidden_act,  # type: ignore
                work_dir=dump_dir,
            )
            websocket_manager.broadcast_sync(
                Data(data_type="aitemplate_compile", data={"clip": "finish"})
            )
        else:
            logger.info("CLIP already compiled. Skipping...")
    except Exception as e:  # noqa: BLE001 — report and continue with next stage
        logger.error(e)
        websocket_manager.broadcast_sync(
            Data(data_type="aitemplate_compile", data={"clip": "error"})
        )
        websocket_manager.broadcast_sync(
            Notification(
                severity="error",
                title="AITemplate",
                message=f"Error while compiling CLIP: {e}",
            )
        )
    # UNet
    websocket_manager.broadcast_sync(
        Data(data_type="aitemplate_compile", data={"unet": "process"})
    )
    try:
        if (
            invalidate_cache
            or not Path(dump_dir).joinpath("UNet2DConditionModel/test.so").exists()
        ):
            compile_unet(
                pipe.unet,  # type: ignore
                batch_size=batch_size,
                width=width,
                height=height,
                use_fp16_acc=use_fp16_acc,
                work_dir=dump_dir,
                convert_conv_to_gemm=convert_conv_to_gemm,
                hidden_dim=pipe.unet.config.cross_attention_dim,
                attention_head_dim=pipe.unet.config.attention_head_dim,
                use_linear_projection=pipe.unet.config.get(
                    "use_linear_projection", False
                ),
                block_out_channels=pipe.unet.config.block_out_channels,
                down_block_types=pipe.unet.config.down_block_types,
                up_block_types=pipe.unet.config.up_block_types,
                in_channels=pipe.unet.config.in_channels,
                out_channels=pipe.unet.config.out_channels,
                class_embed_type=pipe.unet.config.class_embed_type,
                num_class_embeds=pipe.unet.config.num_class_embeds,
                only_cross_attention=pipe.unet.config.only_cross_attention,
                sample_size=pipe.unet.config.sample_size,
                dim=pipe.unet.config.block_out_channels[0],
                time_embedding_dim=None,
                down_factor=8,
                clip_chunks=clip_chunks,
                constants=True,
                controlnet=False,
                conv_in_kernel=pipe.unet.config.conv_in_kernel,
                projection_class_embeddings_input_dim=pipe.unet.config.projection_class_embeddings_input_dim,
                addition_embed_type=pipe.unet.config.addition_embed_type,
                addition_time_embed_dim=pipe.unet.config.addition_time_embed_dim,
                transformer_layers_per_block=pipe.unet.config.transformer_layers_per_block,
            )
        else:
            logger.info("UNet already compiled. Skipping...")
        # Dump UNet config so runtime loaders can reconstruct parameters
        with open(
            os.path.join(dump_dir, "UNet2DConditionModel", "config.json"),
            "w",
            encoding="utf-8",
        ) as f:
            json.dump(pipe.unet.config, f, indent=4, ensure_ascii=False)  # type: ignore
        logger.info("UNet config saved")
        websocket_manager.broadcast_sync(
            Data(data_type="aitemplate_compile", data={"unet": "finish"})
        )
    except Exception as e:  # noqa: BLE001
        logger.error(e)
        websocket_manager.broadcast_sync(
            Data(data_type="aitemplate_compile", data={"unet": "error"})
        )
        websocket_manager.broadcast_sync(
            Notification(
                severity="error",
                title="AITemplate",
                message=f"Error while compiling UNet: {e}",
            )
        )
    # ControlNet UNet
    websocket_manager.broadcast_sync(
        Data(data_type="aitemplate_compile", data={"controlnet_unet": "process"})
    )
    try:
        if (
            invalidate_cache
            or not Path(dump_dir)
            .joinpath("ControlNetUNet2DConditionModel/test.so")
            .exists()
        ):
            compile_unet(
                pipe.unet,  # type: ignore
                model_name="ControlNetUNet2DConditionModel",
                batch_size=batch_size,
                width=width,
                height=height,
                use_fp16_acc=use_fp16_acc,
                work_dir=dump_dir,
                convert_conv_to_gemm=convert_conv_to_gemm,
                hidden_dim=pipe.unet.config.cross_attention_dim,
                attention_head_dim=pipe.unet.config.attention_head_dim,
                use_linear_projection=pipe.unet.config.get(
                    "use_linear_projection", False
                ),
                block_out_channels=pipe.unet.config.block_out_channels,
                down_block_types=pipe.unet.config.down_block_types,
                up_block_types=pipe.unet.config.up_block_types,
                in_channels=pipe.unet.config.in_channels,
                out_channels=pipe.unet.config.out_channels,
                class_embed_type=pipe.unet.config.class_embed_type,
                num_class_embeds=pipe.unet.config.num_class_embeds,
                only_cross_attention=pipe.unet.config.only_cross_attention,
                sample_size=pipe.unet.config.sample_size,
                dim=pipe.unet.config.block_out_channels[0],
                time_embedding_dim=None,
                down_factor=8,
                constants=True,
                controlnet=True,
                conv_in_kernel=pipe.unet.config.conv_in_kernel,
                projection_class_embeddings_input_dim=pipe.unet.config.projection_class_embeddings_input_dim,
                addition_embed_type=pipe.unet.config.addition_embed_type,
                addition_time_embed_dim=pipe.unet.config.addition_time_embed_dim,
                transformer_layers_per_block=pipe.unet.config.transformer_layers_per_block,
            )
        else:
            logger.info("UNet already compiled. Skipping...")
        # Dump UNet config
        with open(
            os.path.join(dump_dir, "ControlNetUNet2DConditionModel", "config.json"),
            "w",
            encoding="utf-8",
        ) as f:
            json.dump(pipe.unet.config, f, indent=4, ensure_ascii=False)  # type: ignore
        logger.info("UNet config saved")
        websocket_manager.broadcast_sync(
            Data(data_type="aitemplate_compile", data={"controlnet_unet": "finish"})
        )
    except Exception as e:  # noqa: BLE001
        logger.error(e)
        websocket_manager.broadcast_sync(
            Data(data_type="aitemplate_compile", data={"controlnet_unet": "error"})
        )
        websocket_manager.broadcast_sync(
            Notification(
                severity="error",
                title="AITemplate",
                message=f"Error while compiling ControlNet UNet: {e}",
            )
        )
    # VAE
    websocket_manager.broadcast_sync(
        Data(data_type="aitemplate_compile", data={"vae": "process"})
    )
    try:
        if (
            invalidate_cache
            or not Path(dump_dir).joinpath("AutoencoderKL/test.so").exists()
        ):
            compile_vae(
                pipe.vae,  # type: ignore
                batch_size=batch_size,
                width=width,
                height=height,
                use_fp16_acc=use_fp16_acc,
                convert_conv_to_gemm=convert_conv_to_gemm,
                block_out_channels=pipe.vae.config.block_out_channels,
                layers_per_block=pipe.vae.config.layers_per_block,
                act_fn=pipe.vae.config.act_fn,
                latent_channels=pipe.vae.config.latent_channels,
                in_channels=pipe.vae.config.in_channels,
                out_channels=pipe.vae.config.out_channels,
                down_block_types=pipe.vae.config.down_block_types,
                up_block_types=pipe.vae.config.up_block_types,
                sample_size=pipe.vae.config.sample_size,
                input_size=(64, 64),
                down_factor=8,
                vae_encode=False,
                constants=True,
                work_dir=dump_dir,
                dtype="float16" if use_fp16_acc else "float32",
            )
        else:
            logger.info("VAE already compiled. Skipping...")
        # Dump VAE config
        with open(
            os.path.join(dump_dir, "AutoencoderKL", "config.json"),
            "w",
            encoding="utf-8",
        ) as f:
            json.dump(pipe.vae.config, f, indent=4, ensure_ascii=False)  # type: ignore
        logger.info("VAE config saved")
        websocket_manager.broadcast_sync(
            Data(data_type="aitemplate_compile", data={"vae": "finish"})
        )
    except Exception as e:  # noqa: BLE001
        logger.error(e)
        websocket_manager.broadcast_sync(
            Data(data_type="aitemplate_compile", data={"vae": "error"})
        )
        websocket_manager.broadcast_sync(
            Notification(
                severity="error",
                title="AITemplate",
                message=f"Error while compiling VAE: {e}",
            )
        )
    # Cleanup
    websocket_manager.broadcast_sync(
        Data(data_type="aitemplate_compile", data={"cleanup": "process"})
    )
    try:
        # Clean all files except test.so recursively
        for root, _dirs, files in os.walk(dump_dir):
            for file in files:
                if file not in ["test.so", "config.json"]:
                    os.remove(os.path.join(root, file))
        # Clean profiler (sometimes not present)
        try:
            shutil.rmtree(os.path.join(dump_dir, "profiler"))
        except FileNotFoundError:
            pass
        websocket_manager.broadcast_sync(
            Data(data_type="aitemplate_compile", data={"cleanup": "finish"})
        )
    except Exception as e:  # noqa: BLE001
        logger.error(e)
        websocket_manager.broadcast_sync(
            Data(data_type="aitemplate_compile", data={"cleanup": "error"})
        )
        websocket_manager.broadcast_sync(
            Notification(
                severity="error",
                title="AITemplate",
                message=f"Error while cleaning up: {e}",
            )
        )
    # Release the PyTorch pipeline and reclaim GPU memory.
    del pipe
    torch.cuda.empty_cache()
    torch.cuda.ipc_collect()
    gc.collect()
    deltatime = time.time() - start_time
    websocket_manager.broadcast_sync(
        Notification(
            severity="success",
            title="AITemplate",
            message=f"Successfully compiled {local_dir_or_id} to AITemplate format in {deltatime:.2f} seconds",
            timeout=0,
        )
    )
    logger.info(f"Finished compiling in {deltatime:.2f} seconds")
156,470 | from typing import TYPE_CHECKING
from discord.ext import commands
from discord.ext.commands import Cog, Context
class Core(Cog):
    "Core commands"

    # NOTE(review): the methods here are presumably registered as bot
    # commands via decorators stripped by the extraction — confirm upstream.
    def __init__(self, bot: "ModularBot") -> None:
        # Keep a reference to the owning bot for use inside commands.
        self.bot = bot

    async def sync(self, ctx: Context):
        "Sync slash commands with the API"
        await self.bot.sync()
        await ctx.send("✅ Synced slash commands!")
The provided code snippet includes necessary dependencies for implementing the `setup` function. Write a Python function `async def setup(bot: "ModularBot")` to solve the following problem:
Will be called by the bot
Here is the function:
async def setup(bot: "ModularBot"):
    """Extension entry point — invoked by the bot when loading this module."""
    core_cog = Core(bot)
    await bot.add_cog(core_cog)
156,471 | from typing import TYPE_CHECKING, Any, Dict, List
import discord
from aiohttp import ClientSession
from discord.ext import commands
from discord.ext.commands import Cog, Context
from bot import shared as shared_bot
from bot.helper import get_available_models, get_loaded_models
from core import shared
from core.types import InferenceBackend
class Models(Cog):
    "Commands for interacting with the models"

    def __init__(self, bot: "ModularBot") -> None:
        super().__init__()
        self.bot = bot

    async def loaded_models(self, ctx: Context) -> None:
        "Show models loaded in the API"
        # Ask the local API for the list of currently loaded models.
        async with ClientSession() as session:
            async with session.get(
                f"http://localhost:{shared.api_port}/api/models/loaded"
            ) as r:
                status = r.status
                response: List[Dict[str, Any]] = await r.json()
                models = []
                if status == 200:
                    for model in response:
                        models.append(f"{model['name']} - {model['backend']}")
                    embed = discord.Embed(
                        title="Loaded Models",
                        description="\n".join(models)
                        if len(models) > 0
                        else "No models loaded",
                    )
                    await ctx.send(embed=embed)
                else:
                    await ctx.send(f"Error: {status}")

    async def available_models(self, ctx: Context) -> None:
        "List all available models"
        # Refresh both caches (available + loaded) before answering.
        available_models, status = await get_available_models()
        shared_bot.models.set_cached_available_models(available_models)
        available_models, status = await shared_bot.models.cached_available_models()
        loaded_models, status = await get_loaded_models()
        shared_bot.models.set_cached_loaded_models(loaded_models)
        # `status` here is the result of the *last* call above.
        if status == 200:
            await ctx.send(
                "Available models:\n`{}`".format("\n".join(available_models))
            )
        else:
            await ctx.send(f"Error: {status}")

    async def load_model(
        self,
        ctx: Context,
        model: str,
        backend: InferenceBackend = "PyTorch",
    ) -> None:
        "Load a model"
        message = await ctx.send(f"Loading model {model}...")
        async with ClientSession() as session:
            async with session.post(
                f"http://localhost:{shared.api_port}/api/models/load",
                params={"model": model, "backend": backend},
            ) as response:
                status = response.status
                # NOTE: rebinds `response` from the HTTP response to its JSON body.
                response = await response.json()
                if status == 200:
                    await message.edit(content=f"{response['message']}: {model}")
                else:
                    await message.edit(content=f"Error: **{response.get('detail')}**")

    async def unload_model(self, ctx: Context, model: str) -> None:
        "Unload a model"
        message = await ctx.send(f"Unloading model {model}...")
        async with ClientSession() as session:
            async with session.post(
                f"http://localhost:{shared.api_port}/api/models/unload",
                params={"model": model},
            ) as response:
                status = response.status
                response = await response.json()
                if status == 200:
                    await message.edit(content=f"{response['message']}: {model}")
                else:
                    await message.edit(content=f"Error: {status}")
The provided code snippet includes necessary dependencies for implementing the `setup` function. Write a Python function `async def setup(bot: "ModularBot") -> None` to solve the following problem:
Will be called by the bot
Here is the function:
async def setup(bot: "ModularBot") -> None:
    """Extension entry point — invoked by the bot when loading this module."""
    await bot.add_cog(Models(bot))
156,472 | import logging
import random
import re
from typing import TYPE_CHECKING, Optional
from uuid import uuid4
import discord
from diffusers.schedulers.scheduling_utils import KarrasDiffusionSchedulers
from discord import File
from discord.ext import commands
from discord.ext.commands import Cog, Context
from bot.helper import find_closest_model, inference_call
from bot.shared import config
from core.utils import convert_base64_to_bytes
class Text2Image(Cog):
"Commands for generating images from text"
def __init__(self, bot: "ModularBot") -> None:
self.bot = bot
async def dream_unsupported(
self,
ctx: Context,
prompt: str,
model: str,
negative_prompt: str = "",
guidance_scale: float = config.default_cfg,
steps: int = config.default_steps,
width: int = config.default_width,
height: int = config.default_height,
count: int = config.default_count,
seed: Optional[int] = None,
scheduler: KarrasDiffusionSchedulers = config.default_scheduler,
verbose: bool = config.default_verbose,
):
"Generate an image from prompt"
if seed is None:
seed = random.randint(0, 1000000)
if config.max_width < width or config.max_height < height:
return await ctx.send(
f"Image size is too big, maximum size is {config.max_width}x{config.max_height}"
)
if config.max_count < count:
return await ctx.send(
f"Image count is too big, maximum count is {config.max_count}"
)
prompt = prompt + config.extra_prompt
negative_prompt = negative_prompt + config.extra_negative_prompt
try:
model = await find_closest_model(model)
except IndexError:
await ctx.send(f"No loaded model that is close to `{model}` found")
return
payload = {
"data": {
"prompt": prompt,
"id": uuid4().hex,
"negative_prompt": negative_prompt,
"width": width,
"height": height,
"steps": steps,
"guidance_scale": guidance_scale,
"seed": seed,
"batch_size": 1,
"batch_count": count,
"scheduler": scheduler.value,
},
"model": model,
"save_image": False,
}
message = await ctx.send(f"Generating image with `{model}`...")
try:
status, response = await inference_call(payload=payload)
except Exception as e:
raise e
if status == 200:
if verbose:
embed = discord.Embed(
color=discord.Color.green(),
)
embed.add_field(name="Seed", value=seed)
embed.add_field(name="Time", value=f"{response.get('time'):.2f}s")
embed.add_field(name="Model", value=model)
embed.add_field(name="Negative Prompt", value=negative_prompt)
embed.add_field(name="Guidance Scale", value=guidance_scale)
embed.add_field(name="Steps", value=steps)
embed.add_field(name="Width", value=width)
embed.add_field(name="Height", value=height)
await message.edit(embed=embed)
await message.edit(
content=f"{ctx.author.mention} - **{prompt}**, Time: {response.get('time'):.2f}s, Seed: {seed}"
)
file_array = [
File(
convert_base64_to_bytes(re.sub(pattern, "", x)),
filename=f"{seed}.png",
)
for x in response["images"]
]
await message.add_files(*file_array[len(file_array) - count :])
else:
if response.get("detail"):
await message.edit(
content=f"{ctx.author.mention} Dream failed - **{response.get('detail')}**"
)
else:
await message.edit(
content=f"{ctx.author.mention} Dream failed - {status}"
)
logger.info(f"Finished task {prompt} for {str(ctx.author)}")
The provided code snippet includes necessary dependencies for implementing the `setup` function. Write a Python function `async def setup(bot: "ModularBot")` to solve the following problem:
Will be called by the bot
Here is the function:
async def setup(bot: "ModularBot"):
"Will be called by the bot"
await bot.add_cog(Text2Image(bot)) | Will be called by the bot |
156,473 | from typing import TYPE_CHECKING
import discord
from discord.ext import commands
from discord.ext.commands import Cog
class Listeners(Cog):
"Middlewares for the bot"
def __init__(self, bot: "ModularBot") -> None:
self.bot = bot
async def on_ready(self) -> None:
"When the bot is connected and ready to operate"
await self.bot.change_presence(
status=discord.Status.online, activity=discord.Game("VoltaML - /dream")
)
The provided code snippet includes necessary dependencies for implementing the `setup` function. Write a Python function `async def setup(bot: "ModularBot") -> None` to solve the following problem:
Load the cog
Here is the function:
async def setup(bot: "ModularBot") -> None:
"Load the cog"
await bot.add_cog(Listeners(bot)) | Load the cog |
156,474 | import logging
from dataclasses import dataclass, field
from dataclasses_json.api import DataClassJsonMixin
from diffusers.schedulers.scheduling_utils import KarrasDiffusionSchedulers
logger = logging.getLogger(__name__)
class Config(DataClassJsonMixin):
"Dataclass that will store the configuration for the bot"
supported_models: dict[str, str] = field(default_factory=dict)
prefix: str = "!"
extra_prompt: str = ""
extra_negative_prompt: str = ""
max_width: int = 1920
max_height: int = 1920
max_count: int = 4
max_steps: int = 50
default_width: int = 512
default_height: int = 512
default_count: int = 1
default_steps: int = 30
default_scheduler: KarrasDiffusionSchedulers = (
KarrasDiffusionSchedulers.DPMSolverMultistepScheduler
)
default_cfg: float = 7.0
default_verbose: bool = False
def save_config(config: Config):
"Save the configuration to a file"
logger.info("Saving configuration to data/bot.json")
with open("data/bot.json", "w", encoding="utf-8") as f:
f.write(config.to_json(ensure_ascii=False, indent=4))
The provided code snippet includes necessary dependencies for implementing the `load_config` function. Write a Python function `def load_config()` to solve the following problem:
Load the configuration from a file
Here is the function:
def load_config():
"Load the configuration from a file"
logger.info("Loading configuration from data/bot.json")
try:
with open("data/bot.json", "r", encoding="utf-8") as f:
config = Config.from_json(f.read())
logger.info("Configuration loaded from data/bot.json")
return config
except FileNotFoundError:
logger.info("data/bot.json not found, creating a new one")
config = Config()
save_config(config)
logger.info("Configuration saved to data/bot.json")
return config | Load the configuration from a file |
156,475 | from typing import TYPE_CHECKING, Dict
import discord
from aiohttp import ClientSession
from discord.ext import commands
from discord.ext.commands import Cog, Context
from core import shared
class Hardware(Cog):
"Hardware commands"
def __init__(self, bot: "ModularBot"):
self.bot = bot
async def gpus(self, ctx: Context):
"List all available GPUs"
async with ClientSession() as session:
async with session.get(
f"http://localhost:{shared.api_port}/api/hardware/gpus"
) as resp:
status = resp.status
data: Dict[str, Dict] = await resp.json()
if status != 200:
await ctx.send("Something went wrong")
return
embed = discord.Embed(title="GPUs", color=0x00FF00)
for i, gpu in data.items():
embed.add_field(
name=f"GPU {i}",
value=(
f"Name: {gpu['name']}\n" f"Total memory: {gpu['total_memory']}\n"
),
)
await ctx.send(embed=embed)
async def clean(self, ctx: Context):
"Free the memory manually"
await ctx.defer()
async with ClientSession() as session:
async with session.post(
f"http://localhost:{shared.api_port}/api/models/memory-cleanup"
) as resp:
status = resp.status
if status != 200:
await ctx.send("Something went wrong")
return
await ctx.send("Cleaned up the memory")
The provided code snippet includes necessary dependencies for implementing the `setup` function. Write a Python function `async def setup(bot: "ModularBot")` to solve the following problem:
Will be loaded by bot
Here is the function:
async def setup(bot: "ModularBot"):
"Will be loaded by bot"
await bot.add_cog(Hardware(bot)) | Will be loaded by bot |
156,476 | import logging
import random
import re
from io import BytesIO
from typing import TYPE_CHECKING, Optional
from uuid import uuid4
import discord
import requests
from diffusers.schedulers.scheduling_utils import KarrasDiffusionSchedulers
from discord import File
from discord.ext import commands
from discord.ext.commands import Cog, Context
from PIL import Image
from bot.helper import find_closest_model, inference_call
from bot.shared import config
from core.utils import convert_base64_to_bytes, convert_image_to_base64
class Image2Image(Cog):
"Commands for generating images from text"
def __init__(self, bot: "ModularBot") -> None:
self.bot = bot
async def dream_unsupported(
self,
ctx: Context,
prompt: str,
model: str,
negative_prompt: str = "",
guidance_scale: float = config.default_cfg,
steps: int = config.default_steps,
width: int = config.default_width,
height: int = config.default_height,
count: int = config.default_count,
seed: Optional[int] = None,
scheduler: KarrasDiffusionSchedulers = config.default_scheduler,
verbose: bool = config.default_verbose,
):
"Generate an image from prompt"
print(ctx.message.attachments)
input_image = ctx.message.attachments[0].url
init_image = Image.open(
BytesIO(requests.get(input_image, timeout=10).content)
).convert("RGB")
init_image_byte64 = convert_image_to_base64(init_image)
if seed is None:
seed = random.randint(0, 1000000)
if config.max_width < width or config.max_height < height:
return await ctx.send(
f"Image size is too big, maximum size is {config.max_width}x{config.max_height}"
)
if config.max_count < count:
return await ctx.send(
f"Image count is too big, maximum count is {config.max_count}"
)
prompt = prompt + config.extra_prompt
negative_prompt = negative_prompt + config.extra_negative_prompt
try:
model = await find_closest_model(model)
except IndexError:
await ctx.send(f"No loaded model that is close to `{model}` found")
return
payload = {
"data": {
"prompt": prompt,
"id": uuid4().hex,
"negative_prompt": negative_prompt,
"image": init_image_byte64,
"width": width,
"height": height,
"steps": steps,
"guidance_scale": guidance_scale,
"seed": seed,
"batch_size": 1,
"batch_count": count,
"scheduler": scheduler.value,
},
"model": model,
"save_image": False,
}
message = await ctx.send(f"Generating image with `{model}`...")
try:
status, response = await inference_call(payload=payload, target="img2img")
except Exception as e:
raise e
if status == 200:
if verbose:
embed = discord.Embed(
color=discord.Color.green(),
)
embed.add_field(name="Seed", value=seed)
embed.add_field(name="Time", value=f"{response.get('time'):.2f}s")
embed.add_field(name="Model", value=model)
embed.add_field(name="Negative Prompt", value=negative_prompt)
embed.add_field(name="Guidance Scale", value=guidance_scale)
embed.add_field(name="Steps", value=steps)
embed.add_field(name="Width", value=width)
embed.add_field(name="Height", value=height)
await message.edit(embed=embed)
await message.edit(
content=f"{ctx.author.mention} - **{prompt}**, Time: {response.get('time'):.2f}s, Seed: {seed}"
)
file_array = [
File(
convert_base64_to_bytes(re.sub(pattern, "", x)),
filename=f"{seed}.png",
)
for x in response["images"]
]
await message.add_files(*file_array[len(file_array) - count :])
else:
if response.get("detail"):
await message.edit(
content=f"{ctx.author.mention} Dream failed - **{response.get('detail')}**"
)
else:
await message.edit(
content=f"{ctx.author.mention} Dream failed - {status}"
)
logger.info(f"Finished task {prompt} for {str(ctx.author)}")
The provided code snippet includes necessary dependencies for implementing the `setup` function. Write a Python function `async def setup(bot: "ModularBot")` to solve the following problem:
Will be called by the bot
Here is the function:
async def setup(bot: "ModularBot"):
"Will be called by the bot"
await bot.add_cog(Image2Image(bot)) | Will be called by the bot |
156,477 | import asyncio
import difflib
from typing import Any, Dict, List, Literal
import aiohttp
from aiohttp import ClientSession
from bot import shared as shared_bot
The provided code snippet includes necessary dependencies for implementing the `find_closest_model` function. Write a Python function `async def find_closest_model(model: str)` to solve the following problem:
Find the closest model to the one provided
Here is the function:
async def find_closest_model(model: str):
"""Find the closest model to the one provided"""
models, _ = await shared_bot.models.cached_loaded_models()
return difflib.get_close_matches(model, models, n=1, cutoff=0.1)[0] | Find the closest model to the one provided |
156,478 | import asyncio
import difflib
from typing import Any, Dict, List, Literal
import aiohttp
from aiohttp import ClientSession
from bot import shared as shared_bot
The provided code snippet includes necessary dependencies for implementing the `inference_call` function. Write a Python function `async def inference_call( payload: Dict, target: Literal["txt2img", "img2img"] = "txt2img" )` to solve the following problem:
Call to the backend to generate an image
Here is the function:
async def inference_call(
payload: Dict, target: Literal["txt2img", "img2img"] = "txt2img"
):
"Call to the backend to generate an image"
from core import shared
async def call():
async with aiohttp.ClientSession() as session:
async with session.post(
f"http://localhost:{shared.api_port}/api/generate/{target}",
json=payload,
) as response:
status = response.status
response = await response.json()
return status, response
try:
status, response = await call()
except aiohttp.ClientOSError:
await asyncio.sleep(0.5)
status, response = await call()
return status, response | Call to the backend to generate an image |
156,479 | import asyncio
import difflib
from typing import Any, Dict, List, Literal
import aiohttp
from aiohttp import ClientSession
from bot import shared as shared_bot
The provided code snippet includes necessary dependencies for implementing the `get_available_models` function. Write a Python function `async def get_available_models()` to solve the following problem:
List all available models
Here is the function:
async def get_available_models():
"List all available models"
from core import shared
async with ClientSession() as session:
async with session.get(
f"http://localhost:{shared.api_port}/api/models/available"
) as response:
status = response.status
data: List[Dict[str, Any]] = await response.json()
models = [
i["name"]
for i in filter(
lambda model: (
model["valid"] is True
and (
model["backend"] == "PyTorch"
or model["backend"] == "AITemplate"
)
),
data,
)
]
return models, status | List all available models |
156,480 | import asyncio
import difflib
from typing import Any, Dict, List, Literal
import aiohttp
from aiohttp import ClientSession
from bot import shared as shared_bot
The provided code snippet includes necessary dependencies for implementing the `get_loaded_models` function. Write a Python function `async def get_loaded_models()` to solve the following problem:
List all available models
Here is the function:
async def get_loaded_models():
"List all available models"
from core import shared
async with ClientSession() as session:
async with session.get(
f"http://localhost:{shared.api_port}/api/models/loaded"
) as response:
status = response.status
data: List[Dict[str, Any]] = await response.json()
models = [
i["name"]
for i in filter(
lambda model: (
model["valid"] is True
and (
model["backend"] == "PyTorch"
or model["backend"] == "AITemplate"
)
),
data,
)
]
return models, status | List all available models |
156,481 | import json
import struct
from argparse import ArgumentParser
from pathlib import Path
from typing import Union
def parse(file: Path) -> Union[dict, list]:
if file.suffix == ".safetensors":
with open(file, "rb") as f:
ff = struct.unpack("<Q", f.read(8))[0]
return json.loads(f.read(ff))
elif file.suffix == ".ckpt":
# We will need to load the whole model into memory
import torch
state_dict = torch.load(file)["state_dict"]
all_keys = []
for item in state_dict.items():
all_keys.append(item[0])
return all_keys
elif file.is_dir():
if not (file / "model_index.json").exists():
raise ValueError("Unparseable folder, missing model_index.json")
with open(file / "model_index.json", "r") as f:
return json.loads(f.read())
else:
raise ValueError("Unparseable file") | null |
156,482 | import argparse
import os.path as osp
import torch
from safetensors.torch import save_file
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
("time_embed.0.weight", "time_embedding.linear_1.weight"),
("time_embed.0.bias", "time_embedding.linear_1.bias"),
("time_embed.2.weight", "time_embedding.linear_2.weight"),
("time_embed.2.bias", "time_embedding.linear_2.bias"),
("input_blocks.0.0.weight", "conv_in.weight"),
("input_blocks.0.0.bias", "conv_in.bias"),
("out.0.weight", "conv_norm_out.weight"),
("out.0.bias", "conv_norm_out.bias"),
("out.2.weight", "conv_out.weight"),
("out.2.bias", "conv_out.bias"),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
("in_layers.0", "norm1"),
("in_layers.2", "conv1"),
("out_layers.0", "norm2"),
("out_layers.3", "conv2"),
("emb_layers.1", "time_emb_proj"),
("skip_connection", "conv_shortcut"),
]
unet_conversion_map_layer = []
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
def convert_unet_state_dict(unet_state_dict):
# buyer beware: this is a *brittle* function,
# and correct output requires that all of these pieces interact in
# the exact order in which I have arranged them.
mapping = {k: k for k in unet_state_dict.keys()}
for sd_name, hf_name in unet_conversion_map:
mapping[hf_name] = sd_name
for k, v in mapping.items():
if "resnets" in k:
for sd_part, hf_part in unet_conversion_map_resnet:
v = v.replace(hf_part, sd_part)
mapping[k] = v
for k, v in mapping.items():
for sd_part, hf_part in unet_conversion_map_layer:
v = v.replace(hf_part, sd_part)
mapping[k] = v
new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
return new_state_dict | null |
156,483 | import argparse
import os.path as osp
import torch
from safetensors.torch import save_file
vae_conversion_map = [
# (stable-diffusion, HF Diffusers)
("nin_shortcut", "conv_shortcut"),
("norm_out", "conv_norm_out"),
("mid.attn_1.", "mid_block.attentions.0."),
]
vae_conversion_map_attn = [
# (stable-diffusion, HF Diffusers)
("norm.", "group_norm."),
("q.", "query."),
("k.", "key."),
("v.", "value."),
("proj_out.", "proj_attn."),
# Required for SonicDiffusion
("k.", "to_k."),
("q.", "to_q."),
("v.", "to_v."),
]
def reshape_weight_for_sd(w):
# convert HF linear weights to SD conv2d weights
return w.reshape(*w.shape, 1, 1)
def convert_vae_state_dict(vae_state_dict):
mapping = {k: k for k in vae_state_dict.keys()}
for k, v in mapping.items():
for sd_part, hf_part in vae_conversion_map:
v = v.replace(hf_part, sd_part)
mapping[k] = v
for k, v in mapping.items():
if "attentions" in k:
for sd_part, hf_part in vae_conversion_map_attn:
v = v.replace(hf_part, sd_part)
mapping[k] = v
new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
weights_to_convert = ["q", "k", "v", "proj_out"]
for k, v in new_state_dict.items():
for weight_name in weights_to_convert:
if f"mid.attn_1.{weight_name}.weight" in k:
print(f"Reshaping {k} for SD format")
new_state_dict[k] = reshape_weight_for_sd(v)
return new_state_dict | null |
156,484 | import argparse
import os.path as osp
import torch
from safetensors.torch import save_file
def convert_text_enc_state_dict(text_enc_dict):
return text_enc_dict | null |
156,485 | import re
from pathlib import Path
import jaconv
import torch
from PIL import Image
from loguru import logger
from transformers import AutoFeatureExtractor, AutoTokenizer, VisionEncoderDecoderModel
def post_process(text):
text = ''.join(text.split())
text = text.replace('…', '...')
text = re.sub('[・.]{2,}', lambda x: (x.end() - x.start()) * '.', text)
text = jaconv.h2z(text, ascii=True, digit=True)
return text | null |
156,486 | import sys
import time
from pathlib import Path
import fire
import numpy as np
import pyperclip
from PIL import Image
from PIL import UnidentifiedImageError
from loguru import logger
from manga_ocr import MangaOcr
def are_images_identical(img1, img2):
if None in (img1, img2):
return img1 == img2
img1 = np.array(img1)
img2 = np.array(img2)
return (img1.shape == img2.shape) and (img1 == img2).all()
def process_and_write_results(mocr, img_or_path, write_to):
t0 = time.time()
text = mocr(img_or_path)
t1 = time.time()
logger.info(f'Text recognized in {t1 - t0:0.03f} s: {text}')
if write_to == 'clipboard':
pyperclip.copy(text)
else:
write_to = Path(write_to)
if write_to.suffix != '.txt':
raise ValueError('write_to must be either "clipboard" or a path to a text file')
with write_to.open('a', encoding="utf-8") as f:
f.write(text + '\n')
def get_path_key(path):
return path, path.lstat().st_mtime
The provided code snippet includes necessary dependencies for implementing the `run` function. Write a Python function `def run(read_from='clipboard', write_to='clipboard', pretrained_model_name_or_path='kha-white/manga-ocr-base', force_cpu=False, delay_secs=0.1, verbose=False )` to solve the following problem:
Run OCR in the background, waiting for new images to appear either in system clipboard, or a directory. Recognized texts can be either saved to system clipboard, or appended to a text file. :param read_from: Specifies where to read input images from. Can be either "clipboard", or a path to a directory. :param write_to: Specifies where to save recognized texts to. Can be either "clipboard", or a path to a text file. :param pretrained_model_name_or_path: Path to a trained model, either local or from Transformers' model hub. :param force_cpu: If True, OCR will use CPU even if GPU is available. :param verbose: If True, unhides all warnings. :param delay_secs: How often to check for new images, in seconds.
Here is the function:
def run(read_from='clipboard',
write_to='clipboard',
pretrained_model_name_or_path='kha-white/manga-ocr-base',
force_cpu=False,
delay_secs=0.1,
verbose=False
):
"""
Run OCR in the background, waiting for new images to appear either in system clipboard, or a directory.
Recognized texts can be either saved to system clipboard, or appended to a text file.
:param read_from: Specifies where to read input images from. Can be either "clipboard", or a path to a directory.
:param write_to: Specifies where to save recognized texts to. Can be either "clipboard", or a path to a text file.
:param pretrained_model_name_or_path: Path to a trained model, either local or from Transformers' model hub.
:param force_cpu: If True, OCR will use CPU even if GPU is available.
:param verbose: If True, unhides all warnings.
:param delay_secs: How often to check for new images, in seconds.
"""
mocr = MangaOcr(pretrained_model_name_or_path, force_cpu)
if sys.platform not in ('darwin', 'win32') and write_to == 'clipboard':
# Check if the system is using Wayland
import os
if os.environ.get('WAYLAND_DISPLAY'):
# Check if the wl-clipboard package is installed
if os.system("which wl-copy > /dev/null") == 0:
pyperclip.set_clipboard("wl-clipboard")
else:
msg = 'Your session uses wayland and does not have wl-clipboard installed. ' \
'Install wl-clipboard for write in clipboard to work.'
raise NotImplementedError(msg)
if read_from == 'clipboard':
from PIL import ImageGrab
logger.info('Reading from clipboard')
img = None
while True:
old_img = img
try:
img = ImageGrab.grabclipboard()
except OSError as error:
if not verbose and "cannot identify image file" in str(error):
# Pillow error when clipboard hasn't changed since last grab (Linux)
pass
elif not verbose and "target image/png not available" in str(error):
# Pillow error when clipboard contains text (Linux, X11)
pass
else:
logger.warning('Error while reading from clipboard ({})'.format(error))
else:
if isinstance(img, Image.Image) and not are_images_identical(img, old_img):
process_and_write_results(mocr, img, write_to)
time.sleep(delay_secs)
else:
read_from = Path(read_from)
if not read_from.is_dir():
raise ValueError('read_from must be either "clipboard" or a path to a directory')
logger.info(f'Reading from directory {read_from}')
old_paths = set()
for path in read_from.iterdir():
old_paths.add(get_path_key(path))
while True:
for path in read_from.iterdir():
path_key = get_path_key(path)
if path_key not in old_paths:
old_paths.add(path_key)
try:
img = Image.open(path)
img.load()
except (UnidentifiedImageError, OSError) as e:
logger.warning(f'Error while reading file {path}: {e}')
else:
process_and_write_results(mocr, img, write_to)
time.sleep(delay_secs) | Run OCR in the background, waiting for new images to appear either in system clipboard, or a directory. Recognized texts can be either saved to system clipboard, or appended to a text file. :param read_from: Specifies where to read input images from. Can be either "clipboard", or a path to a directory. :param write_to: Specifies where to save recognized texts to. Can be either "clipboard", or a path to a text file. :param pretrained_model_name_or_path: Path to a trained model, either local or from Transformers' model hub. :param force_cpu: If True, OCR will use CPU even if GPU is available. :param verbose: If True, unhides all warnings. :param delay_secs: How often to check for new images, in seconds. |
156,487 | import xml.etree.ElementTree as ET
from pathlib import Path
import cv2
import pandas as pd
from tqdm import tqdm
from manga_ocr_dev.env import MANGA109_ROOT
def get_books():
root = MANGA109_ROOT / 'Manga109s_released_2021_02_28'
books = (root / 'books.txt').read_text().splitlines()
books = pd.DataFrame({
'book': books,
'annotations': [str(root / 'annotations' / f'{book}.xml') for book in books],
'images': [str(root / 'images' / book) for book in books],
})
return books
MANGA109_ROOT = Path('~/data/manga/Manga109s').expanduser()
def export_frames():
books = get_books()
data = []
for book in tqdm(books.itertuples(), total=len(books)):
tree = ET.parse(book.annotations)
root = tree.getroot()
for page in root.findall('./pages/page'):
for frame in page.findall('./frame'):
row = {}
row['book'] = book.book
row['page_index'] = int(page.attrib['index'])
row['page_path'] = str(Path(book.images) / f'{row["page_index"]:03d}.jpg')
row['page_width'] = int(page.attrib['width'])
row['page_height'] = int(page.attrib['height'])
row['id'] = frame.attrib['id']
row['xmin'] = int(frame.attrib['xmin'])
row['ymin'] = int(frame.attrib['ymin'])
row['xmax'] = int(frame.attrib['xmax'])
row['ymax'] = int(frame.attrib['ymax'])
data.append(row)
data = pd.DataFrame(data)
data.page_path = data.page_path.apply(lambda x: '/'.join(Path(x).parts[-4:]))
data.to_csv(MANGA109_ROOT / 'frames.csv', index=False) | null |
156,488 | import xml.etree.ElementTree as ET
from pathlib import Path
import cv2
import pandas as pd
from tqdm import tqdm
from manga_ocr_dev.env import MANGA109_ROOT
def get_books():
root = MANGA109_ROOT / 'Manga109s_released_2021_02_28'
books = (root / 'books.txt').read_text().splitlines()
books = pd.DataFrame({
'book': books,
'annotations': [str(root / 'annotations' / f'{book}.xml') for book in books],
'images': [str(root / 'images' / book) for book in books],
})
return books
MANGA109_ROOT = Path('~/data/manga/Manga109s').expanduser()
def export_crops():
crops_root = MANGA109_ROOT / 'crops'
crops_root.mkdir(parents=True, exist_ok=True)
margin = 10
books = get_books()
data = []
for book in tqdm(books.itertuples(), total=len(books)):
tree = ET.parse(book.annotations)
root = tree.getroot()
for page in root.findall('./pages/page'):
for text in page.findall('./text'):
row = {}
row['book'] = book.book
row['page_index'] = int(page.attrib['index'])
row['page_path'] = str(Path(book.images) / f'{row["page_index"]:03d}.jpg')
row['page_width'] = int(page.attrib['width'])
row['page_height'] = int(page.attrib['height'])
row['id'] = text.attrib['id']
row['text'] = text.text
row['xmin'] = int(text.attrib['xmin'])
row['ymin'] = int(text.attrib['ymin'])
row['xmax'] = int(text.attrib['xmax'])
row['ymax'] = int(text.attrib['ymax'])
data.append(row)
data = pd.DataFrame(data)
n_test = int(0.1 * len(data))
data['split'] = 'train'
data.loc[data.sample(len(data)).iloc[:n_test].index, 'split'] = 'test'
data['crop_path'] = str(crops_root) + '\\' + data.id + '.png'
data.page_path = data.page_path.apply(lambda x: '/'.join(Path(x).parts[-4:]))
data.crop_path = data.crop_path.apply(lambda x: '/'.join(Path(x).parts[-2:]))
data.to_csv(MANGA109_ROOT / 'data.csv', index=False)
for page_path, boxes in tqdm(data.groupby('page_path'), total=data.page_path.nunique()):
img = cv2.imread(str(MANGA109_ROOT / page_path))
for box in boxes.itertuples():
xmin = max(box.xmin - margin, 0)
xmax = min(box.xmax + margin, img.shape[1])
ymin = max(box.ymin - margin, 0)
ymax = min(box.ymax + margin, img.shape[0])
crop = img[ymin:ymax, xmin:xmax]
out_path = (crops_root / box.id).with_suffix('.png')
cv2.imwrite(str(out_path), crop) | null |
156,489 | from pathlib import Path
import cv2
import numpy as np
import pandas as pd
from tqdm import tqdm
from manga_ocr_dev.env import MANGA109_ROOT, BACKGROUND_DIR
def find_rectangle(mask, y, x, aspect_ratio_range=(0.33, 3.0)):
ymin_ = ymax_ = y
xmin_ = xmax_ = x
ymin = ymax = xmin = xmax = None
while True:
if ymin is None:
ymin_ -= 1
if ymin_ == 0 or mask[ymin_, xmin_:xmax_].any():
ymin = ymin_
if ymax is None:
ymax_ += 1
if ymax_ == mask.shape[0] - 1 or mask[ymax_, xmin_:xmax_].any():
ymax = ymax_
if xmin is None:
xmin_ -= 1
if xmin_ == 0 or mask[ymin_:ymax_, xmin_].any():
xmin = xmin_
if xmax is None:
xmax_ += 1
if xmax_ == mask.shape[1] - 1 or mask[ymin_:ymax_, xmax_].any():
xmax = xmax_
h = ymax_ - ymin_
w = xmax_ - xmin_
if h > 1 and w > 1:
ratio = w / h
if ratio < aspect_ratio_range[0] or ratio > aspect_ratio_range[1]:
return ymin_, ymax_, xmin_, xmax_
if None not in (ymin, ymax, xmin, xmax):
return ymin, ymax, xmin, xmax
BACKGROUND_DIR = Path('~/data/manga/Manga109s/background').expanduser()
MANGA109_ROOT = Path('~/data/manga/Manga109s').expanduser()
def generate_backgrounds(crops_per_page=5, min_size=40):
data = pd.read_csv(MANGA109_ROOT / 'data.csv')
frames_df = pd.read_csv(MANGA109_ROOT / 'frames.csv')
BACKGROUND_DIR.mkdir(parents=True, exist_ok=True)
page_paths = data.page_path.unique()
for page_path in tqdm(page_paths):
page = cv2.imread(str(MANGA109_ROOT / page_path))
mask = np.zeros((page.shape[0], page.shape[1]), dtype=bool)
for row in data[data.page_path == page_path].itertuples():
mask[row.ymin:row.ymax, row.xmin:row.xmax] = True
frames_mask = np.zeros((page.shape[0], page.shape[1]), dtype=bool)
for row in frames_df[frames_df.page_path == page_path].itertuples():
frames_mask[row.ymin:row.ymax, row.xmin:row.xmax] = True
mask = mask | ~frames_mask
if mask.all():
continue
unmasked_points = np.stack(np.where(~mask), axis=1)
for i in range(crops_per_page):
p = unmasked_points[np.random.randint(0, unmasked_points.shape[0])]
y, x = p
ymin, ymax, xmin, xmax = find_rectangle(mask, y, x)
crop = page[ymin:ymax, xmin:xmax]
if crop.shape[0] >= min_size and crop.shape[1] >= min_size:
out_filename = '_'.join(
Path(page_path).with_suffix('').parts[-2:]) + f'_{ymin}_{ymax}_{xmin}_{xmax}.png'
cv2.imwrite(str(BACKGROUND_DIR / out_filename), crop) | null |
156,490 | import pandas as pd
import unicodedata
from manga_ocr_dev.env import ASSETS_PATH, FONTS_ROOT
def get_background_df(background_dir):
background_df = []
for path in background_dir.iterdir():
ymin, ymax, xmin, xmax = [int(v) for v in path.stem.split('_')[-4:]]
h = ymax - ymin
w = xmax - xmin
ratio = w / h
background_df.append({
'path': str(path),
'h': h,
'w': w,
'ratio': ratio,
})
background_df = pd.DataFrame(background_df)
return background_df | null |
156,491 | import pandas as pd
import unicodedata
from manga_ocr_dev.env import ASSETS_PATH, FONTS_ROOT
def is_kanji(ch):
return 'CJK UNIFIED IDEOGRAPH' in unicodedata.name(ch) | null |
156,492 | import pandas as pd
import unicodedata
from manga_ocr_dev.env import ASSETS_PATH, FONTS_ROOT
def is_ascii(ch):
return ord(ch) < 128 | null |
156,493 | import pandas as pd
import unicodedata
from manga_ocr_dev.env import ASSETS_PATH, FONTS_ROOT
def is_hiragana(ch):
return 'HIRAGANA' in unicodedata.name(ch)
def is_katakana(ch):
return 'KATAKANA' in unicodedata.name(ch)
ASSETS_PATH = Path(__file__).parent.parent / 'assets'
def get_charsets(vocab_path=None):
if vocab_path is None:
vocab_path = ASSETS_PATH / 'vocab.csv'
vocab = pd.read_csv(vocab_path).char.values
hiragana = vocab[[is_hiragana(c) for c in vocab]][:-6]
katakana = vocab[[is_katakana(c) for c in vocab]][3:]
return vocab, hiragana, katakana | null |
156,494 | import pandas as pd
import unicodedata
from manga_ocr_dev.env import ASSETS_PATH, FONTS_ROOT
ASSETS_PATH = Path(__file__).parent.parent / 'assets'
FONTS_ROOT = Path('~/data/jp_fonts').expanduser()
def get_font_meta():
    """Load font metadata and build a per-font supported-character map.

    :return: (df, font_map) where `df` is the fonts DataFrame with
        `font_path` made absolute under FONTS_ROOT, and `font_map` maps
        each font path to the set of characters it supports (rows with
        missing data are excluded from the map).
    """
    fonts = pd.read_csv(ASSETS_PATH / 'fonts.csv')
    fonts.font_path = fonts.font_path.apply(lambda rel: str(FONTS_ROOT / rel))
    chars_by_font = {}
    for row in fonts.dropna().itertuples():
        chars_by_font[row.font_path] = set(row.supported_chars)
    return fonts, chars_by_font
156,495 | import os
import uuid
import albumentations as A
import cv2
import numpy as np
from html2image import Html2Image
from manga_ocr_dev.env import BACKGROUND_DIR
from manga_ocr_dev.synthetic_data_generator.utils import get_background_df
def crop_by_alpha(img, margin):
    """Crop an RGBA image to the bounding box of its non-transparent
    pixels, then pad every side with a transparent `margin`.

    Fix: numpy slice end indices are exclusive, so the bottom-most /
    right-most non-transparent row and column must be included with +1
    (the original crop silently dropped them).

    :param img: HxWx4 array with the alpha channel at index 3
    :param margin: number of zero pixels added on each side
    :return: cropped and padded array
    """
    y, x = np.where(img[:, :, 3] > 0)
    ymin = y.min()
    ymax = y.max()
    xmin = x.min()
    xmax = x.max()
    # +1: max indices are inclusive, slice ends are exclusive
    img = img[ymin:ymax + 1, xmin:xmax + 1]
    img = np.pad(img, ((margin, margin), (margin, margin), (0, 0)))
    return img
156,496 | import os
import uuid
import albumentations as A
import cv2
import numpy as np
from html2image import Html2Image
from manga_ocr_dev.env import BACKGROUND_DIR
from manga_ocr_dev.synthetic_data_generator.utils import get_background_df
def blend(img, background):
    """Alpha-composite an RGBA image over an RGB background.

    :param img: HxWx4 uint8 image; channel 3 is the alpha mask
    :param background: HxWx3 background of the same spatial size
    :return: HxWx3 uint8 composite
    """
    weights = img[:, :, 3:4].astype(np.float64) / 255
    rgb = img[:, :, :3]
    composite = background * (1 - weights) + rgb * weights
    return composite.astype(np.uint8)
156,497 | import os
import uuid
import albumentations as A
import cv2
import numpy as np
from html2image import Html2Image
from manga_ocr_dev.env import BACKGROUND_DIR
from manga_ocr_dev.synthetic_data_generator.utils import get_background_df
The provided code snippet includes necessary dependencies for implementing the `rounded_rectangle` function. Write a Python function `def rounded_rectangle(src, top_left, bottom_right, radius=1, color=255, thickness=1, line_type=cv2.LINE_AA)` to solve the following problem:
From https://stackoverflow.com/a/60210706
Here is the function:
def rounded_rectangle(src, top_left, bottom_right, radius=1, color=255, thickness=1, line_type=cv2.LINE_AA):
    """Draw a rectangle with rounded corners onto `src` in place.

    From https://stackoverflow.com/a/60210706

    :param src: destination image (modified in place and also returned)
    :param top_left: (x, y) of the top-left corner
    :param bottom_right: (x, y) of the bottom-right corner
    :param radius: corner roundness as a fraction of half the shorter
        side, clamped to at most 1
    :param color: drawing color
    :param thickness: stroke thickness; negative draws a filled shape
    :param line_type: OpenCV line type for the strokes and arcs
    :return: `src`
    """
    # corners:
    # p1 - p2
    # |     |
    # p4 - p3
    p1 = top_left
    p2 = (bottom_right[0], top_left[1])
    p3 = bottom_right
    p4 = (top_left[0], bottom_right[1])
    height = abs(bottom_right[1] - top_left[1])
    width = abs(bottom_right[0] - top_left[0])
    # Clamp roundness and convert it to a pixel radius.
    if radius > 1:
        radius = 1
    corner_radius = int(radius * (min(height, width) / 2))
    if thickness < 0:
        # Filled variant: cover the interior with three axis-aligned
        # rectangles (center plus left/right strips); the rounded corners
        # are filled by the ellipse arcs drawn below.
        # big rect
        top_left_main_rect = (int(p1[0] + corner_radius), int(p1[1]))
        bottom_right_main_rect = (int(p3[0] - corner_radius), int(p3[1]))
        top_left_rect_left = (p1[0], p1[1] + corner_radius)
        bottom_right_rect_left = (p4[0] + corner_radius, p4[1] - corner_radius)
        top_left_rect_right = (p2[0] - corner_radius, p2[1] + corner_radius)
        bottom_right_rect_right = (p3[0], p3[1] - corner_radius)
        all_rects = [
            [top_left_main_rect, bottom_right_main_rect],
            [top_left_rect_left, bottom_right_rect_left],
            [top_left_rect_right, bottom_right_rect_right]]
        [cv2.rectangle(src, rect[0], rect[1], color, thickness) for rect in all_rects]
    # draw straight lines (the four edges, shortened by the corner radius)
    cv2.line(src, (p1[0] + corner_radius, p1[1]), (p2[0] - corner_radius, p2[1]), color, abs(thickness), line_type)
    cv2.line(src, (p2[0], p2[1] + corner_radius), (p3[0], p3[1] - corner_radius), color, abs(thickness), line_type)
    cv2.line(src, (p3[0] - corner_radius, p4[1]), (p4[0] + corner_radius, p3[1]), color, abs(thickness), line_type)
    cv2.line(src, (p4[0], p4[1] - corner_radius), (p1[0], p1[1] + corner_radius), color, abs(thickness), line_type)
    # draw arcs (quarter ellipses at each corner; filled when thickness < 0)
    cv2.ellipse(src, (p1[0] + corner_radius, p1[1] + corner_radius), (corner_radius, corner_radius), 180.0, 0, 90,
                color, thickness, line_type)
    cv2.ellipse(src, (p2[0] - corner_radius, p2[1] + corner_radius), (corner_radius, corner_radius), 270.0, 0, 90,
                color, thickness, line_type)
    cv2.ellipse(src, (p3[0] - corner_radius, p3[1] - corner_radius), (corner_radius, corner_radius), 0.0, 0, 90, color,
                thickness, line_type)
    cv2.ellipse(src, (p4[0] + corner_radius, p4[1] - corner_radius), (corner_radius, corner_radius), 90.0, 0, 90, color,
                thickness, line_type)
    return src
156,498 | import os
import uuid
import albumentations as A
import cv2
import numpy as np
from html2image import Html2Image
from manga_ocr_dev.env import BACKGROUND_DIR
from manga_ocr_dev.synthetic_data_generator.utils import get_background_df
def get_css(
        font_size,
        font_path,
        vertical=True,
        background_color='white',
        text_color='black',
        shadow_size=0,
        shadow_color='black',
        stroke_size=0,
        stroke_color='black',
        letter_spacing=None,
        line_height=0.5,
        text_orientation=None,
):
    """Build the CSS used to render synthetic manga text in a browser.

    Returns an ``@font-face`` rule pointing at `font_path` (backslashes
    normalized to forward slashes) followed by a ``body`` rule carrying
    all typography settings.
    """
    rules = [
        f"background-color: {background_color};",
        f"font-size: {font_size}px;",
        f"color: {text_color};",
        "font-family: custom;",
        f"line-height: {line_height};",
        "margin: 20px;",
    ]

    if text_orientation:
        rules.append(f"text-orientation: {text_orientation};")

    if vertical:
        rules.append("writing-mode: vertical-rl;")

    if shadow_size > 0:
        rules.append(f"text-shadow: 0 0 {shadow_size}px {shadow_color};")

    if stroke_size > 0:
        # No portable text-stroke property, so fake a stroke by stacking
        # many copies of the same blurred shadow.
        shadow_copies = [f"0 0 {stroke_size}px {stroke_color}"] * 10 * stroke_size
        rules.append("text-shadow: " + ','.join(shadow_copies) + ";")
        rules.append("-webkit-font-smoothing: antialiased;")

    if letter_spacing:
        rules.append(f"letter-spacing: {letter_spacing}em;")

    normalized_font_path = font_path.replace('\\', '/')

    font_face = (
        '\n@font-face {\nfont-family: custom;\nsrc: url("'
        + normalized_font_path
        + '");\n}\n'
    )
    body_rule = "body {\n" + '\n'.join(rules) + "\n}"
    return font_face + body_rule
156,499 | import traceback
from pathlib import Path
import cv2
import fire
import pandas as pd
from tqdm.contrib.concurrent import thread_map
from manga_ocr_dev.env import FONTS_ROOT, DATA_SYNTHETIC_ROOT
from manga_ocr_dev.synthetic_data_generator.generator import SyntheticDataGenerator
def f(args):
    """Worker for thread_map: render one synthetic sample to disk.

    `args` is ``(index, source, id_, text)``. Relies on the module-level
    `generator` (SyntheticDataGenerator) and `OUT_DIR` globals that
    `run()` sets up before the pool starts.

    Returns a ``(source, id, text_gt, vertical, font_path)`` tuple, or
    None implicitly when rendering fails — the traceback is printed and
    the sample is dropped (best-effort by design).
    """
    try:
        i, source, id_, text = args
        filename = f'{id_}.jpg'
        # Render the text; the generator may substitute its own text
        # (e.g. for random samples), hence text_gt is returned.
        img, text_gt, params = generator.process(text)
        cv2.imwrite(str(OUT_DIR / filename), img)
        # Store the font path relative to the fonts root for portability.
        font_path = Path(params['font_path']).relative_to(FONTS_ROOT)
        ret = source, id_, text_gt, params['vertical'], str(font_path)
        return ret
    except Exception as e:
        print(traceback.format_exc())
DATA_SYNTHETIC_ROOT = Path('~/data/manga/synthetic').expanduser()
The provided code snippet includes necessary dependencies for implementing the `run` function. Write a Python function `def run(package=0, n_random=1000, n_limit=None, max_workers=16)` to solve the following problem:
:param package: number of data package to generate :param n_random: how many samples with random text to generate :param n_limit: limit number of generated samples (for debugging) :param max_workers: max number of workers
Here is the function:
def run(package=0, n_random=1000, n_limit=None, max_workers=16):
    """
    Generate one package of synthetic OCR training data.

    Reads the package's lines CSV, appends `n_random` rows with no text
    (rendered with generator-chosen random text), renders every row via
    the module-level worker `f`, and writes the package's metadata CSV.
    Publishes the module-level `OUT_DIR` global consumed by `f`.

    :param package: number of data package to generate
    :param n_random: how many samples with random text to generate
    :param n_limit: limit number of generated samples (for debugging)
    :param max_workers: max number of workers
    """
    # Packages are identified by a zero-padded 4-digit id.
    package = f'{package:04d}'
    lines = pd.read_csv(DATA_SYNTHETIC_ROOT / f'lines/{package}.csv')

    # line=None rows ask the generator to synthesize random text.
    random_lines = pd.DataFrame({
        'source': 'random',
        'id': [f'random_{package}_{i}' for i in range(n_random)],
        'line': None
    })
    lines = pd.concat([lines, random_lines], ignore_index=True)

    if n_limit:
        lines = lines.sample(n_limit)

    args = [(i, *values) for i, values in enumerate(lines.values)]

    # Worker threads read OUT_DIR as a global, so set it before mapping.
    global OUT_DIR
    OUT_DIR = DATA_SYNTHETIC_ROOT / 'img' / package
    OUT_DIR.mkdir(parents=True, exist_ok=True)
    data = thread_map(f, args, max_workers=max_workers, desc=f'Processing package {package}')
    # NOTE(review): failed samples come back as None rows from `f`.
    data = pd.DataFrame(data, columns=['source', 'id', 'text', 'vertical', 'font_path'])
    meta_path = DATA_SYNTHETIC_ROOT / f'meta/{package}.csv'
    meta_path.parent.mkdir(parents=True, exist_ok=True)
    data.to_csv(meta_path, index=False)
156,500 | import PIL
import numpy as np
import pandas as pd
from PIL import ImageDraw, ImageFont
from fontTools.ttLib import TTFont
from tqdm.contrib.concurrent import process_map
from manga_ocr_dev.env import ASSETS_PATH, FONTS_ROOT
vocab = pd.read_csv(ASSETS_PATH / 'vocab.csv').char.values
def has_glyph(font, glyph):
    """Return True if `font` (a fontTools TTFont) maps `glyph`'s code
    point in any of its cmap subtables."""
    code_point = ord(glyph)
    return any(code_point in table.cmap for table in font['cmap'].tables)
The provided code snippet includes necessary dependencies for implementing the `process` function. Write a Python function `def process(font_path)` to solve the following problem:
Get supported characters list for a given font. Font metadata is not always reliable, so try to render each character and see if anything shows up. Still not perfect, because sometimes unsupported characters show up as rectangles.
Here is the function:
def process(font_path):
    """
    Get supported characters list for a given font.
    Font metadata is not always reliable, so try to render each character and see if anything shows up.
    Still not perfect, because sometimes unsupported characters show up as rectangles.

    :param font_path: path to a TTF/OTF font file
    :return: string of characters from the module-level `vocab` that the
        font both maps in its cmap and visibly renders; '' on any error
    """
    try:
        font_path = str(font_path)
        ttfont = TTFont(font_path)
        pil_font = ImageFont.truetype(font_path, 24)

        supported_chars = []
        for char in vocab:
            # Skip characters the font's cmap does not even map.
            if not has_glyph(ttfont, char):
                continue
            # Render the glyph on a white canvas and check whether any
            # non-white pixel was actually drawn.
            image = PIL.Image.new('L', (40, 40), 255)
            draw = ImageDraw.Draw(image)
            draw.text((10, 0), char, 0, font=pil_font)
            if (np.array(image) != 255).sum() == 0:
                continue
            supported_chars.append(char)

        supported_chars = ''.join(supported_chars)
    except Exception as e:
        # Best-effort: report and treat the font as supporting nothing.
        print(f'Error while processing {font_path}: {e}')
        supported_chars = ''

    return supported_chars
156,501 | import fire
import wandb
from transformers import Seq2SeqTrainer, Seq2SeqTrainingArguments, default_data_collator
from manga_ocr_dev.env import TRAIN_ROOT
from manga_ocr_dev.training.dataset import MangaDataset
from manga_ocr_dev.training.get_model import get_model
from manga_ocr_dev.training.metrics import Metrics
TRAIN_ROOT = Path('~/data/manga/out').expanduser()
class MangaDataset(Dataset):
    """Mixed synthetic + Manga109 OCR dataset for TrOCR-style training.

    Yields dicts with ``pixel_values`` (encoder input) and ``labels``
    (tokenized target text with PAD positions replaced by -100 so the
    loss ignores them).
    """

    def __init__(self, processor, split, max_target_length, limit_size=None, augment=False, skip_packages=None):
        """
        :param processor: TrOCR processor (feature extractor + tokenizer)
        :param split: 'train' or 'test' split for the Manga109 portion
        :param max_target_length: max token length of the label sequence
        :param limit_size: optional cap on dataset size (debugging)
        :param augment: enable random image augmentations in __getitem__
        :param skip_packages: iterable of synthetic package numbers to skip
        """
        self.processor = processor
        self.max_target_length = max_target_length

        data = []

        print(f'Initializing dataset {split}...')

        if skip_packages is None:
            skip_packages = set()
        else:
            # Package dirs/CSVs are named with zero-padded 4-digit ids.
            skip_packages = {f'{x:04d}' for x in skip_packages}

        # Synthetic packages: one metadata CSV per package, images beside it.
        for path in sorted((DATA_SYNTHETIC_ROOT / 'meta').glob('*.csv')):
            if path.stem in skip_packages:
                print(f'Skipping package {path}')
                continue
            if not (DATA_SYNTHETIC_ROOT / 'img' / path.stem).is_dir():
                print(f'Missing image data for package {path}, skipping')
                continue
            df = pd.read_csv(path)
            df = df.dropna()
            df['path'] = df.id.apply(lambda x: str(DATA_SYNTHETIC_ROOT / 'img' / path.stem / f'{x}.jpg'))
            df = df[['path', 'text']]
            df['synthetic'] = True
            data.append(df)

        # Manga109 crops for the requested split.
        df = pd.read_csv(MANGA109_ROOT / 'data.csv')
        df = df[df.split == split].reset_index(drop=True)
        df['path'] = df.crop_path.apply(lambda x: str(MANGA109_ROOT / x))
        df = df[['path', 'text']]
        df['synthetic'] = False
        data.append(df)

        data = pd.concat(data, ignore_index=True)

        if limit_size:
            data = data.iloc[:limit_size]
        self.data = data

        print(f'Dataset {split}: {len(self.data)}')

        self.augment = augment
        self.transform_medium, self.transform_heavy = self.get_transforms()

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        sample = self.data.loc[idx]
        text = sample.text

        if self.augment:
            # Mostly medium augmentation, rarely heavy, sometimes none.
            medium_p = 0.8
            heavy_p = 0.02
            transform_variant = np.random.choice(['none', 'medium', 'heavy'],
                                                 p=[1 - medium_p - heavy_p, medium_p, heavy_p])
            transform = {
                'none': None,
                'medium': self.transform_medium,
                'heavy': self.transform_heavy,
            }[transform_variant]
        else:
            transform = None

        pixel_values = self.read_image(self.processor, sample.path, transform)
        labels = self.processor.tokenizer(text,
                                          padding="max_length",
                                          max_length=self.max_target_length,
                                          truncation=True).input_ids
        labels = np.array(labels)
        # important: make sure that PAD tokens are ignored by the loss function
        labels[labels == self.processor.tokenizer.pad_token_id] = -100

        encoding = {
            "pixel_values": pixel_values,
            "labels": torch.tensor(labels),
        }
        return encoding

    # NOTE(review): called as self.read_image(processor, ...) yet takes no
    # `self` — presumably a @staticmethod whose decorator was lost in
    # extraction; confirm against the original file.
    def read_image(processor, path, transform=None):
        """Load an image, apply an optional albumentations transform
        (grayscale conversion by default), and run it through the
        processor's feature extractor; returns a squeezed tensor."""
        img = cv2.imread(str(path))

        if transform is None:
            transform = A.ToGray(always_apply=True)

        img = transform(image=img)['image']
        pixel_values = processor(img, return_tensors="pt").pixel_values
        return pixel_values.squeeze()

    # NOTE(review): also invoked as self.get_transforms() with no `self` —
    # presumably a @staticmethod as well.
    def get_transforms():
        """Build the medium/heavy albumentations pipelines used during
        training; both end by converting the image to grayscale."""
        t_medium = A.Compose([
            A.Rotate(5, border_mode=cv2.BORDER_REPLICATE, p=0.2),
            A.Perspective((0.01, 0.05), pad_mode=cv2.BORDER_REPLICATE, p=0.2),
            A.InvertImg(p=0.05),

            A.OneOf([
                A.Downscale(0.25, 0.5, interpolation=cv2.INTER_LINEAR),
                A.Downscale(0.25, 0.5, interpolation=cv2.INTER_NEAREST),
            ], p=0.1),
            A.Blur(p=0.2),
            A.Sharpen(p=0.2),
            A.RandomBrightnessContrast(p=0.5),
            A.GaussNoise((50, 200), p=0.3),
            A.ImageCompression(0, 30, p=0.1),
            A.ToGray(always_apply=True),
        ])

        t_heavy = A.Compose([
            A.Rotate(10, border_mode=cv2.BORDER_REPLICATE, p=0.2),
            A.Perspective((0.01, 0.05), pad_mode=cv2.BORDER_REPLICATE, p=0.2),
            A.InvertImg(p=0.05),

            A.OneOf([
                A.Downscale(0.1, 0.2, interpolation=cv2.INTER_LINEAR),
                A.Downscale(0.1, 0.2, interpolation=cv2.INTER_NEAREST),
            ], p=0.1),
            A.Blur((4, 9), p=0.5),
            A.Sharpen(p=0.5),
            A.RandomBrightnessContrast(0.8, 0.8, p=1),
            A.GaussNoise((1000, 10000), p=0.3),
            A.ImageCompression(0, 10, p=0.5),
            A.ToGray(always_apply=True),
        ])

        return t_medium, t_heavy
def get_model(encoder_name, decoder_name, max_length, num_decoder_layers=None):
    """Assemble an untrained VisionEncoderDecoder OCR model.

    Both parts are built from pretrained *configs* only (`from_config`),
    so no pretrained weights are loaded here.

    :param encoder_name: HF model id for the vision encoder config
    :param decoder_name: HF model id for the text decoder config
    :param max_length: maximum generated sequence length
    :param num_decoder_layers: if set, keep only the last N decoder layers
    :return: (model, processor)
    """
    encoder_config = AutoConfig.from_pretrained(encoder_name)
    encoder_config.is_decoder = False
    encoder_config.add_cross_attention = False
    encoder = AutoModel.from_config(encoder_config)

    decoder_config = AutoConfig.from_pretrained(decoder_name)
    decoder_config.max_length = max_length
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True
    decoder = AutoModelForCausalLM.from_config(decoder_config)

    if num_decoder_layers is not None:
        # Truncate to the last N layers; the layer list lives in a
        # backbone-specific attribute.
        if decoder_config.model_type == 'bert':
            decoder.bert.encoder.layer = decoder.bert.encoder.layer[-num_decoder_layers:]
        elif decoder_config.model_type in ('roberta', 'xlm-roberta'):
            decoder.roberta.encoder.layer = decoder.roberta.encoder.layer[-num_decoder_layers:]
        else:
            raise ValueError(f'Unsupported model_type: {decoder_config.model_type}')
        decoder_config.num_hidden_layers = num_decoder_layers

    config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config)
    config.tie_word_embeddings = False
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder, config=config)

    # `get_processor` is defined elsewhere in this module.
    processor = get_processor(encoder_name, decoder_name)

    # set special tokens used for creating the decoder_input_ids from the labels
    model.config.decoder_start_token_id = processor.tokenizer.cls_token_id
    model.config.pad_token_id = processor.tokenizer.pad_token_id
    # make sure vocab size is set correctly
    model.config.vocab_size = model.config.decoder.vocab_size

    # set beam search parameters
    model.config.eos_token_id = processor.tokenizer.sep_token_id
    model.config.max_length = max_length
    model.config.early_stopping = True
    model.config.no_repeat_ngram_size = 3
    model.config.length_penalty = 2.0
    model.config.num_beams = 4
    return model, processor
class Metrics:
    """Computes CER and exact-match accuracy for OCR seq2seq predictions."""

    def __init__(self, processor):
        # `load_metric` comes from the `datasets` library (imported elsewhere).
        self.cer_metric = load_metric("cer")
        self.processor = processor

    def compute_metrics(self, pred):
        """HF Trainer hook: decode predictions/labels and score them.

        :param pred: EvalPrediction with `label_ids` and `predictions`
        :return: dict with keys 'cer' and 'accuracy'
        """
        label_ids = pred.label_ids
        pred_ids = pred.predictions

        print(label_ids.shape, pred_ids.shape)
        pred_str = self.processor.batch_decode(pred_ids, skip_special_tokens=True)
        # Restore PAD ids (-100 masked the loss) before decoding.
        label_ids[label_ids == -100] = self.processor.tokenizer.pad_token_id
        label_str = self.processor.batch_decode(label_ids, skip_special_tokens=True)

        # Compare whitespace-insensitively.
        pred_str = np.array([''.join(text.split()) for text in pred_str])
        label_str = np.array([''.join(text.split()) for text in label_str])

        results = {}

        try:
            results['cer'] = self.cer_metric.compute(predictions=pred_str, references=label_str)
        except Exception as e:
            # CER can fail on degenerate outputs (e.g. empty strings);
            # log the offending batch and report 0 instead of aborting.
            print(e)
            print(pred_str)
            print(label_str)
            results['cer'] = 0
        results['accuracy'] = (pred_str == label_str).mean()

        return results
def run(
        run_name='debug',
        encoder_name='facebook/deit-tiny-patch16-224',
        decoder_name='cl-tohoku/bert-base-japanese-char-v2',
        max_len=300,
        num_decoder_layers=2,
        batch_size=64,
        num_epochs=8,
        fp16=True,
):
    """Train the manga OCR model and log the run to Weights & Biases.

    :param run_name: wandb run name
    :param encoder_name: vision encoder config id (see get_model)
    :param decoder_name: text decoder config id (see get_model)
    :param max_len: max target token length
    :param num_decoder_layers: number of decoder layers kept
    :param batch_size: per-device batch size for train and eval
    :param num_epochs: number of training epochs
    :param fp16: mixed-precision training and full-precision-free eval
    """
    wandb.login()

    model, processor = get_model(encoder_name, decoder_name, max_len, num_decoder_layers)

    # keep package 0 for validation
    train_dataset = MangaDataset(processor, 'train', max_len, augment=True, skip_packages=[0])
    eval_dataset = MangaDataset(processor, 'test', max_len, augment=False, skip_packages=range(1, 9999))

    metrics = Metrics(processor)

    training_args = Seq2SeqTrainingArguments(
        predict_with_generate=True,
        evaluation_strategy='steps',
        save_strategy='steps',
        per_device_train_batch_size=batch_size,
        per_device_eval_batch_size=batch_size,
        fp16=fp16,
        fp16_full_eval=fp16,
        dataloader_num_workers=16,
        output_dir=TRAIN_ROOT,
        logging_steps=10,
        save_steps=20000,
        eval_steps=20000,
        num_train_epochs=num_epochs,
        run_name=run_name
    )

    # instantiate trainer
    trainer = Seq2SeqTrainer(
        model=model,
        tokenizer=processor.feature_extractor,
        args=training_args,
        compute_metrics=metrics.compute_metrics,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=default_data_collator,
    )
    trainer.train()
    wandb.finish()
156,502 | import numpy as np
import torch
from torchinfo import summary
def encoder_summary(model, batch_size=4):
    """Return a torchinfo summary of the vision encoder for a dummy
    square-image batch sized from the model's own config."""
    side = model.config.encoder.image_size
    dummy_shape = (batch_size, 3, side, side)
    return summary(
        model.encoder,
        input_size=dummy_shape,
        depth=3,
        col_names=["output_size", "num_params", "mult_adds"],
        device='cpu',
    )
156,503 | import numpy as np
import torch
from torchinfo import summary
def decoder_summary(model, batch_size=4):
    """Return a torchinfo summary of the text decoder, feeding it fake
    encoder hidden states shaped for a ViT-style encoder (16px patches
    plus one extra token)."""
    img_size = model.config.encoder.image_size
    num_tokens = (img_size // 16) ** 2 + 1
    hidden_states = torch.rand(
        (batch_size, num_tokens, model.config.decoder.hidden_size),
        dtype=torch.float32,
    )
    fake_inputs = {
        'input_ids': torch.zeros(batch_size, 1, dtype=torch.int64),
        'attention_mask': torch.ones(batch_size, 1, dtype=torch.int64),
        'encoder_hidden_states': hidden_states,
        'return_dict': False,
    }
    return summary(model.decoder, input_data=fake_inputs, depth=4,
                   col_names=["output_size", "num_params", "mult_adds"],
                   device='cpu')
156,504 | import numpy as np
import torch
from torchinfo import summary
def tensor_to_image(img):
    """Convert a CHW float tensor with values in [-1, 1] to an HWC uint8
    numpy image."""
    arr = img.cpu().numpy()
    arr = (arr + 1) / 2 * 255
    arr = arr.clip(0, 255).astype(np.uint8)
    return arr.transpose(1, 2, 0)
156,505 | import torch
from torch import nn
The provided code snippet includes necessary dependencies for implementing the `normalize` function. Write a Python function `def normalize(x, axis=-1)` to solve the following problem:
Normalizing to unit length along the specified dimension. Args: x: pytorch Variable Returns: x: pytorch Variable, same shape as input
Here is the function:
def normalize(x, axis=-1):
    """L2-normalize `x` to unit length along `axis`.

    Args:
        x: pytorch Variable
    Returns:
        x: pytorch Variable, same shape as input
    """
    # Small epsilon keeps zero-norm rows finite.
    norms = torch.norm(x, 2, axis, keepdim=True)
    return x / (norms.expand_as(x) + 1e-12)
156,506 | import torch
from torch import nn
The provided code snippet includes necessary dependencies for implementing the `euclidean_dist` function. Write a Python function `def euclidean_dist(x, y)` to solve the following problem:
Args: x: pytorch Variable, with shape [m, d] y: pytorch Variable, with shape [n, d] Returns: dist: pytorch Variable, with shape [m, n]
Here is the function:
def euclidean_dist(x, y):
    """Pairwise Euclidean distance between two sets of row vectors.

    Args:
        x: pytorch Variable, with shape [m, d]
        y: pytorch Variable, with shape [n, d]
    Returns:
        dist: pytorch Variable, with shape [m, n]
    """
    m, n = x.size(0), y.size(0)
    # ||x_i||^2 and ||y_j||^2 broadcast to [m, n]
    xx = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n)
    yy = torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t()
    dist = xx + yy
    # dist = ||x||^2 + ||y||^2 - 2 * x @ y^T.
    # Fix: the positional beta/alpha form `addmm_(1, -2, x, y.t())` was
    # deprecated and then removed from PyTorch; use keyword arguments.
    dist.addmm_(x, y.t(), beta=1, alpha=-2)
    dist = dist.clamp(min=1e-12).sqrt()  # for numerical stability
    return dist
156,507 | import torch
from torch import nn
The provided code snippet includes necessary dependencies for implementing the `hard_example_mining` function. Write a Python function `def hard_example_mining(dist_mat, labels, return_inds=False)` to solve the following problem:
For each anchor, find the hardest positive and negative sample. Args: dist_mat: pytorch Variable, pair wise distance between samples, shape [N, N] labels: pytorch LongTensor, with shape [N] return_inds: whether to return the indices. Save time if `False`(?) Returns: dist_ap: pytorch Variable, distance(anchor, positive); shape [N] dist_an: pytorch Variable, distance(anchor, negative); shape [N] p_inds: pytorch LongTensor, with shape [N]; indices of selected hard positive samples; 0 <= p_inds[i] <= N - 1 n_inds: pytorch LongTensor, with shape [N]; indices of selected hard negative samples; 0 <= n_inds[i] <= N - 1 NOTE: Only consider the case in which all labels have same num of samples, thus we can cope with all anchors in parallel.
Here is the function:
def hard_example_mining(dist_mat, labels, return_inds=False):
    """For each anchor, find the hardest positive and negative sample.
    Args:
        dist_mat: pytorch Variable, pair wise distance between samples, shape [N, N]
        labels: pytorch LongTensor, with shape [N]
        return_inds: whether to return the indices. Save time if `False`(?)
    Returns:
        dist_ap: pytorch Variable, distance(anchor, positive); shape [N]
        dist_an: pytorch Variable, distance(anchor, negative); shape [N]
        p_inds: pytorch LongTensor, with shape [N];
            indices of selected hard positive samples; 0 <= p_inds[i] <= N - 1
        n_inds: pytorch LongTensor, with shape [N];
            indices of selected hard negative samples; 0 <= n_inds[i] <= N - 1
    NOTE: Only consider the case in which all labels have same num of samples,
    thus we can cope with all anchors in parallel.
    """

    assert len(dist_mat.size()) == 2
    assert dist_mat.size(0) == dist_mat.size(1)
    N = dist_mat.size(0)

    # Pairwise same-identity / different-identity masks, shape [N, N].
    is_pos = labels.expand(N, N).eq(labels.expand(N, N).t())
    is_neg = labels.expand(N, N).ne(labels.expand(N, N).t())

    # `dist_ap` means distance(anchor, positive)
    # both `dist_ap` and `relative_p_inds` with shape [N, 1]
    # The view(N, -1) relies on every anchor having the same number of
    # positives (PK sampling, see the docstring NOTE).
    dist_ap, relative_p_inds = torch.max(
        dist_mat[is_pos].contiguous().view(N, -1), 1, keepdim=True)
    # `dist_an` means distance(anchor, negative)
    # both `dist_an` and `relative_n_inds` with shape [N, 1]
    dist_an, relative_n_inds = torch.min(
        dist_mat[is_neg].contiguous().view(N, -1), 1, keepdim=True)
    # shape [N]
    dist_ap = dist_ap.squeeze(1)
    dist_an = dist_an.squeeze(1)

    if return_inds:
        # Map the per-row relative indices back to absolute sample indices.
        # shape [N, N]
        ind = (labels.new().resize_as_(labels)
               .copy_(torch.arange(0, N).long())
               .unsqueeze(0).expand(N, N))
        # shape [N, 1]
        p_inds = torch.gather(
            ind[is_pos].contiguous().view(N, -1), 1, relative_p_inds.data)
        n_inds = torch.gather(
            ind[is_neg].contiguous().view(N, -1), 1, relative_n_inds.data)
        # shape [N]
        p_inds = p_inds.squeeze(1)
        n_inds = n_inds.squeeze(1)
        return dist_ap, dist_an, p_inds, n_inds

    return dist_ap, dist_an
156,508 | import torchvision.transforms as T
from .transforms import RandomErasing
class RandomErasing(object):
    """ Randomly selects a rectangle region in an image and erases its pixels.
        'Random Erasing Data Augmentation' by Zhong et al.
        See https://arxiv.org/pdf/1708.04896.pdf
    Args:
         probability: The probability that the Random Erasing operation will be performed.
         sl: Minimum proportion of erased area against input image.
         sh: Maximum proportion of erased area against input image.
         r1: Minimum aspect ratio of erased area.
         mean: Erasing value.
    """

    def __init__(self, probability=0.5, sl=0.02, sh=0.4, r1=0.3, mean=(0.4914, 0.4822, 0.4465)):
        self.probability = probability
        self.mean = mean
        self.sl = sl
        self.sh = sh
        self.r1 = r1

    def __call__(self, img):
        """Erase a random patch of the CHW tensor `img` with `mean`, with
        probability `self.probability`; returns `img` (modified in place
        when erasing happens)."""
        if random.uniform(0, 1) >= self.probability:
            return img

        # Rejection-sample a rectangle (area and aspect ratio drawn from
        # the configured ranges) that fits inside the image; give up and
        # return the image unchanged after 100 failed attempts.
        for attempt in range(100):
            area = img.size()[1] * img.size()[2]

            target_area = random.uniform(self.sl, self.sh) * area
            aspect_ratio = random.uniform(self.r1, 1 / self.r1)

            h = int(round(math.sqrt(target_area * aspect_ratio)))
            w = int(round(math.sqrt(target_area / aspect_ratio)))

            if w < img.size()[2] and h < img.size()[1]:
                x1 = random.randint(0, img.size()[1] - h)
                y1 = random.randint(0, img.size()[2] - w)
                # 3-channel images get the per-channel mean; single-channel
                # images only the first component.
                if img.size()[0] == 3:
                    img[0, x1:x1 + h, y1:y1 + w] = self.mean[0]
                    img[1, x1:x1 + h, y1:y1 + w] = self.mean[1]
                    img[2, x1:x1 + h, y1:y1 + w] = self.mean[2]
                else:
                    img[0, x1:x1 + h, y1:y1 + w] = self.mean[0]
                return img

        return img
def build_transforms(cfg, is_train=True):
    """Build the torchvision preprocessing pipeline for re-id training.

    Training adds horizontal flip, pad+random-crop and RandomErasing on
    top of resize/normalize; evaluation only resizes and normalizes.

    :param cfg: config node providing the INPUT.* settings used below
    :param is_train: select the training or the evaluation pipeline
    :return: a torchvision `Compose` transform
    """
    normalize_transform = T.Normalize(mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD)
    if is_train:
        transform = T.Compose([
            T.Resize(cfg.INPUT.SIZE_TRAIN),
            T.RandomHorizontalFlip(p=cfg.INPUT.PROB),
            T.Pad(cfg.INPUT.PADDING),
            T.RandomCrop(cfg.INPUT.SIZE_TRAIN),
            T.ToTensor(),
            normalize_transform,
            # Applied after ToTensor/Normalize, hence the pixel-mean fill.
            RandomErasing(probability=cfg.INPUT.RE_PROB, mean=cfg.INPUT.PIXEL_MEAN)
        ])
    else:
        transform = T.Compose([
            T.Resize(cfg.INPUT.SIZE_TEST),
            T.ToTensor(),
            normalize_transform
        ])

    return transform
156,509 | import os.path as osp
from PIL import Image
from torch.utils.data import Dataset
The provided code snippet includes necessary dependencies for implementing the `read_image` function. Write a Python function `def read_image(img_path)` to solve the following problem:
Keep reading image until succeed. This can avoid IOError incurred by heavy IO process.
Here is the function:
def read_image(img_path):
    """Keep reading image until succeed.
    This can avoid IOError incurred by heavy IO process.

    :param img_path: path to the image file
    :return: PIL image converted to RGB
    :raises IOError: if the path does not exist at all

    NOTE(review): retries forever on a persistent IOError (e.g. a
    corrupted file) — presumably intentional for transient network-FS
    hiccups, but worth confirming.
    """
    got_img = False
    if not osp.exists(img_path):
        raise IOError("{} does not exist".format(img_path))
    while not got_img:
        try:
            img = Image.open(img_path).convert('RGB')
            got_img = True
        except IOError:
            print("IOError incurred when reading '{}'. Will redo. Don't worry. Just chill.".format(img_path))
            pass
    return img
156,510 | import numpy as np
The provided code snippet includes necessary dependencies for implementing the `eval_func` function. Write a Python function `def eval_func(distmat, q_pids, g_pids, q_camids, g_camids, max_rank=50)` to solve the following problem:
Evaluation with market1501 metric Key: for each query identity, its gallery images from the same camera view are discarded.
Here is the function:
def eval_func(distmat, q_pids, g_pids, q_camids, g_camids, max_rank=50):
    """Evaluation with market1501 metric
        Key: for each query identity, its gallery images from the same camera view are discarded.

    :param distmat: [num_query, num_gallery] distance matrix
    :param q_pids: person ids of the query samples
    :param g_pids: person ids of the gallery samples
    :param q_camids: camera ids of the query samples
    :param g_camids: camera ids of the gallery samples
    :param max_rank: CMC curve is reported up to this rank
    :return: (all_cmc, mAP) — CMC curve averaged over valid queries and
        mean average precision
    """
    num_q, num_g = distmat.shape
    if num_g < max_rank:
        max_rank = num_g
        print("Note: number of gallery samples is quite small, got {}".format(num_g))
    # Gallery ranking per query (ascending distance) and per-rank matches.
    indices = np.argsort(distmat, axis=1)
    matches = (g_pids[indices] == q_pids[:, np.newaxis]).astype(np.int32)

    # compute cmc curve for each query
    all_cmc = []
    all_AP = []
    num_valid_q = 0.  # number of valid query
    for q_idx in range(num_q):
        # get query pid and camid
        q_pid = q_pids[q_idx]
        q_camid = q_camids[q_idx]

        # remove gallery samples that have the same pid and camid with query
        order = indices[q_idx]
        remove = (g_pids[order] == q_pid) & (g_camids[order] == q_camid)
        keep = np.invert(remove)

        # compute cmc curve
        # binary vector, positions with value 1 are correct matches
        orig_cmc = matches[q_idx][keep]
        if not np.any(orig_cmc):
            # this condition is true when query identity does not appear in gallery
            continue

        cmc = orig_cmc.cumsum()
        cmc[cmc > 1] = 1

        all_cmc.append(cmc[:max_rank])
        num_valid_q += 1.

        # compute average precision
        # reference: https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#Average_precision
        num_rel = orig_cmc.sum()
        tmp_cmc = orig_cmc.cumsum()
        # Precision at each rank, masked to the relevant positions.
        tmp_cmc = [x / (i + 1.) for i, x in enumerate(tmp_cmc)]
        tmp_cmc = np.asarray(tmp_cmc) * orig_cmc
        AP = tmp_cmc.sum() / num_rel
        all_AP.append(AP)

    assert num_valid_q > 0, "Error: all query identities do not appear in gallery"

    all_cmc = np.asarray(all_cmc).astype(np.float32)
    all_cmc = all_cmc.sum(0) / num_valid_q
    mAP = np.mean(all_AP)

    return all_cmc, mAP
156,511 | from torch.utils.data import DataLoader
from .collate_batch import train_collate_fn, val_collate_fn
from .datasets import init_dataset, ImageDataset
from .samplers import RandomIdentitySampler, RandomIdentitySampler_alignedreid
from .transforms import build_transforms
def train_collate_fn(batch):
    """Collate (img, pid, camid, img_path) samples for training:
    returns (stacked image tensor, int64 person-id tensor)."""
    images = [sample[0] for sample in batch]
    labels = torch.tensor([sample[1] for sample in batch], dtype=torch.int64)
    return torch.stack(images, dim=0), labels
def val_collate_fn(batch):
    """Collate (img, pid, camid, img_path) samples for evaluation:
    returns (stacked images, person-id tuple, camera-id tuple)."""
    images = torch.stack([sample[0] for sample in batch], dim=0)
    person_ids = tuple(sample[1] for sample in batch)
    camera_ids = tuple(sample[2] for sample in batch)
    return images, person_ids, camera_ids
def init_dataset(name, *args, **kwargs):
    """Instantiate a dataset registered in the module-level `__factory`.

    :param name: registered dataset name
    :raises KeyError: if `name` is not registered
    """
    if name not in __factory:
        raise KeyError("Unknown datasets: {}".format(name))
    dataset_cls = __factory[name]
    return dataset_cls(*args, **kwargs)
def make_data_loader(cfg):
    """Create train/val DataLoaders for the configured re-id dataset.

    :param cfg: config node (DATASETS, DATALOADER, SOLVER, TEST sections)
    :return: (train_loader, val_loader, num_query, num_classes) where
        num_query is the size of the query set (val set is query+gallery)
    """
    train_transforms = build_transforms(cfg, is_train=True)
    val_transforms = build_transforms(cfg, is_train=False)
    num_workers = cfg.DATALOADER.NUM_WORKERS
    if len(cfg.DATASETS.NAMES) == 1:
        dataset = init_dataset(cfg.DATASETS.NAMES, root=cfg.DATASETS.ROOT_DIR)
    else:
        # TODO: add multi dataset to train
        dataset = init_dataset(cfg.DATASETS.NAMES, root=cfg.DATASETS.ROOT_DIR)

    num_classes = dataset.num_train_pids
    train_set = ImageDataset(dataset.train, train_transforms)
    if cfg.DATALOADER.SAMPLER == 'softmax':
        # Plain shuffling for ID-classification (softmax) training.
        train_loader = DataLoader(
            train_set, batch_size=cfg.SOLVER.IMS_PER_BATCH, shuffle=True, num_workers=num_workers,
            collate_fn=train_collate_fn
        )
    else:
        # PK sampling: each batch carries NUM_INSTANCE images per identity
        # (required by triplet-style losses).
        train_loader = DataLoader(
            train_set, batch_size=cfg.SOLVER.IMS_PER_BATCH,
            sampler=RandomIdentitySampler(dataset.train, cfg.SOLVER.IMS_PER_BATCH, cfg.DATALOADER.NUM_INSTANCE),
            # sampler=RandomIdentitySampler_alignedreid(dataset.train, cfg.DATALOADER.NUM_INSTANCE),      # new add by gu
            num_workers=num_workers, collate_fn=train_collate_fn
        )
    val_set = ImageDataset(dataset.query + dataset.gallery, val_transforms)
    val_loader = DataLoader(
        val_set, batch_size=cfg.TEST.IMS_PER_BATCH, shuffle=False, num_workers=num_workers,
        collate_fn=val_collate_fn
    )
    return train_loader, val_loader, len(dataset.query), num_classes
156,512 | import torch
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
model_urls = {
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
class Bottleneck_IBN(nn.Module):
    """ResNet bottleneck block with optional Instance-Batch Normalization
    (IBN) after the first 1x1 convolution."""
    # Output channels = planes * expansion, as in standard ResNet.
    expansion = 4

    def __init__(self, inplanes, planes, ibn=False, stride=1, downsample=None):
        """
        :param inplanes: input channel count
        :param planes: bottleneck width (block output is planes * 4)
        :param ibn: use IBN (defined elsewhere in this module) instead of
            BatchNorm after conv1
        :param stride: stride of the 3x3 convolution
        :param downsample: optional module matching the residual's shape
        """
        super(Bottleneck_IBN, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        if ibn:
            self.bn1 = IBN(planes)
        else:
            self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """1x1 -> 3x3 -> 1x1 conv stack with a residual connection; the
        residual is downsampled when the shapes differ."""
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out
class ResNet_IBN(nn.Module):
    """ResNet backbone with IBN-a blocks, used here as a feature extractor.

    ``last_stride`` sets the stride of the final stage (re-id models often
    use 1 to keep a larger feature map).  The classifier head
    (``avgpool``/``fc``) is constructed for checkpoint compatibility but
    bypassed: ``forward`` returns the raw stage-4 feature map.
    """

    def __init__(self, last_stride, block, layers, num_classes=1000):
        scale = 64  # base channel width
        self.inplanes = scale
        super(ResNet_IBN, self).__init__()
        # Stem: 7x7/2 conv + BN + ReLU + 3x3/2 max-pool
        self.conv1 = nn.Conv2d(3, scale, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(scale)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Four residual stages
        self.layer1 = self._make_layer(block, scale, layers[0])
        self.layer2 = self._make_layer(block, scale * 2, layers[1], stride=2)
        self.layer3 = self._make_layer(block, scale * 4, layers[2], stride=2)
        self.layer4 = self._make_layer(block, scale * 8, layers[3],
                                       stride=last_stride)
        # Head kept only for state-dict compatibility; not used in forward.
        self.avgpool = nn.AvgPool2d(7)
        self.fc = nn.Linear(scale * 8 * block.expansion, num_classes)
        self._init_weights()

    def _init_weights(self):
        """He-initialize convs; unit-gamma / zero-beta for norm layers."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d)):
                # assumes affine norm layers (weight/bias present) -- TODO confirm
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one stage of ``blocks`` residual units at width ``planes``."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Projection shortcut when shape or stride changes
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        # IBN is used in every stage except the last (512-wide) one
        use_ibn = planes != 512
        stage = [block(self.inplanes, planes, use_ibn, stride, downsample)]
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            stage.append(block(self.inplanes, planes, use_ibn))
        return nn.Sequential(*stage)

    def forward(self, x):
        """Return the stage-4 feature map for a batch of RGB images."""
        x = self.maxpool(self.relu(self.bn1(self.conv1(x))))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            x = stage(x)
        # Classifier head intentionally skipped: callers pool/classify outside.
        return x

    def load_param(self, model_path):
        """Copy matching weights from a checkpoint, skipping the fc head."""
        param_dict = torch.load(model_path)
        for key in param_dict:
            if 'fc' in key:
                continue
            self.state_dict()[key].copy_(param_dict[key])
The provided code snippet includes necessary dependencies for implementing the `resnet50_ibn_a` function. Write a Python function `def resnet50_ibn_a(last_stride, pretrained=False, **kwargs)` to solve the following problem:
Constructs a ResNet-50 model. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet
Here is the function:
def resnet50_ibn_a(last_stride, pretrained=False, **kwargs):
    """Build a ResNet-50-IBN-a backbone.

    Args:
        last_stride: stride of the final residual stage.
        pretrained (bool): if True, load ImageNet weights for vanilla
            ResNet-50.  NOTE(review): the IBN layers rename some bn1
            parameters, so a strict ``load_state_dict`` may fail -- confirm
            before relying on this path.
    """
    net = ResNet_IBN(last_stride, Bottleneck_IBN, [3, 4, 6, 3], **kwargs)
    if pretrained:
        net.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
    return net
156,513 | import torch
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
# Download locations of the ImageNet-pretrained torchvision checkpoints,
# fetched via torch.utils.model_zoo when ``pretrained=True``.
model_urls = {
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
class Bottleneck_IBN(nn.Module):
    """ResNet bottleneck block with optional Instance-Batch Normalization.

    When ``ibn`` is True the first normalization is an ``IBN`` module
    (defined elsewhere in this file); otherwise plain BatchNorm2d.
    Layout: 1x1 reduce -> 3x3 (carries the stride) -> 1x1 expand (x4),
    plus an identity/projection residual connection.
    """

    expansion = 4  # output channels = planes * expansion

    def __init__(self, inplanes, planes, ibn=False, stride=1, downsample=None):
        super(Bottleneck_IBN, self).__init__()
        # 1x1 channel reduction
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        # NOTE: ``IBN`` must be in scope when ibn=True (defined elsewhere in this file)
        self.bn1 = IBN(planes) if ibn else nn.BatchNorm2d(planes)
        # 3x3 spatial conv; the block's stride is applied here
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # 1x1 channel expansion
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,
                               bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """Return relu(main_path(x) + shortcut(x))."""
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        y += shortcut
        return self.relu(y)
class ResNet_IBN(nn.Module):
    """ResNet backbone with IBN-a blocks, used here as a feature extractor.

    ``last_stride`` sets the stride of the final stage (re-id models often
    use 1 to keep a larger feature map).  The classifier head
    (``avgpool``/``fc``) is constructed for checkpoint compatibility but
    bypassed: ``forward`` returns the raw stage-4 feature map.
    """

    def __init__(self, last_stride, block, layers, num_classes=1000):
        scale = 64  # base channel width
        self.inplanes = scale
        super(ResNet_IBN, self).__init__()
        # Stem: 7x7/2 conv + BN + ReLU + 3x3/2 max-pool
        self.conv1 = nn.Conv2d(3, scale, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(scale)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Four residual stages
        self.layer1 = self._make_layer(block, scale, layers[0])
        self.layer2 = self._make_layer(block, scale * 2, layers[1], stride=2)
        self.layer3 = self._make_layer(block, scale * 4, layers[2], stride=2)
        self.layer4 = self._make_layer(block, scale * 8, layers[3],
                                       stride=last_stride)
        # Head kept only for state-dict compatibility; not used in forward.
        self.avgpool = nn.AvgPool2d(7)
        self.fc = nn.Linear(scale * 8 * block.expansion, num_classes)
        self._init_weights()

    def _init_weights(self):
        """He-initialize convs; unit-gamma / zero-beta for norm layers."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d)):
                # assumes affine norm layers (weight/bias present) -- TODO confirm
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one stage of ``blocks`` residual units at width ``planes``."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Projection shortcut when shape or stride changes
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        # IBN is used in every stage except the last (512-wide) one
        use_ibn = planes != 512
        stage = [block(self.inplanes, planes, use_ibn, stride, downsample)]
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            stage.append(block(self.inplanes, planes, use_ibn))
        return nn.Sequential(*stage)

    def forward(self, x):
        """Return the stage-4 feature map for a batch of RGB images."""
        x = self.maxpool(self.relu(self.bn1(self.conv1(x))))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            x = stage(x)
        # Classifier head intentionally skipped: callers pool/classify outside.
        return x

    def load_param(self, model_path):
        """Copy matching weights from a checkpoint, skipping the fc head."""
        param_dict = torch.load(model_path)
        for key in param_dict:
            if 'fc' in key:
                continue
            self.state_dict()[key].copy_(param_dict[key])
The provided code snippet includes necessary dependencies for implementing the `resnet101_ibn_a` function. Write a Python function `def resnet101_ibn_a(last_stride, pretrained=False, **kwargs)` to solve the following problem:
Constructs a ResNet-101 model. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet
Here is the function:
def resnet101_ibn_a(last_stride, pretrained=False, **kwargs):
    """Build a ResNet-101-IBN-a backbone.

    Args:
        last_stride: stride of the final residual stage.
        pretrained (bool): if True, load ImageNet weights for vanilla
            ResNet-101.  NOTE(review): the IBN layers rename some bn1
            parameters, so a strict ``load_state_dict`` may fail -- confirm
            before relying on this path.
    """
    net = ResNet_IBN(last_stride, Bottleneck_IBN, [3, 4, 23, 3], **kwargs)
    if pretrained:
        net.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
    return net
156,514 | import torch
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
# Download locations of the ImageNet-pretrained torchvision checkpoints,
# fetched via torch.utils.model_zoo when ``pretrained=True``.
model_urls = {
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
class Bottleneck_IBN(nn.Module):
    """ResNet bottleneck block with optional Instance-Batch Normalization.

    When ``ibn`` is True the first normalization is an ``IBN`` module
    (defined elsewhere in this file); otherwise plain BatchNorm2d.
    Layout: 1x1 reduce -> 3x3 (carries the stride) -> 1x1 expand (x4),
    plus an identity/projection residual connection.
    """

    expansion = 4  # output channels = planes * expansion

    def __init__(self, inplanes, planes, ibn=False, stride=1, downsample=None):
        super(Bottleneck_IBN, self).__init__()
        # 1x1 channel reduction
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        # NOTE: ``IBN`` must be in scope when ibn=True (defined elsewhere in this file)
        self.bn1 = IBN(planes) if ibn else nn.BatchNorm2d(planes)
        # 3x3 spatial conv; the block's stride is applied here
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # 1x1 channel expansion
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,
                               bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """Return relu(main_path(x) + shortcut(x))."""
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        y += shortcut
        return self.relu(y)
class ResNet_IBN(nn.Module):
    """ResNet backbone with IBN-a blocks, used here as a feature extractor.

    ``last_stride`` sets the stride of the final stage (re-id models often
    use 1 to keep a larger feature map).  The classifier head
    (``avgpool``/``fc``) is constructed for checkpoint compatibility but
    bypassed: ``forward`` returns the raw stage-4 feature map.
    """

    def __init__(self, last_stride, block, layers, num_classes=1000):
        scale = 64  # base channel width
        self.inplanes = scale
        super(ResNet_IBN, self).__init__()
        # Stem: 7x7/2 conv + BN + ReLU + 3x3/2 max-pool
        self.conv1 = nn.Conv2d(3, scale, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(scale)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Four residual stages
        self.layer1 = self._make_layer(block, scale, layers[0])
        self.layer2 = self._make_layer(block, scale * 2, layers[1], stride=2)
        self.layer3 = self._make_layer(block, scale * 4, layers[2], stride=2)
        self.layer4 = self._make_layer(block, scale * 8, layers[3],
                                       stride=last_stride)
        # Head kept only for state-dict compatibility; not used in forward.
        self.avgpool = nn.AvgPool2d(7)
        self.fc = nn.Linear(scale * 8 * block.expansion, num_classes)
        self._init_weights()

    def _init_weights(self):
        """He-initialize convs; unit-gamma / zero-beta for norm layers."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d)):
                # assumes affine norm layers (weight/bias present) -- TODO confirm
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one stage of ``blocks`` residual units at width ``planes``."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Projection shortcut when shape or stride changes
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        # IBN is used in every stage except the last (512-wide) one
        use_ibn = planes != 512
        stage = [block(self.inplanes, planes, use_ibn, stride, downsample)]
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            stage.append(block(self.inplanes, planes, use_ibn))
        return nn.Sequential(*stage)

    def forward(self, x):
        """Return the stage-4 feature map for a batch of RGB images."""
        x = self.maxpool(self.relu(self.bn1(self.conv1(x))))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            x = stage(x)
        # Classifier head intentionally skipped: callers pool/classify outside.
        return x

    def load_param(self, model_path):
        """Copy matching weights from a checkpoint, skipping the fc head."""
        param_dict = torch.load(model_path)
        for key in param_dict:
            if 'fc' in key:
                continue
            self.state_dict()[key].copy_(param_dict[key])
The provided code snippet includes necessary dependencies for implementing the `resnet152_ibn_a` function. Write a Python function `def resnet152_ibn_a(last_stride, pretrained=False, **kwargs)` to solve the following problem:
Constructs a ResNet-152 model. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet
Here is the function:
def resnet152_ibn_a(last_stride, pretrained=False, **kwargs):
    """Build a ResNet-152-IBN-a backbone.

    Args:
        last_stride: stride of the final residual stage.
        pretrained (bool): if True, load ImageNet weights for vanilla
            ResNet-152.  NOTE(review): the IBN layers rename some bn1
            parameters, so a strict ``load_state_dict`` may fail -- confirm
            before relying on this path.
    """
    net = ResNet_IBN(last_stride, Bottleneck_IBN, [3, 8, 36, 3], **kwargs)
    if pretrained:
        net.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
    return net
156,516 | import torch
from torch import nn
from .backbones.resnet import ResNet, BasicBlock, Bottleneck
from .backbones.senet import SENet, SEResNetBottleneck, SEBottleneck, SEResNeXtBottleneck
from .backbones.resnet_ibn_a import resnet50_ibn_a
def weights_init_kaiming(m):
    """Kaiming-initialize a module in place (intended for ``model.apply``).

    Linear layers use fan-out, conv layers fan-in; affine BatchNorm layers
    get unit weight / zero bias.  Other module types are left untouched.
    """
    classname = m.__class__.__name__
    if classname.find('Linear') != -1:
        nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out')
        # BUG FIX: Linear layers built with bias=False have m.bias is None;
        # the unconditional constant_ call crashed on them (the Conv branch
        # below already had this guard).
        if m.bias is not None:
            nn.init.constant_(m.bias, 0.0)
    elif classname.find('Conv') != -1:
        nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
        if m.bias is not None:
            nn.init.constant_(m.bias, 0.0)
    elif classname.find('BatchNorm') != -1:
        if m.affine:
            nn.init.constant_(m.weight, 1.0)
            nn.init.constant_(m.bias, 0.0)
156,517 | import torch
from torch import nn
from .backbones.resnet import ResNet, BasicBlock, Bottleneck
from .backbones.senet import SENet, SEResNetBottleneck, SEBottleneck, SEResNeXtBottleneck
from .backbones.resnet_ibn_a import resnet50_ibn_a
def weights_init_classifier(m):
    """Initialize a classifier head in place: N(0, 0.001) weight, zero bias.

    Intended for ``model.apply``; only Linear layers are touched.
    """
    classname = m.__class__.__name__
    if classname.find('Linear') != -1:
        nn.init.normal_(m.weight, std=0.001)
        # BUG FIX: ``if m.bias`` evaluates tensor truthiness, which raises
        # RuntimeError for any bias with more than one element; test for
        # presence instead.
        if m.bias is not None:
            nn.init.constant_(m.bias, 0.0)
156,518 | import torch
def make_optimizer(cfg, model):
    """Create the optimizer described by ``cfg.SOLVER``.

    Every trainable parameter gets its own group so bias terms can use a
    scaled learning rate (``BIAS_LR_FACTOR``) and a separate weight decay.
    Frozen parameters (``requires_grad=False``) are excluded entirely.
    """
    param_groups = []
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue
        if "bias" in name:
            group_lr = cfg.SOLVER.BASE_LR * cfg.SOLVER.BIAS_LR_FACTOR
            group_wd = cfg.SOLVER.WEIGHT_DECAY_BIAS
        else:
            group_lr = cfg.SOLVER.BASE_LR
            group_wd = cfg.SOLVER.WEIGHT_DECAY
        param_groups.append(
            {"params": [param], "lr": group_lr, "weight_decay": group_wd}
        )
    opt_cls = getattr(torch.optim, cfg.SOLVER.OPTIMIZER_NAME)
    if cfg.SOLVER.OPTIMIZER_NAME == 'SGD':
        # SGD is the only optimizer here that takes a momentum argument.
        return opt_cls(param_groups, momentum=cfg.SOLVER.MOMENTUM)
    return opt_cls(param_groups)
156,519 | import torch
def make_optimizer_with_center(cfg, model, center_criterion):
    """Create the model optimizer plus a separate SGD optimizer for centers.

    The model optimizer mirrors :func:`make_optimizer` (per-parameter groups
    with a bias lr factor); the center-loss parameters always use plain SGD
    with ``cfg.SOLVER.CENTER_LR``.
    """
    param_groups = []
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue
        bias_term = "bias" in name
        param_groups.append({
            "params": [param],
            "lr": cfg.SOLVER.BASE_LR * (cfg.SOLVER.BIAS_LR_FACTOR if bias_term else 1),
            "weight_decay": cfg.SOLVER.WEIGHT_DECAY_BIAS if bias_term else cfg.SOLVER.WEIGHT_DECAY,
        })
    opt_cls = getattr(torch.optim, cfg.SOLVER.OPTIMIZER_NAME)
    if cfg.SOLVER.OPTIMIZER_NAME == 'SGD':
        optimizer = opt_cls(param_groups, momentum=cfg.SOLVER.MOMENTUM)
    else:
        optimizer = opt_cls(param_groups)
    optimizer_center = torch.optim.SGD(center_criterion.parameters(),
                                       lr=cfg.SOLVER.CENTER_LR)
    return optimizer, optimizer_center
156,520 | import logging
import torch
import torch.nn as nn
from ignite.engine import Engine
from utils.reid_metric import R1_mAP, R1_mAP_reranking
def create_supervised_evaluator(model, metrics,
                                device=None):
    """
    Factory function for creating an evaluator for supervised models

    Args:
        model (`torch.nn.Module`): the model to evaluate
        metrics (dict of str - :class:`ignite.metrics.Metric`): a map of metric names to Metrics
        device (str, optional): device type specification (default: None).
            Applies to both model and batches.
    Returns:
        Engine: an evaluator engine with supervised inference function
    """
    if device:
        # Wrap for multi-GPU first, then move to the requested device.
        if torch.cuda.device_count() > 1:
            model = nn.DataParallel(model)
        model.to(device)

    def _run_batch(engine, batch):
        # Inference-only step: eval mode, no gradients.
        model.eval()
        with torch.no_grad():
            data, pids, camids = batch
            if torch.cuda.device_count() >= 1:
                data = data.to(device)
            feat = model(data)
        return feat, pids, camids

    evaluator = Engine(_run_batch)
    for metric_name, metric in metrics.items():
        metric.attach(evaluator, metric_name)
    return evaluator
class R1_mAP(Metric):
    """Ignite metric computing CMC curve and mAP for re-id evaluation.

    Features/pids/camids are accumulated over the run; the first
    ``num_query`` rows of the feature matrix are queries, the rest gallery.
    """

    def __init__(self, num_query, max_rank=50, feat_norm='yes'):
        super(R1_mAP, self).__init__()
        self.num_query = num_query    # number of query samples at the front of the feature matrix
        self.max_rank = max_rank      # NOTE(review): stored but not passed to eval_func below -- confirm
        self.feat_norm = feat_norm    # 'yes' -> L2-normalize features before distances

    def reset(self):
        # Cleared at the start of each evaluation run.
        self.feats = []
        self.pids = []
        self.camids = []

    def update(self, output):
        # ``output`` is the (feat, pids, camids) triple from the evaluator step.
        feat, pid, camid = output
        self.feats.append(feat)
        self.pids.extend(np.asarray(pid))
        self.camids.extend(np.asarray(camid))

    def compute(self):
        feats = torch.cat(self.feats, dim=0)
        if self.feat_norm == 'yes':
            print("The test feature is normalized")
            feats = torch.nn.functional.normalize(feats, dim=1, p=2)
        # query
        qf = feats[:self.num_query]
        q_pids = np.asarray(self.pids[:self.num_query])
        q_camids = np.asarray(self.camids[:self.num_query])
        # gallery
        gf = feats[self.num_query:]
        g_pids = np.asarray(self.pids[self.num_query:])
        g_camids = np.asarray(self.camids[self.num_query:])
        # Squared-Euclidean distance: ||q||^2 + ||g||^2 - 2 q.g
        m, n = qf.shape[0], gf.shape[0]
        distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
                  torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
        # NOTE(review): legacy in-place signature addmm_(beta, alpha, m1, m2);
        # removed in newer torch -- there use distmat.addmm_(qf, gf.t(), beta=1, alpha=-2).
        distmat.addmm_(1, -2, qf, gf.t())
        distmat = distmat.cpu().numpy()
        cmc, mAP = eval_func(distmat, q_pids, g_pids, q_camids, g_camids)
        return cmc, mAP
class R1_mAP_reranking(Metric):
    """Ignite metric: CMC/mAP computed on k-reciprocal re-ranked distances.

    Same accumulation protocol as R1_mAP, but ``compute`` replaces the raw
    Euclidean distance matrix with the output of ``re_ranking``.
    """

    def __init__(self, num_query, max_rank=50, feat_norm='yes'):
        super(R1_mAP_reranking, self).__init__()
        self.num_query = num_query    # number of query samples at the front of the feature matrix
        self.max_rank = max_rank      # NOTE(review): stored but not used in compute -- confirm
        self.feat_norm = feat_norm    # 'yes' -> L2-normalize features first

    def reset(self):
        # Cleared at the start of each evaluation run.
        self.feats = []
        self.pids = []
        self.camids = []

    def update(self, output):
        # ``output`` is the (feat, pids, camids) triple from the evaluator step.
        feat, pid, camid = output
        self.feats.append(feat)
        self.pids.extend(np.asarray(pid))
        self.camids.extend(np.asarray(camid))

    def compute(self):
        feats = torch.cat(self.feats, dim=0)
        if self.feat_norm == 'yes':
            print("The test feature is normalized")
            feats = torch.nn.functional.normalize(feats, dim=1, p=2)

        # query
        qf = feats[:self.num_query]
        q_pids = np.asarray(self.pids[:self.num_query])
        q_camids = np.asarray(self.camids[:self.num_query])
        # gallery
        gf = feats[self.num_query:]
        g_pids = np.asarray(self.pids[self.num_query:])
        g_camids = np.asarray(self.camids[self.num_query:])
        # m, n = qf.shape[0], gf.shape[0]
        # distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
        #     torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
        # distmat.addmm_(1, -2, qf, gf.t())
        # distmat = distmat.cpu().numpy()
        print("Enter reranking")
        # k-reciprocal re-ranked distance replaces the plain Euclidean matrix.
        distmat = re_ranking(qf, gf, k1=20, k2=6, lambda_value=0.3)
        cmc, mAP = eval_func(distmat, q_pids, g_pids, q_camids, g_camids)
        return cmc, mAP
def inference(
        cfg,
        model,
        val_loader,
        num_query
):
    """Run validation over ``val_loader`` and log mAP / CMC results.

    ``cfg.TEST.RE_RANKING`` selects plain evaluation ('no') or k-reciprocal
    re-ranking ('yes').

    Raises:
        ValueError: if cfg.TEST.RE_RANKING is neither 'no' nor 'yes'.
    """
    device = cfg.MODEL.DEVICE

    logger = logging.getLogger("reid_baseline.inference")
    logger.info("Enter inferencing")
    if cfg.TEST.RE_RANKING == 'no':
        print("Create evaluator")
        evaluator = create_supervised_evaluator(
            model,
            metrics={'r1_mAP': R1_mAP(num_query, max_rank=50,
                                      feat_norm=cfg.TEST.FEAT_NORM)},
            device=device)
    elif cfg.TEST.RE_RANKING == 'yes':
        print("Create evaluator for reranking")
        evaluator = create_supervised_evaluator(
            model,
            metrics={'r1_mAP': R1_mAP_reranking(num_query, max_rank=50,
                                                feat_norm=cfg.TEST.FEAT_NORM)},
            device=device)
    else:
        # BUG FIX: previously this branch only printed a warning and then fell
        # through to ``evaluator.run`` with ``evaluator`` unbound (NameError);
        # fail fast with a clear error instead.
        raise ValueError(
            "Unsupported re_ranking config. Only support for no or yes, "
            "but got {}.".format(cfg.TEST.RE_RANKING))

    evaluator.run(val_loader)
    cmc, mAP = evaluator.state.metrics['r1_mAP']
    logger.info('Validation Results')
    logger.info("mAP: {:.1%}".format(mAP))
    for r in [1, 5, 10]:
        logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(r, cmc[r - 1]))
156,521 | import argparse
import os
import sys
import torch
from torch.backends import cudnn
from config import cfg
from data import make_data_loader
from engine.trainer import do_train, do_train_with_center
from modeling import build_model
from layers import make_loss, make_loss_with_center
from solver import make_optimizer, make_optimizer_with_center, WarmupMultiStepLR
from utils.logger import setup_logger
def do_train(
cfg,
model,
train_loader,
val_loader,
optimizer,
scheduler,
loss_fn,
num_query,
start_epoch
):
def do_train_with_center(
cfg,
model,
center_criterion,
train_loader,
val_loader,
optimizer,
optimizer_center,
scheduler,
loss_fn,
num_query,
start_epoch
):
log_period = cfg.SOLVER.LOG_PERIOD
checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD
eval_period = cfg.SOLVER.EVAL_PERIOD
output_dir = cfg.OUTPUT_DIR
device = cfg.MODEL.DEVICE
epochs = cfg.SOLVER.MAX_EPOCHS
logger = logging.getLogger("reid_baseline.train")
logger.info("Start training")
trainer = create_supervised_trainer_with_center(model, center_criterion, optimizer, optimizer_center, loss_fn, cfg.SOLVER.CENTER_LOSS_WEIGHT, device=device)
evaluator = create_supervised_evaluator(model, metrics={'r1_mAP': R1_mAP(num_query, max_rank=50, feat_norm=cfg.TEST.FEAT_NORM)}, device=device)
checkpointer = ModelCheckpoint(output_dir, cfg.MODEL.NAME, checkpoint_period, n_saved=10, require_empty=False)
timer = Timer(average=True)
trainer.add_event_handler(Events.EPOCH_COMPLETED, checkpointer, {'model': model,
'optimizer': optimizer,
'center_param': center_criterion,
'optimizer_center': optimizer_center})
timer.attach(trainer, start=Events.EPOCH_STARTED, resume=Events.ITERATION_STARTED,
pause=Events.ITERATION_COMPLETED, step=Events.ITERATION_COMPLETED)
# average metric to attach on trainer
RunningAverage(output_transform=lambda x: x[0]).attach(trainer, 'avg_loss')
RunningAverage(output_transform=lambda x: x[1]).attach(trainer, 'avg_acc')
def start_training(engine):
def adjust_learning_rate(engine):
def log_training_loss(engine):
# adding handlers using `trainer.on` decorator API
def print_times(engine):
def log_validation_results(engine):
trainer.run(train_loader, max_epochs=epochs)
def build_model(cfg, num_classes):
def make_loss(cfg, num_classes):
def make_loss_with_center(cfg, num_classes):
def train(cfg):
    """End-to-end training entry point driven entirely by ``cfg``.

    Builds data loaders and the model, then dispatches on
    ``cfg.MODEL.IF_WITH_CENTER`` ('no'/'yes') and ``cfg.MODEL.PRETRAIN_CHOICE``
    ('imagenet' = fresh start, 'self' = resume from our own checkpoint).
    """
    # prepare dataset
    train_loader, val_loader, num_query, num_classes = make_data_loader(cfg)

    # prepare model
    model = build_model(cfg, num_classes)

    if cfg.MODEL.IF_WITH_CENTER == 'no':
        print('Train without center loss, the loss type is', cfg.MODEL.METRIC_LOSS_TYPE)
        optimizer = make_optimizer(cfg, model)
        # scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA, cfg.SOLVER.WARMUP_FACTOR,
        #                               cfg.SOLVER.WARMUP_ITERS, cfg.SOLVER.WARMUP_METHOD)

        loss_func = make_loss(cfg, num_classes)    # modified by gu

        # Add for using self trained model
        if cfg.MODEL.PRETRAIN_CHOICE == 'self':
            # Resume: epoch number parsed from a filename like ``..._model_120.pth``.
            # NOTE(review): eval() on a filename fragment is unsafe and
            # unnecessary -- int() would do, and this crashes on unexpected names.
            start_epoch = eval(cfg.MODEL.PRETRAIN_PATH.split('/')[-1].split('.')[0].split('_')[-1])
            print('Start epoch:', start_epoch)
            path_to_optimizer = cfg.MODEL.PRETRAIN_PATH.replace('model', 'optimizer')
            print('Path to the checkpoint of optimizer:', path_to_optimizer)
            model.load_state_dict(torch.load(cfg.MODEL.PRETRAIN_PATH))
            optimizer.load_state_dict(torch.load(path_to_optimizer))
            scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA, cfg.SOLVER.WARMUP_FACTOR,
                                          cfg.SOLVER.WARMUP_ITERS, cfg.SOLVER.WARMUP_METHOD, start_epoch)
        elif cfg.MODEL.PRETRAIN_CHOICE == 'imagenet':
            start_epoch = 0
            scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA, cfg.SOLVER.WARMUP_FACTOR,
                                          cfg.SOLVER.WARMUP_ITERS, cfg.SOLVER.WARMUP_METHOD)
        else:
            # NOTE(review): this branch only prints; start_epoch/scheduler stay
            # unbound, so the do_train call below raises NameError -- confirm
            # whether failing fast here is intended.
            print('Only support pretrain_choice for imagenet and self, but got {}'.format(cfg.MODEL.PRETRAIN_CHOICE))

        arguments = {}

        do_train(
            cfg,
            model,
            train_loader,
            val_loader,
            optimizer,
            scheduler,      # modify for using self trained model
            loss_func,
            num_query,
            start_epoch     # add for using self trained model
        )
    elif cfg.MODEL.IF_WITH_CENTER == 'yes':
        print('Train with center loss, the loss type is', cfg.MODEL.METRIC_LOSS_TYPE)
        loss_func, center_criterion = make_loss_with_center(cfg, num_classes)  # modified by gu
        optimizer, optimizer_center = make_optimizer_with_center(cfg, model, center_criterion)
        # scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA, cfg.SOLVER.WARMUP_FACTOR,
        #                               cfg.SOLVER.WARMUP_ITERS, cfg.SOLVER.WARMUP_METHOD)

        arguments = {}

        # Add for using self trained model
        if cfg.MODEL.PRETRAIN_CHOICE == 'self':
            # Resume model, optimizer, center parameters and center optimizer
            # from sibling checkpoint files derived from PRETRAIN_PATH.
            start_epoch = eval(cfg.MODEL.PRETRAIN_PATH.split('/')[-1].split('.')[0].split('_')[-1])
            print('Start epoch:', start_epoch)
            path_to_optimizer = cfg.MODEL.PRETRAIN_PATH.replace('model', 'optimizer')
            print('Path to the checkpoint of optimizer:', path_to_optimizer)
            path_to_center_param = cfg.MODEL.PRETRAIN_PATH.replace('model', 'center_param')
            print('Path to the checkpoint of center_param:', path_to_center_param)
            path_to_optimizer_center = cfg.MODEL.PRETRAIN_PATH.replace('model', 'optimizer_center')
            print('Path to the checkpoint of optimizer_center:', path_to_optimizer_center)
            model.load_state_dict(torch.load(cfg.MODEL.PRETRAIN_PATH))
            optimizer.load_state_dict(torch.load(path_to_optimizer))
            center_criterion.load_state_dict(torch.load(path_to_center_param))
            optimizer_center.load_state_dict(torch.load(path_to_optimizer_center))
            scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA, cfg.SOLVER.WARMUP_FACTOR,
                                          cfg.SOLVER.WARMUP_ITERS, cfg.SOLVER.WARMUP_METHOD, start_epoch)
        elif cfg.MODEL.PRETRAIN_CHOICE == 'imagenet':
            start_epoch = 0
            scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA, cfg.SOLVER.WARMUP_FACTOR,
                                          cfg.SOLVER.WARMUP_ITERS, cfg.SOLVER.WARMUP_METHOD)
        else:
            # NOTE(review): same unbound-scheduler fallthrough as above.
            print('Only support pretrain_choice for imagenet and self, but got {}'.format(cfg.MODEL.PRETRAIN_CHOICE))

        do_train_with_center(
            cfg,
            model,
            center_criterion,
            train_loader,
            val_loader,
            optimizer,
            optimizer_center,
            scheduler,      # modify for using self trained model
            loss_func,
            num_query,
            start_epoch     # add for using self trained model
        )
    else:
        print("Unsupported value for cfg.MODEL.IF_WITH_CENTER {}, only support yes or no!\n".format(cfg.MODEL.IF_WITH_CENTER))
156,522 | import numpy as np
import torch
def re_ranking(probFea, galFea, k1, k2, lambda_value, local_distmat=None, only_local=False):
    """k-reciprocal re-ranking of query/gallery distances (Zhong et al., CVPR'17).

    ``probFea``/``galFea`` are query and gallery feature tensors; returns the
    re-ranked (num_query x num_gallery) distance matrix as a numpy array.
    ``local_distmat`` optionally adds to (or, with ``only_local``, replaces)
    the appearance distance.
    """
    # if feature vector is numpy, you should use 'torch.tensor' transform it to tensor
    query_num = probFea.size(0)
    all_num = query_num + galFea.size(0)
    if only_local:
        original_dist = local_distmat
    else:
        # Pairwise squared-Euclidean distances over all (query + gallery) features.
        feat = torch.cat([probFea, galFea])
        print('using GPU to compute original distance')
        distmat = torch.pow(feat, 2).sum(dim=1, keepdim=True).expand(all_num, all_num) + \
            torch.pow(feat, 2).sum(dim=1, keepdim=True).expand(all_num, all_num).t()
        # NOTE(review): legacy in-place signature addmm_(beta, alpha, m1, m2);
        # removed in newer torch -- there use distmat.addmm_(feat, feat.t(), beta=1, alpha=-2).
        distmat.addmm_(1, -2, feat, feat.t())
        original_dist = distmat.cpu().numpy()
        del feat
        if not local_distmat is None:
            original_dist = original_dist + local_distmat
    gallery_num = original_dist.shape[0]
    # Column-normalize, then transpose so each row holds one probe's distances.
    original_dist = np.transpose(original_dist / np.max(original_dist, axis=0))
    V = np.zeros_like(original_dist).astype(np.float16)  # sparse reciprocal weights (fp16 saves memory)
    initial_rank = np.argsort(original_dist).astype(np.int32)

    print('starting re_ranking')
    for i in range(all_num):
        # k-reciprocal neighbors
        forward_k_neigh_index = initial_rank[i, :k1 + 1]
        backward_k_neigh_index = initial_rank[forward_k_neigh_index, :k1 + 1]
        fi = np.where(backward_k_neigh_index == i)[0]
        k_reciprocal_index = forward_k_neigh_index[fi]
        k_reciprocal_expansion_index = k_reciprocal_index
        # Expand with each candidate's half-size reciprocal set when the
        # overlap with the base set exceeds 2/3 of the candidate set.
        for j in range(len(k_reciprocal_index)):
            candidate = k_reciprocal_index[j]
            candidate_forward_k_neigh_index = initial_rank[candidate, :int(np.around(k1 / 2)) + 1]
            candidate_backward_k_neigh_index = initial_rank[candidate_forward_k_neigh_index,
                                               :int(np.around(k1 / 2)) + 1]
            fi_candidate = np.where(candidate_backward_k_neigh_index == candidate)[0]
            candidate_k_reciprocal_index = candidate_forward_k_neigh_index[fi_candidate]
            if len(np.intersect1d(candidate_k_reciprocal_index, k_reciprocal_index)) > 2 / 3 * len(
                    candidate_k_reciprocal_index):
                k_reciprocal_expansion_index = np.append(k_reciprocal_expansion_index, candidate_k_reciprocal_index)

        k_reciprocal_expansion_index = np.unique(k_reciprocal_expansion_index)
        # Gaussian-kernel weights over the expanded reciprocal set, row-normalized.
        weight = np.exp(-original_dist[i, k_reciprocal_expansion_index])
        V[i, k_reciprocal_expansion_index] = weight / np.sum(weight)
    original_dist = original_dist[:query_num, ]
    if k2 != 1:
        # Local query expansion: average each row's weights over its k2 neighbors.
        V_qe = np.zeros_like(V, dtype=np.float16)
        for i in range(all_num):
            V_qe[i, :] = np.mean(V[initial_rank[i, :k2], :], axis=0)
        V = V_qe
        del V_qe
    del initial_rank
    # Inverted index: for each column, the rows carrying non-zero weight.
    invIndex = []
    for i in range(gallery_num):
        invIndex.append(np.where(V[:, i] != 0)[0])

    jaccard_dist = np.zeros_like(original_dist, dtype=np.float16)

    for i in range(query_num):
        temp_min = np.zeros(shape=[1, gallery_num], dtype=np.float16)
        indNonZero = np.where(V[i, :] != 0)[0]
        indImages = [invIndex[ind] for ind in indNonZero]
        for j in range(len(indNonZero)):
            # Accumulate the fuzzy-set intersection min(V_i, V_j).
            temp_min[0, indImages[j]] = temp_min[0, indImages[j]] + np.minimum(V[i, indNonZero[j]],
                                                                              V[indImages[j], indNonZero[j]])
        jaccard_dist[i] = 1 - temp_min / (2 - temp_min)

    # Blend Jaccard and original distances; lambda_value weights the original.
    final_dist = jaccard_dist * (1 - lambda_value) + original_dist * lambda_value
    del original_dist
    del V
    del jaccard_dist
    final_dist = final_dist[:query_num, query_num:]
    return final_dist
156,523 | import logging
import os
import sys
def setup_logger(name, save_dir, distributed_rank):
    """Create a DEBUG-level logger writing to stdout and optionally a file.

    Non-master processes (``distributed_rank > 0``) get a bare logger with
    no handlers so they stay silent.  When ``save_dir`` is truthy, a
    ``log.txt`` file handler is added there as well.
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    if distributed_rank > 0:
        # Worker processes do not emit log output.
        return logger

    fmt = logging.Formatter("%(asctime)s %(name)s %(levelname)s: %(message)s")

    console = logging.StreamHandler(stream=sys.stdout)
    console.setLevel(logging.DEBUG)
    console.setFormatter(fmt)
    logger.addHandler(console)

    if save_dir:
        file_handler = logging.FileHandler(os.path.join(save_dir, "log.txt"), mode='w')
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(fmt)
        logger.addHandler(file_handler)

    return logger
156,524 | import errno
import json
import os
import os.path as osp
def check_isfile(path):
    """Return True if ``path`` is a regular file, warning (not raising) otherwise."""
    exists = osp.isfile(path)
    if not exists:
        # Deliberately non-fatal: callers treat a missing file as skippable.
        print("=> Warning: no file found at '{}' (ignored)".format(path))
    return exists
156,525 | import errno
import json
import os
import os.path as osp
def read_json(fpath):
    """Load and return the JSON document stored at ``fpath``."""
    with open(fpath, 'r') as fh:
        return json.load(fh)
156,526 | import errno
import json
import os
import os.path as osp
def mkdir_if_missing(directory):
def write_json(obj, fpath):
    """Serialize ``obj`` as pretty-printed JSON at ``fpath``, creating parent dirs."""
    mkdir_if_missing(osp.dirname(fpath))
    with open(fpath, 'w') as fh:
        json.dump(obj, fh, indent=4, separators=(',', ': '))
156,527 | from pathlib import Path
from colorama import Style, Fore
import sys
import threading
import webbrowser
from .errors import _err_msg, ArchiveNotFoundException
from .channel import Channel, DownloadConfig
from .viewer import viewer
HELP = f"yark [options]\n\n YouTube archiving made simple.\n\nOptions:\n new [name] [url] Creates new archive with name and channel url\n refresh [name] [args?] Refreshes/downloads archive with optional config\n view [name?] Launches offline archive viewer website\n report [name] Provides a report on the most interesting changes\n\nExample:\n $ yark new owez https://www.youtube.com/channel/UCSMdm6bUYIBN0KfS2CVuEPA\n $ yark refresh owez\n $ yark view owez"
def _err_archive_not_found():
    """Print an archive-not-found error for the user and exit with status 1."""
    # Grammar fix in the user-facing message: "it's" -> "its".
    _err_msg("Archive doesn't exist, please make sure you typed its name correctly!")
    sys.exit(1)
def _err_no_help():
    """Prints out help message and exits, displaying a 'no additional help' message"""
    print(HELP)
    print("\nThere's no additional help for this command")
    # Exit code 0: asking for help is not an error.
    sys.exit(0)
class ArchiveNotFoundException(Exception):
    """Raised when a named archive can't be located (most likely a mistyped name)."""

    def __init__(self, *args: object) -> None:
        super().__init__(*args)
def _err_msg(msg: str, report_msg: bool = False):
    """Print ``msg`` to stderr in bright red, optionally appending a bug-report hint."""
    if report_msg:
        msg = f"{msg}\nPlease file a bug report if you think this is a problem with Yark!"
    # colorama codes wrap the message; reset style and color afterwards.
    print(Fore.RED + Style.BRIGHT + msg + Style.NORMAL + Fore.RESET, file=sys.stderr)
class DownloadConfig:
    """User-tunable limits and switches for a refresh/download run."""

    max_videos: Optional[int]
    max_livestreams: Optional[int]
    max_shorts: Optional[int]
    skip_download: bool
    skip_metadata: bool
    format: Optional[str]

    def __init__(self) -> None:
        # Everything defaults to "no limit / do everything".
        self.max_videos = None
        self.max_livestreams = None
        self.max_shorts = None
        self.skip_download = False
        self.skip_metadata = False
        self.format = None

    def submit(self):
        """Submits configuration, this has the effect of normalising maximums to 0 properly"""
        # If any maximum was given, the unspecified ones collapse to 0.
        if not (self.max_videos is None
                and self.max_livestreams is None
                and self.max_shorts is None):
            if self.max_videos is None:
                self.max_videos = 0
            if self.max_livestreams is None:
                self.max_livestreams = 0
            if self.max_shorts is None:
                self.max_shorts = 0

            # All-zero maximums are equivalent to skipping downloads entirely.
            if self.max_videos == 0 and self.max_livestreams == 0 and self.max_shorts == 0:
                print(
                    Fore.YELLOW
                    + "Using the skip downloads option is recommended over setting maximums to 0"
                    + Fore.RESET
                )
                self.skip_download = True
class Channel:
    """An archived YouTube channel: its known videos/livestreams/shorts plus
    the on-disk archive (`yark.json`, `videos/`, `thumbnails/`) backing them."""

    # Root directory of the archive on disk
    path: Path
    # Archive schema version; must equal ARCHIVE_COMPAT to load without migration
    version: int
    # YouTube channel URL this archive tracks
    url: str
    # Known regular uploads
    videos: list[Video]
    # Known livestream VODs
    livestreams: list[Video]
    # Known shorts
    shorts: list[Video]
    # Collects added/deleted/changed videos for end-of-run reporting
    reporter: Reporter

    # NOTE(review): `new`, `_new_empty`, `load` and `_from_dict` are factory
    # methods taking no `self` but lack @staticmethod decorators; they work
    # only when invoked on the class itself (e.g. `Channel.new(...)`)
    def new(path: Path, url: str) -> Channel:
        """Creates a new channel"""
        # Details
        print("Creating new channel..")
        channel = Channel()
        channel.path = Path(path)
        channel.version = ARCHIVE_COMPAT
        channel.url = url
        channel.videos = []
        channel.livestreams = []
        channel.shorts = []
        channel.reporter = Reporter(channel)

        # Commit and return
        channel.commit()
        return channel

    def _new_empty() -> Channel:
        """Creates a placeholder channel pointing at a fixed demo URL (testing helper)"""
        return Channel.new(
            Path("pretend"), "https://www.youtube.com/channel/UCSMdm6bUYIBN0KfS2CVuEPA"
        )

    def load(path: Path) -> Channel:
        """Loads existing channel from path; raises `ArchiveNotFoundException` if absent"""
        # Check existence
        path = Path(path)
        channel_name = path.name
        print(f"Loading {channel_name} channel..")
        if not path.exists():
            raise ArchiveNotFoundException("Archive doesn't exist")

        # Load config
        encoded = json.load(open(path / "yark.json", "r"))

        # Check version before fully decoding and exit if wrong
        archive_version = encoded["version"]
        if archive_version != ARCHIVE_COMPAT:
            encoded = _migrate_archive(
                archive_version, ARCHIVE_COMPAT, encoded, channel_name
            )

        # Decode and return
        return Channel._from_dict(encoded, path)

    def metadata(self):
        """Queries YouTube for all channel metadata to refresh known videos"""
        # Print loading progress at the start without loading indicator so theres always a print
        msg = "Downloading metadata.."
        print(msg, end="\r")

        # Download metadata and give the user a spinner bar
        with ThreadPoolExecutor() as ex:
            # Make future for downloading metadata
            future = ex.submit(self._download_metadata)

            # Start spinning
            with PieSpinner(f"{msg} ") as bar:
                # Don't show bar for 2 seconds but check if future is done
                no_bar_time = time.time() + 2
                while time.time() < no_bar_time:
                    if future.done():
                        break
                    time.sleep(0.25)

                # Show loading spinner
                while not future.done():
                    bar.next()
                    time.sleep(0.075)

        # Get result from thread now that it's finished; re-raises any worker exception
        res = future.result()

        # Uncomment for saving big dumps for testing
        # with open(self.path / "dump.json", "w+") as file:
        #     json.dump(res, file)

        # Uncomment for loading big dumps for testing
        # res = json.load(open(self.path / "dump.json", "r"))

        # Parse downloaded metadata
        self._parse_metadata(res)

    def _download_metadata(self) -> dict[str, Any]:
        """Downloads metadata dict and returns for further parsing"""
        # Construct downloader
        settings = {
            # Centralized logging system; makes output fully quiet
            "logger": VideoLogger(),
            # Skip downloading pending livestreams (#60 <https://github.com/Owez/yark/issues/60>)
            "ignore_no_formats_error": True,
            # Concurrent fragment downloading for increased resilience (#109 <https://github.com/Owez/yark/issues/109>)
            "concurrent_fragment_downloads": 8,
        }

        # Get response and snip it
        # NOTE(review): if all three attempts fail this falls through and
        # implicitly returns None despite the declared return type — confirm
        with YoutubeDL(settings) as ydl:
            for i in range(3):
                try:
                    res: dict[str, Any] = ydl.extract_info(self.url, download=False)
                    return res
                except Exception as exception:
                    # Report error
                    retrying = i != 2
                    _err_dl("metadata", exception, retrying)

                    # Print retrying message
                    if retrying:
                        print(
                            Style.DIM
                            + f" • Retrying metadata download.."
                            + Style.RESET_ALL
                        )  # TODO: compat with loading bar

    def _parse_metadata(self, res: dict[str, Any]):
        """Parses entirety of downloaded metadata"""
        # Normalize into types of videos
        videos = []
        livestreams = []
        shorts = []
        if "entries" not in res["entries"][0]:
            # Videos only
            videos = res["entries"]
        else:
            # Videos and at least one other (livestream/shorts)
            for entry in res["entries"]:
                # The tab title (e.g. "Name - Videos") identifies the category
                kind = entry["title"].split(" - ")[-1].lower()
                if kind == "videos":
                    videos = entry["entries"]
                elif kind == "live":
                    livestreams = entry["entries"]
                elif kind == "shorts":
                    shorts = entry["entries"]
                else:
                    _err_msg(f"Unknown video kind '{kind}' found", True)

        # Parse metadata
        self._parse_metadata_videos("video", videos, self.videos)
        self._parse_metadata_videos("livestream", livestreams, self.livestreams)
        self._parse_metadata_videos("shorts", shorts, self.shorts)

        # Go through each and report deleted
        self._report_deleted(self.videos)
        self._report_deleted(self.livestreams)
        self._report_deleted(self.shorts)

    def download(self, config: DownloadConfig):
        """Downloads all videos which haven't already been downloaded"""
        # Clean out old part files
        self._clean_parts()

        # Create settings for the downloader
        settings = {
            # Set the output path
            "outtmpl": f"{self.path}/videos/%(id)s.%(ext)s",
            # Centralized logger hook for ignoring all stdout
            "logger": VideoLogger(),
            # Logger hook for download progress
            "progress_hooks": [VideoLogger.downloading],
        }
        if config.format is not None:
            settings["format"] = config.format

        # Attach to the downloader
        with YoutubeDL(settings) as ydl:
            # Retry downloading 5 times in total for all videos
            for i in range(5):
                # Try to curate a list and download videos on it
                try:
                    # Curate list of non-downloaded videos
                    not_downloaded = self._curate(config)

                    # Stop if there's nothing to download
                    if len(not_downloaded) == 0:
                        break

                    # Print curated if this is the first time
                    if i == 0:
                        fmt_num = (
                            "a new video"
                            if len(not_downloaded) == 1
                            else f"{len(not_downloaded)} new videos"
                        )
                        print(f"Downloading {fmt_num}..")

                    # Continuously try to download after private/deleted videos are found
                    # This block gives the downloader all the curated videos and skips/reports deleted videos by filtering their exceptions
                    while True:
                        # Download from curated list then exit the optimistic loop
                        try:
                            urls = [video.url() for video in not_downloaded]
                            ydl.download(urls)
                            break

                        # Special handling for private/deleted videos which are archived, if not we raise again
                        except DownloadError as exception:
                            # Video is privated or deleted
                            if (
                                "Private video" in exception.msg
                                or "This video has been removed by the uploader"
                                in exception.msg
                            ):
                                # Skip video from curated and get it as a return
                                not_downloaded, video = _skip_video(
                                    not_downloaded, "deleted"
                                )

                                # If this is a new occurrence then set it & report
                                # This will only happen if its deleted after getting metadata, like in a dry run
                                if video.deleted.current() == False:
                                    self.reporter.deleted.append(video)
                                    video.deleted.update(None, True)

                            # User hasn't got ffmpeg installed and youtube hasn't got format 22
                            # NOTE: see #55 <https://github.com/Owez/yark/issues/55> to learn more
                            # NOTE: sadly yt-dlp doesn't let us access yt_dlp.utils.ContentTooShortError so we check msg
                            elif " bytes, expected " in exception.msg:
                                # Skip video from curated
                                not_downloaded, _ = _skip_video(
                                    not_downloaded,
                                    "no format found; please download ffmpeg!",
                                    True,
                                )

                            # Nevermind, normal exception
                            else:
                                raise exception

                    # Stop if we've got them all
                    break

                # Report error and retry/stop
                except Exception as exception:
                    # Get around carriage return
                    if i == 0:
                        print()

                    # Report error
                    _err_dl("videos", exception, i != 4)

    def search(self, id: str):
        """Searches channel for a video with the corresponding `id` and returns"""
        # Search
        # NOTE(review): only `self.videos` is searched; livestreams/shorts are
        # never considered — confirm this is intentional
        for video in self.videos:
            if video.id == id:
                return video

        # Raise exception if it's not found
        raise VideoNotFoundException(f"Couldn't find {id} inside archive")

    def _curate(self, config: DownloadConfig) -> list[Video]:
        """Curate videos which aren't downloaded and return their urls"""

        def curate_list(videos: list[Video], maximum: Optional[int]) -> list[Video]:
            """Curates the videos inside of the provided `videos` list to it's local maximum"""
            # Cut available videos to maximum if present for deterministic getting
            if maximum is not None:
                # Fix the maximum to the length so we don't try to get more than there is
                # NOTE(review): `len(videos) - 1` caps the window to one fewer than
                # available — looks like an off-by-one; confirm intent
                fixed_maximum = min(max(len(videos) - 1, 0), maximum)

                # Set the available videos to this fixed maximum
                new_videos = []
                for ind in range(fixed_maximum):
                    new_videos.append(videos[ind])
                videos = new_videos

            # Find undownloaded videos in available list
            not_downloaded = []
            for video in videos:
                if not video.downloaded():
                    not_downloaded.append(video)

            # Return
            return not_downloaded

        # Curate
        not_downloaded = []
        not_downloaded.extend(curate_list(self.videos, config.max_videos))
        not_downloaded.extend(curate_list(self.livestreams, config.max_livestreams))
        not_downloaded.extend(curate_list(self.shorts, config.max_shorts))

        # Return
        return not_downloaded

    def commit(self):
        """Commits (saves) archive to path; do this once you've finished all of your transactions"""
        # Save backup
        self._backup()

        # Directories
        print(f"Committing {self} to file..")
        paths = [self.path, self.path / "thumbnails", self.path / "videos"]
        for path in paths:
            if not path.exists():
                path.mkdir()

        # Config
        with open(self.path / "yark.json", "w+") as file:
            json.dump(self._to_dict(), file)

    def _parse_metadata_videos(self, kind: str, i: list, bucket: list):
        """Parses metadata for a category of video into it's bucket and tells user what's happening"""
        # Print at the start without loading indicator so theres always a print
        msg = f"Parsing {kind} metadata.."
        print(msg, end="\r")

        # Start computing and show loading spinner
        with ThreadPoolExecutor() as ex:
            # Make future for computation of the video list
            # NOTE(review): `future.result()` is never called here, so any
            # exception raised inside the worker is silently dropped — confirm
            future = ex.submit(self._parse_metadata_videos_comp, i, bucket)

            # Start spinning
            with PieSpinner(f"{msg} ") as bar:
                # Don't show bar for 2 seconds but check if future is done
                no_bar_time = time.time() + 2
                while time.time() < no_bar_time:
                    if future.done():
                        return
                    time.sleep(0.25)
                # Spin until future is done
                while not future.done():
                    time.sleep(0.075)
                    bar.next()

    def _parse_metadata_videos_comp(self, i: list, bucket: list):
        """Computes the actual parsing for `_parse_metadata_videos` without outputting what's happening"""
        for entry in i:
            # Skip video if there's no formats available; happens with upcoming videos/livestreams
            if "formats" not in entry or len(entry["formats"]) == 0:
                continue

            # Updated intra-loop marker
            updated = False

            # Update video if it exists
            for video in bucket:
                if video.id == entry["id"]:
                    video.update(entry)
                    updated = True
                    break

            # Add new video if not
            if not updated:
                video = Video.new(entry, self)
                bucket.append(video)
                self.reporter.added.append(video)

        # Sort videos by newest
        bucket.sort(reverse=True)

    def _report_deleted(self, videos: list):
        """Goes through a video category to report & save those which where not marked in the metadata as deleted if they're not already known to be deleted"""
        for video in videos:
            if video.deleted.current() == False and not video.known_not_deleted:
                self.reporter.deleted.append(video)
                video.deleted.update(None, True)

    def _clean_parts(self):
        """Cleans old temporary `.part` files which where stopped during download if present"""
        # Make a bucket for found files
        deletion_bucket: list[Path] = []

        # Scan through and find part files
        videos = self.path / "videos"
        for file in videos.iterdir():
            if file.suffix == ".part" or file.suffix == ".ytdl":
                deletion_bucket.append(file)

        # Print and delete if there are part files present
        if len(deletion_bucket) != 0:
            print("Cleaning out previous temporary files..")
            for file in deletion_bucket:
                file.unlink()

    def _backup(self):
        """Creates a backup of the existing `yark.json` file in path as `yark.bak` with added comments"""
        # Get current archive path
        ARCHIVE_PATH = self.path / "yark.json"

        # Skip backing up if the archive doesn't exist
        if not ARCHIVE_PATH.exists():
            return

        # Open original archive to copy
        # NOTE(review): re-derives the same path ARCHIVE_PATH already holds
        with open(self.path / "yark.json", "r") as file_archive:
            # Add comment information to backup file
            save = f"// Backup of a Yark archive, dated {datetime.utcnow().isoformat()}\n// Remove these comments and rename to 'yark.json' to restore\n{file_archive.read()}"

            # Save new information into a new backup
            with open(self.path / "yark.bak", "w+") as file_backup:
                file_backup.write(save)

    def _from_dict(encoded: dict, path: Path) -> Channel:
        """Decodes archive which is being loaded back up"""
        channel = Channel()
        channel.path = path
        channel.version = encoded["version"]
        channel.url = encoded["url"]
        channel.reporter = Reporter(channel)
        channel.videos = [
            Video._from_dict(video, channel) for video in encoded["videos"]
        ]
        channel.livestreams = [
            Video._from_dict(video, channel) for video in encoded["livestreams"]
        ]
        channel.shorts = [
            Video._from_dict(video, channel) for video in encoded["shorts"]
        ]
        return channel

    def _to_dict(self) -> dict:
        """Converts channel data to a dictionary to commit"""
        return {
            "version": self.version,
            "url": self.url,
            "videos": [video._to_dict() for video in self.videos],
            "livestreams": [video._to_dict() for video in self.livestreams],
            "shorts": [video._to_dict() for video in self.shorts],
        }

    def __repr__(self) -> str:
        # The archive's directory name doubles as the channel's display name
        return self.path.name
def viewer() -> Flask:
    """Builds the offline viewer Flask application; call `app.run()` on the result to serve it."""
    app = Flask(__name__)

    # Silence werkzeug's per-request logging, keeping only errors
    logging.getLogger("werkzeug").setLevel(logging.ERROR)

    # Attach all page routes
    app.register_blueprint(routes)

    # NOTE(review): this filter is defined but never registered with Jinja —
    # presumably it was meant to be a template filter; confirm against templates
    def _jinja2_filter_timestamp(timestamp, fmt=None):
        """Formats a timestamp value for templates"""
        return _encode_timestamp(timestamp)

    return app
The provided code snippet includes necessary dependencies for implementing the `_cli` function. Write a Python function `def _cli()` to solve the following problem:
Command-line-interface launcher
Here is the function:
def _cli():
    """Command-line-interface launcher

    Parses `sys.argv` and dispatches to the matching subcommand
    (help/version/new/refresh/view/report), exiting the process with
    status 1 on any usage error.
    """
    # Get arguments
    args = sys.argv[1:]

    # No arguments
    if len(args) == 0:
        print(HELP, file=sys.stderr)
        _err_msg("\nError: No arguments provided")
        sys.exit(1)

    # Help
    if args[0] in ["help", "--help", "-h"]:
        print(HELP)

    # Version
    # TODO: automatically track this
    elif args[0] in ["-v", "-ver", "--version", "--v"]:
        print("1.2.9")

    # Create new
    elif args[0] == "new":
        # More help
        if len(args) == 2 and args[1] == "--help":
            _err_no_help()

        # Bad arguments
        if len(args) < 3:
            _err_msg("Please provide an archive name and the channel url")
            sys.exit(1)

        # Create channel
        Channel.new(Path(args[1]), args[2])

    # Refresh
    elif args[0] == "refresh":
        # More help
        if len(args) == 2 and args[1] == "--help":
            # NOTE: if these get more complex, separate into something like "basic config" and "advanced config"
            print(
                f"yark refresh [name] [args?]\n\n Refreshes/downloads archive with optional configuration.\n If a maximum is set, unset categories won't be downloaded\n\nArguments:\n --videos=[max] Maximum recent videos to download\n --shorts=[max] Maximum recent shorts to download\n --livestreams=[max] Maximum recent livestreams to download\n --skip-metadata Skips downloading metadata\n --skip-download Skips downloading content\n --format=[str] Downloads using custom yt-dlp format for advanced users\n\n Example:\n $ yark refresh demo\n $ yark refresh demo --videos=5\n $ yark refresh demo --shorts=2 --livestreams=25\n $ yark refresh demo --skip-download"
            )
            sys.exit(0)

        # Bad arguments
        if len(args) < 2:
            _err_msg("Please provide the archive name")
            sys.exit(1)

        # Figure out configuration
        config = DownloadConfig()
        if len(args) > 2:

            def parse_value(config_arg: str) -> str:
                """Returns the text after the first `=` of a `--key=value` argument"""
                return config_arg.split("=")[1]

            def parse_maximum_int(config_arg: str) -> int:
                """Tries to parse a maximum integer input"""
                maximum = parse_value(config_arg)
                try:
                    return int(maximum)
                # BUGFIX: was a bare `except:` which also swallowed KeyboardInterrupt
                except ValueError:
                    print(HELP, file=sys.stderr)
                    _err_msg(
                        f"\nError: The value '{maximum}' isn't a valid maximum number"
                    )
                    sys.exit(1)

            # Go through each configuration argument
            for config_arg in args[2:]:
                # Video maximum
                if config_arg.startswith("--videos="):
                    config.max_videos = parse_maximum_int(config_arg)

                # Livestream maximum
                elif config_arg.startswith("--livestreams="):
                    config.max_livestreams = parse_maximum_int(config_arg)

                # Shorts maximum
                elif config_arg.startswith("--shorts="):
                    config.max_shorts = parse_maximum_int(config_arg)

                # No metadata
                elif config_arg == "--skip-metadata":
                    config.skip_metadata = True

                # No downloading; functionally equivalent to all maximums being 0 but it skips entirely
                elif config_arg == "--skip-download":
                    config.skip_download = True

                # Custom yt-dlp format
                elif config_arg.startswith("--format="):
                    config.format = parse_value(config_arg)

                # Unknown argument
                else:
                    print(HELP, file=sys.stderr)
                    _err_msg(
                        f"\nError: Unknown configuration '{config_arg}' provided for archive refresh"
                    )
                    sys.exit(1)

        # Submit config settings
        config.submit()

        # Refresh channel using config context
        try:
            channel = Channel.load(args[1])
            if config.skip_metadata:
                print("Skipping metadata download..")
            else:
                channel.metadata()
            if config.skip_download:
                print("Skipping videos/livestreams/shorts download..")
            else:
                channel.download(config)
            channel.commit()
            channel.reporter.print()
        except ArchiveNotFoundException:
            _err_archive_not_found()

    # View
    elif args[0] == "view":
        # More help
        if len(args) == 2 and args[1] == "--help":
            print(
                f"yark view [name] [args?]\n\n Launches offline archive viewer website.\n\nArguments:\n --host [str] Custom uri to act as host from\n --port [int] Custom port number instead of 7667\n\n Example:\n $ yark view foobar\n $ yark view foobar --port=80\n $ yark view foobar --port=1234 --host=0.0.0.0"
            )
            sys.exit(0)

        # Basis for custom host/port configs
        host = None
        port = 7667

        # Go through each configuration argument
        for config_arg in args[2:]:
            # Host configuration
            if config_arg.startswith("--host="):
                host = config_arg[7:]

            # Port configuration
            elif config_arg.startswith("--port="):
                raw_port = config_arg[7:]
                if raw_port.strip() == "":
                    print(
                        "No port number provided for port argument",
                        file=sys.stderr,
                    )
                    sys.exit(1)
                try:
                    port = int(raw_port)
                # BUGFIX: was a bare `except:` which also swallowed KeyboardInterrupt
                except ValueError:
                    print(
                        f"Invalid port number '{raw_port}' provided",
                        file=sys.stderr,
                    )
                    sys.exit(1)

        # BUGFIX: the browser previously always opened http://127.0.0.1:7667
        # even when --host/--port were supplied; build the url from the real
        # config (0.0.0.0 is a bind address, so browse via loopback for it)
        browser_host = "127.0.0.1" if host in (None, "0.0.0.0") else host
        base_url = f"http://{browser_host}:{port}"

        def launch():
            """Launches viewer"""
            app = viewer()
            threading.Thread(target=lambda: app.run(host=host, port=port)).run()

        # Start on channel name
        if len(args) > 1:
            # Get name
            channel = args[1]

            # Jank archive check
            if not Path(channel).exists():
                _err_archive_not_found()

            # Launch and start browser
            print(f"Starting viewer for {channel}..")
            webbrowser.open(f"{base_url}/channel/{channel}/videos")
            launch()

        # Start on channel finder
        else:
            print("Starting viewer..")
            webbrowser.open(f"{base_url}/")
            launch()

    # Report
    elif args[0] == "report":
        # Bad arguments
        if len(args) < 2:
            _err_msg("Please provide the archive name")
            sys.exit(1)

        channel = Channel.load(Path(args[1]))
        channel.reporter.interesting_changes()

    # Unknown
    else:
        print(HELP, file=sys.stderr)
        _err_msg(f"\nError: Unknown command '{args[0]}' provided!", True)
        sys.exit(1)
156,528 |
The provided code snippet includes necessary dependencies for implementing the `_truncate_text` function. Write a Python function `def _truncate_text(text: str, to: int = 31) -> str` to solve the following problem:
Truncates the input `text` to at most `to` characters (default 31), appending an ellipsis at the end if it overflows
Here is the function:
def _truncate_text(text: str, to: int = 31) -> str:
"""Truncates inputted `text` to ~32 length, adding ellipsis at the end if overflowing"""
if len(text) > to:
text = text[: to - 2].strip() + ".."
return text.ljust(to) | Truncates inputted `text` to ~32 length, adding ellipsis at the end if overflowing |
156,529 | from __future__ import annotations
from datetime import datetime
from fnmatch import fnmatch
from pathlib import Path
from uuid import uuid4
import requests
import hashlib
from .errors import NoteNotFoundException
from .utils import _truncate_text
from typing import TYPE_CHECKING, Any, Optional
The provided code snippet includes necessary dependencies for implementing the `_decode_date_yt` function. Write a Python function `def _decode_date_yt(input: str) -> datetime` to solve the following problem:
Decodes date from YouTube like `20180915` for example
Here is the function:
def _decode_date_yt(input: str) -> datetime:
"""Decodes date from YouTube like `20180915` for example"""
return datetime.strptime(input, "%Y%m%d") | Decodes date from YouTube like `20180915` for example |
156,530 | from __future__ import annotations
from datetime import datetime
from fnmatch import fnmatch
from pathlib import Path
from uuid import uuid4
import requests
import hashlib
from .errors import NoteNotFoundException
from .utils import _truncate_text
from typing import TYPE_CHECKING, Any, Optional
The provided code snippet includes necessary dependencies for implementing the `_encode_date_human` function. Write a Python function `def _encode_date_human(input: datetime) -> str` to solve the following problem:
Encodes an `input` date into a standardized human-readable format
Here is the function:
def _encode_date_human(input: datetime) -> str:
"""Encodes an `input` date into a standardized human-readable format"""
return input.strftime("%d %b %Y") | Encodes an `input` date into a standardized human-readable format |
156,531 | from __future__ import annotations
from datetime import datetime
from fnmatch import fnmatch
from pathlib import Path
from uuid import uuid4
import requests
import hashlib
from .errors import NoteNotFoundException
from .utils import _truncate_text
from typing import TYPE_CHECKING, Any, Optional
The provided code snippet includes necessary dependencies for implementing the `_magnitude` function. Write a Python function `def _magnitude(count: Optional[int] = None) -> str` to solve the following problem:
Formats an integer into a short, human-readable order-of-magnitude string (e.g. 1.5k, 2.3m, 1.1b), or "?" when the count is unknown
Here is the function:
def _magnitude(count: Optional[int] = None) -> str:
"""Displays an integer as a sort of ordinal order of magnitude"""
if count is None:
return "?"
elif count < 1000:
return str(count)
elif count < 1000000:
value = "{:.1f}".format(float(count) / 1000.0)
return value + "k"
elif count < 1000000000:
value = "{:.1f}".format(float(count) / 1000000.0)
return value + "m"
else:
value = "{:.1f}".format(float(count) / 1000000000.0)
return value + "b" | Displays an integer as a sort of ordinal order of magnitude |
156,532 | from colorama import Fore, Style
import datetime
from .video import Video, Element
from .utils import _truncate_text
from typing import TYPE_CHECKING, Optional
The provided code snippet includes necessary dependencies for implementing the `_watermark` function. Write a Python function `def _watermark() -> str` to solve the following problem:
Returns a new watermark with a Yark timestamp
Here is the function:
def _watermark() -> str:
    """Returns a new watermark string stamped with the current UTC time"""
    stamp = datetime.datetime.utcnow().isoformat()
    return Style.RESET_ALL + f"Yark – {stamp}"
156,533 | import json
import os
from flask import (
Flask,
render_template,
request,
redirect,
url_for,
send_from_directory,
Blueprint,
)
import logging
from .errors import (
ArchiveNotFoundException,
NoteNotFoundException,
VideoNotFoundException,
TimestampException,
)
from .channel import Channel
from .video import Note
The provided code snippet includes necessary dependencies for implementing the `index` function. Write a Python function `def index()` to solve the following problem:
Open channel for non-selected channel
Here is the function:
def index():
    """Open channel for non-selected channel"""
    if request.method == "POST":
        # Jump straight to the channel the user typed in
        return redirect(
            url_for("routes.channel", name=request.form["channel"], kind="videos")
        )
    if request.method == "GET":
        # Landing page, listing any previously-visited channels from the cookie
        visited = request.cookies.get("visited")
        if visited is not None:
            visited = json.loads(visited)
        error = request.args.get("error")
        return render_template("index.html", error=error, visited=visited)
156,534 | import json
import os
from flask import (
Flask,
render_template,
request,
redirect,
url_for,
send_from_directory,
Blueprint,
)
import logging
from .errors import (
ArchiveNotFoundException,
NoteNotFoundException,
VideoNotFoundException,
TimestampException,
)
from .channel import Channel
from .video import Note
The provided code snippet includes necessary dependencies for implementing the `channel_empty` function. Write a Python function `def channel_empty(name)` to solve the following problem:
Empty channel url, just redirect to videos by default
Here is the function:
def channel_empty(name):
    """Empty channel url, just redirect to videos by default"""
    target = url_for("routes.channel", name=name, kind="videos")
    return redirect(target)
156,535 | import json
import os
from flask import (
Flask,
render_template,
request,
redirect,
url_for,
send_from_directory,
Blueprint,
)
import logging
from .errors import (
ArchiveNotFoundException,
NoteNotFoundException,
VideoNotFoundException,
TimestampException,
)
from .channel import Channel
from .video import Note
def channel(name, kind):
    """Channel information"""
    # Reject unknown video categories up front
    if kind not in ("videos", "livestreams", "shorts"):
        return redirect(url_for("routes.index", error="Video kind not recognised"))
    try:
        loaded = Channel.load(name)
        downloaded_files = os.listdir(loaded.path / "videos")
        return render_template(
            "channel.html", title=name, channel=loaded, name=name, ldir=downloaded_files
        )
    except ArchiveNotFoundException:
        return redirect(
            url_for("routes.index", error="Couldn't open channel's archive")
        )
    except Exception as e:
        return redirect(url_for("routes.index", error=f"Internal server error:\n{e}"))
def _decode_timestamp(input: str) -> int:
"""Parses timestamp into seconds or raises `TimestampException`"""
# Check existence
input = input.strip()
if input == "":
raise TimestampException("No input provided")
# Split colons
splitted = input.split(":")
splitted.reverse()
if len(splitted) > 3:
raise TimestampException("Days and onwards aren't supported")
# Parse
secs = 0
try:
# Seconds
secs += int(splitted[0])
# Minutes
if len(splitted) > 1:
secs += int(splitted[1]) * 60
# Hours
if len(splitted) > 2:
secs += int(splitted[2]) * 60 * 60
except:
raise TimestampException("Only numbers are allowed in timestamps")
# Return
return secs
class ArchiveNotFoundException(Exception):
    """Archive couldn't be found, the name was probably incorrect"""

    # The no-op `__init__(self, *args)` passthrough was removed;
    # `Exception.__init__` already accepts and stores *args.
class VideoNotFoundException(Exception):
    """Video couldn't be found, the id was probably incorrect"""

    # The no-op `__init__(self, *args)` passthrough was removed;
    # `Exception.__init__` already accepts and stores *args.
class NoteNotFoundException(Exception):
    """Note couldn't be found, the id was probably incorrect"""

    # The no-op `__init__(self, *args)` passthrough was removed;
    # `Exception.__init__` already accepts and stores *args.
class TimestampException(Exception):
    """Invalid timestamp inputted for note"""

    # The no-op `__init__(self, *args)` passthrough was removed;
    # `Exception.__init__` already accepts and stores *args.
class Channel:
path: Path
version: int
url: str
videos: list[Video]
livestreams: list[Video]
shorts: list[Video]
reporter: Reporter
def new(path: Path, url: str) -> Channel:
"""Creates a new channel"""
# Details
print("Creating new channel..")
channel = Channel()
channel.path = Path(path)
channel.version = ARCHIVE_COMPAT
channel.url = url
channel.videos = []
channel.livestreams = []
channel.shorts = []
channel.reporter = Reporter(channel)
# Commit and return
channel.commit()
return channel
def _new_empty() -> Channel:
return Channel.new(
Path("pretend"), "https://www.youtube.com/channel/UCSMdm6bUYIBN0KfS2CVuEPA"
)
def load(path: Path) -> Channel:
"""Loads existing channel from path"""
# Check existence
path = Path(path)
channel_name = path.name
print(f"Loading {channel_name} channel..")
if not path.exists():
raise ArchiveNotFoundException("Archive doesn't exist")
# Load config
encoded = json.load(open(path / "yark.json", "r"))
# Check version before fully decoding and exit if wrong
archive_version = encoded["version"]
if archive_version != ARCHIVE_COMPAT:
encoded = _migrate_archive(
archive_version, ARCHIVE_COMPAT, encoded, channel_name
)
# Decode and return
return Channel._from_dict(encoded, path)
def metadata(self):
"""Queries YouTube for all channel metadata to refresh known videos"""
# Print loading progress at the start without loading indicator so theres always a print
msg = "Downloading metadata.."
print(msg, end="\r")
# Download metadata and give the user a spinner bar
with ThreadPoolExecutor() as ex:
# Make future for downloading metadata
future = ex.submit(self._download_metadata)
# Start spinning
with PieSpinner(f"{msg} ") as bar:
# Don't show bar for 2 seconds but check if future is done
no_bar_time = time.time() + 2
while time.time() < no_bar_time:
if future.done():
break
time.sleep(0.25)
# Show loading spinner
while not future.done():
bar.next()
time.sleep(0.075)
# Get result from thread now that it's finished
res = future.result()
# Uncomment for saving big dumps for testing
# with open(self.path / "dump.json", "w+") as file:
# json.dump(res, file)
# Uncomment for loading big dumps for testing
# res = json.load(open(self.path / "dump.json", "r"))
# Parse downloaded metadata
self._parse_metadata(res)
def _download_metadata(self) -> dict[str, Any]:
"""Downloads metadata dict and returns for further parsing"""
# Construct downloader
settings = {
# Centralized logging system; makes output fully quiet
"logger": VideoLogger(),
# Skip downloading pending livestreams (#60 <https://github.com/Owez/yark/issues/60>)
"ignore_no_formats_error": True,
# Concurrent fragment downloading for increased resilience (#109 <https://github.com/Owez/yark/issues/109>)
"concurrent_fragment_downloads": 8,
}
# Get response and snip it
with YoutubeDL(settings) as ydl:
for i in range(3):
try:
res: dict[str, Any] = ydl.extract_info(self.url, download=False)
return res
except Exception as exception:
# Report error
retrying = i != 2
_err_dl("metadata", exception, retrying)
# Print retrying message
if retrying:
print(
Style.DIM
+ f" • Retrying metadata download.."
+ Style.RESET_ALL
) # TODO: compat with loading bar
def _parse_metadata(self, res: dict[str, Any]):
"""Parses entirety of downloaded metadata"""
# Normalize into types of videos
videos = []
livestreams = []
shorts = []
if "entries" not in res["entries"][0]:
# Videos only
videos = res["entries"]
else:
# Videos and at least one other (livestream/shorts)
for entry in res["entries"]:
kind = entry["title"].split(" - ")[-1].lower()
if kind == "videos":
videos = entry["entries"]
elif kind == "live":
livestreams = entry["entries"]
elif kind == "shorts":
shorts = entry["entries"]
else:
_err_msg(f"Unknown video kind '{kind}' found", True)
# Parse metadata
self._parse_metadata_videos("video", videos, self.videos)
self._parse_metadata_videos("livestream", livestreams, self.livestreams)
self._parse_metadata_videos("shorts", shorts, self.shorts)
# Go through each and report deleted
self._report_deleted(self.videos)
self._report_deleted(self.livestreams)
self._report_deleted(self.shorts)
def download(self, config: DownloadConfig):
"""Downloads all videos which haven't already been downloaded"""
# Clean out old part files
self._clean_parts()
# Create settings for the downloader
settings = {
# Set the output path
"outtmpl": f"{self.path}/videos/%(id)s.%(ext)s",
# Centralized logger hook for ignoring all stdout
"logger": VideoLogger(),
# Logger hook for download progress
"progress_hooks": [VideoLogger.downloading],
}
if config.format is not None:
settings["format"] = config.format
# Attach to the downloader
with YoutubeDL(settings) as ydl:
# Retry downloading 5 times in total for all videos
for i in range(5):
# Try to curate a list and download videos on it
try:
# Curate list of non-downloaded videos
not_downloaded = self._curate(config)
# Stop if there's nothing to download
if len(not_downloaded) == 0:
break
# Print curated if this is the first time
if i == 0:
fmt_num = (
"a new video"
if len(not_downloaded) == 1
else f"{len(not_downloaded)} new videos"
)
print(f"Downloading {fmt_num}..")
# Continuously try to download after private/deleted videos are found
# This block gives the downloader all the curated videos and skips/reports deleted videos by filtering their exceptions
while True:
# Download from curated list then exit the optimistic loop
try:
urls = [video.url() for video in not_downloaded]
ydl.download(urls)
break
# Special handling for private/deleted videos which are archived, if not we raise again
except DownloadError as exception:
# Video is privated or deleted
if (
"Private video" in exception.msg
or "This video has been removed by the uploader"
in exception.msg
):
# Skip video from curated and get it as a return
not_downloaded, video = _skip_video(
not_downloaded, "deleted"
)
# If this is a new occurrence then set it & report
# This will only happen if its deleted after getting metadata, like in a dry run
if video.deleted.current() == False:
self.reporter.deleted.append(video)
video.deleted.update(None, True)
# User hasn't got ffmpeg installed and youtube hasn't got format 22
# NOTE: see #55 <https://github.com/Owez/yark/issues/55> to learn more
# NOTE: sadly yt-dlp doesn't let us access yt_dlp.utils.ContentTooShortError so we check msg
elif " bytes, expected " in exception.msg:
# Skip video from curated
not_downloaded, _ = _skip_video(
not_downloaded,
"no format found; please download ffmpeg!",
True,
)
# Nevermind, normal exception
else:
raise exception
# Stop if we've got them all
break
# Report error and retry/stop
except Exception as exception:
# Get around carriage return
if i == 0:
print()
# Report error
_err_dl("videos", exception, i != 4)
def search(self, id: str):
"""Searches channel for a video with the corresponding `id` and returns"""
# Search
for video in self.videos:
if video.id == id:
return video
# Raise exception if it's not found
raise VideoNotFoundException(f"Couldn't find {id} inside archive")
def _curate(self, config: DownloadConfig) -> list[Video]:
"""Curate videos which aren't downloaded and return their urls"""
def curate_list(videos: list[Video], maximum: Optional[int]) -> list[Video]:
"""Curates the videos inside of the provided `videos` list to it's local maximum"""
# Cut available videos to maximum if present for deterministic getting
if maximum is not None:
# Fix the maximum to the length so we don't try to get more than there is
fixed_maximum = min(max(len(videos) - 1, 0), maximum)
# Set the available videos to this fixed maximum
new_videos = []
for ind in range(fixed_maximum):
new_videos.append(videos[ind])
videos = new_videos
# Find undownloaded videos in available list
not_downloaded = []
for video in videos:
if not video.downloaded():
not_downloaded.append(video)
# Return
return not_downloaded
# Curate
not_downloaded = []
not_downloaded.extend(curate_list(self.videos, config.max_videos))
not_downloaded.extend(curate_list(self.livestreams, config.max_livestreams))
not_downloaded.extend(curate_list(self.shorts, config.max_shorts))
# Return
return not_downloaded
def commit(self):
"""Commits (saves) archive to path; do this once you've finished all of your transactions"""
# Save backup
self._backup()
# Directories
print(f"Committing {self} to file..")
paths = [self.path, self.path / "thumbnails", self.path / "videos"]
for path in paths:
if not path.exists():
path.mkdir()
# Config
with open(self.path / "yark.json", "w+") as file:
json.dump(self._to_dict(), file)
    def _parse_metadata_videos(self, kind: str, i: list, bucket: list):
        """Parses metadata for a category of video into its bucket whilst showing progress.

        `kind` is only used for the user-facing message; `i` is presumably the raw
        metadata entry list from yt-dlp (TODO confirm against caller) and `bucket`
        is the category list on this channel which gets mutated in place.
        """
        # Print at the start without loading indicator so there's always a print
        msg = f"Parsing {kind} metadata.."
        print(msg, end="\r")
        # Run the actual parsing on a worker thread so we can animate a spinner here
        with ThreadPoolExecutor() as ex:
            # Make future for computation of the video list
            future = ex.submit(self._parse_metadata_videos_comp, i, bucket)
            # Start spinning
            with PieSpinner(f"{msg} ") as bar:
                # Don't show bar for 2 seconds but check if future is done
                # (fast parses return early without ever animating the spinner)
                no_bar_time = time.time() + 2
                while time.time() < no_bar_time:
                    if future.done():
                        return
                    time.sleep(0.25)
                # Spin until future is done, ticking the animation each step
                while not future.done():
                    time.sleep(0.075)
                    bar.next()
    def _parse_metadata_videos_comp(self, i: list, bucket: list):
        """Computes the actual parsing for `_parse_metadata_videos` without outputting what's happening.

        Mutates `bucket` in place: existing videos are updated, unseen ones are
        appended (and reported as added), then the whole bucket is re-sorted.
        """
        for entry in i:
            # Skip video if there's no formats available; happens with upcoming videos/livestreams
            if "formats" not in entry or len(entry["formats"]) == 0:
                continue
            # Updated intra-loop marker
            updated = False
            # Update video if it exists (linear scan keyed on the YouTube id)
            for video in bucket:
                if video.id == entry["id"]:
                    video.update(entry)
                    updated = True
                    break
            # Add new video if not, and record it for the end-of-run report
            if not updated:
                video = Video.new(entry, self)
                bucket.append(video)
                self.reporter.added.append(video)
        # Sort videos by newest (Video.__lt__ compares upload dates)
        bucket.sort(reverse=True)
    def _report_deleted(self, videos: list):
        """Goes through a video category to report & save those which were not marked in the metadata as deleted if they're not already known to be deleted"""
        for video in videos:
            # NOTE: `== False` (not `not ...`) is load-bearing here — a freshly
            # migrated element may hold None, which must not count as "not deleted"
            if video.deleted.current() == False and not video.known_not_deleted:
                self.reporter.deleted.append(video)
                video.deleted.update(None, True)
def _clean_parts(self):
"""Cleans old temporary `.part` files which where stopped during download if present"""
# Make a bucket for found files
deletion_bucket: list[Path] = []
# Scan through and find part files
videos = self.path / "videos"
for file in videos.iterdir():
if file.suffix == ".part" or file.suffix == ".ytdl":
deletion_bucket.append(file)
# Print and delete if there are part files present
if len(deletion_bucket) != 0:
print("Cleaning out previous temporary files..")
for file in deletion_bucket:
file.unlink()
def _backup(self):
"""Creates a backup of the existing `yark.json` file in path as `yark.bak` with added comments"""
# Get current archive path
ARCHIVE_PATH = self.path / "yark.json"
# Skip backing up if the archive doesn't exist
if not ARCHIVE_PATH.exists():
return
# Open original archive to copy
with open(self.path / "yark.json", "r") as file_archive:
# Add comment information to backup file
save = f"// Backup of a Yark archive, dated {datetime.utcnow().isoformat()}\n// Remove these comments and rename to 'yark.json' to restore\n{file_archive.read()}"
# Save new information into a new backup
with open(self.path / "yark.bak", "w+") as file_backup:
file_backup.write(save)
def _from_dict(encoded: dict, path: Path) -> Channel:
"""Decodes archive which is being loaded back up"""
channel = Channel()
channel.path = path
channel.version = encoded["version"]
channel.url = encoded["url"]
channel.reporter = Reporter(channel)
channel.videos = [
Video._from_dict(video, channel) for video in encoded["videos"]
]
channel.livestreams = [
Video._from_dict(video, channel) for video in encoded["livestreams"]
]
channel.shorts = [
Video._from_dict(video, channel) for video in encoded["shorts"]
]
return channel
def _to_dict(self) -> dict:
"""Converts channel data to a dictionary to commit"""
return {
"version": self.version,
"url": self.url,
"videos": [video._to_dict() for video in self.videos],
"livestreams": [video._to_dict() for video in self.livestreams],
"shorts": [video._to_dict() for video in self.shorts],
}
    def __repr__(self) -> str:
        # Represent the channel by its archive directory name on disk
        return self.path.name
class Note:
    """Allows Yark users to add notes to videos"""

    video: "Video"  # video this note is attached to
    id: str  # unique identifier (uuid4 string)
    timestamp: int  # position in the video the note refers to
    title: str  # short headline for the note
    body: Optional[str]  # optional longer description

    @staticmethod
    def new(video: "Video", timestamp: int, title: str, body: Optional[str] = None) -> "Note":
        """Creates a new note with a freshly-generated id.

        Marked `@staticmethod` (it never used `self`/`cls`); existing
        `Note.new(...)` call sites are unchanged.
        """
        note = Note()
        note.video = video
        note.id = str(uuid4())
        note.timestamp = timestamp
        note.title = title
        note.body = body
        return note

    @staticmethod
    def _from_dict(video: "Video", element: dict) -> "Note":
        """Loads existing note attached to a video dict"""
        note = Note()
        note.video = video
        note.id = element["id"]
        note.timestamp = element["timestamp"]
        note.title = element["title"]
        note.body = element["body"]
        return note

    def _to_dict(self) -> dict:
        """Converts note to dictionary representation (video link is intentionally omitted)"""
        return {
            "id": self.id,
            "timestamp": self.timestamp,
            "title": self.title,
            "body": self.body,
        }
The provided code snippet includes necessary dependencies for implementing the `video` function. Write a Python function `def video(name, kind, id)` to solve the following problem:
Detailed video information and viewer
Here is the function:
def video(name, kind, id):
"""Detailed video information and viewer"""
if kind not in ["videos", "livestreams", "shorts"]:
return redirect(
url_for("routes.channel", name=name, error="Video kind not recognised")
)
try:
# Get information
channel = Channel.load(name)
video = channel.search(id)
# Return video webpage
if request.method == "GET":
title = f"{video.title.current()} · {name}"
views_data = json.dumps(video.views._to_dict())
likes_data = json.dumps(video.likes._to_dict())
return render_template(
"video.html",
title=title,
name=name,
video=video,
views_data=views_data,
likes_data=likes_data,
)
# Add new note
elif request.method == "POST":
# Parse json
new = request.get_json()
if not "title" in new:
return "Invalid schema", 400
# Create note
timestamp = _decode_timestamp(new["timestamp"])
title = new["title"]
body = new["body"] if "body" in new else None
note = Note.new(video, timestamp, title, body)
# Save new note
video.notes.append(note)
video.channel.commit()
# Return
return note._to_dict(), 200
# Update existing note
elif request.method == "PATCH":
# Parse json
update = request.get_json()
if not "id" in update or (not "title" in update and not "body" in update):
return "Invalid schema", 400
# Find note
try:
note = video.search(update["id"])
except NoteNotFoundException:
return "Note not found", 404
# Update and save
if "title" in update:
note.title = update["title"]
if "body" in update:
note.body = update["body"]
video.channel.commit()
# Return
return "Updated", 200
# Delete existing note
elif request.method == "DELETE":
# Parse json
delete = request.get_json()
if not "id" in delete:
return "Invalid schema", 400
# Filter out note with id and save
filtered_notes = []
for note in video.notes:
if note.id != delete["id"]:
filtered_notes.append(note)
video.notes = filtered_notes
video.channel.commit()
# Return
return "Deleted", 200
# Archive not found
except ArchiveNotFoundException:
return redirect(
url_for("routes.index", error="Couldn't open channel's archive")
)
# Video not found
except VideoNotFoundException:
return redirect(url_for("routes.index", error="Couldn't find video in archive"))
# Timestamp for note was invalid
except TimestampException:
return "Invalid timestamp", 400
# Unknown error
except Exception as e:
return redirect(url_for("routes.index", error=f"Internal server error:\n{e}")) | Detailed video information and viewer |
156,536 | import json
import os
from flask import (
Flask,
render_template,
request,
redirect,
url_for,
send_from_directory,
Blueprint,
)
import logging
from .errors import (
ArchiveNotFoundException,
NoteNotFoundException,
VideoNotFoundException,
TimestampException,
)
from .channel import Channel
from .video import Note
The provided code snippet includes necessary dependencies for implementing the `archive_video` function. Write a Python function `def archive_video(name, file)` to solve the following problem:
Serves video file using it's filename (id + ext)
Here is the function:
def archive_video(name, file):
"""Serves video file using it's filename (id + ext)"""
return send_from_directory(os.getcwd(), f"{name}/videos/{file}") | Serves video file using it's filename (id + ext) |
156,537 | import json
import os
from flask import (
Flask,
render_template,
request,
redirect,
url_for,
send_from_directory,
Blueprint,
)
import logging
from .errors import (
ArchiveNotFoundException,
NoteNotFoundException,
VideoNotFoundException,
TimestampException,
)
from .channel import Channel
from .video import Note
The provided code snippet includes necessary dependencies for implementing the `archive_thumbnail` function. Write a Python function `def archive_thumbnail(name, id)` to solve the following problem:
Serves thumbnail file using it's id
Here is the function:
def archive_thumbnail(name, id):
"""Serves thumbnail file using it's id"""
return send_from_directory(os.getcwd(), f"{name}/thumbnails/{id}.webp") | Serves thumbnail file using it's id |
156,538 | from __future__ import annotations
from datetime import datetime
import json
from pathlib import Path
import time
from yt_dlp import YoutubeDL, DownloadError
from colorama import Style, Fore
import sys
from .reporter import Reporter
from .errors import ArchiveNotFoundException, _err_msg, VideoNotFoundException
from .video import Video, Element
from typing import Any
import time
from progress.spinner import PieSpinner
from concurrent.futures import ThreadPoolExecutor
import time
from typing import Optional
class Video:
channel: "Channel"
id: str
uploaded: datetime
width: int
height: int
title: "Element"
description: "Element"
views: "Element"
likes: "Element"
thumbnail: "Element"
deleted: "Element"
notes: list["Note"]
def new(entry: dict[str, Any], channel) -> Video:
"""Create new video from metadata entry"""
# Normal
video = Video()
video.channel = channel
video.id = entry["id"]
video.uploaded = _decode_date_yt(entry["upload_date"])
video.width = entry["width"]
video.height = entry["height"]
video.title = Element.new(video, entry["title"])
video.description = Element.new(video, entry["description"])
video.views = Element.new(video, entry["view_count"])
video.likes = Element.new(
video, entry["like_count"] if "like_count" in entry else None
)
video.thumbnail = Element.new(video, Thumbnail.new(entry["thumbnail"], video))
video.deleted = Element.new(video, False)
video.notes = []
# Runtime-only
video.known_not_deleted = True
# Return
return video
def _new_empty() -> Video:
fake_entry = {"hi": True} # TODO: finish
return Video.new(fake_entry, Channel._new_empty())
def update(self, entry: dict):
"""Updates video using new schema, adding a new timestamp to any changes"""
# Normal
self.title.update("title", entry["title"])
self.description.update("description", entry["description"])
self.views.update("view count", entry["view_count"])
self.likes.update(
"like count", entry["like_count"] if "like_count" in entry else None
)
self.thumbnail.update("thumbnail", Thumbnail.new(entry["thumbnail"], self))
self.deleted.update("undeleted", False)
# Runtime-only
self.known_not_deleted = True
def filename(self) -> Optional[str]:
"""Returns the filename for the downloaded video, if any"""
videos = self.channel.path / "videos"
for file in videos.iterdir():
if file.stem == self.id and file.suffix != ".part":
return file.name
return None
def downloaded(self) -> bool:
"""Checks if this video has been downloaded"""
return self.filename() is not None
def updated(self) -> bool:
"""Checks if this video's title or description or deleted status have been ever updated"""
return (
len(self.title.inner) > 1
or len(self.description.inner) > 1
or len(self.deleted.inner) > 1
)
def search(self, id: str):
"""Searches video for note's id"""
for note in self.notes:
if note.id == id:
return note
raise NoteNotFoundException(f"Couldn't find note {id}")
def url(self) -> str:
"""Returns the YouTube watch url of the current video"""
# NOTE: livestreams and shorts are currently just videos and can be seen via a normal watch url
return f"https://www.youtube.com/watch?v={self.id}"
def _from_dict(encoded: dict, channel) -> Video:
"""Converts id and encoded dictionary to video for loading a channel"""
# Normal
video = Video()
video.channel = channel
video.id = encoded["id"]
video.uploaded = datetime.fromisoformat(encoded["uploaded"])
video.width = encoded["width"]
video.height = encoded["height"]
video.title = Element._from_dict(encoded["title"], video)
video.description = Element._from_dict(encoded["description"], video)
video.views = Element._from_dict(encoded["views"], video)
video.likes = Element._from_dict(encoded["likes"], video)
video.thumbnail = Thumbnail._from_element(encoded["thumbnail"], video)
video.notes = [Note._from_dict(video, note) for note in encoded["notes"]]
video.deleted = Element._from_dict(encoded["deleted"], video)
# Runtime-only
video.known_not_deleted = False
# Return
return video
def _to_dict(self) -> dict:
"""Converts video information to dictionary for committing, doesn't include id"""
return {
"id": self.id,
"uploaded": self.uploaded.isoformat(),
"width": self.width,
"height": self.height,
"title": self.title._to_dict(),
"description": self.description._to_dict(),
"views": self.views._to_dict(),
"likes": self.likes._to_dict(),
"thumbnail": self.thumbnail._to_dict(),
"deleted": self.deleted._to_dict(),
"notes": [note._to_dict() for note in self.notes],
}
def __repr__(self) -> str:
# Title
title = _truncate_text(self.title.current())
# Views and likes
views = _magnitude(self.views.current()).ljust(6)
likes = _magnitude(self.likes.current()).ljust(6)
# Width and height
width = self.width if self.width is not None else "?"
height = self.height if self.height is not None else "?"
# Upload date
uploaded = _encode_date_human(self.uploaded)
# Return
return f"{title} 🔎{views} │ 👍{likes} │ 📅{uploaded} │ 📺{width}x{height}"
def __lt__(self, other) -> bool:
return self.uploaded < other.uploaded
The provided code snippet includes necessary dependencies for implementing the `_skip_video` function. Write a Python function `def _skip_video( videos: list[Video], reason: str, warning: bool = False, ) -> tuple[list[Video], Video]` to solve the following problem:
Skips first undownloaded video in `videos`, make sure there's at least one to skip otherwise an exception will be thrown
Here is the function:
def _skip_video(
videos: list[Video],
reason: str,
warning: bool = False,
) -> tuple[list[Video], Video]:
"""Skips first undownloaded video in `videos`, make sure there's at least one to skip otherwise an exception will be thrown"""
# Find fist undownloaded video
for ind, video in enumerate(videos):
if not video.downloaded():
# Tell the user we're skipping over it
if warning:
print(
Fore.YELLOW + f" • Skipping {video.id} ({reason})" + Fore.RESET,
file=sys.stderr,
)
else:
print(
Style.DIM + f" • Skipping {video.id} ({reason})" + Style.NORMAL,
)
# Set videos to skip over this one
videos = videos[ind + 1 :]
# Return the corrected list and the video found
return videos, video
# Shouldn't happen, see docs
raise Exception(
"We expected to skip a video and return it but nothing to skip was found"
) | Skips first undownloaded video in `videos`, make sure there's at least one to skip otherwise an exception will be thrown |
156,539 | from __future__ import annotations
from datetime import datetime
import json
from pathlib import Path
import time
from yt_dlp import YoutubeDL, DownloadError
from colorama import Style, Fore
import sys
from .reporter import Reporter
from .errors import ArchiveNotFoundException, _err_msg, VideoNotFoundException
from .video import Video, Element
from typing import Any
import time
from progress.spinner import PieSpinner
from concurrent.futures import ThreadPoolExecutor
import time
from typing import Optional
def _err_msg(msg: str, report_msg: bool = False):
    """Provides a red-coloured error message to the user in the STDERR pipe"""
    # Optionally append the bug-report hint before styling the output
    if report_msg:
        msg = f"{msg}\nPlease file a bug report if you think this is a problem with Yark!"
    print(Fore.RED + Style.BRIGHT + msg + Style.NORMAL + Fore.RESET, file=sys.stderr)
class Video:
channel: "Channel"
id: str
uploaded: datetime
width: int
height: int
title: "Element"
description: "Element"
views: "Element"
likes: "Element"
thumbnail: "Element"
deleted: "Element"
notes: list["Note"]
def new(entry: dict[str, Any], channel) -> Video:
"""Create new video from metadata entry"""
# Normal
video = Video()
video.channel = channel
video.id = entry["id"]
video.uploaded = _decode_date_yt(entry["upload_date"])
video.width = entry["width"]
video.height = entry["height"]
video.title = Element.new(video, entry["title"])
video.description = Element.new(video, entry["description"])
video.views = Element.new(video, entry["view_count"])
video.likes = Element.new(
video, entry["like_count"] if "like_count" in entry else None
)
video.thumbnail = Element.new(video, Thumbnail.new(entry["thumbnail"], video))
video.deleted = Element.new(video, False)
video.notes = []
# Runtime-only
video.known_not_deleted = True
# Return
return video
def _new_empty() -> Video:
fake_entry = {"hi": True} # TODO: finish
return Video.new(fake_entry, Channel._new_empty())
def update(self, entry: dict):
"""Updates video using new schema, adding a new timestamp to any changes"""
# Normal
self.title.update("title", entry["title"])
self.description.update("description", entry["description"])
self.views.update("view count", entry["view_count"])
self.likes.update(
"like count", entry["like_count"] if "like_count" in entry else None
)
self.thumbnail.update("thumbnail", Thumbnail.new(entry["thumbnail"], self))
self.deleted.update("undeleted", False)
# Runtime-only
self.known_not_deleted = True
def filename(self) -> Optional[str]:
"""Returns the filename for the downloaded video, if any"""
videos = self.channel.path / "videos"
for file in videos.iterdir():
if file.stem == self.id and file.suffix != ".part":
return file.name
return None
def downloaded(self) -> bool:
"""Checks if this video has been downloaded"""
return self.filename() is not None
def updated(self) -> bool:
"""Checks if this video's title or description or deleted status have been ever updated"""
return (
len(self.title.inner) > 1
or len(self.description.inner) > 1
or len(self.deleted.inner) > 1
)
def search(self, id: str):
"""Searches video for note's id"""
for note in self.notes:
if note.id == id:
return note
raise NoteNotFoundException(f"Couldn't find note {id}")
def url(self) -> str:
"""Returns the YouTube watch url of the current video"""
# NOTE: livestreams and shorts are currently just videos and can be seen via a normal watch url
return f"https://www.youtube.com/watch?v={self.id}"
def _from_dict(encoded: dict, channel) -> Video:
"""Converts id and encoded dictionary to video for loading a channel"""
# Normal
video = Video()
video.channel = channel
video.id = encoded["id"]
video.uploaded = datetime.fromisoformat(encoded["uploaded"])
video.width = encoded["width"]
video.height = encoded["height"]
video.title = Element._from_dict(encoded["title"], video)
video.description = Element._from_dict(encoded["description"], video)
video.views = Element._from_dict(encoded["views"], video)
video.likes = Element._from_dict(encoded["likes"], video)
video.thumbnail = Thumbnail._from_element(encoded["thumbnail"], video)
video.notes = [Note._from_dict(video, note) for note in encoded["notes"]]
video.deleted = Element._from_dict(encoded["deleted"], video)
# Runtime-only
video.known_not_deleted = False
# Return
return video
def _to_dict(self) -> dict:
"""Converts video information to dictionary for committing, doesn't include id"""
return {
"id": self.id,
"uploaded": self.uploaded.isoformat(),
"width": self.width,
"height": self.height,
"title": self.title._to_dict(),
"description": self.description._to_dict(),
"views": self.views._to_dict(),
"likes": self.likes._to_dict(),
"thumbnail": self.thumbnail._to_dict(),
"deleted": self.deleted._to_dict(),
"notes": [note._to_dict() for note in self.notes],
}
def __repr__(self) -> str:
# Title
title = _truncate_text(self.title.current())
# Views and likes
views = _magnitude(self.views.current()).ljust(6)
likes = _magnitude(self.likes.current()).ljust(6)
# Width and height
width = self.width if self.width is not None else "?"
height = self.height if self.height is not None else "?"
# Upload date
uploaded = _encode_date_human(self.uploaded)
# Return
return f"{title} 🔎{views} │ 👍{likes} │ 📅{uploaded} │ 📺{width}x{height}"
def __lt__(self, other) -> bool:
return self.uploaded < other.uploaded
class Element:
    """A single tracked value on a video (title, views, thumbnail, …) with its full change history keyed by timestamp."""

    video: Video  # owning video, used to reach the channel's reporter
    inner: dict[datetime, Any]  # insertion-ordered history: timestamp -> value at that time

    def new(video: Video, data):
        """Creates new element attached to a video with some initial data"""
        element = Element()
        element.video = video
        element.inner = {datetime.utcnow(): data}
        return element

    def update(self, kind: Optional[str], data):
        """Updates element if it needs to be and returns self, reports change unless `kind` is none"""
        # Check if updating is needed; values carrying an `id` attribute (e.g.
        # thumbnails) are compared by id, everything else by equality
        has_id = hasattr(data, "id")
        current = self.current()
        if (not has_id and current != data) or (has_id and data.id != current.id):
            # Append the new value under the current timestamp
            self.inner[datetime.utcnow()] = data
            # Report if wanted
            if kind is not None:
                self.video.channel.reporter.add_updated(kind, self)
        # Return self
        return self

    def current(self):
        """Returns most recent element"""
        # Dicts preserve insertion order, so the last key is the newest timestamp
        return self.inner[list(self.inner.keys())[-1]]

    def changed(self) -> bool:
        """Checks if the value has ever been modified from it's original state"""
        return len(self.inner) > 1

    def _from_dict(encoded: dict, video: Video) -> Element:
        """Converts encoded dictionary into element"""
        # Basics
        element = Element()
        element.video = video
        element.inner = {}
        # Inner elements: iso-formatted string keys become datetime keys again
        for key in encoded:
            date = datetime.fromisoformat(key)
            element.inner[date] = encoded[key]
        # Return
        return element

    def _to_dict(self) -> dict:
        """Converts element to dictionary for committing"""
        # Convert each item
        encoded = {}
        for date in self.inner:
            # Convert element value if method available to support custom
            # serialisation (e.g. Thumbnail exposes `_to_element`)
            data = self.inner[date]
            data = data._to_element() if hasattr(data, "_to_element") else data
            # Add encoded data to iso-formatted string date
            encoded[date.isoformat()] = data
        # Return
        return encoded
The provided code snippet includes necessary dependencies for implementing the `_migrate_archive` function. Write a Python function `def _migrate_archive( current_version: int, expected_version: int, encoded: dict, channel_name: str ) -> dict` to solve the following problem:
Automatically migrates an archive from one version to another by bootstrapping
Here is the function:
def _migrate_archive(
current_version: int, expected_version: int, encoded: dict, channel_name: str
) -> dict:
"""Automatically migrates an archive from one version to another by bootstrapping"""
def migrate_step(cur: int, encoded: dict) -> dict:
"""Step in recursion to migrate from one to another, contains migration logic"""
# Stop because we've reached the desired version
if cur == expected_version:
return encoded
# From version 1 to version 2
elif cur == 1:
# Channel id to url
encoded["url"] = "https://www.youtube.com/channel/" + encoded["id"]
del encoded["id"]
print(
Fore.YELLOW
+ "Please make sure "
+ encoded["url"]
+ " is the correct url"
+ Fore.RESET
)
# Empty livestreams/shorts lists
encoded["livestreams"] = []
encoded["shorts"] = []
# From version 2 to version 3
elif cur == 2:
# Add deleted status to every video/livestream/short
# NOTE: none is fine for new elements, just a slight bodge
for video in encoded["videos"]:
video["deleted"] = Element.new(Video._new_empty(), False)._to_dict()
for video in encoded["livestreams"]:
video["deleted"] = Element.new(Video._new_empty(), False)._to_dict()
for video in encoded["shorts"]:
video["deleted"] = Element.new(Video._new_empty(), False)._to_dict()
# Unknown version
else:
_err_msg(f"Unknown archive version v{cur} found during migration", True)
sys.exit(1)
# Increment version and run again until version has been reached
cur += 1
encoded["version"] = cur
return migrate_step(cur, encoded)
# Inform user of the backup process
print(
Fore.YELLOW
+ f"Automatically migrating archive from v{current_version} to v{expected_version}, a backup has been made at {channel_name}/yark.bak"
+ Fore.RESET
)
# Start recursion step
return migrate_step(current_version, encoded) | Automatically migrates an archive from one version to another by bootstrapping |
156,540 | from __future__ import annotations
from datetime import datetime
import json
from pathlib import Path
import time
from yt_dlp import YoutubeDL, DownloadError
from colorama import Style, Fore
import sys
from .reporter import Reporter
from .errors import ArchiveNotFoundException, _err_msg, VideoNotFoundException
from .video import Video, Element
from typing import Any
import time
from progress.spinner import PieSpinner
from concurrent.futures import ThreadPoolExecutor
import time
from typing import Optional
def _err_msg(msg: str, report_msg: bool = False):
"""Provides a red-coloured error message to the user in the STDERR pipe"""
msg = (
msg
if not report_msg
else f"{msg}\nPlease file a bug report if you think this is a problem with Yark!"
)
print(Fore.RED + Style.BRIGHT + msg + Style.NORMAL + Fore.RESET, file=sys.stderr)
The provided code snippet includes necessary dependencies for implementing the `_err_dl` function. Write a Python function `def _err_dl(name: str, exception: DownloadError, retrying: bool)` to solve the following problem:
Prints errors to stdout depending on what kind of download error occurred
Here is the function:
def _err_dl(name: str, exception: DownloadError, retrying: bool):
"""Prints errors to stdout depending on what kind of download error occurred"""
# Default message
msg = f"Unknown error whilst downloading {name}, details below:\n{exception}"
# Types of errors
ERRORS = [
"<urlopen error [Errno 8] nodename nor servname provided, or not known>",
"500",
"Got error: The read operation timed out",
"No such file or directory",
"HTTP Error 404: Not Found",
"<urlopen error timed out>",
]
# Download errors
if type(exception) == DownloadError:
# Server connection
if ERRORS[0] in exception.msg:
msg = "Issue connecting with YouTube's servers"
# Server fault
elif ERRORS[1] in exception.msg:
msg = "Fault with YouTube's servers"
# Timeout
elif ERRORS[2] in exception.msg:
msg = "Timed out trying to download video"
# Video deleted whilst downloading
elif ERRORS[3] in exception.msg:
msg = "Video deleted whilst downloading"
# Channel not found, might need to retry with alternative route
elif ERRORS[4] in exception.msg:
msg = "Couldn't find channel by it's id"
# Random timeout; not sure if its user-end or youtube-end
elif ERRORS[5] in exception.msg:
msg = "Timed out trying to reach YouTube"
# Print error
suffix = ", retrying in a few seconds.." if retrying else ""
print(
Fore.YELLOW + " • " + msg + suffix.ljust(40) + Fore.RESET,
file=sys.stderr,
)
# Wait if retrying, exit if failed
if retrying:
time.sleep(5)
else:
_err_msg(f" • Sorry, failed to download {name}", True)
sys.exit(1) | Prints errors to stdout depending on what kind of download error occurred |
156,541 | from __future__ import annotations
from datetime import datetime
import json
from pathlib import Path
import time
from yt_dlp import YoutubeDL, DownloadError
import sys
from .reporter import Reporter
from ..errors import ArchiveNotFoundException, MetadataFailException
from .video.video import Video, Videos
from .comment_author import CommentAuthor
from typing import Optional, Any
from .config import Config, YtDlpSettings
from .converter import Converter
from .migrator import _migrate
from ..utils import ARCHIVE_COMPAT, _log_err
from dataclasses import dataclass
import logging
The provided code snippet includes necessary dependencies for implementing the `_skip_video` function. Write a Python function `def _skip_video( videos: list[Video], reason: str, warning: bool = False, ) -> tuple[list[Video], Video]` to solve the following problem:
Skips first undownloaded video in `videos`, make sure there's at least one to skip otherwise an exception will be thrown
Here is the function:
def _skip_video(
videos: list[Video],
reason: str,
warning: bool = False,
) -> tuple[list[Video], Video]:
"""Skips first undownloaded video in `videos`, make sure there's at least one to skip otherwise an exception will be thrown"""
# Find fist undownloaded video
for ind, video in enumerate(videos):
if not video.downloaded():
# Tell the user we're skipping over it
if warning:
logging.warn(
f"Skipping video {video.id} download for {video.archive} ({reason})"
)
else:
logging.info(
f"Skipping video {video.id} download for {video.archive} ({reason})"
)
# Set videos to skip over this one
videos = videos[ind + 1 :]
# Return the corrected list and the video found
return videos, video
# Shouldn't happen, see docs
raise Exception(
"We expected to skip a video and return it but nothing to skip was found"
) | Skips first undownloaded video in `videos`, make sure there's at least one to skip otherwise an exception will be thrown |
156,542 | from __future__ import annotations
from datetime import datetime
import json
from pathlib import Path
import time
from yt_dlp import YoutubeDL, DownloadError
import sys
from .reporter import Reporter
from ..errors import ArchiveNotFoundException, MetadataFailException
from .video.video import Video, Videos
from .comment_author import CommentAuthor
from typing import Optional, Any
from .config import Config, YtDlpSettings
from .converter import Converter
from .migrator import _migrate
from ..utils import ARCHIVE_COMPAT, _log_err
from dataclasses import dataclass
import logging
The provided code snippet includes necessary dependencies for implementing the `_download_error` function. Write a Python function `def _download_error( archive_name: str, exception: DownloadError, retrying: bool ) -> None` to solve the following problem:
Logs errors depending on what kind of download error occurred
Here is the function:
def _download_error(
    archive_name: str, exception: DownloadError, retrying: bool
) -> None:
    """Log a human-friendly message for a known download error.

    When `retrying` is True, sleeps a few seconds and returns so the caller
    can retry; otherwise logs a fatal message and exits the process.
    """
    # Default message for anything we don't recognise
    msg = (
        f"Unknown error whilst downloading {archive_name}, details below:\n{exception}"
    )
    # Known substrings of yt-dlp error messages
    ERRORS = [
        "<urlopen error [Errno 8] nodename nor servname provided, or not known>",
        "500",
        "Got error: The read operation timed out",
        "No such file or directory",
        "HTTP Error 404: Not Found",
        "<urlopen error timed out>",
        "Did not get any data blocks",
    ]
    # Download errors (isinstance instead of type()== so subclasses also match)
    if isinstance(exception, DownloadError):
        # Server connection
        if ERRORS[0] in exception.msg or ERRORS[5] in exception.msg:
            msg = "Issue connecting with YouTube's servers"
        # Server fault
        elif ERRORS[1] in exception.msg:
            msg = "Fault with YouTube's servers"
        # Timeout
        elif ERRORS[2] in exception.msg:
            msg = "Timed out trying to download video"
        # Video deleted whilst downloading
        elif ERRORS[3] in exception.msg:
            msg = "Video deleted whilst downloading"
        # Target not found, might need to retry with alternative route
        elif ERRORS[4] in exception.msg:
            msg = "Couldn't find target by it's id"
        # No data received; not sure if its user-end or youtube-end.
        # BUGFIX: this branch previously re-tested ERRORS[5], which the first
        # branch already handles, so ERRORS[6] was never matched at all.
        elif ERRORS[6] in exception.msg:
            msg = "Timed out trying to reach YouTube"
    # Log error (logging.warn() is deprecated in favour of warning())
    suffix = ", retrying in a few seconds.." if retrying else ""
    logging.warning(msg + suffix)
    # Wait if retrying, exit if failed
    if retrying:
        time.sleep(5)
    else:
        _log_err(f"Sorry, failed to download {archive_name}", True)
        sys.exit(1)
156,543 | import argparse
import os
import logging
import colorama
from colorama import Fore, Style
from . import client
from urllib.parse import urlparse, parse_qs, urlencode
import os
logging.basicConfig(format=log_format, level=logging.INFO)
logging.getLogger('').handlers[0].setFormatter(logging.Formatter(log_format))
def clean_urls(urls, extensions, placeholder):
    """
    Normalise a list of URLs, masking query-string parameter values.

    Each URL is first passed through clean_url(); URLs that has_extension()
    rejects then get every query parameter value replaced by *placeholder*.
    URLs matching one of *extensions* are kept as clean_url() returned them.
    Duplicates are collapsed via a set, so output order is unspecified.

    Args:
        urls (list): URLs to clean.
        extensions (list): File extensions to check against.
        placeholder: Value substituted for each query parameter.

    Returns:
        list: Deduplicated, cleaned URLs.
    """
    unique = set()
    for raw in urls:
        url = clean_url(raw)
        if not has_extension(url, extensions):
            parsed = urlparse(url)
            masked = {name: placeholder for name in parse_qs(parsed.query)}
            url = parsed._replace(query=urlencode(masked, doseq=True)).geturl()
        unique.add(url)
    return list(unique)
The provided code snippet includes necessary dependencies for implementing the `fetch_and_clean_urls` function. Write a Python function `def fetch_and_clean_urls(domain, extensions, stream_output,proxy, placeholder)` to solve the following problem:
Fetch and clean URLs related to a specific domain from the Wayback Machine. Args: domain (str): The domain name to fetch URLs for. extensions (list): List of file extensions to check against. stream_output (bool): True to stream URLs to the terminal. Returns: None
Here is the function:
def fetch_and_clean_urls(domain, extensions, stream_output,proxy, placeholder):
    """
    Fetch and clean URLs related to a specific domain from the Wayback Machine.

    Cleaned URLs that contain a query string ("?") are written to
    results/<domain>.txt (directory created on demand) and, when
    stream_output is set, also printed to the terminal.

    Args:
        domain (str): The domain name to fetch URLs for.
        extensions (list): File extensions to check against (passed to clean_urls).
        stream_output (bool): True to stream saved URLs to the terminal.
        proxy: Proxy handed through to client.fetch_url_content
            (presumably a proxy URL/dict -- confirm against the client module).
        placeholder: Value substituted for each query parameter by clean_urls.

    Returns:
        None
    """
    logging.info(f"{Fore.YELLOW}[INFO]{Style.RESET_ALL} Fetching URLs for {Fore.CYAN + domain + Style.RESET_ALL}")
    # CDX API query: plain-text output, one entry per unique URL key
    wayback_uri = f"https://web.archive.org/cdx/search/cdx?url={domain}/*&output=txt&collapse=urlkey&fl=original&page=/"
    response = client.fetch_url_content(wayback_uri,proxy)
    # The CDX text response is whitespace-separated, one URL per token
    urls = response.text.split()
    logging.info(f"{Fore.YELLOW}[INFO]{Style.RESET_ALL} Found {Fore.GREEN + str(len(urls)) + Style.RESET_ALL} URLs for {Fore.CYAN + domain + Style.RESET_ALL}")
    cleaned_urls = clean_urls(urls, extensions, placeholder)
    logging.info(f"{Fore.YELLOW}[INFO]{Style.RESET_ALL} Cleaning URLs for {Fore.CYAN + domain + Style.RESET_ALL}")
    logging.info(f"{Fore.YELLOW}[INFO]{Style.RESET_ALL} Found {Fore.GREEN + str(len(cleaned_urls)) + Style.RESET_ALL} URLs after cleaning")
    logging.info(f"{Fore.YELLOW}[INFO]{Style.RESET_ALL} Extracting URLs with parameters")
    results_dir = "results"
    if not os.path.exists(results_dir):
        os.makedirs(results_dir)
    result_file = os.path.join(results_dir, f"{domain}.txt")
    with open(result_file, "w") as f:
        # Keep only URLs that actually carry query parameters
        for url in cleaned_urls:
            if "?" in url:
                f.write(url + "\n")
                if stream_output:
                    print(url)
    logging.info(f"{Fore.YELLOW}[INFO]{Style.RESET_ALL} Saved cleaned URLs to {Fore.CYAN + result_file + Style.RESET_ALL}")
156,544 | from rest_framework_csv.renderers import CSVStreamingRenderer
def file_headers():
    """Column order for the cycle-count CSV export."""
    return [
        "cyclecount_status", "bin_name", "goods_code", "goods_desc",
        "goods_qty", "physical_inventory", "difference",
        "creater", "create_time", "update_time",
    ]
156,545 | from rest_framework_csv.renderers import CSVStreamingRenderer
def cn_data_header():
    """Chinese column captions for the cycle-count CSV export."""
    return {
        'cyclecount_status': '盘点状态',
        'bin_name': '库位名称',
        'goods_code': '商品编码',
        'goods_desc': '商品描述',
        'goods_qty': '现有数量',
        'physical_inventory': '盘点数量',
        'difference': '盘点差异',
        'creater': '创建人',
        'create_time': '创建时间',
        'update_time': '盘点时间',
    }
156,546 | from rest_framework_csv.renderers import CSVStreamingRenderer
def en_data_header():
    """English column captions for the cycle-count CSV export.

    Bugfix: the bin-name key was misspelled 'bin_nam', so its caption could
    never match the 'bin_name' field used by file_headers() and the CN header.
    """
    return {
        'cyclecount_status': 'Count Status',
        'bin_name': 'Bin Name',  # was 'bin_nam' (typo)
        'goods_code': 'Goods Code',
        'goods_desc': 'Goods Description',
        'goods_qty': 'On-Hand Stock',
        'physical_inventory': 'Count QTY',
        'difference': 'Count Difference',
        'creater': 'Creater',
        'create_time': 'Create Time',
        'update_time': 'Update Time',
    }
156,547 | from django.apps import AppConfig
from django.db.models.signals import post_migrate
def init_category():
    """Seed the four built-in bin-property rows (Damage/Inspection/Normal/Holding).

    If the 'init_data' rows exist but their count is wrong, they are wiped and
    recreated; if they are absent they are created. Best-effort: failures
    (e.g. before migrations have run) are silently ignored.

    :return: None
    """
    try:
        from .models import ListModel as ls

        def _default_rows():
            # Fresh instances each call; previously this list was duplicated
            # verbatim in both branches below.
            return [
                ls(id=1, openid='init_data', bin_property='Damage', creater='GreaterWMS'),
                ls(id=2, openid='init_data', bin_property='Inspection', creater='GreaterWMS'),
                ls(id=3, openid='init_data', bin_property='Normal', creater='GreaterWMS'),
                ls(id=4, openid='init_data', bin_property='Holding', creater='GreaterWMS'),
            ]

        if ls.objects.filter(openid__iexact='init_data').exists():
            # Rows exist but the set is corrupted/incomplete: rebuild it
            if ls.objects.filter(openid__iexact='init_data').count() != 4:
                ls.objects.filter(openid__iexact='init_data').delete()
                ls.objects.bulk_create(_default_rows(), batch_size=100)
        else:
            ls.objects.bulk_create(_default_rows(), batch_size=100)
    except Exception:
        # FIX: narrowed from bare `except:` so SystemExit/KeyboardInterrupt
        # still propagate; DB/import errors remain deliberately best-effort.
        pass
def do_init_data(sender, **kwargs):
    # Signal receiver (wired to post_migrate per the imports above):
    # (re)seed the default bin-property rows after migrations run.
    init_category()
156,548 | from rest_framework_csv.renderers import CSVStreamingRenderer
def file_headers():
    """Column order for the bin-size CSV export."""
    return [
        'bin_size', 'bin_size_w', 'bin_size_d', 'bin_size_h',
        'creater', 'create_time', 'update_time',
    ]
156,549 | from rest_framework_csv.renderers import CSVStreamingRenderer
def cn_data_header():
    """Chinese column captions for the bin-size CSV export."""
    return {
        'bin_size': '库位尺寸名称',
        'bin_size_w': '库位尺寸长度',
        'bin_size_d': '库位尺寸宽度',
        'bin_size_h': '库位尺寸高度',
        'creater': '创建人',
        'create_time': '创建时间',
        'update_time': '更新时间',
    }
156,550 | from rest_framework_csv.renderers import CSVStreamingRenderer
def en_data_header():
    """English column captions for the bin-size CSV export."""
    return {
        'bin_size': 'Bin Size',
        'bin_size_w': 'Bin Wide',
        'bin_size_d': 'Bin Depth',
        'bin_size_h': 'Bin Height',
        'creater': 'Creater',
        'create_time': 'Create Time',
        'update_time': 'Update Time',
    }
156,551 | from rest_framework_csv.renderers import CSVStreamingRenderer
def file_headers_bin():
    """Column order for the per-bin stock CSV export."""
    return [
        'bin_name', 'goods_code', 'goods_desc', 'goods_qty',
        'pick_qty', 'picked_qty', 'bin_size', 'bin_property',
        'create_time', 'update_time',
    ]
156,552 | from rest_framework_csv.renderers import CSVStreamingRenderer
def cn_data_header_bin():
    """Chinese column captions for the per-bin stock CSV export."""
    return {
        'bin_name': '库位名称',
        'goods_code': '商品编码',
        'goods_desc': '商品描述',
        'goods_qty': '商品数量',
        'pick_qty': '等待拣货数量',
        'picked_qty': '已拣货数量',
        'bin_size': '库位尺寸',
        'bin_property': '库位属性',
        'create_time': '创建时间',
        'update_time': '更新时间',
    }
156,553 | from rest_framework_csv.renderers import CSVStreamingRenderer
def en_data_header_bin():
    """English column captions for the per-bin stock CSV export."""
    return {
        'bin_name': 'Bin Name',
        'goods_code': 'Goods Code',
        'goods_desc': 'Goods Description',
        'goods_qty': 'Goods Qty',
        'pick_qty': 'Pick Stock',
        'picked_qty': 'Picked Stock',
        'bin_size': 'Bin Size',
        'bin_property': 'Bin Property',
        'create_time': 'Create Time',
        'update_time': 'Update Time',
    }
156,554 | from rest_framework_csv.renderers import CSVStreamingRenderer
def file_headers_list():
    """Column order for the stock-list CSV export."""
    return [
        'goods_code', 'goods_desc', 'goods_qty', 'onhand_stock',
        'can_order_stock', 'ordered_stock', 'inspect_stock', 'hold_stock',
        'damage_stock', 'asn_stock', 'dn_stock', 'pre_load_stock',
        'pre_sort_stock', 'sorted_stock', 'pick_stock', 'picked_stock',
        'back_order_stock', 'create_time', 'update_time',
    ]
156,555 | from rest_framework_csv.renderers import CSVStreamingRenderer
def cn_data_header_list():
    """Chinese column captions for the stock-list CSV export."""
    return {
        'goods_code': '商品编码',
        'goods_desc': '商品描述',
        'goods_qty': '商品数量',
        'onhand_stock': '现有库存',
        'can_order_stock': '可被下单数量',
        'ordered_stock': '已被下单数量',
        'inspect_stock': '质检库存',
        'hold_stock': '锁定库存',
        'damage_stock': '破损库存',
        'asn_stock': '到货通知书数量',
        'dn_stock': '发货单数量',
        'pre_load_stock': '等待卸货数量',
        'pre_sort_stock': '等待分拣数量',
        'sorted_stock': '已分拣数量',
        'pick_stock': '等待拣货数量',
        'picked_stock': '已拣货数量',
        'back_order_stock': '欠货数量',
        'create_time': '创建时间',
        'update_time': '更新时间',
    }
156,556 | from rest_framework_csv.renderers import CSVStreamingRenderer
def en_data_header_list():
    """English column captions for the stock-list CSV export."""
    return {
        'goods_code': 'Goods Code',
        'goods_desc': 'Goods Description',
        'goods_qty': 'Goods Qty',
        'onhand_stock': 'Onhand Stock',
        'can_order_stock': 'Can Order Stock',
        'ordered_stock': 'Ordered Stock',
        'inspect_stock': 'Inspect Stock',
        'hold_stock': 'Hold Stock',
        'damage_stock': 'Damage Stock',
        'asn_stock': 'ASN Stock',
        'dn_stock': 'DN Stock',
        'pre_load_stock': 'Pre Load Stock',
        'pre_sort_stock': 'Pre Sort Stock',
        'sorted_stock': 'Sorted Stock',
        'pick_stock': 'Pick Stock',
        'picked_stock': 'Picked Stock',
        'back_order_stock': 'Back Order Stock',
        'create_time': 'Create Time',
        'update_time': 'Update Time',
    }
156,557 | from rest_framework_csv.renderers import CSVStreamingRenderer
def file_headers():
    """Column order for the goods CSV export."""
    return [
        'goods_code', 'goods_desc', 'goods_supplier', 'goods_weight',
        'goods_w', 'goods_d', 'goods_h', 'unit_volume',
        'goods_unit', 'goods_class', 'goods_brand', 'goods_color',
        'goods_shape', 'goods_specs', 'goods_origin',
        'goods_cost', 'goods_price',
        'creater', 'create_time', 'update_time',
    ]
156,558 | from rest_framework_csv.renderers import CSVStreamingRenderer
def cn_data_header():
    """Chinese column captions for the goods CSV export."""
    return {
        'goods_code': '商品编码',
        'goods_desc': '商品描述',
        'goods_supplier': '商品供应商',
        'goods_weight': '商品单位重量',
        'goods_w': '商品单位长度',
        'goods_d': '商品单位宽度',
        'goods_h': '商品单位高度',
        'unit_volume': '最小单位体积',
        'goods_unit': '商品单位',
        'goods_class': '商品类别',
        'goods_brand': '商品品牌',
        'goods_color': '商品颜色',
        'goods_shape': '商品形状',
        'goods_specs': '商品规格',
        'goods_origin': '商品产地',
        'goods_cost': '商品成本',
        'goods_price': '商品价格',
        'creater': '创建人',
        'create_time': '创建时间',
        'update_time': '更新时间',
    }
156,559 | from rest_framework_csv.renderers import CSVStreamingRenderer
def en_data_header():
    """English column captions for the goods CSV export."""
    return {
        'goods_code': 'Goods Code',
        'goods_desc': 'Goods Description',
        'goods_supplier': 'Goods Supplier',
        'goods_weight': 'Goods Weight',
        'goods_w': 'Goods Wide',
        'goods_d': 'Goods Depth',
        'goods_h': 'Goods Height',
        'unit_volume': 'Unit Volume',
        'goods_unit': 'Goods Unit',
        'goods_class': 'Goods Class',
        'goods_brand': 'Goods Brand',
        'goods_color': 'Goods Color',
        'goods_shape': 'Goods Shape',
        'goods_specs': 'Goods Specs',
        'goods_origin': 'Goods Origin',
        'goods_cost': 'Goods Cost',
        'goods_price': 'Goods Price',
        'creater': 'Creater',
        'create_time': 'Create Time',
        'update_time': 'Update Time',
    }
156,560 | from django.contrib import admin
from django.conf import settings
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from django.contrib.staticfiles.views import serve
from django.views.static import serve as static_serve
from drf_spectacular.views import SpectacularAPIView, SpectacularRedocView, SpectacularSwaggerView
from . import views
def return_static(request, path, insecure=True, **kwargs):
    # Thin wrapper around django.contrib.staticfiles.views.serve for URL conf.
    # insecure=True is passed straight through to serve() (presumably to allow
    # serving when DEBUG is off -- confirm against the Django staticfiles docs).
    return serve(request, path, insecure, **kwargs)
156,561 | import os
from django.core.asgi import get_asgi_application
from utils.websocket import websocket_application
from asgihandler.core import ASGIHandler
http_application = get_asgi_application()
async def websocket_application(scope, receive, send):
async def application(scope, receive, send):
    """Root ASGI entry point: dispatch HTTP(S) vs WebSocket connections."""
    scope_type = scope['type']
    if scope_type in ('http', 'https'):
        # Let the project hook inspect the scope before Django handles it
        ASGIHandler.asgi_get_handler(scope)
        await http_application(scope, receive, send)
    elif scope_type == 'websocket':
        await websocket_application(scope, receive, send)
    else:
        raise Exception('Unknown Type' + scope['type'])
156,562 | from django.http import StreamingHttpResponse, JsonResponse
from django.conf import settings
from wsgiref.util import FileWrapper
from rest_framework.exceptions import APIException
import mimetypes, os
import mimetypes, os, requests, django
mimetypes.add_type("text/css", ".css", True)
mimetypes.add_type("text/javascript", ".js", True)
def robots(request):
    """Stream the requested robots file from BASE_DIR with a long cache lifetime.

    Consistency fix: every sibling view here wraps settings.BASE_DIR in str();
    str() is idempotent for strings, and this also keeps the concatenation
    working if BASE_DIR is a pathlib.Path.
    """
    path = str(settings.BASE_DIR) + request.path_info
    # Only the guessed content type is used; the encoding slot is ignored.
    content_type, encoding = mimetypes.guess_type(path)
    resp = StreamingHttpResponse(FileWrapper(open(path, 'rb')), content_type=content_type)
    resp['Cache-Control'] = "max-age=864000000000"
    return resp
156,563 | from django.http import StreamingHttpResponse, JsonResponse
from django.conf import settings
from wsgiref.util import FileWrapper
from rest_framework.exceptions import APIException
import mimetypes, os
import mimetypes, os, requests, django
mimetypes.add_type("text/css", ".css", True)
mimetypes.add_type("text/javascript", ".js", True)
def favicon(request):
    """Stream the site logo as the favicon, with a long-lived cache header."""
    logo_path = str(settings.BASE_DIR) + '/static/img/logo.png'
    guessed_type = mimetypes.guess_type(logo_path)[0]
    response = StreamingHttpResponse(
        FileWrapper(open(logo_path, 'rb')), content_type=guessed_type
    )
    response['Cache-Control'] = "max-age=864000000000"
    return response
156,564 | from django.http import StreamingHttpResponse, JsonResponse
from django.conf import settings
from wsgiref.util import FileWrapper
from rest_framework.exceptions import APIException
import mimetypes, os
import mimetypes, os, requests, django
mimetypes.add_type("text/css", ".css", True)
mimetypes.add_type("text/javascript", ".js", True)
def css(request):
    """Stream a CSS asset from the built SPA bundle, cached long-term."""
    asset_path = str(settings.BASE_DIR) + '/templates/dist/spa' + request.path_info
    guessed_type = mimetypes.guess_type(asset_path)[0]
    response = StreamingHttpResponse(
        FileWrapper(open(asset_path, 'rb')), content_type=guessed_type
    )
    response['Cache-Control'] = "max-age=864000000000"
    return response
156,565 | from django.http import StreamingHttpResponse, JsonResponse
from django.conf import settings
from wsgiref.util import FileWrapper
from rest_framework.exceptions import APIException
import mimetypes, os
import mimetypes, os, requests, django
mimetypes.add_type("text/css", ".css", True)
mimetypes.add_type("text/javascript", ".js", True)
def js(request):
    """Stream a JavaScript asset from the built SPA bundle, cached long-term."""
    asset_path = str(settings.BASE_DIR) + '/templates/dist/spa' + request.path_info
    guessed_type = mimetypes.guess_type(asset_path)[0]
    response = StreamingHttpResponse(
        FileWrapper(open(asset_path, 'rb')), content_type=guessed_type
    )
    response['Cache-Control'] = "max-age=864000000000"
    return response
156,566 | from django.http import StreamingHttpResponse, JsonResponse
from django.conf import settings
from wsgiref.util import FileWrapper
from rest_framework.exceptions import APIException
import mimetypes, os
import mimetypes, os, requests, django
mimetypes.add_type("text/css", ".css", True)
mimetypes.add_type("text/javascript", ".js", True)
def statics(request):
    """Stream a static asset from the built SPA bundle, cached long-term."""
    asset_path = str(settings.BASE_DIR) + '/templates/dist/spa' + request.path_info
    guessed_type = mimetypes.guess_type(asset_path)[0]
    response = StreamingHttpResponse(
        FileWrapper(open(asset_path, 'rb')), content_type=guessed_type
    )
    response['Cache-Control'] = "max-age=864000000000"
    return response
156,567 | from django.http import StreamingHttpResponse, JsonResponse
from django.conf import settings
from wsgiref.util import FileWrapper
from rest_framework.exceptions import APIException
import mimetypes, os
import mimetypes, os, requests, django
mimetypes.add_type("text/css", ".css", True)
mimetypes.add_type("text/javascript", ".js", True)
def fonts(request):
    """Stream a font asset from the built SPA bundle, cached long-term."""
    asset_path = str(settings.BASE_DIR) + '/templates/dist/spa' + request.path_info
    guessed_type = mimetypes.guess_type(asset_path)[0]
    response = StreamingHttpResponse(
        FileWrapper(open(asset_path, 'rb')), content_type=guessed_type
    )
    response['Cache-Control'] = "max-age=864000000000"
    return response
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.