id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
160,049 | import gradio as gr
from annotator.util import resize_image, HWC3
model_openpose = None
def HWC3(x):
    """Return *x* as an H x W x 3 uint8 image.

    A 2-D grayscale array gains a channel axis and is tiled to three
    channels; a 4-channel RGBA image is blended over a white background;
    a 3-channel image is returned unchanged.
    """
    assert x.dtype == np.uint8
    img = x[:, :, None] if x.ndim == 2 else x
    assert img.ndim == 3
    channels = img.shape[2]
    assert channels in (1, 3, 4)
    if channels == 3:
        return img
    if channels == 1:
        return np.repeat(img, 3, axis=2)
    # RGBA: composite the color planes over white using the alpha plane.
    rgb = img[:, :, :3].astype(np.float32)
    a = img[:, :, 3:4].astype(np.float32) / 255.0
    blended = rgb * a + 255.0 * (1.0 - a)
    return blended.clip(0, 255).astype(np.uint8)
def resize_image(input_image, resolution):
    """Scale the image so its short side is about *resolution* pixels.

    Both output dimensions are rounded to the nearest multiple of 64.
    Lanczos interpolation is used when upscaling, area averaging when
    downscaling.
    """
    h, w = input_image.shape[:2]
    scale = float(resolution) / min(float(h), float(w))
    new_h = int(np.round(float(h) * scale / 64.0)) * 64
    new_w = int(np.round(float(w) * scale / 64.0)) * 64
    interp = cv2.INTER_LANCZOS4 if scale > 1 else cv2.INTER_AREA
    return cv2.resize(input_image, (new_w, new_h), interpolation=interp)
class OpenposeDetector:
    """OpenPose-based body (optionally hand and face) keypoint annotator."""

    def __init__(self):
        # Local checkpoint locations under the shared annotator cache dir.
        body_modelpath = os.path.join(annotator_ckpts_path, "body_pose_model.pth")
        hand_modelpath = os.path.join(annotator_ckpts_path, "hand_pose_model.pth")
        face_modelpath = os.path.join(annotator_ckpts_path, "facenet.pth")
        # Download any missing checkpoint. NOTE(review): the *_model_path names
        # (with underscore) are presumably module-level URL constants defined
        # outside this view -- TODO confirm they exist.
        if not os.path.exists(body_modelpath):
            from basicsr.utils.download_util import load_file_from_url
            load_file_from_url(body_model_path, model_dir=annotator_ckpts_path)
        if not os.path.exists(hand_modelpath):
            from basicsr.utils.download_util import load_file_from_url
            load_file_from_url(hand_model_path, model_dir=annotator_ckpts_path)
        if not os.path.exists(face_modelpath):
            from basicsr.utils.download_util import load_file_from_url
            load_file_from_url(face_model_path, model_dir=annotator_ckpts_path)
        self.body_estimation = Body(body_modelpath)
        self.hand_estimation = Hand(hand_modelpath)
        self.face_estimation = Face(face_modelpath)

    def __call__(self, oriImg, hand_and_face=False, return_is_index=False):
        # Reverse the channel order before inference (RGB <-> BGR).
        oriImg = oriImg[:, :, ::-1].copy()
        H, W, C = oriImg.shape
        with torch.no_grad():
            candidate, subset = self.body_estimation(oriImg)
            hands = []
            faces = []
            if hand_and_face:
                # Hand: run the hand model on each square crop returned by the detector.
                hands_list = util.handDetect(candidate, subset, oriImg)
                for x, y, w, is_left in hands_list:
                    peaks = self.hand_estimation(oriImg[y:y+w, x:x+w, :]).astype(np.float32)
                    if peaks.ndim == 2 and peaks.shape[1] == 2:
                        # Shift crop-local peaks to image coordinates, then
                        # normalize to [0, 1]; near-zero peaks are flagged -1.
                        # NOTE(review): the -1 sentinel is also divided by W/H.
                        peaks[:, 0] = np.where(peaks[:, 0] < 1e-6, -1, peaks[:, 0] + x) / float(W)
                        peaks[:, 1] = np.where(peaks[:, 1] < 1e-6, -1, peaks[:, 1] + y) / float(H)
                        hands.append(peaks.tolist())
                # Face: same crop-and-normalize flow via face heatmap peaks.
                faces_list = util.faceDetect(candidate, subset, oriImg)
                for x, y, w in faces_list:
                    heatmaps = self.face_estimation(oriImg[y:y+w, x:x+w, :])
                    peaks = self.face_estimation.compute_peaks_from_heatmaps(heatmaps).astype(np.float32)
                    if peaks.ndim == 2 and peaks.shape[1] == 2:
                        peaks[:, 0] = np.where(peaks[:, 0] < 1e-6, -1, peaks[:, 0] + x) / float(W)
                        peaks[:, 1] = np.where(peaks[:, 1] < 1e-6, -1, peaks[:, 1] + y) / float(H)
                        faces.append(peaks.tolist())
            # Keep only the x, y columns of body candidates and normalize to [0, 1].
            if candidate.ndim == 2 and candidate.shape[1] == 4:
                candidate = candidate[:, :2]
                candidate[:, 0] /= float(W)
                candidate[:, 1] /= float(H)
            bodies = dict(candidate=candidate.tolist(), subset=subset.tolist())
            pose = dict(bodies=bodies, hands=hands, faces=faces)
            if return_is_index:
                return pose
            else:
                return draw_pose(pose, H, W)
def openpose(img, res, hand_and_face):
    """Run the OpenPose annotator on *img* at working resolution *res*.

    The detector is built lazily on first use and cached in the
    module-level ``model_openpose`` global.
    """
    global model_openpose
    if model_openpose is None:
        from annotator.openpose import OpenposeDetector
        model_openpose = OpenposeDetector()
    image = resize_image(HWC3(img), res)
    return [model_openpose(image, hand_and_face)]
160,050 | import gradio as gr
from annotator.util import resize_image, HWC3
model_uniformer = None
def HWC3(x):
def resize_image(input_image, resolution):
class UniformerDetector:
def __init__(self):
def __call__(self, img):
def uniformer(img, res):
    """Run the Uniformer segmentation annotator on *img* at resolution *res*.

    The detector is built lazily on first use and cached in the
    module-level ``model_uniformer`` global.
    """
    global model_uniformer
    if model_uniformer is None:
        from annotator.uniformer import UniformerDetector
        model_uniformer = UniformerDetector()
    image = resize_image(HWC3(img), res)
    return [model_uniformer(image)]
160,051 | import gradio as gr
from annotator.util import resize_image, HWC3
model_lineart_anime = None
def HWC3(x):
    """Return *x* as an H x W x 3 uint8 image.

    A 2-D grayscale array gains a channel axis and is tiled to three
    channels; a 4-channel RGBA image is blended over a white background;
    a 3-channel image is returned unchanged.
    """
    assert x.dtype == np.uint8
    img = x[:, :, None] if x.ndim == 2 else x
    assert img.ndim == 3
    channels = img.shape[2]
    assert channels in (1, 3, 4)
    if channels == 3:
        return img
    if channels == 1:
        return np.repeat(img, 3, axis=2)
    # RGBA: composite the color planes over white using the alpha plane.
    rgb = img[:, :, :3].astype(np.float32)
    a = img[:, :, 3:4].astype(np.float32) / 255.0
    blended = rgb * a + 255.0 * (1.0 - a)
    return blended.clip(0, 255).astype(np.uint8)
def resize_image(input_image, resolution):
    """Scale the image so its short side is about *resolution* pixels.

    Both output dimensions are rounded to the nearest multiple of 64.
    Lanczos interpolation is used when upscaling, area averaging when
    downscaling.
    """
    h, w = input_image.shape[:2]
    scale = float(resolution) / min(float(h), float(w))
    new_h = int(np.round(float(h) * scale / 64.0)) * 64
    new_w = int(np.round(float(w) * scale / 64.0)) * 64
    interp = cv2.INTER_LANCZOS4 if scale > 1 else cv2.INTER_AREA
    return cv2.resize(input_image, (new_w, new_h), interpolation=interp)
class LineartAnimeDetector:
    """Anime-style line drawing extractor built on a UNet generator (CUDA)."""

    def __init__(self):
        remote_model_path = "https://huggingface.co/lllyasviel/Annotators/resolve/main/netG.pth"
        modelpath = os.path.join(annotator_ckpts_path, "netG.pth")
        if not os.path.exists(modelpath):
            from basicsr.utils.download_util import load_file_from_url
            load_file_from_url(remote_model_path, model_dir=annotator_ckpts_path)
        norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
        net = UnetGenerator(3, 1, 8, 64, norm_layer=norm_layer, use_dropout=False)
        ckpt = torch.load(modelpath)
        # Strip DataParallel 'module.' prefixes so keys match the bare model.
        for key in list(ckpt.keys()):
            if 'module.' in key:
                ckpt[key.replace('module.', '')] = ckpt[key]
                del ckpt[key]
        net.load_state_dict(ckpt)
        net = net.cuda()
        net.eval()
        self.model = net

    def __call__(self, input_image):
        H, W, C = input_image.shape
        # Resize up to the next multiple of 256 in each dimension.
        Hn = 256 * int(np.ceil(float(H) / 256.0))
        Wn = 256 * int(np.ceil(float(W) / 256.0))
        img = cv2.resize(input_image, (Wn, Hn), interpolation=cv2.INTER_CUBIC)
        with torch.no_grad():
            image_feed = torch.from_numpy(img).float().cuda()
            # Scale pixels from [0, 255] to [-1, 1].
            image_feed = image_feed / 127.5 - 1.0
            image_feed = rearrange(image_feed, 'h w c -> 1 c h w')
            # Model output is in [-1, 1]; map back to [0, 255].
            line = self.model(image_feed)[0, 0] * 127.5 + 127.5
            line = line.cpu().numpy()
        # Restore the original resolution.
        line = cv2.resize(line, (W, H), interpolation=cv2.INTER_CUBIC)
        line = line.clip(0, 255).astype(np.uint8)
        return line
def lineart_anime(img, res):
    """Run the anime line-art annotator on *img* at resolution *res*.

    The detector is built lazily on first use and cached in the
    module-level ``model_lineart_anime`` global.
    """
    global model_lineart_anime
    if model_lineart_anime is None:
        from annotator.lineart_anime import LineartAnimeDetector
        model_lineart_anime = LineartAnimeDetector()
    image = resize_image(HWC3(img), res)
    return [model_lineart_anime(image)]
160,052 | import gradio as gr
from annotator.util import resize_image, HWC3
model_lineart = None
def HWC3(x):
    """Return *x* as an H x W x 3 uint8 image.

    A 2-D grayscale array gains a channel axis and is tiled to three
    channels; a 4-channel RGBA image is blended over a white background;
    a 3-channel image is returned unchanged.
    """
    assert x.dtype == np.uint8
    img = x[:, :, None] if x.ndim == 2 else x
    assert img.ndim == 3
    channels = img.shape[2]
    assert channels in (1, 3, 4)
    if channels == 3:
        return img
    if channels == 1:
        return np.repeat(img, 3, axis=2)
    # RGBA: composite the color planes over white using the alpha plane.
    rgb = img[:, :, :3].astype(np.float32)
    a = img[:, :, 3:4].astype(np.float32) / 255.0
    blended = rgb * a + 255.0 * (1.0 - a)
    return blended.clip(0, 255).astype(np.uint8)
def resize_image(input_image, resolution):
    """Scale the image so its short side is about *resolution* pixels.

    Both output dimensions are rounded to the nearest multiple of 64.
    Lanczos interpolation is used when upscaling, area averaging when
    downscaling.
    """
    h, w = input_image.shape[:2]
    scale = float(resolution) / min(float(h), float(w))
    new_h = int(np.round(float(h) * scale / 64.0)) * 64
    new_w = int(np.round(float(w) * scale / 64.0)) * 64
    interp = cv2.INTER_LANCZOS4 if scale > 1 else cv2.INTER_AREA
    return cv2.resize(input_image, (new_w, new_h), interpolation=interp)
class LineartDetector:
    """Line drawing extractor with normal and coarse model variants (CUDA)."""

    def __init__(self):
        self.model = self.load_model('sk_model.pth')
        self.model_coarse = self.load_model('sk_model2.pth')

    def load_model(self, name):
        """Download (if needed) and load one generator checkpoint by filename."""
        remote_model_path = "https://huggingface.co/lllyasviel/Annotators/resolve/main/" + name
        modelpath = os.path.join(annotator_ckpts_path, name)
        if not os.path.exists(modelpath):
            from basicsr.utils.download_util import load_file_from_url
            load_file_from_url(remote_model_path, model_dir=annotator_ckpts_path)
        model = Generator(3, 1, 3)
        # Load on CPU first, then move to GPU.
        model.load_state_dict(torch.load(modelpath, map_location=torch.device('cpu')))
        model.eval()
        model = model.cuda()
        return model

    def __call__(self, input_image, coarse):
        # Select the coarse or fine variant.
        model = self.model_coarse if coarse else self.model
        assert input_image.ndim == 3
        image = input_image
        with torch.no_grad():
            image = torch.from_numpy(image).float().cuda()
            # Scale pixels to [0, 1].
            image = image / 255.0
            image = rearrange(image, 'h w c -> 1 c h w')
            line = model(image)[0][0]
            line = line.cpu().numpy()
            line = (line * 255.0).clip(0, 255).astype(np.uint8)
        return line
def lineart(img, res, coarse=False):
    """Run the line-art annotator on *img* at resolution *res*.

    The detector is built lazily on first use and cached in the
    module-level ``model_lineart`` global. *coarse* selects the coarse
    model variant.
    """
    global model_lineart
    if model_lineart is None:
        from annotator.lineart import LineartDetector
        model_lineart = LineartDetector()
    image = resize_image(HWC3(img), res)
    return [model_lineart(image, coarse)]
160,053 | import gradio as gr
from annotator.util import resize_image, HWC3
model_oneformer_coco = None
def HWC3(x):
    """Return *x* as an H x W x 3 uint8 image.

    A 2-D grayscale array gains a channel axis and is tiled to three
    channels; a 4-channel RGBA image is blended over a white background;
    a 3-channel image is returned unchanged.
    """
    assert x.dtype == np.uint8
    img = x[:, :, None] if x.ndim == 2 else x
    assert img.ndim == 3
    channels = img.shape[2]
    assert channels in (1, 3, 4)
    if channels == 3:
        return img
    if channels == 1:
        return np.repeat(img, 3, axis=2)
    # RGBA: composite the color planes over white using the alpha plane.
    rgb = img[:, :, :3].astype(np.float32)
    a = img[:, :, 3:4].astype(np.float32) / 255.0
    blended = rgb * a + 255.0 * (1.0 - a)
    return blended.clip(0, 255).astype(np.uint8)
def resize_image(input_image, resolution):
    """Scale the image so its short side is about *resolution* pixels.

    Both output dimensions are rounded to the nearest multiple of 64.
    Lanczos interpolation is used when upscaling, area averaging when
    downscaling.
    """
    h, w = input_image.shape[:2]
    scale = float(resolution) / min(float(h), float(w))
    new_h = int(np.round(float(h) * scale / 64.0)) * 64
    new_w = int(np.round(float(w) * scale / 64.0)) * 64
    interp = cv2.INTER_LANCZOS4 if scale > 1 else cv2.INTER_AREA
    return cv2.resize(input_image, (new_w, new_h), interpolation=interp)
class OneformerCOCODetector:
    """OneFormer semantic segmentation annotator trained on COCO."""

    def __init__(self):
        remote_model_path = "https://huggingface.co/lllyasviel/Annotators/resolve/main/150_16_swin_l_oneformer_coco_100ep.pth"
        modelpath = os.path.join(annotator_ckpts_path, "150_16_swin_l_oneformer_coco_100ep.pth")
        # Download the checkpoint on first use.
        if not os.path.exists(modelpath):
            from basicsr.utils.download_util import load_file_from_url
            load_file_from_url(remote_model_path, model_dir=annotator_ckpts_path)
        # Config file lives next to this module.
        config = os.path.join(os.path.dirname(__file__), 'configs/coco/oneformer_swin_large_IN21k_384_bs16_100ep.yaml')
        self.model, self.meta = make_detectron2_model(config, modelpath)

    def __call__(self, img):
        # Delegate to the shared detectron2 semantic-segmentation runner.
        return semantic_run(img, self.model, self.meta)
def oneformer_coco(img, res):
    """Run the COCO OneFormer segmentation annotator on *img* at *res*.

    The detector is built lazily on first use and cached in the
    module-level ``model_oneformer_coco`` global.
    """
    global model_oneformer_coco
    if model_oneformer_coco is None:
        from annotator.oneformer import OneformerCOCODetector
        model_oneformer_coco = OneformerCOCODetector()
    image = resize_image(HWC3(img), res)
    return [model_oneformer_coco(image)]
160,054 | import gradio as gr
from annotator.util import resize_image, HWC3
model_oneformer_ade20k = None
def HWC3(x):
    """Return *x* as an H x W x 3 uint8 image.

    A 2-D grayscale array gains a channel axis and is tiled to three
    channels; a 4-channel RGBA image is blended over a white background;
    a 3-channel image is returned unchanged.
    """
    assert x.dtype == np.uint8
    img = x[:, :, None] if x.ndim == 2 else x
    assert img.ndim == 3
    channels = img.shape[2]
    assert channels in (1, 3, 4)
    if channels == 3:
        return img
    if channels == 1:
        return np.repeat(img, 3, axis=2)
    # RGBA: composite the color planes over white using the alpha plane.
    rgb = img[:, :, :3].astype(np.float32)
    a = img[:, :, 3:4].astype(np.float32) / 255.0
    blended = rgb * a + 255.0 * (1.0 - a)
    return blended.clip(0, 255).astype(np.uint8)
def resize_image(input_image, resolution):
    """Scale the image so its short side is about *resolution* pixels.

    Both output dimensions are rounded to the nearest multiple of 64.
    Lanczos interpolation is used when upscaling, area averaging when
    downscaling.
    """
    h, w = input_image.shape[:2]
    scale = float(resolution) / min(float(h), float(w))
    new_h = int(np.round(float(h) * scale / 64.0)) * 64
    new_w = int(np.round(float(w) * scale / 64.0)) * 64
    interp = cv2.INTER_LANCZOS4 if scale > 1 else cv2.INTER_AREA
    return cv2.resize(input_image, (new_w, new_h), interpolation=interp)
class OneformerADE20kDetector:
    """OneFormer semantic segmentation annotator trained on ADE20k."""

    def __init__(self):
        remote_model_path = "https://huggingface.co/lllyasviel/Annotators/resolve/main/250_16_swin_l_oneformer_ade20k_160k.pth"
        modelpath = os.path.join(annotator_ckpts_path, "250_16_swin_l_oneformer_ade20k_160k.pth")
        # Download the checkpoint on first use.
        if not os.path.exists(modelpath):
            from basicsr.utils.download_util import load_file_from_url
            load_file_from_url(remote_model_path, model_dir=annotator_ckpts_path)
        # Config file lives next to this module.
        config = os.path.join(os.path.dirname(__file__), 'configs/ade20k/oneformer_swin_large_IN21k_384_bs16_160k.yaml')
        self.model, self.meta = make_detectron2_model(config, modelpath)

    def __call__(self, img):
        # Delegate to the shared detectron2 semantic-segmentation runner.
        return semantic_run(img, self.model, self.meta)
def oneformer_ade20k(img, res):
    """Run the ADE20k OneFormer segmentation annotator on *img* at *res*.

    The detector is built lazily on first use and cached in the
    module-level ``model_oneformer_ade20k`` global.
    """
    global model_oneformer_ade20k
    if model_oneformer_ade20k is None:
        from annotator.oneformer import OneformerADE20kDetector
        model_oneformer_ade20k = OneformerADE20kDetector()
    image = resize_image(HWC3(img), res)
    return [model_oneformer_ade20k(image)]
160,055 | import gradio as gr
from annotator.util import resize_image, HWC3
model_content_shuffler = None
def HWC3(x):
    """Return *x* as an H x W x 3 uint8 image.

    A 2-D grayscale array gains a channel axis and is tiled to three
    channels; a 4-channel RGBA image is blended over a white background;
    a 3-channel image is returned unchanged.
    """
    assert x.dtype == np.uint8
    img = x[:, :, None] if x.ndim == 2 else x
    assert img.ndim == 3
    channels = img.shape[2]
    assert channels in (1, 3, 4)
    if channels == 3:
        return img
    if channels == 1:
        return np.repeat(img, 3, axis=2)
    # RGBA: composite the color planes over white using the alpha plane.
    rgb = img[:, :, :3].astype(np.float32)
    a = img[:, :, 3:4].astype(np.float32) / 255.0
    blended = rgb * a + 255.0 * (1.0 - a)
    return blended.clip(0, 255).astype(np.uint8)
def resize_image(input_image, resolution):
    """Scale the image so its short side is about *resolution* pixels.

    Both output dimensions are rounded to the nearest multiple of 64.
    Lanczos interpolation is used when upscaling, area averaging when
    downscaling.
    """
    h, w = input_image.shape[:2]
    scale = float(resolution) / min(float(h), float(w))
    new_h = int(np.round(float(h) * scale / 64.0)) * 64
    new_w = int(np.round(float(w) * scale / 64.0)) * 64
    interp = cv2.INTER_LANCZOS4 if scale > 1 else cv2.INTER_AREA
    return cv2.resize(input_image, (new_w, new_h), interpolation=interp)
class ContentShuffleDetector:
    """Spatially shuffles image content using a smooth random flow field."""

    def __call__(self, img, h=None, w=None, f=None):
        # h, w: output size (default: input size); f: noise frequency parameter.
        H, W, C = img.shape
        if h is None:
            h = H
        if w is None:
            w = W
        if f is None:
            f = 256
        # Random smooth maps of absolute source pixel coordinates.
        x = make_noise_disk(h, w, 1, f) * float(W - 1)
        y = make_noise_disk(h, w, 1, f) * float(H - 1)
        # Stack into an (h, w, 2) float32 flow map for cv2.remap.
        flow = np.concatenate([x, y], axis=2).astype(np.float32)
        return cv2.remap(img, flow, None, cv2.INTER_LINEAR)
def content_shuffler(img, res):
    """Run the content-shuffle annotator on *img* at resolution *res*.

    The detector is built lazily on first use and cached in the
    module-level ``model_content_shuffler`` global.
    """
    global model_content_shuffler
    if model_content_shuffler is None:
        from annotator.shuffle import ContentShuffleDetector
        model_content_shuffler = ContentShuffleDetector()
    image = resize_image(HWC3(img), res)
    return [model_content_shuffler(image)]
160,056 | import gradio as gr
from annotator.util import resize_image, HWC3
model_color_shuffler = None
def HWC3(x):
    """Return *x* as an H x W x 3 uint8 image.

    A 2-D grayscale array gains a channel axis and is tiled to three
    channels; a 4-channel RGBA image is blended over a white background;
    a 3-channel image is returned unchanged.
    """
    assert x.dtype == np.uint8
    img = x[:, :, None] if x.ndim == 2 else x
    assert img.ndim == 3
    channels = img.shape[2]
    assert channels in (1, 3, 4)
    if channels == 3:
        return img
    if channels == 1:
        return np.repeat(img, 3, axis=2)
    # RGBA: composite the color planes over white using the alpha plane.
    rgb = img[:, :, :3].astype(np.float32)
    a = img[:, :, 3:4].astype(np.float32) / 255.0
    blended = rgb * a + 255.0 * (1.0 - a)
    return blended.clip(0, 255).astype(np.uint8)
def resize_image(input_image, resolution):
    """Scale the image so its short side is about *resolution* pixels.

    Both output dimensions are rounded to the nearest multiple of 64.
    Lanczos interpolation is used when upscaling, area averaging when
    downscaling.
    """
    h, w = input_image.shape[:2]
    scale = float(resolution) / min(float(h), float(w))
    new_h = int(np.round(float(h) * scale / 64.0)) * 64
    new_w = int(np.round(float(w) * scale / 64.0)) * 64
    interp = cv2.INTER_LANCZOS4 if scale > 1 else cv2.INTER_AREA
    return cv2.resize(input_image, (new_w, new_h), interpolation=interp)
class ColorShuffleDetector:
    """Recolors an image by blending two random smooth color fields."""

    def __call__(self, img):
        H, W, C = img.shape
        # Random spatial frequency for the noise fields.
        F = random.randint(64, 384)
        A = make_noise_disk(H, W, 3, F)
        B = make_noise_disk(H, W, 3, F)
        C = (A + B) / 2.0  # NOTE: rebinds C (was the channel count) as the mean field
        # Push A and B away from their mean to boost contrast, clamped to [0, 1].
        A = (C + (A - C) * 3.0).clip(0, 1)
        B = (C + (B - C) * 3.0).clip(0, 1)
        L = img.astype(np.float32) / 255.0
        # Per-pixel blend of the two color fields, weighted by the normalized input.
        Y = A * L + B * (1 - L)
        # Normalize each channel to span the full [0, 255] range.
        Y -= np.min(Y, axis=(0, 1), keepdims=True)
        Y /= np.maximum(np.max(Y, axis=(0, 1), keepdims=True), 1e-5)
        Y *= 255.0
        return Y.clip(0, 255).astype(np.uint8)
def color_shuffler(img, res):
    """Run the color-shuffle annotator on *img* at resolution *res*.

    The detector is built lazily on first use and cached in the
    module-level ``model_color_shuffler`` global.
    """
    global model_color_shuffler
    if model_color_shuffler is None:
        from annotator.shuffle import ColorShuffleDetector
        model_color_shuffler = ColorShuffleDetector()
    image = resize_image(HWC3(img), res)
    return [model_color_shuffler(image)]
160,057 | from share import *
import config
import cv2
import einops
import gradio as gr
import numpy as np
import torch
import random
from pytorch_lightning import seed_everything
from annotator.util import resize_image, HWC3
from annotator.hed import HEDdetector
from annotator.pidinet import PidiNetDetector
from annotator.util import nms
from cldm.model import create_model, load_state_dict
from cldm.ddim_hacked import DDIMSampler
# Cached edge-detector instance, created lazily in process().
preprocessor = None
# NOTE(review): `model_name` must be defined earlier (outside this view);
# it selects both the .yaml config and the .pth ControlNet weights -- confirm.
model = create_model(f'./models/{model_name}.yaml').cpu()
# Load base SD 1.5 weights first, then the ControlNet weights on top;
# strict=False because each checkpoint covers only part of the model.
model.load_state_dict(load_state_dict('./models/v1-5-pruned.ckpt', location='cuda'), strict=False)
model.load_state_dict(load_state_dict(f'./models/{model_name}.pth', location='cuda'), strict=False)
model = model.cuda()
ddim_sampler = DDIMSampler(model)
def HWC3(x):
    """Return *x* as an H x W x 3 uint8 image.

    A 2-D grayscale array gains a channel axis and is tiled to three
    channels; a 4-channel RGBA image is blended over a white background;
    a 3-channel image is returned unchanged.
    """
    assert x.dtype == np.uint8
    img = x[:, :, None] if x.ndim == 2 else x
    assert img.ndim == 3
    channels = img.shape[2]
    assert channels in (1, 3, 4)
    if channels == 3:
        return img
    if channels == 1:
        return np.repeat(img, 3, axis=2)
    # RGBA: composite the color planes over white using the alpha plane.
    rgb = img[:, :, :3].astype(np.float32)
    a = img[:, :, 3:4].astype(np.float32) / 255.0
    blended = rgb * a + 255.0 * (1.0 - a)
    return blended.clip(0, 255).astype(np.uint8)
def resize_image(input_image, resolution):
    """Scale the image so its short side is about *resolution* pixels.

    Both output dimensions are rounded to the nearest multiple of 64.
    Lanczos interpolation is used when upscaling, area averaging when
    downscaling.
    """
    h, w = input_image.shape[:2]
    scale = float(resolution) / min(float(h), float(w))
    new_h = int(np.round(float(h) * scale / 64.0)) * 64
    new_w = int(np.round(float(w) * scale / 64.0)) * 64
    interp = cv2.INTER_LANCZOS4 if scale > 1 else cv2.INTER_AREA
    return cv2.resize(input_image, (new_w, new_h), interpolation=interp)
def nms(x, t, s):
    """Thin an edge map via directional non-maximum suppression.

    x -- edge-intensity map
    t -- threshold applied after suppression
    s -- Gaussian sigma used to smooth before suppression
    Returns a uint8 map with surviving edge pixels set to 255.
    """
    blurred = cv2.GaussianBlur(x.astype(np.float32), (0, 0), s)
    kernels = (
        np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]], dtype=np.uint8),  # horizontal
        np.array([[0, 1, 0], [0, 1, 0], [0, 1, 0]], dtype=np.uint8),  # vertical
        np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.uint8),  # diagonal \
        np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]], dtype=np.uint8),  # diagonal /
    )
    keep = np.zeros_like(blurred)
    for kernel in kernels:
        # Retain pixels that are the local maximum along this direction.
        np.putmask(keep, cv2.dilate(blurred, kernel=kernel) == blurred, blurred)
    out = np.zeros_like(keep, dtype=np.uint8)
    out[keep > t] = 255
    return out
class HEDdetector:
    """Holistically-nested edge detection annotator (CUDA)."""

    def __init__(self):
        remote_model_path = "https://huggingface.co/lllyasviel/Annotators/resolve/main/ControlNetHED.pth"
        modelpath = os.path.join(annotator_ckpts_path, "ControlNetHED.pth")
        # Download the checkpoint on first use.
        if not os.path.exists(modelpath):
            from basicsr.utils.download_util import load_file_from_url
            load_file_from_url(remote_model_path, model_dir=annotator_ckpts_path)
        self.netNetwork = ControlNetHED_Apache2().float().cuda().eval()
        self.netNetwork.load_state_dict(torch.load(modelpath))

    def __call__(self, input_image, safe=False):
        assert input_image.ndim == 3
        H, W, C = input_image.shape
        with torch.no_grad():
            image_hed = torch.from_numpy(input_image.copy()).float().cuda()
            image_hed = rearrange(image_hed, 'h w c -> 1 c h w')
            # Multi-scale side outputs from the network.
            edges = self.netNetwork(image_hed)
            edges = [e.detach().cpu().numpy().astype(np.float32)[0, 0] for e in edges]
            # Resize every side output to the input resolution and stack.
            edges = [cv2.resize(e, (W, H), interpolation=cv2.INTER_LINEAR) for e in edges]
            edges = np.stack(edges, axis=2)
            # Sigmoid of the mean side output -> edge probability in (0, 1).
            edge = 1 / (1 + np.exp(-np.mean(edges, axis=2).astype(np.float64)))
            if safe:
                edge = safe_step(edge)
            edge = (edge * 255.0).clip(0, 255).astype(np.uint8)
        return edge
class PidiNetDetector:
    """PiDiNet edge detection annotator (CUDA)."""

    def __init__(self):
        remote_model_path = "https://huggingface.co/lllyasviel/Annotators/resolve/main/table5_pidinet.pth"
        modelpath = os.path.join(annotator_ckpts_path, "table5_pidinet.pth")
        # Download the checkpoint on first use.
        if not os.path.exists(modelpath):
            from basicsr.utils.download_util import load_file_from_url
            load_file_from_url(remote_model_path, model_dir=annotator_ckpts_path)
        self.netNetwork = pidinet()
        # Checkpoint keys carry a 'module.' prefix; strip it to match the bare model.
        self.netNetwork.load_state_dict({k.replace('module.', ''): v for k, v in torch.load(modelpath)['state_dict'].items()})
        self.netNetwork = self.netNetwork.cuda()
        self.netNetwork.eval()

    def __call__(self, input_image, safe=False):
        assert input_image.ndim == 3
        # Reverse the channel order before inference (RGB <-> BGR).
        input_image = input_image[:, :, ::-1].copy()
        with torch.no_grad():
            image_pidi = torch.from_numpy(input_image).float().cuda()
            # Scale pixels to [0, 1].
            image_pidi = image_pidi / 255.0
            image_pidi = rearrange(image_pidi, 'h w c -> 1 c h w')
            # Use the last (final) output of the network.
            edge = self.netNetwork(image_pidi)[-1]
            edge = edge.cpu().numpy()
            if safe:
                edge = safe_step(edge)
            edge = (edge * 255.0).clip(0, 255).astype(np.uint8)
        return edge[0][0]
def process(det, input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution, ddim_steps, guess_mode, strength, scale, seed, eta):
    """Full scribble-ControlNet pipeline.

    Detects edges with the selected detector ('HED'/'PIDI' substring in
    *det*, or 'None' to use the raw image), post-processes them into a
    binary scribble map, and samples *num_samples* images via DDIM.
    Returns [detected_map] + generated images.
    """
    global preprocessor
    # Lazily (re)build the requested edge detector, keeping it cached.
    if 'HED' in det:
        if not isinstance(preprocessor, HEDdetector):
            preprocessor = HEDdetector()
    if 'PIDI' in det:
        if not isinstance(preprocessor, PidiNetDetector):
            preprocessor = PidiNetDetector()
    with torch.no_grad():
        input_image = HWC3(input_image)
        if det == 'None':
            detected_map = input_image.copy()
        else:
            detected_map = preprocessor(resize_image(input_image, detect_resolution))
            detected_map = HWC3(detected_map)
        img = resize_image(input_image, image_resolution)
        H, W, C = img.shape
        detected_map = cv2.resize(detected_map, (W, H), interpolation=cv2.INTER_LINEAR)
        # Turn soft edges into a binary scribble: NMS thinning, blur, threshold.
        detected_map = nms(detected_map, 127, 3.0)
        detected_map = cv2.GaussianBlur(detected_map, (0, 0), 3.0)
        detected_map[detected_map > 4] = 255
        detected_map[detected_map < 255] = 0
        # Build the conditioning tensor: normalized map, batched, NCHW.
        control = torch.from_numpy(detected_map.copy()).float().cuda() / 255.0
        control = torch.stack([control for _ in range(num_samples)], dim=0)
        control = einops.rearrange(control, 'b h w c -> b c h w').clone()
        # seed == -1 means "pick a random seed".
        if seed == -1:
            seed = random.randint(0, 65535)
        seed_everything(seed)
        if config.save_memory:
            model.low_vram_shift(is_diffusing=False)
        cond = {"c_concat": [control], "c_crossattn": [model.get_learned_conditioning([prompt + ', ' + a_prompt] * num_samples)]}
        # In guess mode the unconditional branch gets no control signal.
        un_cond = {"c_concat": None if guess_mode else [control], "c_crossattn": [model.get_learned_conditioning([n_prompt] * num_samples)]}
        # Latent-space shape: 4 channels at 1/8 spatial resolution.
        shape = (4, H // 8, W // 8)
        if config.save_memory:
            model.low_vram_shift(is_diffusing=True)
        model.control_scales = [strength * (0.825 ** float(12 - i)) for i in range(13)] if guess_mode else ([strength] * 13)
        # Magic number. IDK why. Perhaps because 0.825**12<0.01 but 0.826**12>0.01
        samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples,
                                                     shape, cond, verbose=False, eta=eta,
                                                     unconditional_guidance_scale=scale,
                                                     unconditional_conditioning=un_cond)
        if config.save_memory:
            model.low_vram_shift(is_diffusing=False)
        # Decode latents back to images in [0, 255] uint8 HWC.
        x_samples = model.decode_first_stage(samples)
        x_samples = (einops.rearrange(x_samples, 'b c h w -> b h w c') * 127.5 + 127.5).cpu().numpy().clip(0, 255).astype(np.uint8)
        results = [x_samples[i] for i in range(num_samples)]
    return [detected_map] + results
160,167 | import json
import os
import re
import numpy as np
import requests
import tensorflow as tf
from tqdm import tqdm
from encoder import get_encoder
def download_gpt2_files(model_size, model_dir):
    """Download the GPT-2 checkpoint files for *model_size* into *model_dir*.

    model_size -- one of "124M", "355M", "774M", "1558M"
    model_dir -- destination directory (must exist)
    Raises requests.HTTPError if any download fails.
    """
    assert model_size in ["124M", "355M", "774M", "1558M"]
    base_url = "https://openaipublic.blob.core.windows.net/gpt-2/models"
    for filename in [
        "checkpoint",
        "encoder.json",
        "hparams.json",
        "model.ckpt.data-00000-of-00001",
        "model.ckpt.index",
        "model.ckpt.meta",
        "vocab.bpe",
    ]:
        # BUG FIX: the URL previously ended with a literal "(unknown)"
        # placeholder instead of the filename being fetched.
        r = requests.get(f"{base_url}/{model_size}/{filename}", stream=True)
        r.raise_for_status()
        file_size = int(r.headers["content-length"])
        # 1k for chunk_size, since Ethernet packet size is around 1500 bytes
        chunk_size = 1000
        with open(os.path.join(model_dir, filename), "wb") as f:
            with tqdm(
                ncols=100,
                desc="Fetching " + filename,
                total=file_size,
                unit_scale=True,
                unit="b",
            ) as pbar:
                for chunk in r.iter_content(chunk_size=chunk_size):
                    f.write(chunk)
                    # BUG FIX: advance by the bytes actually received so the
                    # bar ends exactly at file_size (the last chunk is short).
                    pbar.update(len(chunk))
def load_gpt2_params_from_tf_ckpt(tf_ckpt_path, hparams):
    """Load GPT-2 weights from a TensorFlow checkpoint into nested dicts.

    Returns a dict with top-level parameter arrays plus a "blocks" list
    holding one parameter dict per transformer layer (hparams["n_layer"]).
    """
    def set_in_nested_dict(d, keys, val):
        # Recursively descend (creating sub-dicts as needed) and set the leaf.
        if not keys:
            return val
        if keys[0] not in d:
            d[keys[0]] = {}
        d[keys[0]] = set_in_nested_dict(d[keys[0]], keys[1:], val)
        return d

    params = {"blocks": [{} for _ in range(hparams["n_layer"])]}
    for name, _ in tf.train.list_variables(tf_ckpt_path):
        # Squeeze drops singleton dimensions present in the stored variables.
        array = np.squeeze(tf.train.load_variable(tf_ckpt_path, name))
        # Strip the leading "model/" scope from the variable name.
        name = name[len("model/") :]
        if name.startswith("h"):
            # Variables named "h<N>/..." belong to transformer block N.
            m = re.match(r"h([0-9]+)/(.*)", name)
            n = int(m[1])
            sub_name = m[2]
            set_in_nested_dict(params["blocks"][n], sub_name.split("/"), array)
        else:
            set_in_nested_dict(params, name.split("/"), array)
    return params
def get_encoder(model_name, models_dir):
    """Build the BPE text encoder from the model's vocabulary files.

    Reads encoder.json (token -> id map) and vocab.bpe (merge list) from
    models_dir/model_name. `Encoder` is presumably the class provided by
    the project's encoder module -- TODO confirm.
    """
    with open(os.path.join(models_dir, model_name, "encoder.json"), "r") as f:
        encoder = json.load(f)
    with open(os.path.join(models_dir, model_name, "vocab.bpe"), "r", encoding="utf-8") as f:
        bpe_data = f.read()
    # [1:-1] skips the first line (presumably a version header) and the
    # trailing empty line of vocab.bpe.
    bpe_merges = [tuple(merge_str.split()) for merge_str in bpe_data.split("\n")[1:-1]]
    return Encoder(encoder=encoder, bpe_merges=bpe_merges)
def load_encoder_hparams_and_params(model_size, models_dir):
    """Return (encoder, hparams, params) for a GPT-2 model.

    Downloads the checkpoint files first if they are not present under
    models_dir/model_size.
    """
    assert model_size in ["124M", "355M", "774M", "1558M"]
    model_dir = os.path.join(models_dir, model_size)
    tf_ckpt_path = tf.train.latest_checkpoint(model_dir)
    if not tf_ckpt_path:  # download files if necessary
        os.makedirs(model_dir, exist_ok=True)
        download_gpt2_files(model_size, model_dir)
        tf_ckpt_path = tf.train.latest_checkpoint(model_dir)
    encoder = get_encoder(model_size, models_dir)
    # FIX: use a context manager so the hparams file handle is closed
    # (json.load(open(...)) leaked the handle).
    with open(os.path.join(model_dir, "hparams.json")) as f:
        hparams = json.load(f)
    params = load_gpt2_params_from_tf_ckpt(tf_ckpt_path, hparams)
    return encoder, hparams, params
160,168 | import numpy as np
def gpt2(inputs, wte, wpe, blocks, ln_f, n_head):
    """GPT-2 forward pass: token ids [n_seq] -> logits [n_seq, n_vocab]."""
    # token embedding + learned positional embedding
    x = wte[inputs] + wpe[range(len(inputs))]
    # run every transformer block in sequence
    for block in blocks:
        x = transformer_block(x, **block, n_head=n_head)
    # final layer norm, then project to the vocabulary via the tied embedding
    return layer_norm(x, **ln_f) @ wte.T
def generate(inputs, params, n_head, n_tokens_to_generate):
    """Greedy auto-regressive decoding.

    Appends n_tokens_to_generate ids to *inputs* (mutated in place) and
    returns only the newly generated ids.
    """
    from tqdm import tqdm
    for _ in tqdm(range(n_tokens_to_generate), "generating"):
        logits = gpt2(inputs, **params, n_head=n_head)
        # greedy: take the highest-probability token at the last position
        inputs.append(int(np.argmax(logits[-1])))
    return inputs[len(inputs) - n_tokens_to_generate:]
160,169 | import numpy as np
def gpt2(inputs, wte, wpe, blocks, ln_f, n_head):  # [n_seq] -> [n_seq, n_vocab]
    """GPT-2 forward pass: token ids to per-position vocabulary logits."""
    # token + positional embeddings
    x = wte[inputs] + wpe[range(len(inputs))]  # [n_seq] -> [n_seq, n_embd]
    # forward pass through n_layer transformer blocks
    for block in blocks:
        x = transformer_block(x, **block, n_head=n_head)  # [n_seq, n_embd] -> [n_seq, n_embd]
    # projection to vocab (weights tied with the token embedding)
    x = layer_norm(x, **ln_f)  # [n_seq, n_embd] -> [n_seq, n_embd]
    return x @ wte.T  # [n_seq, n_embd] -> [n_seq, n_vocab]
def generate(inputs, params, n_head, n_tokens_to_generate):
    """Greedily decode n_tokens_to_generate new ids; returns only the new ids."""
    from tqdm import tqdm
    for _ in tqdm(range(n_tokens_to_generate), "generating"):  # auto-regressive decode loop
        logits = gpt2(inputs, **params, n_head=n_head)  # model forward pass
        next_id = np.argmax(logits[-1])  # greedy sampling
        inputs.append(int(next_id))  # append prediction to input
    return inputs[len(inputs) - n_tokens_to_generate :]  # only return generated ids
160,170 | import json
import os
from functools import lru_cache
import regex as re
The provided code snippet includes necessary dependencies for implementing the `bytes_to_unicode` function. Write a Python function `def bytes_to_unicode()` to solve the following problem:
Returns list of utf-8 byte and a corresponding list of unicode strings. The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup tables between utf-8 bytes and unicode strings. And avoids mapping to whitespace/control characters the bpe code barfs on.
Here is the function:
def bytes_to_unicode():
    """Build the reversible byte -> unicode-character table used by the BPE.

    Printable bytes map to themselves; the remaining byte values are
    assigned fresh code points starting at 256, so no byte maps to the
    whitespace/control characters the BPE code cannot handle. Returns a
    dict with exactly 256 entries (one unicode character per byte value).
    """
    printable = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    byte_values = list(printable)
    char_codes = list(printable)
    offset = 0
    for b in range(2**8):
        if b in printable:
            continue
        byte_values.append(b)
        char_codes.append(2**8 + offset)
        offset += 1
    return {b: chr(c) for b, c in zip(byte_values, char_codes)}
160,172 | import csv
import logging
from pathlib import Path
import numpy as np
from tqdm import tqdm
from epe.dataset.utils import load_crops
logger = logging.getLogger('epe.matching.filter')
The provided code snippet includes necessary dependencies for implementing the `load_matching_crops` function. Write a Python function `def load_matching_crops(path)` to solve the following problem:
Loads pairs of crops from a csv file.
Here is the function:
def load_matching_crops(path):
    """ Loads pairs of crops from a csv file. """
    logger.debug(f'Loading cached crop matches from "{path}" ...')
    src_crops = []
    dst_crops = []
    with open(path) as file:
        for row in csv.DictReader(file):
            # each row carries one source crop and its matched target crop
            src_crops.append((row['src_path'], int(row['src_r0']), int(row['src_r1']),
                              int(row['src_c0']), int(row['src_c1'])))
            dst_crops.append((row['dst_path'], int(row['dst_r0']), int(row['dst_r1']),
                              int(row['dst_c0']), int(row['dst_c1'])))
    logger.debug(f'Loaded {len(src_crops)} crop matches.')
    return src_crops, dst_crops
160,173 | import csv
import logging
from pathlib import Path
import numpy as np
from tqdm import tqdm
from epe.dataset.utils import load_crops
The provided code snippet includes necessary dependencies for implementing the `save_matching_crops` function. Write a Python function `def save_matching_crops(src_crops, dst_crops, path)` to solve the following problem:
Saves pairs of matched crops to a csv file.
Here is the function:
def save_matching_crops(src_crops, dst_crops, path):
    """ Saves pairs of matched crops to a csv file. """
    fieldnames = ['src_path', 'src_r0', 'src_r1', 'src_c0', 'src_c1',
                  'dst_path', 'dst_r0', 'dst_r1', 'dst_c0', 'dst_c1']
    with open(path, 'w', newline='') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()
        # each crop tuple is (path, r0, r1, c0, c1), matching the column order
        for src, dst in zip(src_crops, dst_crops):
            writer.writerow(dict(zip(fieldnames, list(src) + list(dst))))
160,174 | import csv
import logging
from pathlib import Path
import numpy as np
from tqdm import tqdm
from epe.dataset.utils import load_crops
logger = logging.getLogger('epe.matching.filter')
def load_crops(path):
    """ Load crop info from a csv file.
    The file is expected to have columns path,r0,r1,c0,c1
    path -- Path to image
    r0 -- top y coordinate
    r1 -- bottom y coordinate
    c0 -- left x coordinate
    c1 -- right x coordinate
    """
    path = Path(path)
    if not path.exists():
        # A missing file is non-fatal: warn and return an empty list.
        logger.warn(f'Failed to load crops from {path} because it does not exist.')
        return []
    crops = []
    with open(path) as file:
        reader = csv.DictReader(file)
        for row in tqdm(reader):
            # One (image path, r0, r1, c0, c1) tuple per csv row.
            crops.append((row['path'], int(row['r0']), int(row['r1']), int(row['c0']), int(row['c1'])))
            pass
        pass
    logger.debug(f'Loaded {len(crops)} crops.')
    return crops
The provided code snippet includes necessary dependencies for implementing the `load_and_filter_matching_crops` function. Write a Python function `def load_and_filter_matching_crops(knn_path, src_crop_path, dst_crop_path, max_dist=1.0)` to solve the following problem:
Loads crop info from source and target datasets and knn matches between crops and filters the matches based on distance. knn_path -- Path to knn matches src_crop_path -- Path to csv file with crop info from source dataset. dst_crop_path -- Path to csv file with crop info from target dataset. max_dist -- maximum distance in feature space between neighbours.
Here is the function:
def load_and_filter_matching_crops(knn_path, src_crop_path, dst_crop_path, max_dist=1.0):
    """Load source/target crop info and knn matches, keeping only close pairs.

    knn_path -- Path to knn matches (.npz with 'dist' and 'ind' arrays)
    src_crop_path -- Path to csv file with crop info from source dataset.
    dst_crop_path -- Path to csv file with crop info from target dataset.
    max_dist -- maximum distance in feature space between neighbours.
    """
    logger.debug(f'Filtering matches from {knn_path}.')
    logger.debug(f' Source crops from {src_crop_path}.')
    logger.debug(f' Target crops from {dst_crop_path}.')

    data = np.load(knn_path)
    dst_distances = data['dist']  # may need to add more samples
    dst_indices = data['ind']
    logger.debug(f' Found {dst_distances.shape[0]} source crops with {dst_distances.shape[1]} neighbours each.')

    all_src_crops = load_crops(src_crop_path)
    all_dst_crops = load_crops(dst_crop_path)

    # keep only neighbour pairs that are close enough in feature space
    src_ids, knn = np.nonzero(dst_distances < max_dist)

    filtered_src_crops = []
    filtered_dst_crops = []
    for src_id, k in tqdm(zip(src_ids.tolist(), knn.tolist()), total=src_ids.shape[0]):
        dst_id = int(dst_indices[src_id, k])
        filtered_src_crops.append(all_src_crops[src_id])
        filtered_dst_crops.append(all_dst_crops[dst_id])
    return filtered_src_crops, filtered_dst_crops
160,175 | import argparse
import csv
from pathlib import Path
import random
from imageio import imwrite
import numpy as np
import torch
from torchvision.utils import make_grid
from epe.dataset import ImageBatch, ImageDataset
from epe.dataset.utils import read_filelist
def load_crops(path):
    """Read crop records from a csv with columns path,r0,r1,c0,c1.

    Returns two parallel lists: image paths and (r0, r1, c0, c1) coordinate tuples.
    """
    paths = []
    coords = []
    with open(path) as file:
        for row in csv.DictReader(file):
            paths.append(row['path'])
            coords.append(tuple(int(row[key]) for key in ('r0', 'r1', 'c0', 'c1')))
    return paths, coords
160,176 | import argparse
from pathlib import Path
import numpy as np
import torch
import torch.utils.data
from tqdm import tqdm
from epe.dataset import ImageBatch, ImageDataset
from epe.dataset.utils import read_filelist
from epe.network import VGG16
def seed_worker(id):
    """DataLoader worker init: derive a 32-bit seed from torch and seed NumPy with it."""
    seed = torch.initial_seed() % np.iinfo(np.int32).max
    np.random.seed(seed)
160,177 | import csv
import logging
from pathlib import Path
import numpy as np
from skimage.transform import rescale
import torch
from tqdm import tqdm
logger = logging.getLogger('epe.dataset.utils')
The provided code snippet includes necessary dependencies for implementing the `read_filelist` function. Write a Python function `def read_filelist(path_to_filelist, num_expected_entries_per_row, check_if_exists=True)` to solve the following problem:
Loads a file with paths to multiple files per row. path_to_filelist -- path to text file num_expected_entries_per_row -- number of expected entries per row. check_if_exists -- checks each path.
Here is the function:
def read_filelist(path_to_filelist, num_expected_entries_per_row, check_if_exists=True):
    """ Loads a file with paths to multiple files per row.

    path_to_filelist -- path to text file
    num_expected_entries_per_row -- number of expected entries per row.
    check_if_exists -- checks each path; rows with any missing file are skipped.

    Returns a list of tuples of Path objects (one tuple per kept row).
    """
    paths = []
    num_skipped = 0
    with open(path_to_filelist) as file:
        for i, line in enumerate(file):
            t = line.strip().split(',')
            assert len(t) >= num_expected_entries_per_row, \
                f'Expected at least {num_expected_entries_per_row} entries per line. Got {len(t)} instead in line {i} of {path_to_filelist}.'
            # extra columns beyond the expected count are ignored
            ps = [Path(p) for p in t[:num_expected_entries_per_row]]
            if check_if_exists:
                skip = [p for p in ps if not p.exists()]
                if skip:
                    num_skipped += 1
                    continue
            paths.append(tuple(ps))

    if num_skipped > 0:
        # FIX: Logger.warn is a deprecated alias of Logger.warning
        logger.warning(f'Skipped {num_skipped} entries since at least one file was missing.')
    return paths
160,178 | import csv
import logging
from pathlib import Path
import numpy as np
from skimage.transform import rescale
import torch
from tqdm import tqdm
def mat2tensor(mat):
    """Convert an HxW or HxWxC numpy array to a CxHxW float tensor.

    mat -- 2-D array (grayscale, becomes 1xHxW) or 3-D array (HxWxC, becomes CxHxW).
    Raises ValueError for any other rank (previously fell through and returned
    None silently, deferring the crash to the caller).
    """
    t = torch.from_numpy(mat).float()
    if mat.ndim == 2:
        return t.unsqueeze(2).permute(2, 0, 1)
    if mat.ndim == 3:
        return t.permute(2, 0, 1)
    raise ValueError(f'Expected a 2-D or 3-D array, got ndim={mat.ndim}.')
160,179 | import csv
import logging
from pathlib import Path
import numpy as np
from skimage.transform import rescale
import torch
from tqdm import tqdm
The provided code snippet includes necessary dependencies for implementing the `normalize_dim` function. Write a Python function `def normalize_dim(a, d)` to solve the following problem:
Normalize a along dimension d.
Here is the function:
def normalize_dim(a, d):
    """ Normalize a along dimension d (clamped below to avoid division by zero)."""
    sq_norm = a.pow(2).sum(dim=d, keepdim=True).clamp(min=0.00001)
    return a * sq_norm.rsqrt()
160,180 | import csv
import logging
from pathlib import Path
import numpy as np
from skimage.transform import rescale
import torch
from tqdm import tqdm
def transform_identity(img):
    """No-op transform: hand the image back unchanged."""
    return img
160,181 | import csv
import logging
from pathlib import Path
import numpy as np
from skimage.transform import rescale
import torch
from tqdm import tqdm
def make_scale_transform(scale):
    """Return a transform that rescales an image by the fixed factor `scale`."""
    def _transform(img):
        return rescale(img, scale, preserve_range=True, anti_aliasing=True, multichannel=True)
    return _transform
160,182 | import csv
import logging
from pathlib import Path
import numpy as np
from skimage.transform import rescale
import torch
from tqdm import tqdm
def make_scale_transform_w(target_width):
    """Return a transform that rescales an image to the given target width."""
    def _transform(img):
        factor = float(target_width) / img.shape[1]
        return rescale(img, factor, preserve_range=True, anti_aliasing=True, multichannel=True)
    return _transform
160,183 | import csv
import logging
from pathlib import Path
import numpy as np
from skimage.transform import rescale
import torch
from tqdm import tqdm
def make_scale_transform_h(target_height):
    """Return a transform that rescales an image to the given target height."""
    def _transform(img):
        factor = float(target_height) / img.shape[0]
        return rescale(img, factor, preserve_range=True, anti_aliasing=True, multichannel=True)
    return _transform
160,184 | import logging
import torch
def _safe_to(a, device):
return a.to(device, non_blocking=True) if a is not None else None | null |
160,185 | import logging
import torch
def _safe_expand(a):
return a if a is None or a.dim() == 4 else a.unsqueeze(0) | null |
160,186 | import logging
import torch
def _safe_cat(s, dim):
try:
return torch.cat(s, dim)
except TypeError:
return None | null |
160,188 | import argparse
from pathlib import Path
import numpy as np
import torch
import torch.utils.data
from tqdm import tqdm
from epe.dataset import ImageBatch, ImageDataset
from epe.dataset.utils import read_filelist
from epe.network import VGG16
def extract(img):
    """Extract relu_3 VGG features of img and push them through the module-level
    `network.relu_3` head (a two-stage sequential, applied in index order)."""
    feats = network.fw_relu(img, 3)[-1]
    first, second = network.relu_3[0], network.relu_3[1]
    return second(first(feats))
160,189 | import logging
from pathlib import Path
import imageio
import numpy as np
from skimage.transform import resize
import scipy.io as sio
import torch
from .batch_types import EPEBatch
from .synthetic import SyntheticDataset
from .utils import mat2tensor, normalize_dim
def center(x, m, s):
    """Channel-wise centering, in place: x[c] = (x[c] - m[c]) / s[c] for c in 0..2.

    Mutates and returns x; m and s are indexable per-channel means/scales.
    """
    for ch in range(3):
        x[ch, :, :] = (x[ch, :, :] - m[ch]) / s[ch]
    return x
160,190 | import logging
from pathlib import Path
import imageio
import numpy as np
from skimage.transform import resize
import scipy.io as sio
import torch
from .batch_types import EPEBatch
from .synthetic import SyntheticDataset
from .utils import mat2tensor, normalize_dim
The provided code snippet includes necessary dependencies for implementing the `material_from_gt_label` function. Write a Python function `def material_from_gt_label(gt_labelmap)` to solve the following problem:
Merges several classes.
Here is the function:
def material_from_gt_label(gt_labelmap):
    """ Merges several classes.

    Maps an HxW integer GT label map to an HxWx12 one-hot material map.
    """
    # one material channel per semantic group (channel order is part of the contract)
    groups = [
        [23],                              # sky
        [6, 7, 8, 9, 10],                  # road / static / sidewalk
        [26, 27, 28, 29, 30, 31, 32, 33],  # vehicle
        [22],                              # terrain
        [21],                              # vegetation
        [24, 25],                          # person
        [17, 18],                          # infrastructure
        [19],                              # traffic light
        [20],                              # traffic sign
        [1],                               # ego vehicle
        [4, 11, 12, 13, 14, 15, 16],       # building
        [0, 5],                            # unlabeled
    ]
    h, w = gt_labelmap.shape
    shader_map = np.zeros((h, w, len(groups)), dtype=np.float32)
    for ch, ids in enumerate(groups):
        shader_map[:, :, ch] = np.isin(gt_labelmap, ids).astype(np.float32)
    return shader_map
160,191 | import warnings
from argparse import ArgumentParser
import datetime
import logging
from pathlib import Path
import random
import imageio
import numpy as np
from skimage.transform import resize
import torch
import torch.utils.data
from torch import autograd
import kornia as K
import epe.utils
import epe.dataset as ds
import epe.network as nw
import epe.experiment as ee
from epe.matching import MatchedCrops, IndependentCrops
def tee_loss(x, y):
    """Accumulate y onto the running loss x; also return y detached (for logging)."""
    detached = y.detach()
    return x + y, detached
160,192 | import warnings
from argparse import ArgumentParser
import datetime
import logging
from pathlib import Path
import random
import imageio
import numpy as np
from skimage.transform import resize
import torch
import torch.utils.data
from torch import autograd
import kornia as K
import epe.utils
import epe.dataset as ds
import epe.network as nw
import epe.experiment as ee
from epe.matching import MatchedCrops, IndependentCrops
def accuracy(pred):
    """Fraction of predictions above the 0.5 decision threshold."""
    hits = pred.gt(0.5).float()
    return hits.mean()
160,193 | import warnings
from argparse import ArgumentParser
import datetime
import logging
from pathlib import Path
import random
import imageio
import numpy as np
from skimage.transform import resize
import torch
import torch.utils.data
from torch import autograd
import kornia as K
import epe.utils
import epe.dataset as ds
import epe.network as nw
import epe.experiment as ee
from epe.matching import MatchedCrops, IndependentCrops
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
logger = logging.getLogger('main')
The provided code snippet includes necessary dependencies for implementing the `real_penalty` function. Write a Python function `def real_penalty(loss, real_img)` to solve the following problem:
Compute penalty on real images.
Here is the function:
def real_penalty(loss, real_img):
    ''' Compute penalty on real images. '''
    # R1-style gradient penalty: squared norm of d(loss)/d(real_img), per sample
    batch = real_img.shape[0]
    grad_out = autograd.grad(outputs=loss, inputs=[real_img], create_graph=True, retain_graph=True, only_inputs=True, allow_unused=True)
    logger.debug(f'real_penalty: g:{grad_out[0].shape}')
    per_sample = [g.pow(2).reshape(batch, -1).sum(dim=1, keepdim=True) for g in grad_out if g is not None]
    return torch.cat(per_sample, 1).mean()
160,194 | import datetime
import logging
import random
from pathlib import Path
import sys
import numpy as np
from scipy.io import savemat
import torch
from torch import autograd
import yaml
def seed_worker(id):
    """DataLoader worker init: seed Python's and NumPy's RNGs from the torch worker seed."""
    seed = torch.initial_seed() % np.iinfo(np.int32).max
    random.seed(seed)
    np.random.seed(seed)
160,195 | import datetime
import logging
import random
from pathlib import Path
import sys
import numpy as np
from scipy.io import savemat
import torch
from torch import autograd
import yaml
def toggle_grad(model, requires_grad):
    """Enable or disable gradient tracking for every parameter of `model`."""
    for param in model.parameters():
        param.requires_grad_(requires_grad)
160,196 | import datetime
import logging
import random
from pathlib import Path
import sys
import numpy as np
from scipy.io import savemat
import torch
from torch import autograd
import yaml
def parse_loglevel(loglevel_arg):
    # NOTE(review): the body of parse_loglevel is missing from this extract;
    # init_logging below calls it to turn args.log into a logging level, so it
    # presumably maps a string like 'debug'/'info' to a logging constant.
    # init_logging is nested here only to keep the extract syntactically
    # valid -- confirm the real structure against the upstream file.
    def init_logging(args):
        # Log to both a timestamped file under args.log_dir and stderr.
        now = datetime.datetime.now()
        log_path = args.log_dir / f'{args.config.stem}_{datetime.date.today().isoformat()}_{now.hour}-{now.minute}-{now.second}.log'
        level = parse_loglevel(args.log)
        logging.basicConfig(level=level, format="%(asctime)s %(message)s", handlers=[logging.FileHandler(log_path, mode='a'), logging.StreamHandler()])
160,197 | import os
from struct import unpack
import numpy as np
import torch
import scipy.io as sio
def save(c, d, name=None):
    # NOTE(review): the body of save() is missing from this extract; checknan
    # below calls it to dump the offending tensors (the file imports scipy.io,
    # presumably for this). checknan is nested here only to keep the extract
    # syntactically valid -- confirm the real structure upstream.
    def checknan(a, name, d=None):
        # Abort the process if tensor `a` contains NaNs, saving data first
        # for offline inspection.
        if torch.any(torch.isnan(a)):
            print('%s is nan.' % name)
            if d is None:
                save(True, {name:a})
            else:
                save(True, d)
            exit()
160,198 | import os
from struct import unpack
import numpy as np
import torch
import scipy.io as sio
def mat2tensor(mat):
    """Convert an HxW or HxWxC numpy array to a CxHxW float tensor.

    mat -- 2-D array (grayscale, becomes 1xHxW) or 3-D array (HxWxC, becomes CxHxW).
    Raises ValueError for any other rank (previously fell through and returned
    None silently, deferring the crash to the caller).
    """
    t = torch.from_numpy(mat).float()
    if mat.ndim == 2:
        return t.unsqueeze(2).permute(2, 0, 1)
    if mat.ndim == 3:
        return t.permute(2, 0, 1)
    raise ValueError(f'Expected a 2-D or 3-D array, got ndim={mat.ndim}.')
160,199 | import os
from struct import unpack
import numpy as np
import torch
import scipy.io as sio
The provided code snippet includes necessary dependencies for implementing the `normalize_dim` function. Write a Python function `def normalize_dim(a, d)` to solve the following problem:
Normalize a along dimension d.
Here is the function:
def normalize_dim(a, d):
    """ Normalize a along dimension d (clamped below to avoid division by zero)."""
    sq_norm = a.pow(2).sum(dim=d, keepdim=True).clamp(min=0.00001)
    return a * sq_norm.rsqrt()
160,200 | import os
from struct import unpack
import numpy as np
import torch
import scipy.io as sio
def cross3(a, b):
    """Row-wise 3-D cross product for (N, 3) tensors; returns an (N, 3) tensor."""
    u = a[:, 1] * b[:, 2] - a[:, 2] * b[:, 1]
    v = a[:, 2] * b[:, 0] - a[:, 0] * b[:, 2]
    w = a[:, 0] * b[:, 1] - a[:, 1] * b[:, 0]
    return torch.stack((u, v, w), dim=1)
160,201 | import os
from struct import unpack
import numpy as np
import torch
import scipy.io as sio
def normalize_vec(a):
    """Normalize vectors along the last dimension (no zero-norm guard)."""
    norm = a.pow(2).sum(dim=-1, keepdim=True).sqrt()
    return a / norm
160,202 | import logging
import torch
import torch.nn as nn
from torchvision import models
def norml2(x):
    """L2-normalize along the channel dimension (dim 1); no zero-norm guard."""
    norm = x.pow(2).sum(dim=1, keepdim=True).sqrt()
    return x / norm
160,203 | import torch
import torch.nn as nn
def make_residual(img, x):
    """Add residual x to img in logit space and map back through a sigmoid.

    img is clamped to (0.001, 0.999) so the logit stays finite.
    """
    logits = -torch.log(1 / img.clamp(min=0.001, max=0.999) - 1)
    return torch.sigmoid(logits + x)
160,204 | import logging
from math import sqrt
import torch
import torch.nn as nn
The provided code snippet includes necessary dependencies for implementing the `make_conv_layer` function. Write a Python function `def make_conv_layer(dims, strides=1, leaky_relu=True, spectral=False, norm_factory=None, skip_final_relu=False, kernel=3)` to solve the following problem:
Make simple convolutional networks without downsampling. dims -- list with channel widths, where len(dims)-1 is the number of concolutional layers to create. strides -- stride of first convolution if int, else stride of each convolution, respectively leaky_relu -- yes or no (=use ReLU instead) spectral -- use spectral norm norm_factory -- function taking a channel width and returning a normalization layer. skip_final_relu -- don't use a relu at the end kernel -- width of kernel
Here is the function:
def make_conv_layer(dims, strides=1, leaky_relu=True, spectral=False, norm_factory=None, skip_final_relu=False, kernel=3):
    """ Make simple convolutional networks without downsampling.

    dims -- list with channel widths, where len(dims)-1 is the number of convolutional layers to create.
    strides -- stride of first convolution if int, else stride of each convolution, respectively
    leaky_relu -- yes or no (=use ReLU instead)
    spectral -- use spectral norm
    norm_factory -- function taking a channel width and returning a normalization layer.
    skip_final_relu -- don't use a relu at the end
    kernel -- width of kernel
    """
    if type(strides) == int:
        # broadcast: the given stride applies to the first conv, all others use 1
        strides = [strides] + [1] * (len(dims)-2)

    # NOTE(review): bias is enabled only together with spectral norm
    # (bias=spectral), so the plain configuration has no bias -- kept as-is.
    c = nn.Conv2d(dims[0], dims[1], kernel, stride=strides[0], bias=spectral)
    m = [] if kernel == 1 else [nn.ReplicationPad2d(kernel // 2)]
    m += [c if not spectral else torch.nn.utils.spectral_norm(c)]
    if norm_factory:
        m += [norm_factory(dims[1])]
    m += [nn.LeakyReLU(0.2, inplace=True) if leaky_relu else nn.ReLU(inplace=True)]

    num_convs = len(dims)-2
    for i, di in enumerate(dims[2:]):
        # BUGFIX: subsequent convolutions previously hard-coded kernel size 3,
        # ignoring the `kernel` argument (and shrinking the feature map for
        # kernel == 1, since no padding is added then). Use `kernel` throughout.
        c = nn.Conv2d(dims[i+1], di, kernel, stride=strides[i+1], bias=spectral)
        if kernel > 1:
            m += [nn.ReplicationPad2d(kernel // 2)]
        m += [c if not spectral else torch.nn.utils.spectral_norm(c)]
        if norm_factory:
            m += [norm_factory(di)]
        if i == num_convs-1 and skip_final_relu:
            continue
        m += [nn.LeakyReLU(0.2, inplace=True) if leaky_relu else nn.ReLU(inplace=True)]
    return nn.Sequential(*m)
160,205 | import torch
import torch.nn as nn
def _fw_ls_real(input):
return (1-input).pow(2) | null |
160,206 | import logging
import lpips
import torch
import torch.nn as nn
def vgg_munit(vgg, img, rec):
    """MUNIT-style perceptual loss on instance-normalized relu-13 VGG features.

    Returns (scalar loss, [per-pixel squared-difference map]).
    """
    feat_img = torch.nn.functional.instance_norm(vgg.fw_relu(img, 13)[-1])
    feat_rec = torch.nn.functional.instance_norm(vgg.fw_relu(rec, 13)[-1])
    diff_map = (feat_img - feat_rec).pow(2).mean(dim=1, keepdim=True)
    vgg_imgs = [diff_map]
    return diff_map.mean(), vgg_imgs
160,207 | import logging
import lpips
import torch
import torch.nn as nn
def vgg_johnson(vgg, img, rec):
    """Johnson-style perceptual loss on relu-4 VGG features.

    Returns (scalar loss, [per-pixel squared-difference map]).
    """
    feat_img = vgg.fw_relu(img, 4)[-1]
    feat_rec = vgg.fw_relu(rec, 4)[-1]
    diff_map = (feat_img - feat_rec).pow(2).mean(dim=1, keepdim=True)
    vgg_imgs = [diff_map]
    return diff_map.mean(), vgg_imgs
160,208 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from enum import Enum
import os
import logging
import functools
import numpy as np
import torch
import torch.nn as nn
import torch._utils
import torch.nn.functional as F
import epe.network.gb_encoder as ge
import epe.network.network_factory as nf
def make_hrnet_config(num_stages):
    """Build HRNet stage configs STAGE1..STAGE<num_stages>; stage i has i branches,
    3 blocks per branch, and channel widths 16, 32, 64, ... doubling per branch."""
    return {
        f'STAGE{i}': {
            'NUM_MODULES': 1,
            'NUM_BRANCHES': i,
            'NUM_BLOCKS': [3] * i,
            'NUM_CHANNELS': [16 * 2**j for j in range(i)],
            'BLOCK': 'BASIC',
            'FUSE_METHOD': 'SUM',
        }
        for i in range(1, num_stages + 1)
    }
def make_ienet2(ie_config):
    # Build the image-enhancement HRNet from a config dict; num_stages defaults to 6.
    hrnet_config = make_hrnet_config(ie_config.get('num_stages', 6))
    encoder_type = ie_config.get('encoder_type', 3)
    # NOTE(review): Enum[...] indexes by member *name*; with the integer
    # default 3 this raises KeyError (lookup by value would be
    # GBufferEncoderType(encoder_type)). The result is also never used below.
    # Confirm intent against the upstream file.
    encoder_type = GBufferEncoderType[encoder_type]
    return HighResolutionNet(hrnet_config, ie_config)
The provided code snippet includes necessary dependencies for implementing the `conv3x3` function. Write a Python function `def conv3x3(in_planes, out_planes, stride=1)` to solve the following problem:
3x3 convolution with padding
Here is the function:
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding"""
    pad = nn.ReplicationPad2d(1)
    conv = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=0, bias=True)
    return nn.Sequential(pad, conv)
160,209 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from enum import Enum
import os
import logging
import functools
import numpy as np
import torch
import torch.nn as nn
import torch._utils
import torch.nn.functional as F
import epe.network.gb_encoder as ge
import epe.network.network_factory as nf
def make_hrnet_config(num_stages):
    """Per-stage HRNet config: stage i has i branches with 3 blocks each and
    widths 16 * 2^j for branch j."""
    stages = {}
    for i in range(1, num_stages + 1):
        stages[f'STAGE{i}'] = {
            'NUM_MODULES': 1,
            'NUM_BRANCHES': i,
            'NUM_BLOCKS': [3] * i,
            'NUM_CHANNELS': [16 * 2**j for j in range(i)],
            'BLOCK': 'BASIC',
            'FUSE_METHOD': 'SUM',
        }
    return stages
def make_ienet2(ie_config):
    # Build the image-enhancement HRNet from a config dict; num_stages defaults to 6.
    hrnet_config = make_hrnet_config(ie_config.get('num_stages', 6))
    encoder_type = ie_config.get('encoder_type', 3)
    # NOTE(review): Enum[...] indexes by member *name*; with the integer
    # default 3 this raises KeyError, and the result is never used below.
    # Confirm intent against the upstream file.
    encoder_type = GBufferEncoderType[encoder_type]
    return HighResolutionNet(hrnet_config, ie_config)
The provided code snippet includes necessary dependencies for implementing the `conv3x3s` function. Write a Python function `def conv3x3s(in_planes, out_planes, stride=1)` to solve the following problem:
3x3 convolution with padding
Here is the function:
def conv3x3s(in_planes, out_planes, stride=1):
    """3x3 convolution with padding"""
    # spectrally-normalized variant of conv3x3
    conv = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=0, bias=True)
    return nn.Sequential(nn.ReplicationPad2d(1), torch.nn.utils.spectral_norm(conv))
160,210 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from enum import Enum
import os
import logging
import functools
import numpy as np
import torch
import torch.nn as nn
import torch._utils
import torch.nn.functional as F
import epe.network.gb_encoder as ge
import epe.network.network_factory as nf
def make_hrnet_config(num_stages):
    """Per-stage HRNet config: stage i has i branches, 3 blocks per branch,
    and channel widths doubling from 16."""
    return {
        f'STAGE{i}': {
            'NUM_MODULES': 1,
            'NUM_BRANCHES': i,
            'NUM_BLOCKS': [3] * i,
            'NUM_CHANNELS': [16 * 2**j for j in range(i)],
            'BLOCK': 'BASIC',
            'FUSE_METHOD': 'SUM',
        }
        for i in range(1, num_stages + 1)
    }
def make_ienet2(ie_config):
    # Build the image-enhancement HRNet from a config dict; num_stages defaults to 6.
    hrnet_config = make_hrnet_config(ie_config.get('num_stages', 6))
    encoder_type = ie_config.get('encoder_type', 3)
    # NOTE(review): Enum[...] indexes by member *name*; with the integer
    # default 3 this raises KeyError, and the result is never used below.
    # Confirm intent against the upstream file.
    encoder_type = GBufferEncoderType[encoder_type]
    return HighResolutionNet(hrnet_config, ie_config)
def make_blocks_dict(gbuffer_norm, num_gbuffer_layers):
    """Map block-type names to (constructor, expansion) pairs.

    Each constructor injects a G-buffer-aware norm (built lazily per call via
    ge.gbuffer_norm_factory); an explicit 'norm_func' kwarg from the caller
    takes precedence over the injected one.
    """
    def _with_norm(block_cls):
        def _make(*args, **kwargs):
            merged = {'norm_func': ge.gbuffer_norm_factory(gbuffer_norm, num_gbuffer_layers), **kwargs}
            return block_cls(*args, **merged)
        return _make
    return {
        'BASIC': (_with_norm(BasicBlock), BasicBlock.expansion),
        'BOTTLENECK': (_with_norm(Bottleneck), Bottleneck.expansion),
    }
160,211 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from enum import Enum
import os
import logging
import functools
import numpy as np
import torch
import torch.nn as nn
import torch._utils
import torch.nn.functional as F
import epe.network.gb_encoder as ge
import epe.network.network_factory as nf
class GBufferEncoderType(Enum):
    # How G-buffer information enters the enhancement network. Member
    # semantics are inferred from the names -- confirm against the factories
    # that consume this enum; ENCODER (3) is the default used by make_ienet2.
    NONE = 0
    CONCAT = 1
    SPADE = 2
    ENCODER = 3
    pass
class HighResolutionNet(nn.Module):
    # NOTE(review): garbled extract -- the real body of HighResolutionNet is
    # missing here and the module-level helpers below were flattened into this
    # span. They are nested under the class only to keep the extract
    # syntactically valid; confirm the true structure upstream.

    def make_hrnet_config(num_stages):
        # Builds one config dict per stage; stage i has i branches.
        hrnet_cfg = {}
        for i in range(1, num_stages+1):
            cfg = {}
            cfg['NUM_MODULES'] = 1
            cfg['NUM_BRANCHES'] = i
            cfg['NUM_BLOCKS'] = [3]*i
            cfg['NUM_CHANNELS'] = [16 * 2**j for j in range(0, i)]
            cfg['BLOCK'] = 'BASIC'
            cfg['FUSE_METHOD'] = 'SUM'
            hrnet_cfg[f'STAGE{i}'] = cfg
            pass
        return hrnet_cfg

    def make_ienet2(ie_config):
        hrnet_config = make_hrnet_config(ie_config.get('num_stages', 6))
        encoder_type = ie_config.get('encoder_type', 3)
        # NOTE(review): Enum[...] indexes by name; an int default raises KeyError.
        encoder_type = GBufferEncoderType[encoder_type]
        return HighResolutionNet(hrnet_config, ie_config)

    # duplicate definition present in the extract -- shadows the one above
    def make_ienet2(ie_config):
        hrnet_config = make_hrnet_config(ie_config.get('num_stages', 6))
        encoder_type = ie_config.get('encoder_type', 3)
        encoder_type = GBufferEncoderType[encoder_type]
        return HighResolutionNet(hrnet_config, ie_config)
160,212 | import logging
import torch
import torch.nn as nn
import epe.network.network_factory as nf
The provided code snippet includes necessary dependencies for implementing the `_append_downsampled_gbuffers` function. Write a Python function `def _append_downsampled_gbuffers(g_list, x_list)` to solve the following problem:
Dynamically downsample G-buffers, matching resolution in feature maps.
Here is the function:
def _append_downsampled_gbuffers(g_list, x_list):
""" Dynamically downsample G-buffers, matching resolution in feature maps."""
for i in range(len(g_list), len(x_list)):
g_list.append(torch.nn.functional.interpolate(g_list[i-1], size=[x_list[i].shape[-2],x_list[i].shape[-1]], mode='bilinear', align_corners=False))
pass
return g_list | Dynamically downsample G-buffers, matching resolution in feature maps. |
160,213 | import logging
import torch
import torch.nn as nn
import epe.network.network_factory as nf
The provided code snippet includes necessary dependencies for implementing the `_append_downsampled_shaders` function. Write a Python function `def _append_downsampled_shaders(s, s_list, x_list)` to solve the following problem:
Dynamically downsample G-buffers, matching resolution in feature maps.
Here is the function:
def _append_downsampled_shaders(s, s_list, x_list):
""" Dynamically downsample G-buffers, matching resolution in feature maps."""
if s.shape[1] != 1:
for i in range(len(s_list), len(x_list)):
s_list.append(torch.argmax(torch.nn.functional.interpolate(s, size=[x_list[i].shape[-2],x_list[i].shape[-1]], mode='bilinear', align_corners=False), dim=1, keepdims=True).long())
pass
else:
for i in range(len(s_list), len(x_list)):
s_list.append(torch.nn.functional.interpolate(s, size=[x_list[i].shape[-2],x_list[i].shape[-1]], mode='nearest').long())
pass
pass
return s_list | Dynamically downsample G-buffers, matching resolution in feature maps. |
160,214 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from enum import Enum
import os
import logging
import functools
import numpy as np
import torch
import torch.nn as nn
import torch._utils
import torch.nn.functional as F
import epe.network.gb_encoder as ge
import epe.network.network_factory as nf
def make_hrnet_config(num_stages):
    """Per-stage HRNet config: stage i has i branches with 4 blocks each and
    channel widths 16 * 2^j for branch j."""
    return {
        f'STAGE{i}': {
            'NUM_MODULES': 1,
            'NUM_BRANCHES': i,
            'NUM_BLOCKS': [4] * i,
            'NUM_CHANNELS': [16 * 2**j for j in range(i)],
            'BLOCK': 'BASIC',
            'FUSE_METHOD': 'SUM',
        }
        for i in range(1, num_stages + 1)
    }
def make_ienet(ie_config):
    # Build the image-enhancement HRNet from a config dict; num_stages defaults to 6.
    hrnet_config = make_hrnet_config(ie_config.get('num_stages', 6))
    encoder_type = ie_config.get('encoder_type', 3)
    # NOTE(review): Enum[...] indexes by member *name*; with the integer
    # default 3 this raises KeyError, and the result is never used below.
    # Confirm intent against the upstream file.
    encoder_type = GBufferEncoderType[encoder_type]
    return HighResolutionNet(hrnet_config, ie_config)
The provided code snippet includes necessary dependencies for implementing the `conv3x3` function. Write a Python function `def conv3x3(in_planes, out_planes, stride=1)` to solve the following problem:
3x3 convolution with padding
Here is the function:
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding"""
    # bias-free variant: replication-pad by 1 so the spatial size is preserved
    pad = nn.ReplicationPad2d(1)
    conv = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=0, bias=False)
    return nn.Sequential(pad, conv)
160,215 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from enum import Enum
import os
import logging
import functools
import numpy as np
import torch
import torch.nn as nn
import torch._utils
import torch.nn.functional as F
import epe.network.gb_encoder as ge
import epe.network.network_factory as nf
def make_hrnet_config(num_stages):
    """Per-stage HRNet config: stage i has i branches, 4 blocks per branch,
    and channel widths doubling from 16."""
    stages = {}
    for i in range(1, num_stages + 1):
        stages[f'STAGE{i}'] = {
            'NUM_MODULES': 1,
            'NUM_BRANCHES': i,
            'NUM_BLOCKS': [4] * i,
            'NUM_CHANNELS': [16 * 2**j for j in range(i)],
            'BLOCK': 'BASIC',
            'FUSE_METHOD': 'SUM',
        }
    return stages
def make_ienet(ie_config):
    # Build the image-enhancement HRNet from a config dict; num_stages defaults to 6.
    hrnet_config = make_hrnet_config(ie_config.get('num_stages', 6))
    encoder_type = ie_config.get('encoder_type', 3)
    # NOTE(review): Enum[...] indexes by member *name*; with the integer
    # default 3 this raises KeyError, and the result is never used below.
    # Confirm intent against the upstream file.
    encoder_type = GBufferEncoderType[encoder_type]
    return HighResolutionNet(hrnet_config, ie_config)
The provided code snippet includes necessary dependencies for implementing the `conv3x3s` function. Write a Python function `def conv3x3s(in_planes, out_planes, stride=1)` to solve the following problem:
3x3 convolution with padding
Here is the function:
def conv3x3s(in_planes, out_planes, stride=1):
    """3x3 convolution with padding"""
    # spectrally-normalized variant of conv3x3
    conv = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=0, bias=True)
    return nn.Sequential(nn.ReplicationPad2d(1), torch.nn.utils.spectral_norm(conv))
160,216 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from enum import Enum
import os
import logging
import functools
import numpy as np
import torch
import torch.nn as nn
import torch._utils
import torch.nn.functional as F
import epe.network.gb_encoder as ge
import epe.network.network_factory as nf
def make_hrnet_config(num_stages):
    # NOTE(review): garbled extract -- the bodies of make_hrnet_config and
    # make_ienet are missing and make_blocks_dict was flattened underneath
    # them. They are nested here only to keep the extract syntactically
    # valid; confirm the real structure upstream.
    def make_ienet(ie_config):
        def make_blocks_dict(gbuffer_norm, num_gbuffer_layers):
            # Maps block-type names to (constructor, expansion); constructors
            # inject a G-buffer-aware norm via ge.gbuffer_norm_factory. A
            # caller-supplied 'norm_func' kwarg overrides the injected one.
            return {
                'BASIC': (lambda *args, **kwargs: BasicBlock(*args, **{'norm_func':ge.gbuffer_norm_factory(gbuffer_norm, num_gbuffer_layers), **kwargs}), BasicBlock.expansion),
                'BOTTLENECK': (lambda *args, **kwargs: Bottleneck(*args, **{'norm_func':ge.gbuffer_norm_factory(gbuffer_norm, num_gbuffer_layers), **kwargs}), Bottleneck.expansion)
            }
160,217 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from enum import Enum
import os
import logging
import functools
import numpy as np
import torch
import torch.nn as nn
import torch._utils
import torch.nn.functional as F
import epe.network.gb_encoder as ge
import epe.network.network_factory as nf
class GBufferEncoderType(Enum):
    # NOTE(review): garbled extract -- the enum members, both class bodies and
    # the outer def bodies are missing; everything below was flattened into
    # this span. Nested only to keep the extract syntactically valid;
    # confirm the real structure upstream.
    class HighResolutionNet(nn.Module):
        def make_hrnet_config(num_stages):
            def make_ienet(ie_config):
                # duplicate signature from the extract; only this copy has a body
                def make_ienet(ie_config):
                    hrnet_config = make_hrnet_config(ie_config.get('num_stages', 6))
                    encoder_type = ie_config.get('encoder_type', 3)
                    # NOTE(review): Enum[...] indexes by name; an int raises KeyError.
                    encoder_type = GBufferEncoderType[encoder_type]
                    return HighResolutionNet(hrnet_config, ie_config)
160,218 | import logging
import math
import pathlib
import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.utils.spectral_norm as spectral_norm
from torch.nn import init
import torch.nn.functional as F
import kornia as K
import epe.network.network_factory as nf
import warnings
class ProjectionDiscriminator(nn.Module):
    """Convolutional patch discriminator with projection-based conditioning.

    An optional label map (precomputed int64 segmentation ids or soft logits)
    is embedded and projected onto the feature map before the final score.
    """

    def __init__(self, dim_in, dim_base, max_dim, num_layers=3, num_groups=8, num_strides=3, dilate=False, no_out=False, cfg=None, hw=169):
        """
        dim_in -- incoming channel width
        dim_base -- channel width after first convolution
        max_dim -- channel width is doubled every layer until max_dim
        num_layers -- number of convolutional layers
        num_groups -- number of groups for group_norm
        num_strides -- how many layers should have stride 2 (counting from bottom)
        dilate -- increasing dilation per layer (currently unused)
        no_out -- no extra projection to channel width 1
        cfg -- optional dict; cfg['norm'] selects batch/inst/group/spectral/domain/compare/compare2
        """
        super(ProjectionDiscriminator, self).__init__()
        # FIX: avoid the mutable default argument cfg={} (shared across calls);
        # cfg=None with a local fallback is behaviorally identical for callers.
        cfg = cfg if cfg is not None else {}
        norm = cfg.get('norm', 'group')
        self._log = logging.getLogger('epe.network.proj_disc')
        self._log.debug(f' Creating projection discriminator with {num_layers} layers and {norm} norm.')

        dims = [dim_in] + [min(max_dim, dim_base*2**i) for i in range(num_layers)]
        strides = [2]*num_strides + [1] * (num_layers+1-num_strides)
        self.model = nf.make_conv_layer(dims, strides, True, norm=='spectral', nf.norm_factory[norm], False, 3)
        # NOTE(review): dims[3] assumes num_layers >= 3 -- confirm for smaller configs.
        dim_out = dims[3]
        if no_out:
            self.out = None
        else:
            self.out = nn.Sequential(nn.Conv2d(dim_out,dim_out,3,padding=1), nn.LeakyReLU(0.2, True), nn.Conv2d(dim_out, 1, 1))
        # FIX: this assignment was duplicated (before and after the embedding); keep one.
        self.num_layers = num_layers+1
        # embedding table for the projection head; 194 label classes supported
        self.embedding = nn.Embedding(194,dim_out)

        # weight initialization
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_uniform_(m.weight, mode='fan_in', nonlinearity='leaky_relu')
            elif isinstance(m, nn.Embedding):
                nn.init.normal_(m.weight, std=0.01)
            elif isinstance(m, (nn.GroupNorm, nn.BatchNorm2d, nn.InstanceNorm2d)):
                try:
                    nn.init.constant_(m.weight, 1)
                    nn.init.constant_(m.bias, 0)
                except AttributeError:
                    # norms without affine parameters have no weight/bias
                    pass

    def forward(self, t):
        """t = (x, y): input features x and optional label map y.

        y may be int64 segmentation ids (N,1,H,W) or soft logits (N,C,H,W);
        returns a per-patch realness score map.
        """
        x,y = t
        self._log.debug(f'disc.forward(x: {x.shape}, y: {y.shape})')
        x = self.model(x)
        _,c,h,w = x.shape
        if y is not None:
            if y.dtype == torch.int64:
                # precomputed segmentation ids: embed first, then resize to the feature map
                _, _, hy, wy = y.shape
                y = self.embedding(y.reshape(-1))
                y = y.permute(1,0).reshape(1,c,hy,wy)
                y = F.interpolate(y, (h, w), mode='bilinear', align_corners=True)
            else:
                # soft logits: resize first, then embed the argmax ids
                y = F.interpolate(y, (h, w), mode='bilinear', align_corners=True)
                y = torch.argmax(torch.nn.functional.softmax(y, dim=1), axis=1, keepdims=True)
                y = self.embedding(y.reshape(-1))
                y = y.permute(1,0).reshape(1,c,h,w)
            if self.out is not None:
                # projection term (inner product of label embedding and features)
                # plus the unconditional output head
                y = (y * x).sum(dim=1,keepdims=True)
                x = y + self.out(x)
            else:
                x = (y * x).sum(dim=1,keepdims=True)
        else:
            x = self.out(x)
        return x
def make_disc_backbones(configs, cfg):
    """Instantiate one ProjectionDiscriminator per config tuple.

    Each entry of configs is (dim_in, dim_base, max_dim, num_layers,
    num_strides); the first seven discriminators get hw=169, the rest hw=144.
    """
    return [
        ProjectionDiscriminator(
            dim_in=dim_in, dim_base=dim_base, max_dim=max_dim,
            num_layers=num_layers, num_strides=num_strides, dilate=False,
            no_out=False, cfg=cfg, hw=(169 if i < 7 else 144))
        for i, (dim_in, dim_base, max_dim, num_layers, num_strides) in enumerate(configs)
    ]
import argparse
import re
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
import pandas as pd
import requests
from datetime import date
def get_project_info(user, project, name, item, date_key, token=""):
    """Collect cumulative per-day counts for a GitHub repository resource.

    user, project -- repository owner and name
    name          -- label stored in the returned data dict
    item          -- API resource to page through (e.g. 'stargazers')
    date_key      -- JSON key holding the ISO timestamp (e.g. 'starred_at')
    token         -- optional GitHub token to raise the API rate limit

    Returns a dict with 'name', 'num_list' (cumulative counts up to each day)
    and 'date_list' (matching pandas DatetimeIndex from first event to today).
    """
    header = {
        "Accept": "application/vnd.github.v3.star+json"
    }
    if token:
        header.update({
            "Authorization": f"token {token}",
        })
    data_list = []
    page = 0
    # raw string avoids the invalid-escape-sequence warning for "\d"
    date_pat = re.compile(r"\d{4}-\d{2}-\d{2}")
    while True:
        page += 1
        url = f"https://api.github.com/repos/{user}/{project}/{item}?page={page}"
        # a timeout keeps the script from hanging forever on a stalled connection
        req = requests.get(url, headers=header, timeout=30)
        datas = req.json()
        if not datas:
            # empty page marks the end of pagination
            break
        data_list.extend([date_pat.match(i.get(date_key)).group() for i in datas])
    # events per day
    date_dic = {}
    start_date = min(data_list)
    end_date = date.today()
    for date_str in data_list:
        date_dic[date_str] = date_dic.get(date_str, 0) + 1
    # running total over every calendar day in range
    date_list = pd.date_range(start_date, end_date)
    star_num = 0
    star_num_list = []
    for date_str in date_list:
        star_num += date_dic.get(str(date_str).split()[0], 0)
        star_num_list.append(star_num)
    return {
        "name": name,
        "num_list": star_num_list,
        "date_list": date_list,
    }
import argparse
import re
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
import pandas as pd
import requests
from datetime import date
def create_svg(project, datas, save_path, theme=""):
    """Render cumulative history curves for *project* and save to *save_path*.

    datas -- list of dicts with 'date_list', 'num_list' and 'name'
             (as produced by get_project_info)
    theme -- accepted for API compatibility; currently unused
    """
    fig, ax = plt.subplots(figsize=(12, 5))
    # make figure and axes backgrounds fully transparent
    fig.patch.set_alpha(.0)
    ax.patch.set_alpha(.0)
    # tick marks and tick labels
    ax.tick_params(color='darkgrey', labelcolor='darkgrey')
    # axes: force integer y-ticks, hide top/right spines
    plt.gca().yaxis.set_major_locator(MaxNLocator(integer=True))
    ax.spines['top'].set_color('none')
    ax.spines['bottom'].set_color("darkgrey")
    ax.spines['left'].set_color("darkgrey")
    ax.spines['right'].set_color('none')
    # draw one curve per data series
    for data in datas:
        date_list = data["date_list"]
        num_list = data["num_list"]
        name = data["name"]
        ax.plot(date_list, num_list, label=name)
    # legend, anchored outside the plot area
    ax.legend(
        frameon=False,
        loc=2,
        bbox_to_anchor=(1.05, 0.0, 3.0, 0.0),
        borderaxespad = 0.,
        labelcolor='darkgrey'
    )
    # title
    ax.set_title(f"{project} history", color='darkgrey')
    # grid
    ax.grid(True, linestyle='-.')
    plt.savefig(save_path)
import os
import re
import smtplib
import sys
import time
from email.mime.text import MIMEText
from email.utils import formataddr
import feedparser
import requests
requests.packages.urllib3.disable_warnings()
ok_code = [200, 201, 202, 203, 204, 205, 206]
# NOTE(review): the variable name on this assignment was lost in extraction,
# and a comma was missing after "12321.cn" (implicit string concatenation
# would have fused it with "miit.gov.cn") — restore the original name.
report_domains = ["cnr.cn", "cyberpolice.cn", "gov.cn", "samr.gov.cn", "12321.cn",
                  "miit.gov.cn", "chinatcc.gov.cn"]
def write_log(content, level="INFO"):
    """Append a timestamped log line to ./log/<YYYY-MM>-update.log and echo it.

    content -- message text
    level   -- severity label embedded in the line (INFO/WARN/ERROR)
    """
    # take the timestamp once so the line prefix and the file name cannot
    # disagree when called right at a month boundary
    now = time.localtime()
    date_str = time.strftime('%Y-%m-%d %H:%M:%S', now)
    update_log = f"[{date_str}] [{level}] {content}\n"
    print(update_log)
    # ensure the log directory exists even when write_log runs before
    # get_subscribe_url() had a chance to create it
    os.makedirs('./log', exist_ok=True)
    with open(f'./log/{time.strftime("%Y-%m", now)}-update.log', 'a', encoding="utf-8") as f:
        f.write(update_log)
if __name__ == '__main__':
    main()
def get_subscribe_url():
    """Fetch the newest v2ray/clash subscription contents advertised in
    cfmem.com's RSS feed and write them into ./subscribe/, logging the
    outcome via write_log()."""
    dirs = './subscribe'
    if not os.path.exists(dirs):
        os.makedirs(dirs)
    log_dir = "./log"
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    rss = feedparser.parse('https://www.cfmem.com/feeds/posts/default?alt=rss')
    entries = rss.get("entries")
    if not entries:
        write_log("更新失败!无法拉取原网站内容", "ERROR")
        return
    update_list = []
    summary = entries[0].get("summary")
    if not summary:
        write_log("暂时没有可用的订阅更新", "WARN")
        return
    v2ray_list = re.findall(r"v2ray订阅链接:(.*?)</span>", summary)
    # grab the plain v2ray subscription link (last match = newest)
    if any(v2ray_list):
        v2ray_url = v2ray_list[-1].replace('amp;', '')
        v2ray_req = requests.request("GET", v2ray_url, verify=False)
        v2ray_code = v2ray_req.status_code
        if v2ray_code not in ok_code:
            write_log(f"获取 v2ray 订阅失败:{v2ray_url} - {v2ray_code}", "WARN")
        else:
            update_list.append(f"v2ray: {v2ray_code}")
            with open(dirs + '/v2ray.txt', 'w', encoding="utf-8") as f:
                f.write(v2ray_req.text)
    clash_list = re.findall(r"clash订阅链接:(.*?)</span>", summary)
    # grab the clash subscription link
    if any(clash_list):
        clash_url = clash_list[-1].replace('amp;', '')
        clash_req = requests.request("GET", clash_url, verify=False)
        clash_code = clash_req.status_code
        if clash_code not in ok_code:
            write_log(f"获取 clash 订阅失败:{clash_url} - {clash_code}", "WARN")
        else:
            update_list.append(f"clash: {clash_code}")
            with open(dirs + '/clash.yml', 'w', encoding="utf-8") as f:
                clash_content = clash_req.content.decode("utf-8")
                f.write(clash_content)
    if update_list:
        # only report success when git actually sees the files changed
        file_pat = re.compile(r"v2ray\.txt|clash\.yml")
        if file_pat.search(os.popen("git status").read()):
            write_log(f"更新成功:{update_list}", "INFO")
        else:
            write_log(f"订阅暂未更新", "WARN")
    else:
        write_log(f"未能获取新的更新内容", "WARN")
import re
import sys
import glob
import pathlib
from datetime import datetime
import markdown
def sanitize(text: str) -> str:
    """Escape '<' and '>' so *text* can be embedded in XML/RSS markup.

    NOTE(review): the checked-in version replaced each character with itself
    (a no-op) — almost certainly the HTML entities were unescaped by an
    extraction step; this restores the intended entity escaping.
    """
    return text.replace("<", "&lt;").replace(">", "&gt;")
def gen_feed(tips, url):
    """Build the <item> entries of an RSS feed from a list of tips.

    tips -- dicts with 'short_path' (e.g. '042.md'), 'pub_date' (unix
            timestamp) and 'content' (markdown whose first bullet line
            carries the tip title)
    url  -- base URL prefixed to each tip's link

    Returns the concatenated XML <item> fragments as one string.
    """
    # bullet title: '* title' or '* **title**'; the conditional group (?(1)...)
    # requires the closing ** only when the opening ** matched
    title_re = r"\* +(\*\*)?(?P<title>(?(1).+(?<!\*\*)|.+))(?(1)\*\*$|$)"
    # bare http(s) URL not already inside a markdown link ('[' or '(' before it)
    link_re = r"(?P<link>(?<!\[|\()https?://[\w\./\?&=\-\+#:,;%]+)"
    xml = ""
    for tip in tips:
        link = tip["short_path"]
        tip_num = link.replace(".md", "")
        date = datetime.fromtimestamp(tip["pub_date"]).strftime("%a, %d %b %Y %H:%M:%S GMT")
        title = re.search(title_re, tip["content"], re.RegexFlag.MULTILINE).group("title")
        title = sanitize(title)
        content = tip["content"]
        updated = content
        # wrap each bare URL into a markdown link, once per distinct URL
        links_seen = set()
        for match in re.finditer(link_re, content):
            naked_link = match.group("link")
            if naked_link not in links_seen:
                updated = updated.replace(naked_link, f"[{naked_link}]({naked_link})")
                links_seen.add(naked_link)
        # TODO: find a way to colorize the code, the documentation advises to have custom css
        # The classes should already have been applied to the code thanks to extension 'codehilite'
        md = markdown.markdown(
            f"<div markdown=1>{updated}</div>",
            extensions=["extra", "codehilite", "nl2br"],
            output_format="xhtml"
        )
        md = md.replace("<p></p>", "")
        md = md.replace("<details open>", "<details open=true>")
        # raw HTML must travel inside a CDATA section
        md = "<![CDATA[ %s]]>" % md
        xml += f"""<item>
    <title>{tip_num} - {title}</title>
    <link>{url}{link}</link>
    <guid>{url}{link}</guid>
    <pubDate>{date}</pubDate>
    <description>{md}</description>
</item>\n"""
    return xml
import numpy as np
import pandas as pd
from tqdm import tqdm
import datetime
import time
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer, MissingIndicator
from sklearn.preprocessing import StandardScaler, OneHotEncoder, OrdinalEncoder
from sklearn.compose import ColumnTransformer
from sklearn.utils import all_estimators
from sklearn.base import RegressorMixin
from sklearn.base import ClassifierMixin
from sklearn.metrics import (
accuracy_score,
balanced_accuracy_score,
roc_auc_score,
f1_score,
r2_score,
mean_squared_error,
)
import warnings
import xgboost
import lightgbm
# The imports above provide the dependencies needed by `get_card_split`,
# which splits categorical columns into two lists based on cardinality
# (number of unique values) around the threshold `n` (default 11).
def get_card_split(df, cols, n=11):
    """
    Splits categorical columns into 2 lists based on cardinality (i.e # of unique values)
    Parameters
    ----------
    df : Pandas DataFrame
        DataFrame from which the cardinality of the columns is calculated.
    cols : list-like
        Categorical columns to list
    n : int, optional (default=11)
        The value of 'n' will be used to split columns.
    Returns
    -------
    card_low : list-like
        Columns with cardinality <= n
    card_high : list-like
        Columns with cardinality > n
    """
    # NOTE: the docstring above is corrected to match the code — columns with
    # exactly n unique values land in card_low (cond is a strict '>').
    cond = df[cols].nunique() > n
    card_high = cols[cond]
    card_low = cols[~cond]
    return card_low, card_high
import numpy as np
import pandas as pd
from tqdm import tqdm
import datetime
import time
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer, MissingIndicator
from sklearn.preprocessing import StandardScaler, OneHotEncoder, OrdinalEncoder
from sklearn.compose import ColumnTransformer
from sklearn.utils import all_estimators
from sklearn.base import RegressorMixin
from sklearn.base import ClassifierMixin
from sklearn.metrics import (
accuracy_score,
balanced_accuracy_score,
roc_auc_score,
f1_score,
r2_score,
mean_squared_error,
)
import warnings
import xgboost
import lightgbm
def adjusted_rsquared(r2, n, p):
    """Adjusted R^2 for a model with *p* predictors fit on *n* samples.

    Penalizes the plain coefficient of determination *r2* for model size:
    R2_adj = 1 - (1 - R2) * (n - 1) / (n - p - 1).
    """
    size_penalty = (n - 1) / (n - p - 1)
    return 1 - (1 - r2) * size_penalty
import os
import sys
import warnings
from typing import List
import platform
import signal
import shutil
import argparse
import onnxruntime
import tensorflow
import roop.globals
import roop.metadata
import roop.ui as ui
from roop.predictor import predict_image, predict_video
from roop.processors.frame.core import get_frame_processors_modules
from roop.utilities import has_image_extension, is_image, is_video, detect_fps, create_video, extract_frames, get_temp_frame_paths, restore_audio, create_temp, move_temp, clean_temp, normalize_output_path
def parse_args() -> None:
    """Parse command-line flags and copy them into roop.globals.

    Side effects: installs a SIGINT handler that tears the run down via
    destroy(), and mutates roop.globals in place. Headless mode is inferred
    from source, target and output all being provided.
    """
    signal.signal(signal.SIGINT, lambda signal_number, frame: destroy())
    program = argparse.ArgumentParser(formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=100))
    program.add_argument('-s', '--source', help='select an source image', dest='source_path')
    program.add_argument('-t', '--target', help='select an target image or video', dest='target_path')
    program.add_argument('-o', '--output', help='select output file or directory', dest='output_path')
    program.add_argument('--frame-processor', help='frame processors (choices: face_swapper, face_enhancer, ...)', dest='frame_processor', default=['face_swapper'], nargs='+')
    program.add_argument('--keep-fps', help='keep target fps', dest='keep_fps', action='store_true')
    program.add_argument('--keep-frames', help='keep temporary frames', dest='keep_frames', action='store_true')
    program.add_argument('--skip-audio', help='skip target audio', dest='skip_audio', action='store_true')
    program.add_argument('--many-faces', help='process every face', dest='many_faces', action='store_true')
    program.add_argument('--reference-face-position', help='position of the reference face', dest='reference_face_position', type=int, default=0)
    program.add_argument('--reference-frame-number', help='number of the reference frame', dest='reference_frame_number', type=int, default=0)
    program.add_argument('--similar-face-distance', help='face distance used for recognition', dest='similar_face_distance', type=float, default=0.85)
    program.add_argument('--temp-frame-format', help='image format used for frame extraction', dest='temp_frame_format', default='png', choices=['jpg', 'png'])
    program.add_argument('--temp-frame-quality', help='image quality used for frame extraction', dest='temp_frame_quality', type=int, default=0, choices=range(101), metavar='[0-100]')
    program.add_argument('--output-video-encoder', help='encoder used for the output video', dest='output_video_encoder', default='libx264', choices=['libx264', 'libx265', 'libvpx-vp9', 'h264_nvenc', 'hevc_nvenc'])
    program.add_argument('--output-video-quality', help='quality used for the output video', dest='output_video_quality', type=int, default=35, choices=range(101), metavar='[0-100]')
    program.add_argument('--max-memory', help='maximum amount of RAM in GB', dest='max_memory', type=int)
    program.add_argument('--execution-provider', help='available execution provider (choices: cpu, ...)', dest='execution_provider', default=['cpu'], choices=suggest_execution_providers(), nargs='+')
    program.add_argument('--execution-threads', help='number of execution threads', dest='execution_threads', type=int, default=suggest_execution_threads())
    program.add_argument('-v', '--version', action='version', version=f'{roop.metadata.name} {roop.metadata.version}')
    args = program.parse_args()
    roop.globals.source_path = args.source_path
    roop.globals.target_path = args.target_path
    roop.globals.output_path = normalize_output_path(roop.globals.source_path, roop.globals.target_path, args.output_path)
    # headless when all three paths were given on the command line
    roop.globals.headless = roop.globals.source_path is not None and roop.globals.target_path is not None and roop.globals.output_path is not None
    roop.globals.frame_processors = args.frame_processor
    roop.globals.keep_fps = args.keep_fps
    roop.globals.keep_frames = args.keep_frames
    roop.globals.skip_audio = args.skip_audio
    roop.globals.many_faces = args.many_faces
    roop.globals.reference_face_position = args.reference_face_position
    roop.globals.reference_frame_number = args.reference_frame_number
    roop.globals.similar_face_distance = args.similar_face_distance
    roop.globals.temp_frame_format = args.temp_frame_format
    roop.globals.temp_frame_quality = args.temp_frame_quality
    roop.globals.output_video_encoder = args.output_video_encoder
    roop.globals.output_video_quality = args.output_video_quality
    roop.globals.max_memory = args.max_memory
    roop.globals.execution_providers = decode_execution_providers(args.execution_provider)
    roop.globals.execution_threads = args.execution_threads
def limit_resources() -> None:
    """Cap TensorFlow GPU memory and, if configured, the process RAM usage."""
    # prevent tensorflow memory leak
    gpus = tensorflow.config.experimental.list_physical_devices('GPU')
    for gpu in gpus:
        tensorflow.config.experimental.set_virtual_device_configuration(gpu, [
            tensorflow.config.experimental.VirtualDeviceConfiguration(memory_limit=1024)
        ])
    # limit memory usage
    if roop.globals.max_memory:
        memory = roop.globals.max_memory * 1024 ** 3
        if platform.system().lower() == 'darwin':
            # NOTE(review): 1024 ** 6 is inconsistent with the GB unit used
            # above — kept as-is; confirm against upstream before changing
            memory = roop.globals.max_memory * 1024 ** 6
        if platform.system().lower() == 'windows':
            import ctypes
            kernel32 = ctypes.windll.kernel32  # type: ignore[attr-defined]
            kernel32.SetProcessWorkingSetSize(-1, ctypes.c_size_t(memory), ctypes.c_size_t(memory))
        else:
            import resource
            # soft and hard cap on the data segment size
            resource.setrlimit(resource.RLIMIT_DATA, (memory, memory))
def pre_check() -> bool:
    """Verify runtime prerequisites: Python >= 3.9 and ffmpeg on PATH."""
    checks = [
        (sys.version_info >= (3, 9), 'Python version is not supported - please upgrade to 3.9 or higher.'),
        (bool(shutil.which('ffmpeg')), 'ffmpeg is not installed.'),
    ]
    for ok, message in checks:
        if not ok:
            update_status(message)
            return False
    return True
def start() -> None:
    """Run the full processing pipeline on roop.globals.target_path.

    Images are processed in place via each frame processor's process_image();
    videos are split into frames, each processor runs over the frames, the
    video is re-encoded and audio restored unless skipped. NSFW prediction
    aborts the run via destroy().
    """
    for frame_processor in get_frame_processors_modules(roop.globals.frame_processors):
        if not frame_processor.pre_start():
            return
    # process image to image
    if has_image_extension(roop.globals.target_path):
        if predict_image(roop.globals.target_path):
            destroy()
        shutil.copy2(roop.globals.target_path, roop.globals.output_path)
        # process frame
        for frame_processor in get_frame_processors_modules(roop.globals.frame_processors):
            update_status('Progressing...', frame_processor.NAME)
            frame_processor.process_image(roop.globals.source_path, roop.globals.output_path, roop.globals.output_path)
            frame_processor.post_process()
        # validate image
        if is_image(roop.globals.target_path):
            update_status('Processing to image succeed!')
        else:
            update_status('Processing to image failed!')
        return
    # process image to videos
    if predict_video(roop.globals.target_path):
        destroy()
    update_status('Creating temporary resources...')
    create_temp(roop.globals.target_path)
    # extract frames
    if roop.globals.keep_fps:
        fps = detect_fps(roop.globals.target_path)
        update_status(f'Extracting frames with {fps} FPS...')
        extract_frames(roop.globals.target_path, fps)
    else:
        update_status('Extracting frames with 30 FPS...')
        extract_frames(roop.globals.target_path)
    # process frame
    temp_frame_paths = get_temp_frame_paths(roop.globals.target_path)
    if temp_frame_paths:
        for frame_processor in get_frame_processors_modules(roop.globals.frame_processors):
            update_status('Progressing...', frame_processor.NAME)
            frame_processor.process_video(roop.globals.source_path, temp_frame_paths)
            frame_processor.post_process()
    else:
        update_status('Frames not found...')
        return
    # create video
    if roop.globals.keep_fps:
        fps = detect_fps(roop.globals.target_path)
        update_status(f'Creating video with {fps} FPS...')
        create_video(roop.globals.target_path, fps)
    else:
        update_status('Creating video with 30 FPS...')
        create_video(roop.globals.target_path)
    # handle audio
    if roop.globals.skip_audio:
        move_temp(roop.globals.target_path, roop.globals.output_path)
        update_status('Skipping audio...')
    else:
        if roop.globals.keep_fps:
            update_status('Restoring audio...')
        else:
            update_status('Restoring audio might cause issues as fps are not kept...')
        restore_audio(roop.globals.target_path, roop.globals.output_path)
    # clean temp
    update_status('Cleaning temporary resources...')
    clean_temp(roop.globals.target_path)
    # validate video
    if is_video(roop.globals.target_path):
        update_status('Processing to video succeed!')
    else:
        update_status('Processing to video failed!')
def destroy() -> None:
    """Remove temporary resources (if a target is set) and exit the process."""
    target = roop.globals.target_path
    if target:
        clean_temp(target)
    sys.exit()
def get_frame_processors_modules(frame_processors: List[str]) -> List[ModuleType]:
    """Lazily load the frame-processor modules by name and cache them.

    NOTE(review): relies on module-level FRAME_PROCESSORS_MODULES,
    load_frame_processor_module and ModuleType being defined/imported
    elsewhere in this module — confirm those are present.
    """
    global FRAME_PROCESSORS_MODULES
    # load only once; subsequent calls return the cached list
    if not FRAME_PROCESSORS_MODULES:
        for frame_processor in frame_processors:
            frame_processor_module = load_frame_processor_module(frame_processor)
            FRAME_PROCESSORS_MODULES.append(frame_processor_module)
    return FRAME_PROCESSORS_MODULES
def run() -> None:
    """Program entry point: parse args, run all checks, then start processing."""
    parse_args()
    if not pre_check():
        return
    # every frame processor may veto the run; all() short-circuits on the
    # first failing check, matching the original early return
    if not all(fp.pre_check() for fp in get_frame_processors_modules(roop.globals.frame_processors)):
        return
    limit_resources()
    if roop.globals.headless:
        start()
        return
    window = ui.init(start, destroy)
    window.mainloop()
import threading
from typing import Any, Optional, List
import insightface
import numpy
import roop.globals
from roop.typing import Frame, Face
FACE_ANALYSER = None
def clear_face_analyser() -> Any:
    """Drop the cached insightface analyser so it is rebuilt on next use."""
    global FACE_ANALYSER
    FACE_ANALYSER = None
from typing import Any, List, Callable
import cv2
import threading
from gfpgan.utils import GFPGANer
import roop.globals
import roop.processors.frame.core
from roop.core import update_status
from roop.face_analyser import get_many_faces
from roop.typing import Frame, Face
from roop.utilities import conditional_download, resolve_relative_path, is_image, is_video
def conditional_download(download_directory_path: str, urls: List[str]) -> None:
    """Download each URL into *download_directory_path* unless already present.

    A tqdm progress bar is sized from the server's Content-Length header.
    """
    if not os.path.exists(download_directory_path):
        os.makedirs(download_directory_path)
    for url in urls:
        download_file_path = os.path.join(download_directory_path, os.path.basename(url))
        if os.path.exists(download_file_path):
            continue
        # BUGFIX: close the size-probe request instead of leaking the socket
        with urllib.request.urlopen(url) as request:  # type: ignore[attr-defined]
            total = int(request.headers.get('Content-Length', 0))
        with tqdm(total=total, desc='Downloading', unit='B', unit_scale=True, unit_divisor=1024) as progress:
            urllib.request.urlretrieve(url, download_file_path, reporthook=lambda count, block_size, total_size: progress.update(block_size))  # type: ignore[attr-defined]
def resolve_relative_path(path: str) -> str:
    """Resolve *path* relative to this module's directory into an absolute path."""
    module_dir = os.path.dirname(__file__)
    return os.path.abspath(os.path.join(module_dir, path))
def pre_check() -> bool:
    """Ensure the GFPGAN weights are available, downloading them on first run."""
    models_dir = resolve_relative_path('../models')
    conditional_download(models_dir, ['https://github.com/TencentARC/GFPGAN/releases/download/v1.3.4/GFPGANv1.4.pth'])
    return True
from typing import Any, List, Callable
import cv2
import threading
from gfpgan.utils import GFPGANer
import roop.globals
import roop.processors.frame.core
from roop.core import update_status
from roop.face_analyser import get_many_faces
from roop.typing import Frame, Face
from roop.utilities import conditional_download, resolve_relative_path, is_image, is_video
NAME = 'ROOP.FACE-ENHANCER'
def update_status(message: str, scope: str = 'ROOP.CORE') -> None:
    """Print a scoped status line and mirror it to the UI when not headless.

    NOTE(review): relies on a module-level `ui` reference — confirm import.
    """
    print(f'[{scope}] {message}')
    if not roop.globals.headless:
        ui.update_status(message)
def is_image(image_path: str) -> bool:
    """True when *image_path* names an existing file with an image MIME type."""
    if not (image_path and os.path.isfile(image_path)):
        return False
    mimetype, _ = mimetypes.guess_type(image_path)
    return bool(mimetype and mimetype.startswith('image/'))
def is_video(video_path: str) -> bool:
    """True when *video_path* names an existing file with a video MIME type."""
    if not (video_path and os.path.isfile(video_path)):
        return False
    mimetype, _ = mimetypes.guess_type(video_path)
    return bool(mimetype and mimetype.startswith('video/'))
def pre_start() -> bool:
    """Require a valid image or video target before the enhancer may run."""
    target = roop.globals.target_path
    if is_image(target) or is_video(target):
        return True
    update_status('Select an image or video for target path.', NAME)
    return False
from typing import Any, List, Callable
import cv2
import threading
from gfpgan.utils import GFPGANer
import roop.globals
import roop.processors.frame.core
from roop.core import update_status
from roop.face_analyser import get_many_faces
from roop.typing import Frame, Face
from roop.utilities import conditional_download, resolve_relative_path, is_image, is_video
def clear_face_enhancer() -> None:
    """Release the cached GFPGAN enhancer instance."""
    global FACE_ENHANCER
    FACE_ENHANCER = None


def post_process() -> None:
    """Tear-down hook: free the enhancer after processing finishes."""
    clear_face_enhancer()
from typing import Any, List, Callable
import cv2
import threading
from gfpgan.utils import GFPGANer
import roop.globals
import roop.processors.frame.core
from roop.core import update_status
from roop.face_analyser import get_many_faces
from roop.typing import Frame, Face
from roop.utilities import conditional_download, resolve_relative_path, is_image, is_video
def process_frame(source_face: Face, reference_face: Face, temp_frame: Frame) -> Frame:
    """Enhance every detected face in *temp_frame*; source/reference are unused."""
    detected = get_many_faces(temp_frame)
    if not detected:
        return temp_frame
    for target_face in detected:
        temp_frame = enhance_face(target_face, temp_frame)
    return temp_frame
def process_image(source_path: str, target_path: str, output_path: str) -> None:
    """Read the target image, enhance all faces, write the result."""
    frame = cv2.imread(target_path)
    enhanced = process_frame(None, None, frame)
    cv2.imwrite(output_path, enhanced)
from typing import Any, List, Callable
import cv2
import threading
from gfpgan.utils import GFPGANer
import roop.globals
import roop.processors.frame.core
from roop.core import update_status
from roop.face_analyser import get_many_faces
from roop.typing import Frame, Face
from roop.utilities import conditional_download, resolve_relative_path, is_image, is_video
def process_frames(source_path: str, temp_frame_paths: List[str], update: Callable[[], None]) -> None:
    """Enhance faces in every temp frame in place, invoking *update* per frame."""
    for frame_path in temp_frame_paths:
        frame = cv2.imread(frame_path)
        cv2.imwrite(frame_path, process_frame(None, None, frame))
        if update:
            update()
def process_video(source_path: str, temp_frame_paths: List[str]) -> None:
    """Run the enhancer over all frames via the shared frame-processing core."""
    roop.processors.frame.core.process_video(None, temp_frame_paths, process_frames)
from typing import Any, List, Callable
import cv2
import insightface
import threading
import roop.globals
import roop.processors.frame.core
from roop.core import update_status
from roop.face_analyser import get_one_face, get_many_faces, find_similar_face
from roop.face_reference import get_face_reference, set_face_reference, clear_face_reference
from roop.typing import Face, Frame
from roop.utilities import conditional_download, resolve_relative_path, is_image, is_video
# NOTE(review): the bodies of these two helpers were lost in extraction —
# ellipsis placeholders keep the file parseable; restore from roop.utilities.
def conditional_download(download_directory_path: str, urls: List[str]) -> None: ...  # type: ignore[attr-defined]
def resolve_relative_path(path: str) -> str: ...
def pre_check() -> bool:
    """Ensure the inswapper model is present, fetching it on first run."""
    models_dir = resolve_relative_path('../models')
    conditional_download(models_dir, ['https://huggingface.co/CountFloyd/deepfake/resolve/main/inswapper_128.onnx'])
    return True
from typing import Any, List, Callable
import cv2
import insightface
import threading
import roop.globals
import roop.processors.frame.core
from roop.core import update_status
from roop.face_analyser import get_one_face, get_many_faces, find_similar_face
from roop.face_reference import get_face_reference, set_face_reference, clear_face_reference
from roop.typing import Face, Frame
from roop.utilities import conditional_download, resolve_relative_path, is_image, is_video
NAME = 'ROOP.FACE-SWAPPER'
def update_status(message: str, scope: str = 'ROOP.CORE') -> None:
    """Print a scoped status line and mirror it to the UI when not headless.

    NOTE(review): relies on a module-level `ui` reference — confirm import.
    """
    print(f'[{scope}] {message}')
    if not roop.globals.headless:
        ui.update_status(message)
def get_one_face(frame: Frame, position: int = 0) -> Optional[Face]:
    """Return the face at *position*; fall back to the last face when the
    index is out of range, or None when no face was detected."""
    faces = get_many_faces(frame)
    if not faces:
        return None
    try:
        return faces[position]
    except IndexError:
        return faces[-1]
def is_image(image_path: str) -> bool:
    """True when *image_path* names an existing file with an image MIME type."""
    if not (image_path and os.path.isfile(image_path)):
        return False
    guessed, _ = mimetypes.guess_type(image_path)
    return bool(guessed and guessed.startswith('image/'))
def is_video(video_path: str) -> bool:
    """True when *video_path* names an existing file with a video MIME type."""
    if not (video_path and os.path.isfile(video_path)):
        return False
    guessed, _ = mimetypes.guess_type(video_path)
    return bool(guessed and guessed.startswith('video/'))
def pre_start() -> bool:
    """Validate inputs: source must be an image containing a face, and the
    target must be an image or a video."""
    if not is_image(roop.globals.source_path):
        update_status('Select an image for source path.', NAME)
        return False
    if not get_one_face(cv2.imread(roop.globals.source_path)):
        update_status('No face in source path detected.', NAME)
        return False
    if is_image(roop.globals.target_path) or is_video(roop.globals.target_path):
        return True
    update_status('Select an image or video for target path.', NAME)
    return False
from typing import Any, List, Callable
import cv2
import insightface
import threading
import roop.globals
import roop.processors.frame.core
from roop.core import update_status
from roop.face_analyser import get_one_face, get_many_faces, find_similar_face
from roop.face_reference import get_face_reference, set_face_reference, clear_face_reference
from roop.typing import Face, Frame
from roop.utilities import conditional_download, resolve_relative_path, is_image, is_video
def clear_face_swapper() -> None:
    """Release the cached inswapper model."""
    global FACE_SWAPPER
    FACE_SWAPPER = None


def clear_face_reference() -> None:
    """Forget the cached reference face."""
    global FACE_REFERENCE
    FACE_REFERENCE = None


def post_process() -> None:
    """Tear-down hook: drop both the swapper model and the reference face."""
    clear_face_swapper()
    clear_face_reference()
from typing import Any, List, Callable
import cv2
import insightface
import threading
import roop.globals
import roop.processors.frame.core
from roop.core import update_status
from roop.face_analyser import get_one_face, get_many_faces, find_similar_face
from roop.face_reference import get_face_reference, set_face_reference, clear_face_reference
from roop.typing import Face, Frame
from roop.utilities import conditional_download, resolve_relative_path, is_image, is_video
# NOTE(review): the bodies of these two helpers were lost in extraction —
# ellipsis placeholders keep the file parseable; restore from the originals.
def process_frame(source_face: Face, reference_face: Face, temp_frame: Frame) -> Frame: ...
def get_one_face(frame: Frame, position: int = 0) -> Optional[Face]: ...
def process_image(source_path: str, target_path: str, output_path: str) -> None:
    """Swap the source face onto the target image and save it to *output_path*."""
    source_face = get_one_face(cv2.imread(source_path))
    target_frame = cv2.imread(target_path)
    if roop.globals.many_faces:
        reference_face = None
    else:
        reference_face = get_one_face(target_frame, roop.globals.reference_face_position)
    cv2.imwrite(output_path, process_frame(source_face, reference_face, target_frame))
from typing import Any, List, Callable
import cv2
import insightface
import threading
import roop.globals
import roop.processors.frame.core
from roop.core import update_status
from roop.face_analyser import get_one_face, get_many_faces, find_similar_face
from roop.face_reference import get_face_reference, set_face_reference, clear_face_reference
from roop.typing import Face, Frame
from roop.utilities import conditional_download, resolve_relative_path, is_image, is_video
def process_frames(source_path: str, temp_frame_paths: List[str], update: Callable[[], None]) -> None:
    """Swap the source face into every temp frame, invoking *update* per frame."""
    source_face = get_one_face(cv2.imread(source_path))
    reference_face = None if roop.globals.many_faces else get_face_reference()
    for frame_path in temp_frame_paths:
        frame = cv2.imread(frame_path)
        cv2.imwrite(frame_path, process_frame(source_face, reference_face, frame))
        if update:
            update()
def get_one_face(frame: Frame, position: int = 0) -> Optional[Face]:
    """Return the face at *position*; fall back to the last face when the
    index is out of range, or None when no face was detected."""
    detected = get_many_faces(frame)
    if not detected:
        return None
    try:
        return detected[position]
    except IndexError:
        return detected[-1]
def get_face_reference() -> Optional[Face]:
    """Return the currently cached reference face (may be None)."""
    return FACE_REFERENCE


def set_face_reference(face: Face) -> None:
    """Cache *face* as the reference used for similarity matching."""
    global FACE_REFERENCE
    FACE_REFERENCE = face
def process_video(source_path: str, temp_frame_paths: List[str]) -> None:
    """Seed the face reference (unless many-faces mode) and process all frames."""
    if not roop.globals.many_faces and not get_face_reference():
        ref_frame = cv2.imread(temp_frame_paths[roop.globals.reference_frame_number])
        set_face_reference(get_one_face(ref_frame, roop.globals.reference_face_position))
    roop.processors.frame.core.process_video(source_path, temp_frame_paths, process_frames)
import json
import os
import subprocess
import sys
import zlib
from palworld_save_tools.gvas import GvasFile
from palworld_save_tools.palsav import compress_gvas_to_sav, decompress_sav_to_gvas
from palworld_save_tools.paltypes import PALWORLD_CUSTOM_PROPERTIES, PALWORLD_TYPE_HINTS
def json_to_sav(json_data, output_filepath):
    """Re-compress parsed GVAS JSON into a Palworld .sav file.

    World saves (Pal.PalWorldSaveGame / Pal.PalLocalWorldSaveGame) use the
    0x32 save type; everything else uses 0x31.
    """
    print(f'Converting JSON to {output_filepath}...', end='', flush=True)
    gvas_file = GvasFile.load(json_data)
    world_classes = ('Pal.PalWorldSaveGame', 'Pal.PalLocalWorldSaveGame')
    is_world_save = any(cls in gvas_file.header.save_game_class_name for cls in world_classes)
    save_type = 0x32 if is_world_save else 0x31
    sav_file = compress_gvas_to_sav(
        gvas_file.write(PALWORLD_CUSTOM_PROPERTIES), save_type
    )
    with open(output_filepath, 'wb') as f:
        f.write(sav_file)
    print('Done!', flush=True)
import json
# Truncate the dump file up front.
# NOTE(review): `f` is closed immediately below, yet recursive_search() writes
# to it — presumably the file is reopened later in the script; confirm.
f = open('all_bytes.txt', 'wb')
f.close()


def recursive_search(json_object, path=""):
    """Walk a parsed JSON tree and dump every 'Byte' array found to `f`.

    `path` accumulates a bracketed key/index trail (e.g. '[a][0]') that is
    written before each blob so it can be located again later.
    """
    if type(json_object) == dict:
        for key in json_object:
            if key == 'Byte' and type(json_object[key]) == list:
                # list of ints -> raw bytes (hex round-trip keeps exact values)
                level_bytes = bytes(json_object[key]).hex()
                f.write(path.encode('utf-8') + '\n\n'.encode('utf-8') + bytes.fromhex(level_bytes) + '\n\n\n\n\n\n\n'.encode('utf-8'))
            else:
                recursive_search(json_object[key], path + '[' + key + ']')
    elif type(json_object) == list:
        for i in range(len(json_object)):
            recursive_search(json_object[i], path + '[' + str(i) + ']')
    elif type(json_object) == int or type(json_object) == float or type(json_object) == str or type(json_object) == bool:
        # scalar leaves carry no Byte arrays
        pass
    else:
        print('Broke on type :')
        print(type(json_object))
import json
import os
import subprocess
import tkinter as tk
from tkinter import filedialog, ttk
from fix_host_save import sav_to_json
# Cache mapping player GUID (the .sav filename stem) -> in-game username, so
# the expensive Level.sav parse only reruns when the player list changes.
guid_cache = {}
def update_guid_dropdowns():
    """Refresh both GUID comboboxes from the save folder's Players directory.

    Reads the folder path from the entry widget, lists the player .sav files,
    and resolves each GUID to a username via the (cached) Level.sav contents.
    """
    folder_path = entry_save.get()
    players_folder = os.path.join(folder_path, 'Players')
    if os.path.exists(players_folder) and os.path.isdir(players_folder):
        # List all files and remove the '.sav' extension.
        file_names = [
            os.path.splitext(f)[0]
            for f in os.listdir(players_folder)
            if os.path.isfile(os.path.join(players_folder, f)) and f.endswith('.sav')
        ]
        global guid_cache
        # NOTE(review): this comparison is order-sensitive (os.listdir order),
        # so a mere reordering re-triggers the full Level.sav parse.
        if file_names != list(guid_cache.keys()):
            level_json = sav_to_json(folder_path + '/Level.sav')
            usernames = [
                find_guid_info(level_json, guid)
                for guid in file_names
            ]
            guid_cache = dict(zip(file_names, usernames))
        else:
            usernames = list(guid_cache.values())
        # Drop current selections that no longer match a known username.
        if not combo_new_guid.get() in usernames:
            combo_new_guid.set('')
        if not combo_old_guid.get() in usernames:
            combo_old_guid.set('')
        combo_new_guid['values'] = usernames
        combo_old_guid['values'] = usernames
def save_config():
    """Persist the current UI selections (path, GUIDs, guild-fix flag) to the config file."""
    settings = {
        'save_path': entry_save.get(),
        'new_guid': combo_new_guid.get(),
        'old_guid': combo_old_guid.get(),
        'guild_fix': guild_fix_var.get(),
    }
    with open(config_file, 'w') as handle:
        json.dump(settings, handle)
# Static captions for the three input rows of the window, packed in order.
for _caption in (
    'Path to save folder:',
    'The new character to overwrite:',
    'The old character to fix/keep:',
):
    tk.Label(app, text=_caption).pack()
def browse_folder(entry):
    """Prompt for a save folder, store it in *entry*, and refresh the GUID lists.

    Fix: the original assigned ``guid_cache = {}`` without a ``global``
    declaration, creating a throwaway local instead of invalidating the
    module-level cache, so stale usernames survived a folder change.

    Args:
        entry: The tk.Entry widget that holds the save-folder path.
    """
    global guid_cache
    foldername = filedialog.askdirectory()
    if foldername != '':
        guid_cache = {}  # invalidate cached GUID -> username mapping
        entry.delete(0, tk.END)
        entry.insert(0, foldername)
        save_config()
        update_guid_dropdowns()
160,240 | import json
import os
import subprocess
import tkinter as tk
from tkinter import filedialog, ttk
from fix_host_save import sav_to_json
# Cache mapping player GUID (the .sav filename stem) -> in-game username, so
# the expensive Level.sav parse only reruns when the player list changes.
guid_cache = {}
def update_guid_dropdowns():
    """Refresh both GUID comboboxes from the save folder's Players directory.

    Reads the folder path from the entry widget, lists the player .sav files,
    and resolves each GUID to a username via the (cached) Level.sav contents.
    """
    folder_path = entry_save.get()
    players_folder = os.path.join(folder_path, 'Players')
    if os.path.exists(players_folder) and os.path.isdir(players_folder):
        # List all files and remove the '.sav' extension.
        file_names = [
            os.path.splitext(f)[0]
            for f in os.listdir(players_folder)
            if os.path.isfile(os.path.join(players_folder, f)) and f.endswith('.sav')
        ]
        global guid_cache
        # NOTE(review): this comparison is order-sensitive (os.listdir order),
        # so a mere reordering re-triggers the full Level.sav parse.
        if file_names != list(guid_cache.keys()):
            level_json = sav_to_json(folder_path + '/Level.sav')
            usernames = [
                find_guid_info(level_json, guid)
                for guid in file_names
            ]
            guid_cache = dict(zip(file_names, usernames))
        else:
            usernames = list(guid_cache.values())
        # Drop current selections that no longer match a known username.
        if not combo_new_guid.get() in usernames:
            combo_new_guid.set('')
        if not combo_old_guid.get() in usernames:
            combo_old_guid.set('')
        combo_new_guid['values'] = usernames
        combo_old_guid['values'] = usernames
# Save-folder path entry; persists the config on every keystroke.
entry_save = tk.Entry(app, width=50)
entry_save.pack()
entry_save.bind('<KeyRelease>', on_entry_change)
# GUID pickers; postcommand re-scans the save folder each time a dropdown opens.
combo_new_guid = ttk.Combobox(app, postcommand=update_guid_dropdowns)
combo_new_guid.pack()
combo_old_guid = ttk.Combobox(app, postcommand=update_guid_dropdowns)
combo_old_guid.pack()
guild_fix_var = tk.BooleanVar()  # checkbox state for the optional guild fix
def run_command():
    """Invoke fix_host_save.py with the currently selected save/GUID options.

    Fix: the original interpolated the user-chosen save path into a single
    shell string with ``shell=True``, which breaks on spaces/quotes and allows
    shell injection.  Build an argument list and skip the shell entirely.
    """
    save_path = entry_save.get()
    # The comboboxes display usernames; map the selected index back to the
    # GUID keys of the cache (same insertion order as the displayed values).
    guids = list(guid_cache.keys())
    new_guid = guids[combo_new_guid.current()]
    old_guid = guids[combo_old_guid.current()]
    guild_fix = guild_fix_var.get()
    command = [
        'python',
        'fix_host_save.py',
        save_path,
        new_guid.replace('.sav', ''),
        old_guid.replace('.sav', ''),
        str(guild_fix),
    ]
    subprocess.run(command)
160,241 | import json
import os
import subprocess
import tkinter as tk
from tkinter import filedialog, ttk
from fix_host_save import sav_to_json
def save_config():
    """Persist the current UI selections (path, GUIDs, guild-fix flag) to the config file."""
    settings = {
        'save_path': entry_save.get(),
        'new_guid': combo_new_guid.get(),
        'old_guid': combo_old_guid.get(),
        'guild_fix': guild_fix_var.get(),
    }
    with open(config_file, 'w') as handle:
        json.dump(settings, handle)
def on_entry_change(event):
    """Persist the current UI state whenever the save-path entry changes.

    Args:
        event: Tk <KeyRelease> event object (unused; required by the bind API).
    """
    save_config()
160,242 | import json
import os
import subprocess
import tkinter as tk
from tkinter import filedialog, ttk
from fix_host_save import sav_to_json
# JSON file that persists the UI state (save path, GUIDs, flags) between runs.
config_file = 'config.json'
def update_guid_dropdowns():
    """Refresh both GUID comboboxes from the save folder's Players directory.

    Reads the folder path from the entry widget, lists the player .sav files,
    and resolves each GUID to a username via the (cached) Level.sav contents.
    """
    folder_path = entry_save.get()
    players_folder = os.path.join(folder_path, 'Players')
    if os.path.exists(players_folder) and os.path.isdir(players_folder):
        # List all files and remove the '.sav' extension.
        file_names = [
            os.path.splitext(f)[0]
            for f in os.listdir(players_folder)
            if os.path.isfile(os.path.join(players_folder, f)) and f.endswith('.sav')
        ]
        global guid_cache
        # NOTE(review): this comparison is order-sensitive (os.listdir order),
        # so a mere reordering re-triggers the full Level.sav parse.
        if file_names != list(guid_cache.keys()):
            level_json = sav_to_json(folder_path + '/Level.sav')
            usernames = [
                find_guid_info(level_json, guid)
                for guid in file_names
            ]
            guid_cache = dict(zip(file_names, usernames))
        else:
            usernames = list(guid_cache.values())
        # Drop current selections that no longer match a known username.
        if not combo_new_guid.get() in usernames:
            combo_new_guid.set('')
        if not combo_old_guid.get() in usernames:
            combo_old_guid.set('')
        combo_new_guid['values'] = usernames
        combo_old_guid['values'] = usernames
# Save-folder path entry; persists the config on every keystroke.
entry_save = tk.Entry(app, width=50)
entry_save.pack()
entry_save.bind('<KeyRelease>', on_entry_change)
# GUID pickers; postcommand re-scans the save folder each time a dropdown opens.
combo_new_guid = ttk.Combobox(app, postcommand=update_guid_dropdowns)
combo_new_guid.pack()
combo_old_guid = ttk.Combobox(app, postcommand=update_guid_dropdowns)
combo_old_guid.pack()
guild_fix_var = tk.BooleanVar()  # checkbox state for the optional guild fix
def load_config():
    """Restore UI state from config.json, if it exists.

    Fix: ``guild_fix`` previously defaulted to ``''``; ``BooleanVar.set('')``
    raises a TclError because the empty string is not a valid Tcl boolean.
    Default to ``False`` instead.
    """
    if os.path.exists(config_file):
        with open(config_file, 'r') as f:
            config = json.load(f)
        entry_save.insert(0, config.get('save_path', ''))
        update_guid_dropdowns()
        combo_new_guid.set(config.get('new_guid', ''))
        combo_old_guid.set(config.get('old_guid', ''))
        guild_fix_var.set(config.get('guild_fix', False))
160,243 | import csv
import logging
import os
import string
import sys
import time
from dataclasses import dataclass, field
from datetime import timedelta
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
import datasets
import evaluate
import numpy as np
import torch
import transformers
from accelerate import Accelerator, InitProcessGroupKwargs
from accelerate.logging import get_logger
from datasets import (
DatasetDict,
IterableDatasetDict,
load_dataset,
)
from huggingface_hub import HfFolder, Repository, create_repo, get_full_repo_name
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import (
HfArgumentParser,
Seq2SeqTrainingArguments,
WhisperConfig,
WhisperFeatureExtractor,
WhisperForConditionalGeneration,
WhisperProcessor,
WhisperTokenizerFast,
)
from transformers.models.whisper.english_normalizer import EnglishTextNormalizer, BasicTextNormalizer
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
The provided code snippet includes necessary dependencies for implementing the `shift_tokens_right` function. Write a Python function `def shift_tokens_right(label_ids: np.array, decoder_start_token_id: int) -> np.ndarray` to solve the following problem:
Shift label ids one token to the right.
Here is the function:
def shift_tokens_right(label_ids: np.ndarray, decoder_start_token_id: int) -> np.ndarray:
    """
    Shift label ids one token to the right.

    Prepends ``decoder_start_token_id`` to every sequence and drops the last
    token, producing the decoder input ids that correspond to ``label_ids``.

    Args:
        label_ids: Integer array of shape (batch, seq_len).
        decoder_start_token_id: Token id placed at position 0 of each row.

    Returns:
        Array with the same shape and dtype as ``label_ids``.
    """
    # Fix: the parameter annotation used ``np.array`` (a factory function),
    # not the ``np.ndarray`` type; runtime behavior is unchanged.
    shifted_label_ids = np.zeros_like(label_ids)
    shifted_label_ids[:, 1:] = label_ids[:, :-1]
    shifted_label_ids[:, 0] = decoder_start_token_id
    return shifted_label_ids
160,244 | import csv
import logging
import os
import string
import sys
import time
from dataclasses import dataclass, field
from datetime import timedelta
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
import datasets
import evaluate
import numpy as np
import torch
import transformers
from accelerate import Accelerator, InitProcessGroupKwargs
from accelerate.logging import get_logger
from datasets import (
DatasetDict,
IterableDatasetDict,
load_dataset,
)
from huggingface_hub import HfFolder, Repository, create_repo, get_full_repo_name
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import (
HfArgumentParser,
Seq2SeqTrainingArguments,
WhisperConfig,
WhisperFeatureExtractor,
WhisperForConditionalGeneration,
WhisperProcessor,
WhisperTokenizerFast,
)
from transformers.models.whisper.english_normalizer import EnglishTextNormalizer, BasicTextNormalizer
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
The provided code snippet includes necessary dependencies for implementing the `log_metric` function. Write a Python function `def log_metric( accelerator, metrics: Dict, train_time: float, prefix: str = "eval", )` to solve the following problem:
Helper function to log all evaluation metrics with the correct prefixes and styling.
Here is the function:
def log_metric(
    accelerator,
    metrics: Dict,
    train_time: float,
    prefix: str = "eval",
):
    """Helper function to log all evaluation metrics with the correct prefixes and styling."""
    payload = {f"{prefix}/{name}": value for name, value in metrics.items()}
    payload[f"{prefix}/time"] = train_time
    accelerator.log(payload)
160,245 | import csv
import logging
import os
import string
import sys
import time
from dataclasses import dataclass, field
from datetime import timedelta
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
import datasets
import evaluate
import numpy as np
import torch
import transformers
from accelerate import Accelerator, InitProcessGroupKwargs
from accelerate.logging import get_logger
from datasets import (
DatasetDict,
IterableDatasetDict,
load_dataset,
)
from huggingface_hub import HfFolder, Repository, create_repo, get_full_repo_name
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import (
HfArgumentParser,
Seq2SeqTrainingArguments,
WhisperConfig,
WhisperFeatureExtractor,
WhisperForConditionalGeneration,
WhisperProcessor,
WhisperTokenizerFast,
)
from transformers.models.whisper.english_normalizer import EnglishTextNormalizer, BasicTextNormalizer
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
The provided code snippet includes necessary dependencies for implementing the `log_pred` function. Write a Python function `def log_pred( accelerator, pred_str: List[str], label_str: List[str], norm_pred_str: List[str], norm_label_str: List[str], prefix: str = "eval", num_lines: int = 200000, )` to solve the following problem:
Helper function to log target/predicted transcriptions to weights and biases (wandb).
Here is the function:
def log_pred(
    accelerator,
    pred_str: List[str],
    label_str: List[str],
    norm_pred_str: List[str],
    norm_label_str: List[str],
    prefix: str = "eval",
    num_lines: int = 200000,
):
    """Helper function to log target/predicted transcriptions to weights and biases (wandb)."""
    if not accelerator.is_main_process:
        return
    tracker = accelerator.get_tracker("wandb")
    # pretty name for the split: "eval/clean" -> "eval-clean"
    prefix = prefix.replace("/", "-")
    headers = ["Target", "Pred", "Norm Target", "Norm Pred"]
    # convert str data to a wandb compatible format (one row per utterance)
    rows = [list(cols) for cols in zip(label_str, pred_str, norm_label_str, norm_pred_str)]
    # log every prediction as a table with the appropriate headers
    tracker.log_table(
        table_name=f"{prefix}/all_predictions",
        columns=headers,
        data=rows[:num_lines],
    )
    # log only the rows whose normalised prediction disagrees with the target
    rows = np.asarray(rows)
    mismatched = rows[rows[:, -2] != rows[:, -1]]
    tracker.log_table(
        table_name=f"{prefix}/incorrect_predictions",
        columns=headers,
        data=mismatched[:num_lines],
    )
160,246 | import csv
import logging
import os
import string
import sys
import time
from dataclasses import dataclass, field
from datetime import timedelta
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
import datasets
import evaluate
import numpy as np
import torch
import transformers
from accelerate import Accelerator, InitProcessGroupKwargs
from accelerate.logging import get_logger
from datasets import (
DatasetDict,
IterableDatasetDict,
load_dataset,
)
from huggingface_hub import HfFolder, Repository, create_repo, get_full_repo_name
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import (
HfArgumentParser,
Seq2SeqTrainingArguments,
WhisperConfig,
WhisperFeatureExtractor,
WhisperForConditionalGeneration,
WhisperProcessor,
WhisperTokenizerFast,
)
from transformers.models.whisper.english_normalizer import EnglishTextNormalizer, BasicTextNormalizer
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
The provided code snippet includes necessary dependencies for implementing the `convert_dataset_str_to_list` function. Write a Python function `def convert_dataset_str_to_list( dataset_names, dataset_config_names, splits=None, text_column_names=None, dataset_samples=None, default_split="train", ) -> List[Dict]` to solve the following problem:
Given three lists of dataset names, configs and splits, this function groups the corresponding names/configs/splits. Each dataset is assigned a unique dictionary with these metadata values, and the function returns a list of dictionaries, one for each dataset.
Here is the function:
def convert_dataset_str_to_list(
    dataset_names,
    dataset_config_names,
    splits=None,
    text_column_names=None,
    dataset_samples=None,
    default_split="train",
) -> List[Dict]:
    """
    Given three lists of dataset names, configs and splits, this function groups the corresponding
    names/configs/splits. Each dataset is assigned a unique dictionary with these metadata values, and the
    function returns a list of dictionaries, one for each dataset.
    """
    # A "+"-delimited string is shorthand for a list of per-dataset values.
    if isinstance(dataset_names, str):
        dataset_names = dataset_names.split("+")
        dataset_config_names = dataset_config_names.split("+")
        splits = None if splits is None else splits.split("+")
        text_column_names = None if text_column_names is None else text_column_names.split("+")
        dataset_samples = None if dataset_samples is None else dataset_samples.split("+")

    num_datasets = len(dataset_names)

    # basic checks to ensure we've got the right number of datasets/configs/splits/columns/probs
    if len(dataset_config_names) != num_datasets:
        raise ValueError(
            f"Ensure one config is passed for each dataset, got {num_datasets} datasets and"
            f" {len(dataset_config_names)} configs."
        )
    if splits is not None and len(splits) != num_datasets:
        raise ValueError(
            f"Ensure one split is passed for each dataset, got {num_datasets} datasets and {len(splits)} splits."
        )
    if text_column_names is not None and len(text_column_names) != num_datasets:
        raise ValueError(
            f"Ensure one text column name is passed for each dataset, got {num_datasets} datasets and"
            f" {len(text_column_names)} text column names."
        )
    if dataset_samples is None:
        dataset_samples = [None] * num_datasets
    else:
        if len(dataset_samples) != num_datasets:
            raise ValueError(
                f"Ensure one sample is passed for each dataset, got {num_datasets} datasets and "
                f"{len(dataset_samples)} samples."
            )
        dataset_samples = [float(sample) for sample in dataset_samples]

    # Fill in defaults for the optional per-dataset fields.
    if text_column_names is None:
        text_column_names = ["text"] * num_datasets
    if splits is None:
        splits = [default_split] * num_datasets

    return [
        {
            "name": name,
            "config": config,
            "split": split,
            "text_column_name": column,
            "samples": samples,
        }
        for name, config, split, column, samples in zip(
            dataset_names, dataset_config_names, splits, text_column_names, dataset_samples
        )
    ]
160,247 | import logging
import os
import sys
import time
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import torch
import transformers
from datasets import DatasetDict, IterableDatasetDict, load_dataset
from jiwer import process_words, wer_default
from nltk import ngrams
from tqdm import tqdm
from transformers import (
HfArgumentParser,
Seq2SeqTrainingArguments,
WhisperTokenizer,
is_tensorboard_available,
is_wandb_available,
pipeline,
)
from transformers.models.whisper.english_normalizer import EnglishTextNormalizer, BasicTextNormalizer
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
def write_metric(summary_writer, eval_metrics, prefix="eval"):
    """Write each evaluation metric to the tensorboard summary writer at step 0."""
    for tag, value in eval_metrics.items():
        summary_writer.add_scalar(f"{prefix}/{tag}", value, 0)
160,248 | import logging
import os
import sys
import time
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import torch
import transformers
from datasets import DatasetDict, IterableDatasetDict, load_dataset
from jiwer import process_words, wer_default
from nltk import ngrams
from tqdm import tqdm
from transformers import (
HfArgumentParser,
Seq2SeqTrainingArguments,
WhisperTokenizer,
is_tensorboard_available,
is_wandb_available,
pipeline,
)
from transformers.models.whisper.english_normalizer import EnglishTextNormalizer, BasicTextNormalizer
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
def write_wandb_metric(wandb_logger, metrics, train_time, prefix):
    """Log a metrics dict to wandb under *prefix*, plus the elapsed train time."""
    payload = {f"{prefix}/{key}": value for key, value in metrics.items()}
    payload[f"{prefix}/time"] = train_time
    wandb_logger.log(payload)
160,249 | import logging
import os
import sys
import time
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import torch
import transformers
from datasets import DatasetDict, IterableDatasetDict, load_dataset
from jiwer import process_words, wer_default
from nltk import ngrams
from tqdm import tqdm
from transformers import (
HfArgumentParser,
Seq2SeqTrainingArguments,
WhisperTokenizer,
is_tensorboard_available,
is_wandb_available,
pipeline,
)
from transformers.models.whisper.english_normalizer import EnglishTextNormalizer, BasicTextNormalizer
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
def convert_audio_to_wandb(wandb_logger, audio):
    """Wrap a datasets-style audio dict as a wandb Audio object (samples as a column vector)."""
    samples = audio["array"][:, np.newaxis]
    return wandb_logger.Audio(samples, sample_rate=audio["sampling_rate"])
def data(dataset, text_column_name="text", log_audio=False):
    """Yield pipeline-ready dicts: audio fields plus 'reference' text (raw audio only if log_audio)."""
    for sample in dataset:
        record = dict(sample["audio"])
        record["reference"] = sample[text_column_name]
        # Keep the full audio dict only when it will be logged downstream.
        record["audio"] = sample["audio"] if log_audio else None
        yield record
def write_wandb_pred(
    wandb_logger,
    eval_audios,
    pred_str,
    label_str,
    norm_pred_str,
    norm_label_str,
    prefix="eval",
):
    """Log a wandb table of target/predicted transcriptions, with an audio column when samples were kept."""
    columns = ["Target", "Pred", "Norm Target", "Norm Pred"]
    # convert str data to a wandb compatible format (one row per utterance)
    rows = [list(cols) for cols in zip(label_str, pred_str, norm_label_str, norm_pred_str)]
    if len(eval_audios) > 0:
        # prepend a playable audio column when raw audio samples were collected
        columns = ["Audio"] + columns
        rows = [
            [convert_audio_to_wandb(wandb_logger, eval_audios[i]), *rows[i]]
            for i in range(len(pred_str))
        ]
    # log as a table with the appropriate headers
    wandb_logger.log(
        {f"{prefix}/predictions": wandb_logger.Table(columns=columns, data=rows)},
    )
160,250 | import logging
import os
import sys
import time
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import torch
import transformers
from datasets import DatasetDict, IterableDatasetDict, load_dataset
from jiwer import process_words, wer_default
from nltk import ngrams
from tqdm import tqdm
from transformers import (
HfArgumentParser,
Seq2SeqTrainingArguments,
WhisperTokenizer,
is_tensorboard_available,
is_wandb_available,
pipeline,
)
from transformers.models.whisper.english_normalizer import EnglishTextNormalizer, BasicTextNormalizer
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
def convert_dataset_str_to_list(
    dataset_names, dataset_config_names, splits=None, text_column_names=None, dataset_hours=None, default_split="train"
):
    """Group "+"-delimited dataset names/configs/splits/columns/hours into one metadata dict per dataset."""
    # A "+"-delimited string is shorthand for a list of per-dataset values.
    if isinstance(dataset_names, str):
        dataset_names = dataset_names.split("+")
        dataset_config_names = dataset_config_names.split("+")
        splits = None if splits is None else splits.split("+")
        text_column_names = None if text_column_names is None else text_column_names.split("+")
        dataset_hours = None if dataset_hours is None else dataset_hours.split("+")

    num_datasets = len(dataset_names)

    # basic checks to ensure we've got the right number of datasets/configs/splits/columns/probs
    if len(dataset_config_names) != num_datasets:
        raise ValueError(
            f"Ensure one config is passed for each dataset, got {num_datasets} datasets and"
            f" {len(dataset_config_names)} configs."
        )
    if splits is not None and len(splits) != num_datasets:
        raise ValueError(
            f"Ensure one split is passed for each dataset, got {num_datasets} datasets and {len(splits)} splits."
        )
    if text_column_names is not None and len(text_column_names) != num_datasets:
        raise ValueError(
            f"Ensure one text column name is passed for each dataset, got {num_datasets} datasets and"
            f" {len(text_column_names)} text column names."
        )
    if dataset_hours is None:
        dataset_hours = [None] * num_datasets
    else:
        if len(dataset_hours) != num_datasets:
            raise ValueError(
                f"Ensure one probability is passed for each dataset, got {num_datasets} datasets and "
                f"{len(dataset_hours)} hours."
            )
        dataset_hours = [float(hours) for hours in dataset_hours]

    # Fill in defaults for the optional per-dataset fields.
    if text_column_names is None:
        text_column_names = ["text"] * num_datasets
    if splits is None:
        splits = [default_split] * num_datasets

    return [
        {"name": name, "config": config, "split": split, "text_column_name": column, "hours": hours}
        for name, config, split, column, hours in zip(
            dataset_names, dataset_config_names, splits, text_column_names, dataset_hours
        )
    ]
160,251 | import argparse
import copy
import logging
import numpy as np
import torch
from transformers import GenerationConfig, WhisperForConditionalGeneration, WhisperProcessor
def _str_to_bool(value):
    """Parse a command-line boolean: 'true'/'1'/'yes'/'y'/'t' (any case) -> True."""
    if isinstance(value, bool):
        return value
    return value.strip().lower() in ("true", "1", "yes", "y", "t")


def parse_args():
    """Parse CLI options for initialising a student Whisper model from a teacher.

    Returns:
        argparse.Namespace with teacher_checkpoint, subfolder, encoder_layers,
        decoder_layers, save_dir, push_to_hub and cache_dir.
    """
    parser = argparse.ArgumentParser(
        description="Initialise a student Whisper model from a teacher model, copying the relevant layer weights and adjusting the processor as necessary."
    )
    parser.add_argument(
        "--teacher_checkpoint",
        type=str,
        required=True,
        help="The HF Hub ID of the teacher checkpoint.",
    )
    parser.add_argument(
        "--subfolder",
        type=str,
        default="",
        help="In case the relevant teacher weights are located inside a subfolder of the model repo on huggingface.co, you "
        "can specify the folder name here.",
    )
    parser.add_argument(
        "--encoder_layers",
        type=int,
        default=None,
        help="Number of encoder layers to use in the student model. Defaults to all layers from the teacher.",
    )
    parser.add_argument(
        "--decoder_layers",
        type=int,
        default=2,
        help="Number of decoder layers to use in the student model. Defaults to 2 layers.",
    )
    parser.add_argument(
        "--save_dir",
        type=str,
        required=True,
        help="Where to save the student weights and processor.",
    )
    parser.add_argument(
        "--push_to_hub",
        # Fix: ``type=bool`` made ``--push_to_hub False`` evaluate to True,
        # since ``bool("False")`` is truthy; parse the string explicitly.
        type=_str_to_bool,
        required=False,
        default=False,
        help="Whether to push the student weights and processor to the Hub.",
    )
    parser.add_argument(
        "--cache_dir",
        type=str,
        default=None,
        help="Where to store the pretrained models downloaded from huggingface.co",
    )
    args = parser.parse_args()
    return args
160,252 | import argparse
import copy
import logging
import numpy as np
import torch
from transformers import GenerationConfig, WhisperForConditionalGeneration, WhisperProcessor
logger = logging.getLogger(__name__)
def init_student_model_from_teacher(
    teacher_checkpoint,
    encoder_layers=None,
    decoder_layers=2,
    save_dir=None,
    push_to_hub=None,
    cache_dir=None,
    subfolder="",
):
    """Initialise a distilled "student" Whisper model from a teacher checkpoint.

    Copies the teacher's weights into a student with ``encoder_layers`` encoder
    and ``decoder_layers`` decoder layers (layers chosen maximally spaced via
    np.linspace), saves the model/processor/generation config, sanity-checks a
    forward pass, and optionally pushes the artifacts to the Hub.

    Args:
        teacher_checkpoint: HF Hub ID or local path of the teacher model.
        encoder_layers: Student encoder layer count; None keeps all teacher layers.
        decoder_layers: Student decoder layer count (default 2).
        save_dir: Directory to save the student weights/processor/config into.
        push_to_hub: If truthy, push the saved artifacts to the Hub under ``save_dir``.
        cache_dir: Optional download cache directory.
        subfolder: Subfolder of the repo holding the teacher weights.
    """
    teacher_model = WhisperForConditionalGeneration.from_pretrained(
        teacher_checkpoint,
        cache_dir=cache_dir,
        subfolder=subfolder,
        low_cpu_mem_usage=True,
    )
    processor = WhisperProcessor.from_pretrained(teacher_checkpoint)
    generation_config = GenerationConfig.from_pretrained(teacher_checkpoint)
    teacher_config = teacher_model.config
    teacher_encoder_layers = teacher_config.encoder_layers
    teacher_decoder_layers = teacher_config.decoder_layers
    # Student config is the teacher config with only the layer counts changed.
    student_config = copy.deepcopy(teacher_config)
    student_config.update(
        {
            "encoder_layers": encoder_layers if encoder_layers is not None else teacher_encoder_layers,
            "decoder_layers": decoder_layers,
        }
    )
    # Evenly-spaced teacher layer indices to keep; force the last teacher layer
    # to always be retained, then invert into teacher-index -> student-index maps.
    encoder_mapping = np.linspace(0, teacher_encoder_layers - 1, student_config.encoder_layers, dtype=int)
    encoder_mapping[-1] = teacher_encoder_layers - 1
    encoder_map = {}
    for student_layer, teacher_layer in enumerate(encoder_mapping):
        encoder_map[teacher_layer] = student_layer
    decoder_mapping = np.linspace(0, teacher_decoder_layers - 1, student_config.decoder_layers, dtype=int)
    decoder_mapping[-1] = teacher_decoder_layers - 1
    decoder_map = {}
    for student_layer, teacher_layer in enumerate(decoder_mapping):
        decoder_map[teacher_layer] = student_layer
    # init the student params from the teacher model
    student_model = WhisperForConditionalGeneration(student_config)
    # strict=False: layer counts differ, so teacher layer keys beyond the student
    # depth are expected to be "unexpected"; missing keys, however, are a bug.
    missing_keys, unexpected_keys = student_model.load_state_dict(teacher_model.state_dict(), strict=False)
    if len(missing_keys) > 0:
        raise RuntimeError(
            "Error(s) in loading state_dict for WhisperForConditionalGeneration. \n"
            f"Missing key(s) in state_dict: {missing_keys}"
        )
    # When the depths match exactly, no layer keys should be left over either.
    if decoder_layers == teacher_decoder_layers:
        decoder_keys = [key for key in unexpected_keys if "model.decoder.layers" in key]
        if len(decoder_keys) > 0:
            raise RuntimeError(
                "Error(s) in loading state_dict for WhisperForConditionalGeneration. \n"
                f"Unexpected key(s) in state_dict: {decoder_keys}"
            )
    if encoder_layers == teacher_encoder_layers:
        encoder_keys = [key for key in unexpected_keys if "model.encoder.layers" in key]
        if len(encoder_keys) > 0:
            raise RuntimeError(
                "Error(s) in loading state_dict for WhisperForConditionalGeneration. \n"
                f"Unexpected key(s) in state_dict: {encoder_keys}"
            )
    for layer in range(teacher_decoder_layers):
        if layer in decoder_map:
            # re-introduce pre-defined layers from the teacher
            student_model.model.decoder.layers[decoder_map[layer]].load_state_dict(
                teacher_model.model.decoder.layers[layer].state_dict()
            )
    if encoder_layers is not None:
        for layer in range(teacher_encoder_layers):
            if layer in encoder_map:
                # re-introduce pre-defined layers from the teacher
                student_model.model.encoder.layers[encoder_map[layer]].load_state_dict(
                    teacher_model.model.encoder.layers[layer].state_dict()
                )
    # remove the teacher params and model
    del teacher_model
    # save the converted weights and model
    if save_dir is not None:
        student_model.save_pretrained(save_dir)
        # we also need to correctly save the processor and generation config
        processor.save_pretrained(save_dir)
        generation_config.save_pretrained(save_dir)
    # check we can do a forward pass with the saved model - first load the weights and processor
    logger.info("Checking we can load the saved model...")
    student_model = WhisperForConditionalGeneration.from_pretrained(
        save_dir,
        low_cpu_mem_usage=True,
    )
    processor = WhisperProcessor.from_pretrained(save_dir)
    # define some random inputs
    input_features = processor(np.ones(16000), sampling_rate=16000, return_tensors="pt").input_features
    decoder_start_token_id = student_model.config.decoder_start_token_id
    decoder_input_ids = torch.ones((input_features.shape[0], 1), dtype=torch.long) * decoder_start_token_id
    # do a forward pass - outputs will be gibberish for the initialised model so we can't check them
    # but we make can sure the model runs as expected
    logger.info("Checking we can run the converted model forward...")
    _ = student_model(input_features, decoder_input_ids=decoder_input_ids).logits
    logger.info("Conversion successful!")
    if push_to_hub:
        student_model.push_to_hub(save_dir)
        processor.push_to_hub(save_dir)
        generation_config.push_to_hub(save_dir)
160,256 | import logging
import os
import re
import shutil
import sys
import time
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
import datasets
import evaluate
import numpy as np
import torch
import torch.nn as nn
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
from datasets import (
DatasetDict,
IterableDataset,
IterableDatasetDict,
concatenate_datasets,
interleave_datasets,
load_dataset,
)
from huggingface_hub import Repository, create_repo
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import (
AddedToken,
HfArgumentParser,
Seq2SeqTrainingArguments,
WhisperConfig,
WhisperFeatureExtractor,
WhisperForConditionalGeneration,
WhisperProcessor,
WhisperTokenizerFast,
get_scheduler,
set_seed,
)
from transformers.modeling_outputs import BaseModelOutput
from transformers.models.whisper.english_normalizer import BasicTextNormalizer, EnglishTextNormalizer
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
The provided code snippet includes necessary dependencies for implementing the `log_metric` function. Write a Python function `def log_metric( accelerator, metrics: Dict, train_time: float, step: int, epoch: int, learning_rate: float = None, prefix: str = "train", )` to solve the following problem:
Helper function to log all training/evaluation metrics with the correct prefixes and styling.
Here is the function:
def log_metric(
    accelerator,
    metrics: Dict,
    train_time: float,
    step: int,
    epoch: int,
    learning_rate: float = None,
    prefix: str = "train",
):
    """Helper function to log all training/evaluation metrics with the correct prefixes and styling."""
    payload = {f"{prefix}/{key}": value for key, value in metrics.items()}
    payload[f"{prefix}/time"] = train_time
    payload[f"{prefix}/epoch"] = epoch
    if learning_rate is not None:
        payload[f"{prefix}/learning_rate"] = learning_rate
    accelerator.log(payload, step=step)
160,257 | import logging
import os
import re
import shutil
import sys
import time
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
import datasets
import evaluate
import numpy as np
import torch
import torch.nn as nn
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
from datasets import (
DatasetDict,
IterableDataset,
IterableDatasetDict,
concatenate_datasets,
interleave_datasets,
load_dataset,
)
from huggingface_hub import Repository, create_repo
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import (
AddedToken,
HfArgumentParser,
Seq2SeqTrainingArguments,
WhisperConfig,
WhisperFeatureExtractor,
WhisperForConditionalGeneration,
WhisperProcessor,
WhisperTokenizerFast,
get_scheduler,
set_seed,
)
from transformers.modeling_outputs import BaseModelOutput
from transformers.models.whisper.english_normalizer import BasicTextNormalizer, EnglishTextNormalizer
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
The provided code snippet includes necessary dependencies for implementing the `log_pred` function. Write a Python function `def log_pred( accelerator, pred_str: List[str], label_str: List[str], norm_pred_str: List[str], norm_label_str: List[str], step: int, prefix: str = "eval", num_lines: int = 200000, )` to solve the following problem:
Helper function to log target/predicted transcriptions to weights and biases (wandb).
Here is the function:
def log_pred(
    accelerator,
    pred_str: List[str],
    label_str: List[str],
    norm_pred_str: List[str],
    norm_label_str: List[str],
    step: int,
    prefix: str = "eval",
    num_lines: int = 200000,
):
    """Helper function to log target/predicted transcriptions to weights and biases (wandb).

    Writes two tables: one with every (target, prediction) pair together with the
    normalised variants, and one restricted to rows where the normalised prediction
    disagrees with the normalised target. Only the main process logs; other ranks
    return immediately.
    """
    if not accelerator.is_main_process:
        return
    wandb_tracker = accelerator.get_tracker("wandb")
    # pretty name for current step: step 50000 -> step 50k
    cur_step_pretty = f"{int(step // 1000)}k" if step > 1000 else step
    prefix_pretty = prefix.replace("/", "-")
    headers = ["Target", "Pred", "Norm Target", "Norm Pred"]
    # convert str data to a wandb compatible format
    rows = [[label_str[i], pred_str[i], norm_label_str[i], norm_pred_str[i]] for i in range(len(pred_str))]
    # log as a table with the appropriate headers
    wandb_tracker.log_table(
        table_name=f"predictions/{prefix_pretty}-step-{cur_step_pretty}",
        columns=headers,
        data=rows[:num_lines],
        step=step,
    )
    # log incorrect normalised predictions
    rows = np.asarray(rows)
    mismatched = rows[rows[:, -2] != rows[:, -1]]
    # log as a table with the appropriate headers
    wandb_tracker.log_table(
        table_name=f"incorrect_predictions/{prefix_pretty}-step-{cur_step_pretty}",
        columns=headers,
        data=mismatched[:num_lines],
        step=step,
    )
160,258 | import logging
import os
import re
import shutil
import sys
import time
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
import datasets
import evaluate
import numpy as np
import torch
import torch.nn as nn
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
from datasets import (
DatasetDict,
IterableDataset,
IterableDatasetDict,
concatenate_datasets,
interleave_datasets,
load_dataset,
)
from huggingface_hub import Repository, create_repo
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import (
AddedToken,
HfArgumentParser,
Seq2SeqTrainingArguments,
WhisperConfig,
WhisperFeatureExtractor,
WhisperForConditionalGeneration,
WhisperProcessor,
WhisperTokenizerFast,
get_scheduler,
set_seed,
)
from transformers.modeling_outputs import BaseModelOutput
from transformers.models.whisper.english_normalizer import BasicTextNormalizer, EnglishTextNormalizer
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
def convert_dataset_str_to_list(
    dataset_names,
    dataset_config_names,
    splits=None,
    text_column_names=None,
    dataset_samples=None,
    default_split="train",
) -> List[Dict]:
    """
    Given three lists of dataset names, configs and splits, this function groups the corresponding
    names/configs/splits. Each dataset is assigned a unique dictionary with these metadata values, and the
    function returns a list of dictionaries, one for each dataset.

    String arguments may pack multiple values with a "+" separator, e.g. "ds1+ds2".
    """
    if isinstance(dataset_names, str):
        # "+"-separated shorthand: split every packed argument in lockstep
        dataset_names = dataset_names.split("+")
        dataset_config_names = dataset_config_names.split("+")
        splits = None if splits is None else splits.split("+")
        text_column_names = None if text_column_names is None else text_column_names.split("+")
        dataset_samples = None if dataset_samples is None else dataset_samples.split("+")

    n_datasets = len(dataset_names)

    # basic checks to ensure we've got the right number of datasets/configs/splits/columns/probs
    if len(dataset_config_names) != n_datasets:
        raise ValueError(
            f"Ensure one config is passed for each dataset, got {n_datasets} datasets and"
            f" {len(dataset_config_names)} configs."
        )
    if splits is not None and len(splits) != n_datasets:
        raise ValueError(
            f"Ensure one split is passed for each dataset, got {n_datasets} datasets and {len(splits)} splits."
        )
    if text_column_names is not None and len(text_column_names) != n_datasets:
        raise ValueError(
            f"Ensure one text column name is passed for each dataset, got {n_datasets} datasets and"
            f" {len(text_column_names)} text column names."
        )

    if dataset_samples is None:
        dataset_samples = [None] * n_datasets
    else:
        if len(dataset_samples) != n_datasets:
            raise ValueError(
                f"Ensure one sample is passed for each dataset, got {n_datasets} datasets and "
                f"{len(dataset_samples)} samples."
            )
        dataset_samples = [float(ds_sample) for ds_sample in dataset_samples]

    # fill in per-dataset defaults for the optional arguments
    if text_column_names is None:
        text_column_names = ["text"] * n_datasets
    if splits is None:
        splits = [default_split] * n_datasets

    return [
        {
            "name": name,
            "config": config,
            "split": split,
            "text_column_name": column,
            "samples": samples,
        }
        for name, config, split, column, samples in zip(
            dataset_names, dataset_config_names, splits, text_column_names, dataset_samples
        )
    ]
def load_multiple_datasets(
    dataset_names: Union[List, str],
    dataset_config_names: Union[List, str],
    splits: Optional[Union[List, str]] = None,
    text_column_names: Optional[List] = None,
    sampling_rate: Optional[int] = 16000,
    stopping_strategy: Optional[str] = "first_exhausted",
    dataset_samples: Optional[Union[List, np.ndarray]] = None,
    streaming: Optional[bool] = True,
    seed: Optional[int] = None,
    accelerator: Optional[Accelerator] = None,
    use_pseudo_labels: Optional[bool] = None,
    **kwargs,
) -> IterableDataset:
    """
    Load one or more datasets, normalise each to a common (audio, text) schema, and combine them.

    Args:
        dataset_names: dataset name(s), either a list or a single "+"-separated string.
        dataset_config_names: config name(s), one per dataset.
        splits: split(s) to load, one per dataset (defaults to "train").
        text_column_names: transcription column(s), one per dataset (defaults to "text").
        sampling_rate: target audio sampling rate; every dataset's audio column is cast to it.
        stopping_strategy: `interleave_datasets` stopping strategy (streaming mode only).
        dataset_samples: relative sampling weights used to derive interleaving probabilities.
        streaming: if True, lazily interleave the datasets; otherwise concatenate them.
        seed: seed for the interleaving shuffle.
        accelerator: used only to silence the progress bar on non-local-main processes.
        use_pseudo_labels: if truthy, additionally retain the `whisper_transcript` pseudo-label column.
        **kwargs: forwarded to `datasets.load_dataset`.

    Returns:
        The single loaded dataset when only one is given, an interleaved streaming dataset,
        or a concatenation of the non-streaming datasets.

    Raises:
        ValueError: if a requested text column or a required pseudo-label column is missing.
    """
    # NOTE: `use_pseudo_labels` was annotated `float`; it is used as a boolean flag, so the
    # annotation is corrected to Optional[bool]. Likewise `np.array` (a function) -> `np.ndarray`.
    dataset_names_dict = convert_dataset_str_to_list(
        dataset_names, dataset_config_names, splits, text_column_names, dataset_samples
    )

    if dataset_samples is not None:
        # normalise the per-dataset sample counts into interleaving probabilities
        dataset_samples = [ds_dict["samples"] for ds_dict in dataset_names_dict]
        probabilities = np.array(dataset_samples) / np.sum(dataset_samples)
    else:
        probabilities = None

    all_datasets = []
    # iterate over the datasets we want to interleave
    for dataset_dict in tqdm(
        dataset_names_dict,
        desc="Combining datasets...",
        disable=not accelerator.is_local_main_process if accelerator is not None else False,
    ):
        dataset = load_dataset(
            dataset_dict["name"],
            dataset_dict["config"],
            split=dataset_dict["split"],
            streaming=streaming,
            **kwargs,
        )
        # resample to specified sampling rate
        dataset = dataset.cast_column("audio", datasets.features.Audio(sampling_rate))
        dataset_features = dataset.features.keys()
        columns_to_keep = {"audio", "text"}

        if dataset_dict["text_column_name"] not in dataset_features:
            raise ValueError(
                f"Text column name {dataset_dict['text_column_name']} not found in dataset"
                f" '{dataset_dict['name']}'. Make sure to set `--text_column_name` to the"
                f" correct text column - one of {', '.join(dataset_features)}."
            )

        # blanket renaming of all transcription columns to text
        if dataset_dict["text_column_name"] != "text":
            dataset = dataset.rename_column(dataset_dict["text_column_name"], "text")

        if use_pseudo_labels:
            if "whisper_transcript" not in dataset_features:
                raise ValueError(
                    f"Pseudo-label column `whisper_transcript` not found in dataset {dataset_dict['name']}. Ensure"
                    "pseudo-labels are present in the dataset under this column name, or train directly on the text "
                    "labels by setting `--use_pseudo_labels=False` and defining the appropriate `--text_column_name`."
                )
            columns_to_keep.add("whisper_transcript")

        # re-read the features: renaming may have changed the column set
        dataset_features = dataset.features.keys()
        dataset = dataset.remove_columns(set(dataset_features - columns_to_keep))
        all_datasets.append(dataset)

    if len(all_datasets) == 1:
        # we have a single dataset so just return it as is
        return all_datasets[0]

    if streaming:
        return interleave_datasets(
            all_datasets,
            stopping_strategy=stopping_strategy,
            probabilities=probabilities,
            seed=seed,
        )
    return concatenate_datasets(all_datasets)
160,259 | import logging
import os
import re
import shutil
import sys
import time
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
import datasets
import evaluate
import numpy as np
import torch
import torch.nn as nn
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
from datasets import (
DatasetDict,
IterableDataset,
IterableDatasetDict,
concatenate_datasets,
interleave_datasets,
load_dataset,
)
from huggingface_hub import Repository, create_repo
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import (
AddedToken,
HfArgumentParser,
Seq2SeqTrainingArguments,
WhisperConfig,
WhisperFeatureExtractor,
WhisperForConditionalGeneration,
WhisperProcessor,
WhisperTokenizerFast,
get_scheduler,
set_seed,
)
from transformers.modeling_outputs import BaseModelOutput
from transformers.models.whisper.english_normalizer import BasicTextNormalizer, EnglishTextNormalizer
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
The provided code snippet includes necessary dependencies for implementing the `get_layers_to_supervise` function. Write a Python function `def get_layers_to_supervise(student_layers: int, teacher_layers: int) -> Dict` to solve the following problem:
Helper function to map the student layer i to the teacher layer j whose output we'd like them to emulate. Used for MSE loss terms in distillation (hidden-states and activations). Student layers are paired with teacher layers in equal increments, e.g. for a 12-layer model distilled to a 3-layer model, student layer 0 emulates teacher layer 3 (such that it behaves like the first 4 teacher layers), student layer 1 emulates teacher layer 7, and student layer 2 emulates teacher layer 11. This mapping is summarised by the dictionary: {0: 3, 1: 7, 2: 11}, which is precisely the output of this function for the arguments (student_layers=3, teacher_layers=12).
Here is the function:
def get_layers_to_supervise(student_layers: int, teacher_layers: int) -> Dict:
    """Helper function to map the student layer i to the teacher layer j whose output we'd like them to emulate. Used
    for MSE loss terms in distillation (hidden-states and activations). Student layers are paired with teacher layers
    in equal increments, e.g. for a 12-layer model distilled to a 3-layer model, student layer 0 emulates teacher layer
    3 (such that it behaves like the first 4 teacher layers), student layer 1 emulates teacher layer 7, and student layer
    2 emulates teacher layer 11. This mapping is summarised by the dictionary: {0: 3, 1: 7, 2: 11}, which is precisely
    the output of this function for the arguments (student_layers=3, teacher_layers=12)."""
    stride = teacher_layers // student_layers
    # evenly spaced teacher layer indices, one per student layer
    targets = np.linspace(stride - 1, teacher_layers - 1, student_layers, dtype=int)
    # guard against integer rounding: the last student layer always mirrors the final teacher layer
    targets[-1] = teacher_layers - 1
    return dict(enumerate(targets))
160,260 | import logging
import os
import re
import shutil
import sys
import time
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
import datasets
import evaluate
import numpy as np
import torch
import torch.nn as nn
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
from datasets import (
DatasetDict,
IterableDataset,
IterableDatasetDict,
concatenate_datasets,
interleave_datasets,
load_dataset,
)
from huggingface_hub import Repository, create_repo
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import (
AddedToken,
HfArgumentParser,
Seq2SeqTrainingArguments,
WhisperConfig,
WhisperFeatureExtractor,
WhisperForConditionalGeneration,
WhisperProcessor,
WhisperTokenizerFast,
get_scheduler,
set_seed,
)
from transformers.modeling_outputs import BaseModelOutput
from transformers.models.whisper.english_normalizer import BasicTextNormalizer, EnglishTextNormalizer
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
logger = get_logger(__name__)
def sorted_checkpoints(output_dir=None, checkpoint_prefix="checkpoint") -> List[str]:
    """Helper function to sort saved checkpoints from oldest to newest (by step number)."""
    step_pattern = re.compile(f".*{checkpoint_prefix}-([0-9]+)")
    indexed = []
    for candidate in Path(output_dir).glob(f"{checkpoint_prefix}-*"):
        # only checkpoint *directories* count; skip stray files
        if not os.path.isdir(candidate):
            continue
        path = str(candidate)
        match = step_pattern.match(path)
        if match is not None and match.groups() is not None:
            indexed.append((int(match.groups()[0]), path))
    return [path for _, path in sorted(indexed)]
The provided code snippet includes necessary dependencies for implementing the `rotate_checkpoints` function. Write a Python function `def rotate_checkpoints(save_total_limit=None, output_dir=None, checkpoint_prefix="checkpoint") -> None` to solve the following problem:
Helper function to delete old checkpoints.
Here is the function:
def rotate_checkpoints(save_total_limit=None, output_dir=None, checkpoint_prefix="checkpoint") -> None:
    """Helper function to delete old checkpoints, keeping at most `save_total_limit` of the newest."""
    if save_total_limit is None or save_total_limit <= 0:
        return
    # Check if we should delete older checkpoint(s)
    checkpoints = sorted_checkpoints(output_dir=output_dir, checkpoint_prefix=checkpoint_prefix)
    excess = len(checkpoints) - save_total_limit
    if excess <= 0:
        return
    # oldest checkpoints come first in the sorted list, so delete from the front
    for stale in checkpoints[:excess]:
        logger.info(f"Deleting older checkpoint [{stale}] due to args.save_total_limit")
        shutil.rmtree(stale, ignore_errors=True)
160,261 | import logging
import os
import re
import shutil
import sys
import time
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
import datasets
import evaluate
import numpy as np
import torch
import torch.nn as nn
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
from datasets import (
DatasetDict,
IterableDataset,
IterableDatasetDict,
concatenate_datasets,
interleave_datasets,
load_dataset,
)
from huggingface_hub import Repository, create_repo
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import (
AddedToken,
HfArgumentParser,
Seq2SeqTrainingArguments,
WhisperConfig,
WhisperFeatureExtractor,
WhisperForConditionalGeneration,
WhisperProcessor,
WhisperTokenizerFast,
get_scheduler,
set_seed,
)
from transformers.modeling_outputs import BaseModelOutput
from transformers.models.whisper.english_normalizer import BasicTextNormalizer, EnglishTextNormalizer
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# matches directories named "checkpoint-<step>-epoch-<epoch>"
_RE_CHECKPOINT = re.compile(r"^checkpoint-(\d+)-epoch-(\d+)$")


def get_last_checkpoint(folder):
    """Return the path of the highest-step checkpoint directory in `folder`, or None if there is none."""
    matching = [
        entry
        for entry in os.listdir(folder)
        if _RE_CHECKPOINT.search(entry) is not None and os.path.isdir(os.path.join(folder, entry))
    ]
    if not matching:
        return
    # pick the checkpoint with the largest step count (first capture group)
    latest = max(matching, key=lambda name: int(_RE_CHECKPOINT.search(name).groups()[0]))
    return os.path.join(folder, latest)
160,262 | import logging
import os
import re
import shutil
import sys
import time
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
import datasets
import evaluate
import numpy as np
import torch
import torch.nn as nn
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
from datasets import (
DatasetDict,
IterableDataset,
IterableDatasetDict,
concatenate_datasets,
interleave_datasets,
load_dataset,
)
from huggingface_hub import Repository, create_repo
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import (
AddedToken,
HfArgumentParser,
Seq2SeqTrainingArguments,
WhisperConfig,
WhisperFeatureExtractor,
WhisperForConditionalGeneration,
WhisperProcessor,
WhisperTokenizerFast,
get_scheduler,
set_seed,
)
from transformers.modeling_outputs import BaseModelOutput
from transformers.models.whisper.english_normalizer import BasicTextNormalizer, EnglishTextNormalizer
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
The provided code snippet includes necessary dependencies for implementing the `get_parameter_names` function. Write a Python function `def get_parameter_names(model, forbidden_layer_types, forbidden_module=None)` to solve the following problem:
Returns the names of the model parameters that are not inside a forbidden layer or forbidden module. Can be used to get a subset of parameter names for decay masks, or to exclude parameters from an optimiser (e.g. if the module is frozen).
Here is the function:
def get_parameter_names(model, forbidden_layer_types, forbidden_module=None):
    """
    Returns the names of the model parameters that are not inside a forbidden layer or forbidden module.
    Can be used to get a subset of parameter names for decay masks, or to exclude parameters from an optimiser
    (e.g. if the module is frozen).
    """
    names = []
    for child_name, child in model.named_children():
        # a child contributes nothing if its type, or the module itself, is forbidden
        is_forbidden = isinstance(child, tuple(forbidden_layer_types)) or (
            forbidden_module is not None and child in tuple(forbidden_module)
        )
        if is_forbidden:
            continue
        names.extend(
            f"{child_name}.{param_name}"
            for param_name in get_parameter_names(child, forbidden_layer_types, forbidden_module)
        )
    # Add model specific parameters (defined with nn.Parameter) since they are not in any child.
    names.extend(model._parameters.keys())
    return names
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.