def noise_vector(x, y, z, int_x, int_y, int_z, seed_offset):
vector = _get_vector(int_x, int_y, int_z, seed_offset)
diff_vector = ((x - int_x), (y - int_y), (z - int_z))
return (((vector[0] * diff_vector[0]) + (vector[1] * diff_vector[1])) + (vector[2] * diff_vector[2]))
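# noise_vector is the per-corner term of Perlin-style gradient noise: the dot
# product of the lattice gradient returned by _get_vector (defined elsewhere,
# seeded via seed_offset) with the sample point's offset from that lattice
# corner.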
class VideoTestVimeo90KDataset(data.Dataset):
def __init__(self, opt):
super(VideoTestVimeo90KDataset, self).__init__()
self.opt = opt
self.cache_data = opt['cache_data']
if self.cache_data:
raise NotImplementedError('cache_data in Vimeo90K-Test dataset is not implemented.')
(self.gt_root, self.lq_root) = (opt['dataroot_gt'], opt['dataroot_lq'])
self.data_info = {'lq_path': [], 'gt_path': [], 'folder': [], 'idx': [], 'border': []}
neighbor_list = [(i + ((9 - opt['num_frame']) // 2)) for i in range(opt['num_frame'])]
self.file_client = None
self.io_backend_opt = opt['io_backend']
assert (self.io_backend_opt['type'] != 'lmdb'), 'No need to use lmdb during validation/test.'
logger = get_root_logger()
logger.info(f"Generate data info for VideoTestDataset - {opt['name']}")
with open(opt['meta_info_file'], 'r') as fin:
subfolders = [line.split(' ')[0] for line in fin]
for (idx, subfolder) in enumerate(subfolders):
gt_path = osp.join(self.gt_root, subfolder, 'im4.png')
self.data_info['gt_path'].append(gt_path)
lq_paths = [osp.join(self.lq_root, subfolder, f'im{i}.png') for i in neighbor_list]
self.data_info['lq_path'].append(lq_paths)
self.data_info['folder'].append('vimeo90k')
self.data_info['idx'].append(f'{idx}/{len(subfolders)}')
self.data_info['border'].append(0)
def __getitem__(self, index):
lq_path = self.data_info['lq_path'][index]
gt_path = self.data_info['gt_path'][index]
imgs_lq = util.read_img_seq(lq_path)
img_gt = util.read_img_seq([gt_path])
img_gt.squeeze_(0)
return {'lq': imgs_lq, 'gt': img_gt, 'folder': self.data_info['folder'][index], 'idx': self.data_info['idx'][index], 'border': self.data_info['border'][index], 'lq_path': lq_path[(self.opt['num_frame'] // 2)]}
def __len__(self):
return len(self.data_info['gt_path'])
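# For num_frame=7, neighbor_list above is [1, 2, ..., 7]: frames im1.png
# through im7.png of each clip, centered on the ground-truth frame im4.png.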
def ParseLstmDelayString(lstm_delay):
split1 = lstm_delay.split(' ')
lstm_delay_array = []
try:
for i in range(len(split1)):
indexes = [int(x) for x in split1[i].strip().lstrip('[').rstrip(']').strip().split(',')]
if (len(indexes) < 1):
raise ValueError(('invalid --lstm-delay argument, too-short element: ' + lstm_delay))
elif ((len(indexes) == 2) and ((indexes[0] * indexes[1]) >= 0)):
raise ValueError('Warning: {} is not a standard BLSTM mode. There should be a negative delay for the forward, and a positive delay for the backward.'.format(indexes))
if ((len(indexes) == 2) and (indexes[0] > 0)):
(indexes[0], indexes[1]) = (indexes[1], indexes[0])
lstm_delay_array.append(indexes)
except ValueError as e:
raise ValueError('invalid --lstm-delay argument {}: {}'.format(lstm_delay, e))
return lstm_delay_array
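# Example: ParseLstmDelayString('[-3,3] -3') -> [[-3, 3], [-3]]; a two-element
# entry given as '[3,-3]' is swapped to [-3, 3] (forward delay negative,
# backward delay positive).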
class MultiLabelField(Field[torch.Tensor]):
_already_warned_namespaces: Set[str] = set()
def __init__(self, labels: Sequence[Union[(str, int)]], label_namespace: str='labels', skip_indexing: bool=False, num_labels: Optional[int]=None) -> None:
self.labels = labels
self._label_namespace = label_namespace
self._label_ids = None
self._maybe_warn_for_namespace(label_namespace)
self._num_labels = num_labels
if skip_indexing:
if (not all((isinstance(label, int) for label in labels))):
raise ConfigurationError('In order to skip indexing, your labels must be integers. Found labels = {}'.format(labels))
if (not num_labels):
raise ConfigurationError("In order to skip indexing, num_labels can't be None.")
if (not all(((cast(int, label) < num_labels) for label in labels))):
raise ConfigurationError('All labels should be < num_labels. Found num_labels = {} and labels = {} '.format(num_labels, labels))
self._label_ids = labels
elif (not all((isinstance(label, str) for label in labels))):
raise ConfigurationError('MultiLabelFields expects string labels if skip_indexing=False. Found labels: {}'.format(labels))
def _maybe_warn_for_namespace(self, label_namespace: str) -> None:
if (not (label_namespace.endswith('labels') or label_namespace.endswith('tags'))):
if (label_namespace not in self._already_warned_namespaces):
logger.warning("Your label namespace was '%s'. We recommend you use a namespace ending with 'labels' or 'tags', so we don't add UNK and PAD tokens by default to your vocabulary. See documentation for `non_padded_namespaces` parameter in Vocabulary.", self._label_namespace)
self._already_warned_namespaces.add(label_namespace)
def count_vocab_items(self, counter: Dict[(str, Dict[(str, int)])]):
if (self._label_ids is None):
for label in self.labels:
counter[self._label_namespace][label] += 1
def index(self, vocab: Vocabulary):
if (self._label_ids is None):
self._label_ids = [vocab.get_token_index(label, self._label_namespace) for label in self.labels]
if (not self._num_labels):
self._num_labels = vocab.get_vocab_size(self._label_namespace)
def get_padding_lengths(self) -> Dict[(str, int)]:
return {}
def as_tensor(self, padding_lengths: Dict[(str, int)]) -> torch.Tensor:
tensor = torch.zeros(self._num_labels)
if self._label_ids:
tensor.scatter_(0, torch.LongTensor(self._label_ids), 1)
return tensor
def empty_field(self):
return MultiLabelField([], self._label_namespace, skip_indexing=True)
def __str__(self) -> str:
return f"MultiLabelField with labels: {self.labels} in namespace: '{self._label_namespace}'.'" |
def main():
args = parse_args()
neural_engine_graph = diffusion_utils.neural_engine_init(args.ir_path)
if (args.pipeline == 'text2img'):
dpm = DPMSolverMultistepScheduler.from_pretrained(args.input_model, subfolder='scheduler')
pipe = diffusion_utils.StableDiffusionPipeline.from_pretrained(args.input_model, scheduler=dpm)
pipe.safety_checker = (lambda images, clip_input: (images, False))
generator = torch.Generator('cpu').manual_seed(args.seed)
if (args.mode == 'latency'):
benchmark(pipe, neural_engine_graph, generator, args.steps)
return
if (args.mode == 'accuracy'):
from diffusers import StableDiffusionPipeline
original_pipe = StableDiffusionPipeline.from_pretrained(args.input_model)
accuracy(pipe, original_pipe, neural_engine_graph, generator)
return
executor(pipe, neural_engine_graph, args.prompt, args.name, args.size, generator)
if (args.pipeline == 'img2img'):
from diffusion_utils_img2img import StableDiffusionImg2ImgPipeline
import requests
from PIL import Image
from io import BytesIO
pipe = StableDiffusionImg2ImgPipeline.from_pretrained(args.input_model)
url = ''  # image URL truncated in the source; substitute a valid init-image URL here
response = requests.get(url)
init_image = Image.open(BytesIO(response.content)).convert('RGB')
init_image = init_image.resize((768, 512))
prompt = 'A fantasy landscape, trending on artstation'
images = pipe(prompt=prompt, image=init_image, engine_graph=neural_engine_graph, strength=0.75, guidance_scale=7.5).images
images[0].save('fantasy_landscape.png')
if (args.pipeline == 'instruction-tuning-sd'):
from ITREX_StableDiffusionInstructPix2PixPipeline import StableDiffusionInstructPix2PixPipeline
from diffusers.utils import load_image
pipeline = StableDiffusionInstructPix2PixPipeline.from_pretrained(args.input_model, torch_dtype=torch.float32, use_auth_token=True)
image_path = args.image
image = load_image(image_path)
image = image.resize((512, 512))
image = pipeline(args.prompts, image=image, engine_graph=neural_engine_graph, num_inference_steps=args.steps).images[0]
save_time = time.strftime('_%H_%M_%S')
image.save((('image' + save_time) + '.png'))
return
class SuperGlueConfig(datasets.BuilderConfig):
def __init__(self, features, data_url, citation, url, label_classes=('False', 'True'), few_shot_url=None, is_few_shot=False, train_path=None, pseudolabels_provided=False, **kwargs):
super(SuperGlueConfig, self).__init__(version=datasets.Version('1.0.3'), **kwargs)
self.features = features
self.label_classes = label_classes
self.data_url = data_url
self.citation = citation
self.url = url
self.few_shot_url = few_shot_url
self.is_few_shot = is_few_shot
self.train_path = train_path
self.pseudolabels_provided = pseudolabels_provided
@unittest.skipIf(not torch.cuda.is_available(), 'No gpu available for cuda tests')  # decorator name assumed (unittest.skipIf); only the condition/message pair survived
class BF16GradScalerTest(unittest.TestCase):
def setUp(self) -> None:
self.x = torch.randn(4, 4).cuda()
self.m = torch.nn.Linear(4, 1).cuda()
kwargs = {'lr': 0.1}
self.o = torch.optim.SGD(self.m.parameters(), **kwargs)
return super().setUp()
def tearDown(self) -> None:
del self.x
del self.m
del self.o
return super().tearDown()
def test_bf16_scaler_has_no_overflow(self):
scaler = BF16GradScaler()
with torch.cuda.amp.autocast(dtype=torch.bfloat16):
y = self.m(self.x)
loss = y.mean()
scaler.scale(loss).backward()
scaler.step(self.o)
scaler.update()
self.assertFalse(scaler.has_overflow())
self.assertEqual(scaler._init_scale, 1.0)
self.assertEqual(scaler._backoff_factor, 1.0)
self.assertEqual(scaler._growth_factor, 1.0)
def test_bf16_scaler_has_overflow(self):
scaler = BF16GradScaler()
with torch.cuda.amp.autocast(dtype=torch.bfloat16):
y = self.m(self.x)
loss = y.mean()
scaler.scale(loss).backward()
with torch.no_grad():
self.m.weight.grad.fill_(float('NaN'))
scaler.step(self.o)
scaler.update()
self.assertTrue(scaler.has_overflow())
self.assertEqual(scaler._init_scale, 1.0)
self.assertEqual(scaler._backoff_factor, 1.0)
self.assertEqual(scaler._growth_factor, 1.0)
def test_bf16_scaler_two_steps(self):
scaler = BF16GradScaler()
for i in range(2):
with torch.cuda.amp.autocast(dtype=torch.bfloat16):
y = self.m(self.x)
loss = y.mean()
scaler.scale(loss).backward()
if (i == 0):
with torch.no_grad():
self.m.weight.grad.fill_(float('NaN'))
scaler.step(self.o)
scaler.update()
self.o.zero_grad()
if (i == 0):
self.assertTrue(scaler.has_overflow())
elif (i == 1):
self.assertFalse(scaler.has_overflow())
def test_bf16_sharded_scaler_has_overflow(self):
pg = DummyProcessGroup(rank=0, size=1)
scaler = BF16ShardedGradScaler(process_group=pg)
loss = torch.full((1,), 4.0, dtype=torch.float32, device='cpu')
t0 = torch.tensor([float('inf')], dtype=torch.float32, device='cpu')
t0.grad = t0.clone()
opt = torch.optim.SGD([t0], lr=1.0)
scaler.scale(loss)
scaler.step(opt)
self.assertTrue(scaler.has_overflow())
class MultiTaskModel(nn.Module):
def __init__(self, args, pair_encoder, FDS=None):
super(MultiTaskModel, self).__init__()
self.args = args
self.pair_encoder = pair_encoder
self.FDS = FDS
self.start_smooth = args.start_smooth
def build_regressor(self, task, d_inp):
layer = nn.Linear(d_inp, 1)
setattr(self, ('%s_pred_layer' % task.name), layer)
def forward(self, task=None, epoch=None, input1=None, input2=None, mask1=None, mask2=None, label=None, weight=None):
pred_layer = getattr(self, ('%s_pred_layer' % task.name))
pair_emb = self.pair_encoder(input1, input2, mask1, mask2)
pair_emb_s = pair_emb
if (self.training and (self.FDS is not None)):
if (epoch >= self.start_smooth):
pair_emb_s = self.FDS.smooth(pair_emb_s, label, epoch)
logits = pred_layer(pair_emb_s)
out = {}
if (self.training and (self.FDS is not None)):
out['embs'] = pair_emb
out['labels'] = label
if (self.args.loss == 'huber'):
loss = globals()[f'weighted_{self.args.loss}_loss'](inputs=logits, targets=(label / torch.tensor(5.0).cuda()), weights=weight, beta=self.args.huber_beta)
else:
loss = globals()[f'weighted_{self.args.loss}_loss'](inputs=logits, targets=(label / torch.tensor(5.0).cuda()), weights=weight)
out['logits'] = logits
label = label.squeeze((- 1)).data.cpu().numpy()
logits = logits.squeeze((- 1)).data.cpu().numpy()
task.scorer(logits, label)
out['loss'] = loss
return out
@register_bpe('bytes')  # decorator name assumed, per the fairseq register_bpe convention
class Bytes(object):
def __init__(self, *unused):
pass
@staticmethod
def add_args(parser):
pass
@staticmethod
def encode(x: str) -> str:
encoded = byte_encode(x)
escaped = encoded.replace(SPACE, SPACE_ESCAPE)
return SPACE.join(list(escaped))
@staticmethod
def decode(x: str) -> str:
unescaped = x.replace(SPACE, '').replace(SPACE_ESCAPE, SPACE)
return smart_byte_decode(unescaped)
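# Round-trip sketch of the space-escaping above. SPACE and SPACE_ESCAPE are
# module-level constants in the original; chr(32) and chr(0x2581) are assumed
# here, following the usual fairseq byte_utils values.
SPACE, SPACE_ESCAPE = chr(32), chr(0x2581)
escaped = 'a b'.replace(SPACE, SPACE_ESCAPE)
tokens = SPACE.join(list(escaped))  # one character per token
restored = tokens.replace(SPACE, '').replace(SPACE_ESCAPE, SPACE)
assert restored == 'a b'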
def train_lower(u0, v0, ka0, kb0, x_minus, x_plus, y_minus, y_plus, lr_x=0.001, lr_k=0.01, max_iter=100, print_info=True):
device = x_minus.device
x_best = torch.zeros(x_minus.shape).to(device)
y_best = torch.zeros(x_minus.shape).to(device)
a_best = torch.zeros(x_minus.shape).to(device)
b_best = torch.zeros(x_minus.shape).to(device)
c_best = torch.zeros(x_minus.shape).to(device)
ka_best = torch.zeros(x_minus.shape).to(device)
kb_best = torch.zeros(x_minus.shape).to(device)
cubic = ((((- (x_plus - x_minus)) * (y_plus - y_minus)) * torch.tanh(x_minus)) * torch.sigmoid(y_plus))
cubic = torch.clamp(cubic, min=0.001)
v_best = (((- cubic) / cubic) * 10000)
u = u0.data.clone()
v = v0.data.clone()
ka = ka0.data.clone()
kb = kb0.data.clone()
u.requires_grad = True
v.requires_grad = True
ka.requires_grad = True
kb.requires_grad = True
optimizer_x = optim.Adam([u, v], lr=lr_x)
optimizer_k = optim.Adam([ka, kb], lr=lr_k)
max_iter = max_iter
for i in range(max_iter):
slop = 0.01
idx_x = (u <= x_plus).float()
u_minus = ((u * idx_x) + ((1 - idx_x) * ((slop * (u - x_plus)) + x_plus)))
v_plus = F.leaky_relu(v, negative_slope=slop)
idx_x = (u >= x_minus).float()
x = ((u_minus * idx_x) + ((1 - idx_x) * ((slop * (u_minus - x_minus)) + x_minus)))
idx_y = (v <= y_plus).float()
y = ((v_plus * idx_y) + ((1 - idx_y) * ((slop * (v_plus - y_plus)) + y_plus)))
(a, b, c) = plane(x, y)
idx = (x <= x_minus).float()
a = (a - (F.leaky_relu(ka, negative_slope=slop) * idx))
c = (c + ((F.leaky_relu(ka, negative_slope=slop) * x) * idx))
idx = (y >= y_plus).float()
b = (b + (F.leaky_relu(kb, negative_slope=slop) * idx))
c = (c - ((F.leaky_relu(kb, negative_slope=slop) * y) * idx))
(q_loss, valid) = qualification_loss(x_minus, x_plus, y_minus, y_plus, a, b, c, confidence=(- 0))
v_loss = get_volume(a, b, c, x_minus, x_plus, y_minus, y_plus)
v_loss = (v_loss / cubic)
if print_info:
print(('23l q loss: %.4f volume: %.4f' % (q_loss.mean().item(), v_loss.mean().item())))
loss = (q_loss - (v_loss * (valid.float() + 0.01))).mean()
best = ((v_loss > v_best) * valid)
v_best[best] = v_loss[best]
x_best[best] = x[best]
y_best[best] = y[best]
ka_best[best] = ka[best]
kb_best[best] = kb[best]
a_best[best] = a[best]
b_best[best] = b[best]
c_best[best] = c[best]
optimizer_x.zero_grad()
optimizer_k.zero_grad()
loss.backward()
idx = ((y > y_plus) * (kb > 0))
v.grad = (v.grad * (1 - idx.float()))
idx = ((x < x_minus) * (ka > 0))
u.grad = (u.grad * (1 - idx.float()))
optimizer_x.step()
optimizer_k.step()
return (x_best, y_best, ka_best, kb_best, a_best, b_best, c_best, v_best)
def fake_env(env):
if hasattr(env, 'step_wait'):
if hasattr(env, 'venv'):
original = env.venv
(child, original) = fake_env(original)
env.venv = child
return (env, original)
else:
return (TestingVecEnv(env.num_envs, env.observation_space, env.action_space), env)
elif hasattr(env, 'env'):
original = env.env
(child, original) = fake_env(original)
env.env = child
return (env, original)
else:
return (TestingEnv(env.observation_space, env.action_space), env)
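# fake_env walks the wrapper chain (via the venv/env attributes) down to the
# innermost environment, swaps it for a TestingVecEnv/TestingEnv stub with the
# same spaces, and returns both the patched stack and the original innermost
# env so it can be restored afterwards.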
def save_results(content, save_path, ori_shape):
ori_len = np.prod(ori_shape)
scale = int(np.sqrt((len(content) / ori_len)))
target_size = [int((size * scale)) for size in ori_shape[:2][::(- 1)]]
img = Image.frombytes('RGB', target_size, content, 'raw', 'BGR', 0, 0)
img.save(save_path)
def expr_to_dict(e):
d = None
if isinstance(e, Var):
d = {'type': 'Var', 'name': e.name, 'primed': e.primed}
elif isinstance(e, Const):
d = {'type': 'Const', 'value': e.value}
else:
d = {'type': 'Op', 'name': e.name, 'args': list(map(expr_to_dict, e.args))}
d['original'] = e.original
return d
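# Example output shape, assuming a hypothetical Op('+', (Var('x'), Const(1))):
# {'type': 'Op', 'name': '+',
#  'args': [{'type': 'Var', 'name': 'x', 'primed': False, 'original': ...},
#           {'type': 'Const', 'value': 1, 'original': ...}],
#  'original': ...}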
def test_loss():
self = PartA2BboxHead(num_classes=3, seg_in_channels=16, part_in_channels=4, seg_conv_channels=[64, 64], part_conv_channels=[64, 64], merge_conv_channels=[128, 128], down_conv_channels=[128, 256], shared_fc_channels=[256, 512, 512, 512], cls_channels=[256, 256], reg_channels=[256, 256])
cls_score = torch.Tensor([[(- 3.681)], [(- 3.9413)], [(- 5.3971)], [(- 17.1281)], [(- 5.9434)], [(- 6.2251)]])
bbox_pred = torch.Tensor([[(- 0.0063016), (- 0.0052294), (- 0.012793), (- 0.010602), (- 0.), 0.0092471, 0.0073514], [(- 0.011975), (- 0.011578), (- 0.031219), 0.027754, 0.0069775, 0., 0.], [0.0037539, (- 0.0091897), (- 0.0053666), (- 1.038e-05), 0.0043467, 0.004247, 0.0018355], [(- 0.076093), (- 0.12497), (- 0.092942), 0.021404, 0.02375, 0.10365, (- 0.013042)], [0.0027577, (- 0.011514), (- 0.011097), (- 0.0024946), 0.0023268, 0.0016797, (- 0.0014076)], [0.0039635, (- 0.0078551), (- 0.0035125), 0., 0.0097042, 0.0017499, (- 0.0051254)]])
rois = torch.Tensor([[0.0, 13.3711, (- 12.5483), (- 1.9306), 1.7027, 4.2836, 1.4283, (- 1.1499)], [0.0, 19.2472, (- 7.2655), (- 10.6641), 3.3078, 83.1976, 29.3337, 2.4501], [0.0, 13.8012, (- 10.9791), (- 3.0617), 0.2504, 1.2518, 0.8807, 3.1034], [0.0, 16.2736, (- 9.0284), (- 2.0494), 8.2697, 31.2336, 9.1006, 1.9208], [0.0, 10.4462, (- 13.6879), (- 3.1869), 7.3366, 0.3518, 1.7199, (- 0.7225)], [0.0, 11.3374, (- 13.6671), (- 3.2332), 4.9934, 0.375, 1.6033, (- 0.9665)]])
labels = torch.Tensor([0.71, 0.0, 0.0, 0.0, 0.0, 0.0])
bbox_targets = torch.Tensor([[0.0598, 0.0243, (- 0.0984), (- 0.0454), 0.0066, 0.1114, 0.1714]])
pos_gt_bboxes = torch.Tensor([[13.6686, (- 12.5586), (- 2.1553), 1.6271, 4.3119, 1.5966, 2.1631]])
reg_mask = torch.Tensor([1, 0, 0, 0, 0, 0])
label_weights = torch.Tensor([0.0078, 0.0078, 0.0078, 0.0078, 0.0078, 0.0078])
bbox_weights = torch.Tensor([1.0, 0.0, 0.0, 0.0, 0.0, 0.0])
loss = self.loss(cls_score, bbox_pred, rois, labels, bbox_targets, pos_gt_bboxes, reg_mask, label_weights, bbox_weights)
expected_loss_cls = torch.Tensor([0.020579, 0., 3.5252e-05, 0.0, 2.0433e-05, 1.5422e-05])
expected_loss_bbox = torch.as_tensor(0.0622)
expected_loss_corner = torch.Tensor([0.1379])
assert torch.allclose(loss['loss_cls'], expected_loss_cls, 0.001)
assert torch.allclose(loss['loss_bbox'], expected_loss_bbox, 0.001)
assert torch.allclose(loss['loss_corner'], expected_loss_corner, 0.001)
class API():
def __init__(self, host=None, port=None, name='serving_stream'):
self.name = name
self.host = (host if host else 'localhost')
self.port = (port if port else '6379')
self.db = redis.StrictRedis(host=self.host, port=self.port, db=0)
try:
self.db.xgroup_create(name, 'serving')
except Exception:
print('redis group exists; will not create a new one')
def contains_sub_symbol(identifier):
if ('`' in identifier):
new_id = identifier
results = re.findall('`[^`]*`', new_id)
for item in results:
new_id = new_id.replace(item, '')
return ('_' in new_id)
return ('_' in identifier)
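# Examples: underscores inside backquoted sub-symbols do not count.
# contains_sub_symbol('foo_bar')   -> True
# contains_sub_symbol('`a_b`')     -> False
# contains_sub_symbol('`a_b`_c')   -> True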
def discriminator_wgan_gp(img, dim=64, reuse=True, training=True):
conv_ln_lrelu = partial(conv, normalizer_fn=ln, activation_fn=lrelu, biases_initializer=None)
with tf.variable_scope('discriminator', reuse=reuse):
y = lrelu(conv(img, dim, 5, 2))
y = conv_ln_lrelu(y, (dim * 2), 5, 2)
y = conv_ln_lrelu(y, (dim * 4), 5, 2)
y = conv_ln_lrelu(y, (dim * 8), 5, 2)
logit = fc(y, 1)
return logit
def note_sequence_to_onsets_and_offsets_and_programs(ns: note_seq.NoteSequence) -> Tuple[(Sequence[float], Sequence[NoteEventData])]:
notes = sorted(ns.notes, key=(lambda note: (note.is_drum, note.program, note.pitch)))
times = ([note.end_time for note in notes if (not note.is_drum)] + [note.start_time for note in notes])
values = ([NoteEventData(pitch=note.pitch, velocity=0, program=note.program, is_drum=False) for note in notes if (not note.is_drum)] + [NoteEventData(pitch=note.pitch, velocity=note.velocity, program=note.program, is_drum=note.is_drum) for note in notes])
return (times, values)
def corrector_absresendgame_set(tol):
from phcpy.phcpy2c3 import py2c_set_value_of_continuation_parameter as set
return set(24, tol)
def get_anno_ids(anno_path, pic_to_tensor_function, threshold):
pic = Image.open(anno_path)
tensor = pic_to_tensor_function(pic)
values = (tensor.view((- 1)).bincount() > threshold).nonzero().view((- 1)).tolist()
if (0 in values):
values.remove(0)
if (255 in values):
values.remove(255)
return values
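# get_anno_ids returns the annotation class ids whose pixel count exceeds
# `threshold`, discarding 0 (background) and 255 (the conventional ignore
# index).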
def clean_csv(input_file, output_file):
input_r = open(input_file, 'r').read()
lines = input_r.split('\n')
print(len(lines))
for line in lines[:10]:
print(line[(- 3):])
@PIPELINES.register_module()  # registry decorator name assumed, per the mmdet/mmocr convention
class PSENetTargets(PANetTargets):
def __init__(self, shrink_ratio=(1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4), max_shrink=20):
super().__init__(shrink_ratio=shrink_ratio, max_shrink=max_shrink)
class FlaxStableDiffusionXLPipeline(metaclass=DummyObject):
_backends = ['flax', 'transformers']
def __init__(self, *args, **kwargs):
requires_backends(self, ['flax', 'transformers'])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ['flax', 'transformers'])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ['flax', 'transformers'])
def padded_nonzero(tensor, padding=0):
indices = padded_stack([tensor[i].nonzero().view((- 1)) for i in range(tensor.shape[0])], padding)
return indices
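# padded_nonzero returns, for each row of `tensor`, the indices of its nonzero
# entries, right-padded to the longest row by padded_stack (defined elsewhere
# in the module) using `padding`.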
def read_CR(path, seed=1234):
file_path = os.path.join(path, 'custrev.all')
(data, labels) = read_corpus(file_path)
random.seed(seed)
perm = list(range(len(data)))
random.shuffle(perm)
data = [data[i] for i in perm]
labels = [labels[i] for i in perm]
return (data, labels)
class SqueezeAndExcitationBlock2D(_SqueezeAndExcitationBlockND):
def __init__(self, in_channels, reduction=16, dimension=2, **kwargs):
super().__init__(in_channels, reduction, dimension, **kwargs)
def _check_input_dim(self, input):
if (input.dim() != 4):
raise ValueError('expected 4D input (got {}D input)'.format(input.dim()))
class RoundSTE(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return torch.round(x)
@staticmethod
def backward(ctx, grad):
return grad
@staticmethod
def reverse(ctx, x):
return ((x + torch.rand_like(x)) - 0.5)
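# Straight-through-estimator check for RoundSTE: hard rounding in the forward
# pass, identity gradient in the backward pass (a minimal sketch).
import torch
x = torch.tensor([0.2, 1.7], requires_grad=True)
y = RoundSTE.apply(x)
y.sum().backward()
print(y.detach(), x.grad)  # tensor([0., 2.]) tensor([1., 1.])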
class AugmentationList(Augmentation):
def __init__(self, augs):
super().__init__()
self.augs = [_transform_to_aug(x) for x in augs]
def __call__(self, aug_input) -> TransformList:
tfms = []
for x in self.augs:
tfm = x(aug_input)
tfms.append(tfm)
return TransformList(tfms)
def __repr__(self):
msgs = [str(x) for x in self.augs]
return 'AugmentationList[{}]'.format(', '.join(msgs))
__str__ = __repr__
class DiffusionDecoder():
def __init__(self, model: MultinomialDiffusion) -> None:
self.model = model
self.time_steps = model.time_steps
self.residues = model.residues
self.loss_function = DiffusionLoss(model=self.model)
def decode(self, spectra: torch.FloatTensor, spectra_padding_mask: torch.BoolTensor, precursors: torch.FloatTensor, initial_sequence: (None | torch.LongTensor)=None, start_step: int=DIFFUSION_START_STEP, eval_steps: tuple[(int, ...)]=DIFFUSION_EVAL_STEPS) -> tuple[(list[list[str]], list[float])]:
device = spectra.device
sequence_length = self.model.config.max_length
(batch_size, num_classes) = (spectra.size(0), len(self.model.residues))
if (initial_sequence is None):
initial_distribution = Categorical((torch.ones(batch_size, sequence_length, num_classes) / num_classes))
sample = initial_distribution.sample().to(device)
start_step = (self.time_steps - 1)
else:
sample = initial_sequence
peptide_mask = torch.zeros(batch_size, sequence_length).bool().to(device)
log_probs = torch.zeros((batch_size, sequence_length)).to(device)
for t in range(start_step, (- 1), (- 1)):
times = (t * torch.ones((batch_size,))).long().to(spectra.device)
distribution = Categorical(logits=self.model.reverse_distribution(x_t=sample, time=times, spectra=spectra, spectra_padding_mask=spectra_padding_mask, precursors=precursors, x_padding_mask=peptide_mask))
sample = distribution.sample()
losses = []
for t in eval_steps:
times = (t * torch.ones((batch_size,))).long().to(spectra.device)
losses.append(self.loss_function._compute_loss(x_0=sample, t=times, spectra=spectra, spectra_padding_mask=spectra_padding_mask, precursors=precursors, x_padding_mask=peptide_mask))
log_probs = (- torch.stack(losses).mean(axis=0).cpu()).tolist()
sequences = self._extract_predictions(sample)
return (sequences, log_probs)
def _extract_predictions(self, sample: torch.LongTensor) -> list[list[str]]:
output = []
for sequence in sample:
tokens = sequence.tolist()
if (self.residues.eos_index in sequence):
peptide = tokens[:tokens.index(self.residues.eos_index)]
else:
peptide = tokens
output.append(self.residues.decode(peptide))
return output
class SpatialPath(BaseModule):
def __init__(self, in_channels=3, num_channels=(64, 64, 64, 128), conv_cfg=None, norm_cfg=dict(type='BN'), act_cfg=dict(type='ReLU'), init_cfg=None):
super(SpatialPath, self).__init__(init_cfg=init_cfg)
assert (len(num_channels) == 4), 'Length of input channels of Spatial Path must be 4!'
self.layers = []
for i in range(len(num_channels)):
layer_name = f'layer{(i + 1)}'
self.layers.append(layer_name)
if (i == 0):
self.add_module(layer_name, ConvModule(in_channels=in_channels, out_channels=num_channels[i], kernel_size=7, stride=2, padding=3, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg))
elif (i == (len(num_channels) - 1)):
self.add_module(layer_name, ConvModule(in_channels=num_channels[(i - 1)], out_channels=num_channels[i], kernel_size=1, stride=1, padding=0, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg))
else:
self.add_module(layer_name, ConvModule(in_channels=num_channels[(i - 1)], out_channels=num_channels[i], kernel_size=3, stride=2, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg))
def forward(self, x):
for (i, layer_name) in enumerate(self.layers):
layer_stage = getattr(self, layer_name)
x = layer_stage(x)
return x
def r_precision(r):
r = (np.asarray(r) != 0)
z = r.nonzero()[0]
if (not z.size):
return 0.0
return np.mean(r[:(z[(- 1)] + 1)])
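# Worked example: for r = [0, 1, 1] the last relevant item is at index 2, so
# r_precision = mean(r[:3]) = 2/3.
import numpy as np
assert abs(r_precision([0, 1, 1]) - 2.0 / 3.0) < 1e-9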
def get_local_path_from_repo_id(repo_id, models_root=os.getenv('HF_HOME')):
if (models_root is None):
invalidInputError(False, errMsg='To use repo_id, you must set environment variable `HF_HOME`.')
(repo_id, model_id) = repo_id.split('/')
cache_dir = os.path.join(models_root, 'diffusers', f'models--{repo_id}--{model_id}')
model_path = get_snapshot_dir_from_cache_dir(cache_dir)
return model_path
def remove_node_by_span(tree, span, label, position, in_place):
nodes = tree.get_nodes('all', span[0], span[1])
nodes = [node for node in nodes if (node.label == label)]
if (len(nodes) <= position):
return (False, 'No node matching {} ({}, {} - {}) found'.format(position, label, *span))
return remove_node_by_node(nodes[position], in_place)
class DPM_Solver():
def __init__(self, model_fn, noise_schedule, algorithm_type='dpmsolver++', correcting_x0_fn=None, correcting_xt_fn=None, thresholding_max_val=1.0, dynamic_thresholding_ratio=0.995):
self.model = (lambda x, t: model_fn(x, t.expand(x.shape[0])))
self.noise_schedule = noise_schedule
assert (algorithm_type in ['dpmsolver', 'dpmsolver++'])
self.algorithm_type = algorithm_type
if (correcting_x0_fn == 'dynamic_thresholding'):
self.correcting_x0_fn = self.dynamic_thresholding_fn
else:
self.correcting_x0_fn = correcting_x0_fn
self.correcting_xt_fn = correcting_xt_fn
self.dynamic_thresholding_ratio = dynamic_thresholding_ratio
self.thresholding_max_val = thresholding_max_val
def dynamic_thresholding_fn(self, x0, t):
dims = x0.dim()
p = self.dynamic_thresholding_ratio
s = torch.quantile(torch.abs(x0).reshape((x0.shape[0], (- 1))), p, dim=1)
s = expand_dims(torch.maximum(s, (self.thresholding_max_val * torch.ones_like(s).to(s.device))), dims)
x0 = (torch.clamp(x0, (- s), s) / s)
return x0
def noise_prediction_fn(self, x, t):
return self.model(x, t)
def data_prediction_fn(self, x, t):
noise = self.noise_prediction_fn(x, t)
(alpha_t, sigma_t) = (self.noise_schedule.marginal_alpha(t), self.noise_schedule.marginal_std(t))
x0 = ((x - (sigma_t * noise)) / alpha_t)
if (self.correcting_x0_fn is not None):
x0 = self.correcting_x0_fn(x0, t)
return x0
def model_fn(self, x, t):
if (self.algorithm_type == 'dpmsolver++'):
return self.data_prediction_fn(x, t)
else:
return self.noise_prediction_fn(x, t)
def get_time_steps(self, skip_type, t_T, t_0, N, device):
if (skip_type == 'logSNR'):
lambda_T = self.noise_schedule.marginal_lambda(torch.tensor(t_T).to(device))
lambda_0 = self.noise_schedule.marginal_lambda(torch.tensor(t_0).to(device))
logSNR_steps = torch.linspace(lambda_T.cpu().item(), lambda_0.cpu().item(), (N + 1)).to(device)
return self.noise_schedule.inverse_lambda(logSNR_steps)
elif (skip_type == 'time_uniform'):
return torch.linspace(t_T, t_0, (N + 1)).to(device)
elif (skip_type == 'time_quadratic'):
t_order = 2
t = torch.linspace((t_T ** (1.0 / t_order)), (t_0 ** (1.0 / t_order)), (N + 1)).pow(t_order).to(device)
return t
else:
raise ValueError("Unsupported skip_type {}, need to be 'logSNR' or 'time_uniform' or 'time_quadratic'".format(skip_type))
def get_orders_and_timesteps_for_singlestep_solver(self, steps, order, skip_type, t_T, t_0, device):
if (order == 3):
K = ((steps // 3) + 1)
if ((steps % 3) == 0):
orders = (([3] * (K - 2)) + [2, 1])
elif ((steps % 3) == 1):
orders = (([3] * (K - 1)) + [1])
else:
orders = (([3] * (K - 1)) + [2])
elif (order == 2):
if ((steps % 2) == 0):
K = (steps // 2)
orders = ([2] * K)
else:
K = ((steps // 2) + 1)
orders = (([2] * (K - 1)) + [1])
elif (order == 1):
K = 1
orders = ([1] * steps)
else:
raise ValueError("'order' must be '1' or '2' or '3'.")
if (skip_type == 'logSNR'):
timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, K, device)
else:
timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, steps, device)[torch.cumsum(torch.tensor(([0] + orders)), 0).to(device)]
return (timesteps_outer, orders)
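# Order-splitting example: steps=6 with order=3 yields K=3 and
# orders=[3, 2, 1] (six model evaluations in total); steps=5 yields [3, 2].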
def denoise_to_zero_fn(self, x, s):
return self.data_prediction_fn(x, s)
def dpm_solver_first_update(self, x, s, t, model_s=None, return_intermediate=False):
ns = self.noise_schedule
dims = x.dim()
(lambda_s, lambda_t) = (ns.marginal_lambda(s), ns.marginal_lambda(t))
h = (lambda_t - lambda_s)
(log_alpha_s, log_alpha_t) = (ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(t))
(sigma_s, sigma_t) = (ns.marginal_std(s), ns.marginal_std(t))
alpha_t = torch.exp(log_alpha_t)
if (self.algorithm_type == 'dpmsolver++'):
phi_1 = torch.expm1((- h))
if (model_s is None):
model_s = self.model_fn(x, s)
x_t = (((sigma_t / sigma_s) * x) - ((alpha_t * phi_1) * model_s))
if return_intermediate:
return (x_t, {'model_s': model_s})
else:
return x_t
else:
phi_1 = torch.expm1(h)
if (model_s is None):
model_s = self.model_fn(x, s)
x_t = ((torch.exp((log_alpha_t - log_alpha_s)) * x) - ((sigma_t * phi_1) * model_s))
if return_intermediate:
return (x_t, {'model_s': model_s})
else:
return x_t
def singlestep_dpm_solver_second_update(self, x, s, t, r1=0.5, model_s=None, return_intermediate=False, solver_type='dpmsolver'):
if (solver_type not in ['dpmsolver', 'taylor']):
raise ValueError("'solver_type' must be either 'dpmsolver' or 'taylor', got {}".format(solver_type))
if (r1 is None):
r1 = 0.5
ns = self.noise_schedule
(lambda_s, lambda_t) = (ns.marginal_lambda(s), ns.marginal_lambda(t))
h = (lambda_t - lambda_s)
lambda_s1 = (lambda_s + (r1 * h))
s1 = ns.inverse_lambda(lambda_s1)
(log_alpha_s, log_alpha_s1, log_alpha_t) = (ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(s1), ns.marginal_log_mean_coeff(t))
(sigma_s, sigma_s1, sigma_t) = (ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(t))
(alpha_s1, alpha_t) = (torch.exp(log_alpha_s1), torch.exp(log_alpha_t))
if (self.algorithm_type == 'dpmsolver++'):
phi_11 = torch.expm1(((- r1) * h))
phi_1 = torch.expm1((- h))
if (model_s is None):
model_s = self.model_fn(x, s)
x_s1 = (((sigma_s1 / sigma_s) * x) - ((alpha_s1 * phi_11) * model_s))
model_s1 = self.model_fn(x_s1, s1)
if (solver_type == 'dpmsolver'):
x_t = ((((sigma_t / sigma_s) * x) - ((alpha_t * phi_1) * model_s)) - (((0.5 / r1) * (alpha_t * phi_1)) * (model_s1 - model_s)))
elif (solver_type == 'taylor'):
x_t = ((((sigma_t / sigma_s) * x) - ((alpha_t * phi_1) * model_s)) + (((1.0 / r1) * (alpha_t * ((phi_1 / h) + 1.0))) * (model_s1 - model_s)))
else:
phi_11 = torch.expm1((r1 * h))
phi_1 = torch.expm1(h)
if (model_s is None):
model_s = self.model_fn(x, s)
x_s1 = ((torch.exp((log_alpha_s1 - log_alpha_s)) * x) - ((sigma_s1 * phi_11) * model_s))
model_s1 = self.model_fn(x_s1, s1)
if (solver_type == 'dpmsolver'):
x_t = (((torch.exp((log_alpha_t - log_alpha_s)) * x) - ((sigma_t * phi_1) * model_s)) - (((0.5 / r1) * (sigma_t * phi_1)) * (model_s1 - model_s)))
elif (solver_type == 'taylor'):
x_t = (((torch.exp((log_alpha_t - log_alpha_s)) * x) - ((sigma_t * phi_1) * model_s)) - (((1.0 / r1) * (sigma_t * ((phi_1 / h) - 1.0))) * (model_s1 - model_s)))
if return_intermediate:
return (x_t, {'model_s': model_s, 'model_s1': model_s1})
else:
return x_t
def singlestep_dpm_solver_third_update(self, x, s, t, r1=(1.0 / 3.0), r2=(2.0 / 3.0), model_s=None, model_s1=None, return_intermediate=False, solver_type='dpmsolver'):
if (solver_type not in ['dpmsolver', 'taylor']):
raise ValueError("'solver_type' must be either 'dpmsolver' or 'taylor', got {}".format(solver_type))
if (r1 is None):
r1 = (1.0 / 3.0)
if (r2 is None):
r2 = (2.0 / 3.0)
ns = self.noise_schedule
(lambda_s, lambda_t) = (ns.marginal_lambda(s), ns.marginal_lambda(t))
h = (lambda_t - lambda_s)
lambda_s1 = (lambda_s + (r1 * h))
lambda_s2 = (lambda_s + (r2 * h))
s1 = ns.inverse_lambda(lambda_s1)
s2 = ns.inverse_lambda(lambda_s2)
(log_alpha_s, log_alpha_s1, log_alpha_s2, log_alpha_t) = (ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(s1), ns.marginal_log_mean_coeff(s2), ns.marginal_log_mean_coeff(t))
(sigma_s, sigma_s1, sigma_s2, sigma_t) = (ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(s2), ns.marginal_std(t))
(alpha_s1, alpha_s2, alpha_t) = (torch.exp(log_alpha_s1), torch.exp(log_alpha_s2), torch.exp(log_alpha_t))
if (self.algorithm_type == 'dpmsolver++'):
phi_11 = torch.expm1(((- r1) * h))
phi_12 = torch.expm1(((- r2) * h))
phi_1 = torch.expm1((- h))
phi_22 = ((torch.expm1(((- r2) * h)) / (r2 * h)) + 1.0)
phi_2 = ((phi_1 / h) + 1.0)
phi_3 = ((phi_2 / h) - 0.5)
if (model_s is None):
model_s = self.model_fn(x, s)
if (model_s1 is None):
x_s1 = (((sigma_s1 / sigma_s) * x) - ((alpha_s1 * phi_11) * model_s))
model_s1 = self.model_fn(x_s1, s1)
x_s2 = ((((sigma_s2 / sigma_s) * x) - ((alpha_s2 * phi_12) * model_s)) + (((r2 / r1) * (alpha_s2 * phi_22)) * (model_s1 - model_s)))
model_s2 = self.model_fn(x_s2, s2)
if (solver_type == 'dpmsolver'):
x_t = ((((sigma_t / sigma_s) * x) - ((alpha_t * phi_1) * model_s)) + (((1.0 / r2) * (alpha_t * phi_2)) * (model_s2 - model_s)))
elif (solver_type == 'taylor'):
D1_0 = ((1.0 / r1) * (model_s1 - model_s))
D1_1 = ((1.0 / r2) * (model_s2 - model_s))
D1 = (((r2 * D1_0) - (r1 * D1_1)) / (r2 - r1))
D2 = ((2.0 * (D1_1 - D1_0)) / (r2 - r1))
x_t = (((((sigma_t / sigma_s) * x) - ((alpha_t * phi_1) * model_s)) + ((alpha_t * phi_2) * D1)) - ((alpha_t * phi_3) * D2))
else:
phi_11 = torch.expm1((r1 * h))
phi_12 = torch.expm1((r2 * h))
phi_1 = torch.expm1(h)
phi_22 = ((torch.expm1((r2 * h)) / (r2 * h)) - 1.0)
phi_2 = ((phi_1 / h) - 1.0)
phi_3 = ((phi_2 / h) - 0.5)
if (model_s is None):
model_s = self.model_fn(x, s)
if (model_s1 is None):
x_s1 = ((torch.exp((log_alpha_s1 - log_alpha_s)) * x) - ((sigma_s1 * phi_11) * model_s))
model_s1 = self.model_fn(x_s1, s1)
x_s2 = (((torch.exp((log_alpha_s2 - log_alpha_s)) * x) - ((sigma_s2 * phi_12) * model_s)) - (((r2 / r1) * (sigma_s2 * phi_22)) * (model_s1 - model_s)))
model_s2 = self.model_fn(x_s2, s2)
if (solver_type == 'dpmsolver'):
x_t = (((torch.exp((log_alpha_t - log_alpha_s)) * x) - ((sigma_t * phi_1) * model_s)) - (((1.0 / r2) * (sigma_t * phi_2)) * (model_s2 - model_s)))
elif (solver_type == 'taylor'):
D1_0 = ((1.0 / r1) * (model_s1 - model_s))
D1_1 = ((1.0 / r2) * (model_s2 - model_s))
D1 = (((r2 * D1_0) - (r1 * D1_1)) / (r2 - r1))
D2 = ((2.0 * (D1_1 - D1_0)) / (r2 - r1))
x_t = ((((torch.exp((log_alpha_t - log_alpha_s)) * x) - ((sigma_t * phi_1) * model_s)) - ((sigma_t * phi_2) * D1)) - ((sigma_t * phi_3) * D2))
if return_intermediate:
return (x_t, {'model_s': model_s, 'model_s1': model_s1, 'model_s2': model_s2})
else:
return x_t
def multistep_dpm_solver_second_update(self, x, model_prev_list, t_prev_list, t, solver_type='dpmsolver'):
if (solver_type not in ['dpmsolver', 'taylor']):
raise ValueError("'solver_type' must be either 'dpmsolver' or 'taylor', got {}".format(solver_type))
ns = self.noise_schedule
(model_prev_1, model_prev_0) = (model_prev_list[(- 2)], model_prev_list[(- 1)])
(t_prev_1, t_prev_0) = (t_prev_list[(- 2)], t_prev_list[(- 1)])
(lambda_prev_1, lambda_prev_0, lambda_t) = (ns.marginal_lambda(t_prev_1), ns.marginal_lambda(t_prev_0), ns.marginal_lambda(t))
(log_alpha_prev_0, log_alpha_t) = (ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t))
(sigma_prev_0, sigma_t) = (ns.marginal_std(t_prev_0), ns.marginal_std(t))
alpha_t = torch.exp(log_alpha_t)
h_0 = (lambda_prev_0 - lambda_prev_1)
h = (lambda_t - lambda_prev_0)
r0 = (h_0 / h)
D1_0 = ((1.0 / r0) * (model_prev_0 - model_prev_1))
if (self.algorithm_type == 'dpmsolver++'):
phi_1 = torch.expm1((- h))
if (solver_type == 'dpmsolver'):
x_t = ((((sigma_t / sigma_prev_0) * x) - ((alpha_t * phi_1) * model_prev_0)) - ((0.5 * (alpha_t * phi_1)) * D1_0))
elif (solver_type == 'taylor'):
x_t = ((((sigma_t / sigma_prev_0) * x) - ((alpha_t * phi_1) * model_prev_0)) + ((alpha_t * ((phi_1 / h) + 1.0)) * D1_0))
else:
phi_1 = torch.expm1(h)
if (solver_type == 'dpmsolver'):
x_t = (((torch.exp((log_alpha_t - log_alpha_prev_0)) * x) - ((sigma_t * phi_1) * model_prev_0)) - ((0.5 * (sigma_t * phi_1)) * D1_0))
elif (solver_type == 'taylor'):
x_t = (((torch.exp((log_alpha_t - log_alpha_prev_0)) * x) - ((sigma_t * phi_1) * model_prev_0)) - ((sigma_t * ((phi_1 / h) - 1.0)) * D1_0))
return x_t
def multistep_dpm_solver_third_update(self, x, model_prev_list, t_prev_list, t, solver_type='dpmsolver'):
ns = self.noise_schedule
(model_prev_2, model_prev_1, model_prev_0) = model_prev_list
(t_prev_2, t_prev_1, t_prev_0) = t_prev_list
(lambda_prev_2, lambda_prev_1, lambda_prev_0, lambda_t) = (ns.marginal_lambda(t_prev_2), ns.marginal_lambda(t_prev_1), ns.marginal_lambda(t_prev_0), ns.marginal_lambda(t))
(log_alpha_prev_0, log_alpha_t) = (ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t))
(sigma_prev_0, sigma_t) = (ns.marginal_std(t_prev_0), ns.marginal_std(t))
alpha_t = torch.exp(log_alpha_t)
h_1 = (lambda_prev_1 - lambda_prev_2)
h_0 = (lambda_prev_0 - lambda_prev_1)
h = (lambda_t - lambda_prev_0)
(r0, r1) = ((h_0 / h), (h_1 / h))
D1_0 = ((1.0 / r0) * (model_prev_0 - model_prev_1))
D1_1 = ((1.0 / r1) * (model_prev_1 - model_prev_2))
D1 = (D1_0 + ((r0 / (r0 + r1)) * (D1_0 - D1_1)))
D2 = ((1.0 / (r0 + r1)) * (D1_0 - D1_1))
if (self.algorithm_type == 'dpmsolver++'):
phi_1 = torch.expm1((- h))
phi_2 = ((phi_1 / h) + 1.0)
phi_3 = ((phi_2 / h) - 0.5)
x_t = (((((sigma_t / sigma_prev_0) * x) - ((alpha_t * phi_1) * model_prev_0)) + ((alpha_t * phi_2) * D1)) - ((alpha_t * phi_3) * D2))
else:
phi_1 = torch.expm1(h)
phi_2 = ((phi_1 / h) - 1.0)
phi_3 = ((phi_2 / h) - 0.5)
x_t = ((((torch.exp((log_alpha_t - log_alpha_prev_0)) * x) - ((sigma_t * phi_1) * model_prev_0)) - ((sigma_t * phi_2) * D1)) - ((sigma_t * phi_3) * D2))
return x_t
def singlestep_dpm_solver_update(self, x, s, t, order, return_intermediate=False, solver_type='dpmsolver', r1=None, r2=None):
if (order == 1):
return self.dpm_solver_first_update(x, s, t, return_intermediate=return_intermediate)
elif (order == 2):
return self.singlestep_dpm_solver_second_update(x, s, t, return_intermediate=return_intermediate, solver_type=solver_type, r1=r1)
elif (order == 3):
return self.singlestep_dpm_solver_third_update(x, s, t, return_intermediate=return_intermediate, solver_type=solver_type, r1=r1, r2=r2)
else:
raise ValueError('Solver order must be 1 or 2 or 3, got {}'.format(order))
def multistep_dpm_solver_update(self, x, model_prev_list, t_prev_list, t, order, solver_type='dpmsolver'):
if (order == 1):
return self.dpm_solver_first_update(x, t_prev_list[(- 1)], t, model_s=model_prev_list[(- 1)])
elif (order == 2):
return self.multistep_dpm_solver_second_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type)
elif (order == 3):
return self.multistep_dpm_solver_third_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type)
else:
raise ValueError('Solver order must be 1 or 2 or 3, got {}'.format(order))
def dpm_solver_adaptive(self, x, order, t_T, t_0, h_init=0.05, atol=0.0078, rtol=0.05, theta=0.9, t_err=1e-05, solver_type='dpmsolver'):
ns = self.noise_schedule
s = (t_T * torch.ones((1,)).to(x))
lambda_s = ns.marginal_lambda(s)
lambda_0 = ns.marginal_lambda((t_0 * torch.ones_like(s).to(x)))
h = (h_init * torch.ones_like(s).to(x))
x_prev = x
nfe = 0
if (order == 2):
r1 = 0.5
lower_update = (lambda x, s, t: self.dpm_solver_first_update(x, s, t, return_intermediate=True))
higher_update = (lambda x, s, t, **kwargs: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1, solver_type=solver_type, **kwargs))
elif (order == 3):
(r1, r2) = ((1.0 / 3.0), (2.0 / 3.0))
lower_update = (lambda x, s, t: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1, return_intermediate=True, solver_type=solver_type))
higher_update = (lambda x, s, t, **kwargs: self.singlestep_dpm_solver_third_update(x, s, t, r1=r1, r2=r2, solver_type=solver_type, **kwargs))
else:
raise ValueError('For adaptive step size solver, order must be 2 or 3, got {}'.format(order))
while (torch.abs((s - t_0)).mean() > t_err):
t = ns.inverse_lambda((lambda_s + h))
(x_lower, lower_noise_kwargs) = lower_update(x, s, t)
x_higher = higher_update(x, s, t, **lower_noise_kwargs)
delta = torch.max((torch.ones_like(x).to(x) * atol), (rtol * torch.max(torch.abs(x_lower), torch.abs(x_prev))))
norm_fn = (lambda v: torch.sqrt(torch.square(v.reshape((v.shape[0], (- 1)))).mean(dim=(- 1), keepdim=True)))
E = norm_fn(((x_higher - x_lower) / delta)).max()
if torch.all((E <= 1.0)):
x = x_higher
s = t
x_prev = x_lower
lambda_s = ns.marginal_lambda(s)
h = torch.min(((theta * h) * torch.float_power(E, ((- 1.0) / order)).float()), (lambda_0 - lambda_s))
nfe += order
print('adaptive solver nfe', nfe)
return x
def add_noise(self, x, t, noise=None):
(alpha_t, sigma_t) = (self.noise_schedule.marginal_alpha(t), self.noise_schedule.marginal_std(t))
if (noise is None):
noise = torch.randn((t.shape[0], *x.shape), device=x.device)
x = x.reshape(((- 1), *x.shape))
xt = ((expand_dims(alpha_t, x.dim()) * x) + (expand_dims(sigma_t, x.dim()) * noise))
if (t.shape[0] == 1):
return xt.squeeze(0)
else:
return xt
def inverse(self, x, steps=20, t_start=None, t_end=None, order=2, skip_type='time_uniform', method='multistep', lower_order_final=True, denoise_to_zero=False, solver_type='dpmsolver', atol=0.0078, rtol=0.05, return_intermediate=False):
t_0 = ((1.0 / self.noise_schedule.total_N) if (t_start is None) else t_start)
t_T = (self.noise_schedule.T if (t_end is None) else t_end)
assert ((t_0 > 0) and (t_T > 0)), 'Time range needs to be greater than 0. For discrete-time DPMs, it needs to be in [1 / N, 1], where N is the length of betas array'
return self.sample(x, steps=steps, t_start=t_0, t_end=t_T, order=order, skip_type=skip_type, method=method, lower_order_final=lower_order_final, denoise_to_zero=denoise_to_zero, solver_type=solver_type, atol=atol, rtol=rtol, return_intermediate=return_intermediate)
def sample(self, x, steps=20, t_start=None, t_end=None, order=2, skip_type='time_uniform', method='multistep', lower_order_final=True, denoise_to_zero=False, solver_type='dpmsolver', atol=0.0078, rtol=0.05, return_intermediate=False):
t_0 = ((1.0 / self.noise_schedule.total_N) if (t_end is None) else t_end)
t_T = (self.noise_schedule.T if (t_start is None) else t_start)
assert ((t_0 > 0) and (t_T > 0)), 'Time range needs to be greater than 0. For discrete-time DPMs, it needs to be in [1 / N, 1], where N is the length of betas array'
if return_intermediate:
assert (method in ['multistep', 'singlestep', 'singlestep_fixed']), 'Cannot use adaptive solver when saving intermediate values'
if (self.correcting_xt_fn is not None):
assert (method in ['multistep', 'singlestep', 'singlestep_fixed']), 'Cannot use adaptive solver when correcting_xt_fn is not None'
device = x.device
intermediates = []
with torch.no_grad():
if (method == 'adaptive'):
x = self.dpm_solver_adaptive(x, order=order, t_T=t_T, t_0=t_0, atol=atol, rtol=rtol, solver_type=solver_type)
elif (method == 'multistep'):
assert (steps >= order)
timesteps = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=steps, device=device)
assert ((timesteps.shape[0] - 1) == steps)
step = 0
t = timesteps[step]
t_prev_list = [t]
model_prev_list = [self.model_fn(x, t)]
if (self.correcting_xt_fn is not None):
x = self.correcting_xt_fn(x, t, step)
if return_intermediate:
intermediates.append(x)
for step in range(1, order):
t = timesteps[step]
x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, t, step, solver_type=solver_type)
if (self.correcting_xt_fn is not None):
x = self.correcting_xt_fn(x, t, step)
if return_intermediate:
intermediates.append(x)
t_prev_list.append(t)
model_prev_list.append(self.model_fn(x, t))
for step in range(order, (steps + 1)):
t = timesteps[step]
if (lower_order_final and (steps < 10)):
step_order = min(order, ((steps + 1) - step))
else:
step_order = order
x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, t, step_order, solver_type=solver_type)
if (self.correcting_xt_fn is not None):
x = self.correcting_xt_fn(x, t, step)
if return_intermediate:
intermediates.append(x)
for i in range((order - 1)):
t_prev_list[i] = t_prev_list[(i + 1)]
model_prev_list[i] = model_prev_list[(i + 1)]
t_prev_list[(- 1)] = t
if (step < steps):
model_prev_list[(- 1)] = self.model_fn(x, t)
elif (method in ['singlestep', 'singlestep_fixed']):
if (method == 'singlestep'):
(timesteps_outer, orders) = self.get_orders_and_timesteps_for_singlestep_solver(steps=steps, order=order, skip_type=skip_type, t_T=t_T, t_0=t_0, device=device)
elif (method == 'singlestep_fixed'):
K = (steps // order)
orders = ([order] * K)
timesteps_outer = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=K, device=device)
for (step, order) in enumerate(orders):
(s, t) = (timesteps_outer[step], timesteps_outer[(step + 1)])
timesteps_inner = self.get_time_steps(skip_type=skip_type, t_T=s.item(), t_0=t.item(), N=order, device=device)
lambda_inner = self.noise_schedule.marginal_lambda(timesteps_inner)
h = (lambda_inner[(- 1)] - lambda_inner[0])
r1 = (None if (order <= 1) else ((lambda_inner[1] - lambda_inner[0]) / h))
r2 = (None if (order <= 2) else ((lambda_inner[2] - lambda_inner[0]) / h))
x = self.singlestep_dpm_solver_update(x, s, t, order, solver_type=solver_type, r1=r1, r2=r2)
if (self.correcting_xt_fn is not None):
x = self.correcting_xt_fn(x, t, step)
if return_intermediate:
intermediates.append(x)
else:
raise ValueError('Got wrong method {}'.format(method))
if denoise_to_zero:
t = (torch.ones((1,)).to(device) * t_0)
x = self.denoise_to_zero_fn(x, t)
if (self.correcting_xt_fn is not None):
x = self.correcting_xt_fn(x, t, (step + 1))
if return_intermediate:
intermediates.append(x)
if return_intermediate:
return (x, intermediates)
else:
return x
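# Typical usage sketch for the solver above (model_fn and noise_schedule are
# assumed to be provided by the surrounding codebase; values illustrative):
# solver = DPM_Solver(model_fn, noise_schedule, algorithm_type='dpmsolver++')
# x_T = torch.randn(batch_size, *image_shape, device=device)
# x_0 = solver.sample(x_T, steps=20, order=2, skip_type='time_uniform', method='multistep')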
def remove_done_folders(task, folders_to_convert, data_dir, save_dir, store_prediction, store_representation):
rgb_dir = os.path.join(data_dir, 'rgb')
encoding_dir = os.path.join(save_dir, f'{task}_encoding')
decoding_dir = os.path.join(save_dir, f'{task}_decoding')
folders_to_use = set()
for folder in folders_to_convert:
rgb_folder = os.path.join(rgb_dir, folder)
decoding_folder = os.path.join(decoding_dir, folder)
encoding_folder = os.path.join(encoding_dir, folder)
if (not os.path.exists(rgb_folder)):
print(f'Skipping {folder} because no rgb folder (but is that true? This is probably caused by a bug somewhere)')
continue
if (store_representation and (not os.path.exists(encoding_folder))):
folders_to_use.add(folder)
elif (store_representation and (len(os.listdir(encoding_folder)) != len(os.listdir(rgb_folder)))):
folders_to_use.add(folder)
if (store_prediction and (not os.path.exists(decoding_folder))):
folders_to_use.add(folder)
elif (store_prediction and (len(os.listdir(decoding_folder)) != len(os.listdir(rgb_folder)))):
folders_to_use.add(folder)
return list(folders_to_use)
def leg_pose_to_motor_angles_with_half_pi_offset_and_safety(leg_pose):
motor_angles = []
for idx in range(4):
swing = leg_pose[(idx * 2)]
extend = leg_pose[((idx * 2) + 1)]
motor_angles.extend(swing_extend_to_motor_angles(idx, swing, extend))
return motor_angles
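# leg_pose is laid out as [swing_0, extend_0, swing_1, extend_1, ...] for the
# four legs; each (swing, extend) pair is mapped to per-leg motor angles by
# swing_extend_to_motor_angles.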
def mask_cross_entropy(pred, target, label, reduction='mean', avg_factor=None):
assert ((reduction == 'mean') and (avg_factor is None))
num_rois = pred.size()[0]
inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)
pred_slice = pred[(inds, label)].squeeze(1)
return F.binary_cross_entropy_with_logits(pred_slice, target, reduction='mean')[None]
class BertPreTrainedModel(metaclass=DummyObject):
_backends = ['torch']
def __init__(self, *args, **kwargs):
requires_backends(self, ['torch'])
def create_new_model_like(model_type: str, new_model_patterns: ModelPatterns, add_copied_from: bool=True, frameworks: Optional[List[str]]=None):
model_info = retrieve_info_for_model(model_type, frameworks=frameworks)
model_files = model_info['model_files']
old_model_patterns = model_info['model_patterns']
keep_old_processing = True
for processing_attr in ['feature_extractor_class', 'processor_class', 'tokenizer_class']:
if (getattr(old_model_patterns, processing_attr) != getattr(new_model_patterns, processing_attr)):
keep_old_processing = False
model_classes = model_info['model_classes']
old_module_name = model_files['module_name']
module_folder = ((TRANSFORMERS_PATH / 'models') / new_model_patterns.model_lower_cased)
os.makedirs(module_folder, exist_ok=True)
files_to_adapt = model_files['model_files']
if keep_old_processing:
files_to_adapt = [f for f in files_to_adapt if (('tokenization' not in str(f)) and ('processing' not in str(f)) and ('feature_extraction' not in str(f)))]
os.makedirs(module_folder, exist_ok=True)
for module_file in files_to_adapt:
new_module_name = module_file.name.replace(old_model_patterns.model_lower_cased, new_model_patterns.model_lower_cased)
dest_file = (module_folder / new_module_name)
duplicate_module(module_file, old_model_patterns, new_model_patterns, dest_file=dest_file, add_copied_from=(add_copied_from and ('modeling' in new_module_name)))
clean_frameworks_in_init((module_folder / '__init__.py'), frameworks=frameworks, keep_processing=(not keep_old_processing))
add_content_to_file(((TRANSFORMERS_PATH / 'models') / '__init__.py'), f' {new_model_patterns.model_lower_cased},', add_after=f' {old_module_name},', exact_match=True)
add_model_to_main_init(old_model_patterns, new_model_patterns, frameworks=frameworks, with_processing=(not keep_old_processing))
files_to_adapt = model_files['test_files']
if keep_old_processing:
files_to_adapt = [f for f in files_to_adapt if (('tokenization' not in str(f)) and ('processor' not in str(f)) and ('feature_extraction' not in str(f)))]
for test_file in files_to_adapt:
new_test_file_name = test_file.name.replace(old_model_patterns.model_lower_cased, new_model_patterns.model_lower_cased)
dest_file = (test_file.parent / new_test_file_name)
duplicate_module(test_file, old_model_patterns, new_model_patterns, dest_file=dest_file, add_copied_from=False)
add_model_to_auto_classes(old_model_patterns, new_model_patterns, model_classes)
doc_file = ((((REPO_PATH / 'docs') / 'source') / 'model_doc') / f'{old_model_patterns.model_type}.mdx')
duplicate_doc_file(doc_file, old_model_patterns, new_model_patterns, frameworks=frameworks)
if (old_model_patterns.model_type == old_model_patterns.checkpoint):
print(f"The model you picked has the same name for the model type and the checkpoint name ({old_model_patterns.model_type}). As a result, it's possible some places where the new checkpoint should be, you have {new_model_patterns.model_type} instead. You should search for all instances of {new_model_patterns.model_type} in the new files and check they're not badly used as checkpoints.")
elif (old_model_patterns.model_lower_cased == old_model_patterns.checkpoint):
print(f"The model you picked has the same name for the model type and the checkpoint name ({old_model_patterns.model_lower_cased}). As a result, it's possible some places where the new checkpoint should be, you have {new_model_patterns.model_lower_cased} instead. You should search for all instances of {new_model_patterns.model_lower_cased} in the new files and check they're not badly used as checkpoints.")
if ((old_model_patterns.model_type == old_model_patterns.model_lower_cased) and (new_model_patterns.model_type != new_model_patterns.model_lower_cased)):
print(f"The model you picked has the same name for the model type and the lowercased model name ({old_model_patterns.model_lower_cased}). As a result, it's possible some places where the new model type should be, you have {new_model_patterns.model_lower_cased} instead. You should search for all instances of {new_model_patterns.model_lower_cased} in the new files and check they're not badly used as the model type.")
if ((not keep_old_processing) and (old_model_patterns.tokenizer_class is not None)):
print('The constants at the start of the new tokenizer file need to be manually fixed. If your new model has a fast tokenizer, you will also need to manually add the converter in the `SLOW_TO_FAST_CONVERTERS` constant of `convert_slow_tokenizer.py`.')
class EncoderDecoderTests(tf.test.TestCase):
def setUp(self):
super(EncoderDecoderTests, self).setUp()
tf.logging.set_verbosity(tf.logging.INFO)
self.batch_size = 2
self.input_depth = 4
self.sequence_length = 10
self.vocab_list = [str(_) for _ in range(10)]
self.vocab_list += ['', '', '', '', '^_^']
self.vocab_size = len(self.vocab_list)
self.vocab_file = test_utils.create_temporary_vocab_file(self.vocab_list)
self.vocab_info = vocab.get_vocab_info(self.vocab_file.name)
tf.contrib.framework.get_or_create_global_step()
def tearDown(self):
self.vocab_file.close()
def create_model(self, _mode, _params=None):
self.skipTest('Base module should not be tested.')
def _create_example(self):
source = np.random.randn(self.batch_size, self.sequence_length, self.input_depth)
source_len = np.random.randint(0, self.sequence_length, [self.batch_size])
target_len = np.random.randint(0, (self.sequence_length * 2), [self.batch_size])
target = np.random.randn(self.batch_size, np.max(target_len), self.input_depth)
labels = np.random.randint(0, self.vocab_size, [self.batch_size, (np.max(target_len) - 1)])
example_ = namedtuple('Example', ['source', 'source_len', 'target', 'target_len', 'labels'])
return example_(source, source_len, target, target_len, labels)
def _test_pipeline(self, mode, params=None):
source_len = (self.sequence_length + 5)
target_len = (self.sequence_length + 10)
source = ' '.join(np.random.choice(self.vocab_list, source_len))
target = ' '.join(np.random.choice(self.vocab_list, target_len))
(sources_file, targets_file) = test_utils.create_temp_parallel_data(sources=[source], targets=[target])
model = self.create_model(mode, params)
input_pipeline_ = input_pipeline.ParallelTextInputPipeline(params={'source_files': [sources_file.name], 'target_files': [targets_file.name]}, mode=mode)
input_fn = training_utils.create_input_fn(pipeline=input_pipeline_, batch_size=self.batch_size)
(features, labels) = input_fn()
fetches = model(features, labels, None)
fetches = [_ for _ in fetches if (_ is not None)]
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
sess.run(tf.tables_initializer())
with tf.contrib.slim.queues.QueueRunners(sess):
fetches_ = sess.run(fetches)
sources_file.close()
targets_file.close()
return (model, fetches_)
def test_train(self):
(model, fetches_) = self._test_pipeline(tf.contrib.learn.ModeKeys.TRAIN)
(predictions_, loss_, _) = fetches_
target_len = ((self.sequence_length + 10) + 2)
max_decode_length = model.params['target.max_seq_len']
expected_decode_len = np.minimum(target_len, max_decode_length)
np.testing.assert_array_equal(predictions_['logits'].shape, [self.batch_size, (expected_decode_len - 1), model.target_vocab_info.total_size])
np.testing.assert_array_equal(predictions_['losses'].shape, [self.batch_size, (expected_decode_len - 1)])
np.testing.assert_array_equal(predictions_['predicted_ids'].shape, [self.batch_size, (expected_decode_len - 1)])
self.assertFalse(np.isnan(loss_))
def test_infer(self):
(model, fetches_) = self._test_pipeline(tf.contrib.learn.ModeKeys.INFER)
(predictions_,) = fetches_
pred_len = predictions_['predicted_ids'].shape[1]
np.testing.assert_array_equal(predictions_['logits'].shape, [self.batch_size, pred_len, model.target_vocab_info.total_size])
np.testing.assert_array_equal(predictions_['predicted_ids'].shape, [self.batch_size, pred_len])
def test_infer_beam_search(self):
self.batch_size = 1
beam_width = 10
(model, fetches_) = self._test_pipeline(mode=tf.contrib.learn.ModeKeys.INFER, params={'inference.beam_search.beam_width': 10})
(predictions_,) = fetches_
pred_len = predictions_['predicted_ids'].shape[1]
vocab_size = model.target_vocab_info.total_size
np.testing.assert_array_equal(predictions_['predicted_ids'].shape, [1, pred_len, beam_width])
np.testing.assert_array_equal(predictions_['beam_search_output.beam_parent_ids'].shape, [1, pred_len, beam_width])
np.testing.assert_array_equal(predictions_['beam_search_output.scores'].shape, [1, pred_len, beam_width])
np.testing.assert_array_equal(predictions_['beam_search_output.original_outputs.predicted_ids'].shape, [1, pred_len, beam_width])
np.testing.assert_array_equal(predictions_['beam_search_output.original_outputs.logits'].shape, [1, pred_len, beam_width, vocab_size]) |
def main(opt):
logger.setLevel(logging.INFO)
result_root = opt.out_root
result_json_root = osp.join(result_root, 'json')
mkdir_if_missing(result_json_root)
transforms = T.Compose([T.ToTensor(), T.Normalize(opt.im_mean, opt.im_std)])
obs_root = osp.join(opt.data_root, 'obs', opt.split, opt.obid)
obs_jpaths = [osp.join(obs_root, o) for o in os.listdir(obs_root)]
obs_jpaths = sorted([o for o in obs_jpaths if o.endswith('.json')])
accs = []
(timer_avgs, timer_calls) = ([], [])
for (i, obs_jpath) in enumerate(obs_jpaths):
seqname = obs_jpath.split('/')[(- 1)].split('.')[0]
output_dir = osp.join(result_root, 'frame', seqname)
dataloader = videodataset.LoadImagesAndPoseObs(obs_jpath, opt)
(seq_res, ta, tc) = eval_seq(opt, dataloader, save_dir=output_dir)
seq_json = fuse_result(seq_res, obs_jpath)
with open(osp.join(result_json_root, '{}.json'.format(seqname)), 'w') as f:
json.dump(seq_json, f)
timer_avgs.append(ta)
timer_calls.append(tc)
logger.info('Evaluate seq: {}'.format(seqname))
if opt.save_videos:
output_video_path = osp.join(output_dir, '{}.mp4'.format(seqname))
cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -c:v copy {}'.format(output_dir, output_video_path)
os.system(cmd_str)
timer_avgs = np.asarray(timer_avgs)
timer_calls = np.asarray(timer_calls)
all_time = np.dot(timer_avgs, timer_calls)
avg_time = (all_time / np.sum(timer_calls))
logger.info('Time elapsed: {:.2f} seconds, FPS: {:.2f}'.format(all_time, (1.0 / avg_time)))
cmd_str = 'python ./eval/poseval/evaluate.py --groundTruth={}/posetrack_data/annotations/{} --predictions={}/ --evalPoseTracking'.format(opt.data_root, opt.split, result_json_root)
os.system(cmd_str) |
def jaccard_simple(annotation, segmentation):
annotation = annotation.astype(bool)
segmentation = segmentation.astype(bool)
if (np.isclose(np.sum(annotation), 0) and np.isclose(np.sum(segmentation), 0)):
return 1
else:
return (np.sum((annotation & segmentation)) / np.sum((annotation | segmentation), dtype=np.float32)) |
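# Hedged usage sketch for jaccard_simple (assumes numpy imported as np): two
# toy binary masks that overlap in one pixel out of three in the union.
a = np.array([[1, 1, 0], [0, 0, 0]])
b = np.array([[0, 1, 1], [0, 0, 0]])
print(jaccard_simple(a, b))  # -> 0.333... (1 intersecting pixel / 3 in union) |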
class ROIPoolingParameter(_message.Message):
__metaclass__ = _reflection.GeneratedProtocolMessageType
DESCRIPTOR = _ROIPOOLINGPARAMETER |
class ROIAlignRotated(nn.Module):
def __init__(self, output_size, spatial_scale, sampling_ratio):
super(ROIAlignRotated, self).__init__()
self.output_size = output_size
self.spatial_scale = spatial_scale
self.sampling_ratio = sampling_ratio
def forward(self, input, rois):
assert ((rois.dim() == 2) and (rois.size(1) == 6))
orig_dtype = input.dtype
if (orig_dtype == torch.float16):
input = input.float()
rois = rois.float()
output_size = _pair(self.output_size)
if (torch.jit.is_scripting() or torch.jit.is_tracing()):
return torch.ops.detectron2.roi_align_rotated_forward(input, rois, self.spatial_scale, output_size[0], output_size[1], self.sampling_ratio).to(dtype=orig_dtype)
return roi_align_rotated(input, rois, self.output_size, self.spatial_scale, self.sampling_ratio).to(dtype=orig_dtype)
def __repr__(self):
tmpstr = (self.__class__.__name__ + '(')
tmpstr += ('output_size=' + str(self.output_size))
tmpstr += (', spatial_scale=' + str(self.spatial_scale))
tmpstr += (', sampling_ratio=' + str(self.sampling_ratio))
tmpstr += ')'
return tmpstr |
def _collate_fn(batch: List[Dict]) -> Tuple[torch.Tensor, List[str], List[str]]:
(ims, filenames, bad_images) = ([], [], [])
for b in batch:
im = b['image']
if (im is not None):
ims.append(im)
filenames.append(b['filename'])
else:
bad_images.append(b['filename'])
return (torch.stack(ims), filenames, bad_images) |
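# Minimal sketch of _collate_fn (assumes torch imported): images that failed to
# load arrive as None and are reported in bad_images instead of breaking
# torch.stack.
batch = [{'image': torch.zeros(3, 4, 4), 'filename': 'a.jpg'}, {'image': None, 'filename': 'b.jpg'}]
(ims, names, bad) = _collate_fn(batch)
print(ims.shape, names, bad)  # torch.Size([1, 3, 4, 4]) ['a.jpg'] ['b.jpg'] |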
def clean_standard_data(standard_record):
def sanitize_sex(sex):
sex = sex.lower()
if (('f' in sex) or ('w' in sex)):
return 'F'
else:
return 'M'
def sanitize_age(age):
for possible_age in age.split(' '):
try:
return int(possible_age)
except ValueError:
pass
def sanitize_finding(finding):
if finding:
if ('covid19' in finding.lower().replace('-', '').replace(' ', '')):
return 'COVID-19'
else:
return finding
return finding
print(standard_record)
sex = standard_record['patient']['sex']
if (sex not in ('M', 'F')):
standard_record['patient']['sex'] = sanitize_sex(sex)
standard_record['patient']['age'] = sanitize_age(standard_record['patient']['age'])
standard_record['patient']['finding'] = sanitize_finding(standard_record['patient']['finding'])
return standard_record |
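# Hypothetical record illustrating clean_standard_data: 'female' maps to 'F',
# the age string is parsed to an int, and a COVID spelling is normalized.
record = {'patient': {'sex': 'female', 'age': 'about 42 years', 'finding': 'Covid 19'}}
cleaned = clean_standard_data(record)
print(cleaned['patient'])  # {'sex': 'F', 'age': 42, 'finding': 'COVID-19'} |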
def process_task(task, token_indexer, vocab):
if (hasattr(task, 'train_data_text') and (task.train_data_text is not None)):
train = process_split(task.train_data_text, token_indexer)
else:
train = None
if (hasattr(task, 'val_data_text') and (task.val_data_text is not None)):
val = process_split(task.val_data_text, token_indexer)
else:
val = None
if (hasattr(task, 'test_data_text') and (task.test_data_text is not None)):
test = process_split(task.test_data_text, token_indexer)
else:
test = None
for instance in ((train or []) + (val or []) + (test or [])):
instance.index_fields(vocab)
return (train, val, test) |
def extract(fxml):
(sentlist, constlist) = reader(fxml)
sentlist = combine(sentlist, constlist)
fconll = fxml.replace('.xml', '.conll')
writer(sentlist, fconll) |
def _load_shared_library(lib_base_name: str):
(_base_path, _lib_paths) = get_shared_lib_info(lib_base_name=lib_base_name)
if ('GPTNEOX_CPP_LIB' in os.environ):
lib_base_name = os.environ['GPTNEOX_CPP_LIB']
_lib = pathlib.Path(lib_base_name)
_base_path = _lib.parent.resolve()
_lib_paths = [_lib.resolve()]
cdll_args = dict()
if ((sys.platform == 'win32') and (sys.version_info >= (3, 8))):
os.add_dll_directory(str(_base_path))
os.environ['PATH'] = ((str(_base_path) + ';') + os.environ['PATH'])
cdll_args['winmode'] = 0
for _lib_path in _lib_paths:
if _lib_path.exists():
try:
return ctypes.CDLL(str(_lib_path), **cdll_args)
except Exception as e:
invalidInputError(False, f"Failed to load shared library '{_lib_path}': {e}.")
invalidInputError(False, f"Shared library with base name '{lib_base_name}' not found.") |
def test_capture_function_twice(ing):
def foo(something):
pass
assert (ing.captured_functions == [foo])
ing.capture(foo)
assert (ing.captured_functions == [foo]) |
class ExpReduceMaxLROnIteration():
def __init__(self, gamma=1):
self.gamma = gamma
def __call__(self, eta_min, eta_max, iterations):
return (eta_min, (eta_max * (self.gamma ** iterations))) |
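# Quick check of ExpReduceMaxLROnIteration: eta_max decays by gamma**iterations
# while eta_min is returned unchanged.
sched = ExpReduceMaxLROnIteration(gamma=0.9)
print(sched(1e-05, 0.1, 0))   # (1e-05, 0.1)
print(sched(1e-05, 0.1, 10))  # (1e-05, 0.1 * 0.9**10 ~= 0.0349) |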
class YoloLayer(nn.Module):
def __init__(self, anchor_mask=[], num_classes=0, anchors=[], num_anchors=1, use_cuda=None):
super(YoloLayer, self).__init__()
use_cuda = (torch.cuda.is_available() and (True if (use_cuda is None) else use_cuda))
self.device = torch.device(('cuda' if use_cuda else 'cpu'))
self.anchor_mask = anchor_mask
self.num_classes = num_classes
self.anchors = anchors
self.num_anchors = num_anchors
self.anchor_step = (len(anchors) // num_anchors)
self.rescore = 0
self.ignore_thresh = 0.5
self.truth_thresh = 1.0
self.stride = 32
self.nth_layer = 0
self.seen = 0
self.net_width = 0
self.net_height = 0
def get_mask_boxes(self, output):
masked_anchors = []
for m in self.anchor_mask:
masked_anchors += self.anchors[(m * self.anchor_step):((m + 1) * self.anchor_step)]
masked_anchors = [(anchor / self.stride) for anchor in masked_anchors]
masked_anchors = torch.FloatTensor(masked_anchors).to(self.device)
num_anchors = torch.IntTensor([len(self.anchor_mask)]).to(self.device)
return {'x': output, 'a': masked_anchors, 'n': num_anchors}
def build_targets(self, pred_boxes, target, anchors, nA, nH, nW):
nB = target.size(0)
anchor_step = anchors.size(1)
conf_mask = torch.ones(nB, nA, nH, nW)
coord_mask = torch.zeros(nB, nA, nH, nW)
cls_mask = torch.zeros(nB, nA, nH, nW)
tcoord = torch.zeros(4, nB, nA, nH, nW)
tconf = torch.zeros(nB, nA, nH, nW)
tcls = torch.zeros(nB, nA, nH, nW)
(twidth, theight) = ((self.net_width / self.stride), (self.net_height / self.stride))
nAnchors = ((nA * nH) * nW)
nPixels = (nH * nW)
nGT = 0
nRecall = 0
nRecall75 = 0
anchors = anchors.to('cpu')
for b in range(nB):
cur_pred_boxes = pred_boxes[(b * nAnchors):((b + 1) * nAnchors)].t()
cur_ious = torch.zeros(nAnchors)
tbox = target[b].view((- 1), 5).to('cpu')
for t in range(50):
if (tbox[t][1] == 0):
break
(gx, gy) = ((tbox[t][1] * nW), (tbox[t][2] * nH))
(gw, gh) = ((tbox[t][3] * twidth), (tbox[t][4] * theight))
cur_gt_boxes = torch.FloatTensor([gx, gy, gw, gh]).repeat(nAnchors, 1).t()
cur_ious = torch.max(cur_ious, multi_bbox_ious(cur_pred_boxes, cur_gt_boxes, x1y1x2y2=False))
ignore_ix = (cur_ious > self.ignore_thresh)
conf_mask[b][ignore_ix.view(nA, nH, nW)] = 0
for t in range(50):
if (tbox[t][1] == 0):
break
nGT += 1
(gx, gy) = ((tbox[t][1] * nW), (tbox[t][2] * nH))
(gw, gh) = ((tbox[t][3] * twidth), (tbox[t][4] * theight))
(gw, gh) = (gw.float(), gh.float())
(gi, gj) = (int(gx), int(gy))
tmp_gt_boxes = torch.FloatTensor([0, 0, gw, gh]).repeat(nA, 1).t()
anchor_boxes = torch.cat((torch.zeros(nA, anchor_step), anchors), 1).t()
(_, best_n) = torch.max(multi_bbox_ious(tmp_gt_boxes, anchor_boxes, x1y1x2y2=False), 0)
gt_box = torch.FloatTensor([gx, gy, gw, gh])
pred_box = pred_boxes[((((b * nAnchors) + (best_n * nPixels)) + (gj * nW)) + gi)]
iou = bbox_iou(gt_box, pred_box, x1y1x2y2=False)
coord_mask[b][best_n][gj][gi] = 1
cls_mask[b][best_n][gj][gi] = 1
conf_mask[b][best_n][gj][gi] = 1
tcoord[0][b][best_n][gj][gi] = (gx - gi)
tcoord[1][b][best_n][gj][gi] = (gy - gj)
tcoord[2][b][best_n][gj][gi] = math.log((gw / anchors[best_n][0]))
tcoord[3][b][best_n][gj][gi] = math.log((gh / anchors[best_n][1]))
tcls[b][best_n][gj][gi] = tbox[t][0]
tconf[b][best_n][gj][gi] = (iou if self.rescore else 1.0)
if (iou > 0.5):
nRecall += 1
if (iou > 0.75):
nRecall75 += 1
return (nGT, nRecall, nRecall75, coord_mask, conf_mask, cls_mask, tcoord, tconf, tcls)
def forward(self, output, target):
mask_tuple = self.get_mask_boxes(output)
t0 = time.time()
nB = output.data.size(0)
nA = mask_tuple['n'].item()
nC = self.num_classes
nH = output.data.size(2)
nW = output.data.size(3)
anchor_step = (mask_tuple['a'].size(0) // nA)
anchors = mask_tuple['a'].view(nA, anchor_step).to(self.device)
cls_anchor_dim = (((nB * nA) * nH) * nW)
output = output.view(nB, nA, (5 + nC), nH, nW)
cls_grid = torch.linspace(5, ((5 + nC) - 1), nC).long().to(self.device)
ix = torch.LongTensor(range(0, 5)).to(self.device)
pred_boxes = torch.FloatTensor(4, cls_anchor_dim).to(self.device)
coord = output.index_select(2, ix[0:4]).view((nB * nA), (- 1), (nH * nW)).transpose(0, 1).contiguous().view((- 1), cls_anchor_dim)
coord[0:2] = coord[0:2].sigmoid()
conf = output.index_select(2, ix[4]).view(nB, nA, nH, nW).sigmoid()
cls = output.index_select(2, cls_grid)
cls = cls.view((nB * nA), nC, (nH * nW)).transpose(1, 2).contiguous().view(cls_anchor_dim, nC)
t1 = time.time()
grid_x = torch.linspace(0, (nW - 1), nW).repeat((nB * nA), nH, 1).view(cls_anchor_dim).to(self.device)
grid_y = torch.linspace(0, (nH - 1), nH).repeat(nW, 1).t().repeat((nB * nA), 1, 1).view(cls_anchor_dim).to(self.device)
anchor_w = anchors.index_select(1, ix[0]).repeat(1, ((nB * nH) * nW)).view(cls_anchor_dim)
anchor_h = anchors.index_select(1, ix[1]).repeat(1, ((nB * nH) * nW)).view(cls_anchor_dim)
pred_boxes[0] = (coord[0] + grid_x)
pred_boxes[1] = (coord[1] + grid_y)
pred_boxes[2] = (coord[2].exp() * anchor_w)
pred_boxes[3] = (coord[3].exp() * anchor_h)
pred_boxes = convert2cpu(pred_boxes.transpose(0, 1).contiguous().view((- 1), 4)).detach()
t2 = time.time()
(nGT, nRecall, nRecall75, coord_mask, conf_mask, cls_mask, tcoord, tconf, tcls) = self.build_targets(pred_boxes, target.detach(), anchors.detach(), nA, nH, nW)
cls_mask = (cls_mask == 1)
tcls = tcls[cls_mask].long().view((- 1))
cls_mask = cls_mask.view((- 1), 1).repeat(1, nC).to(self.device)
cls = cls[cls_mask].view((- 1), nC)
nProposals = int((conf > 0.25).sum())
tcoord = tcoord.view(4, cls_anchor_dim).to(self.device)
(tconf, tcls) = (tconf.to(self.device), tcls.to(self.device))
(coord_mask, conf_mask) = (coord_mask.view(cls_anchor_dim).to(self.device), conf_mask.to(self.device))
t3 = time.time()
loss_coord = (nn.MSELoss(reduction='sum')((coord * coord_mask), (tcoord * coord_mask)) / 2)
loss_conf = nn.MSELoss(reduction='sum')((conf * conf_mask), (tconf * conf_mask))
loss_cls = (nn.CrossEntropyLoss(reduction='sum')(cls, tcls) if (cls.size(0) > 0) else 0)
loss = ((loss_coord + loss_conf) + loss_cls)
t4 = time.time()
if False:
print(('-' * 30))
print((' activation : %f' % (t1 - t0)))
print((' create pred_boxes : %f' % (t2 - t1)))
print((' build targets : %f' % (t3 - t2)))
print((' create loss : %f' % (t4 - t3)))
print((' total : %f' % (t4 - t0)))
print(('%d: Layer(%03d) nGT %3d, nRC %3d, nRC75 %3d, nPP %3d, loss: box %6.3f, conf %6.3f, class %6.3f, total %7.3f' % (self.seen, self.nth_layer, nGT, nRecall, nRecall75, nProposals, loss_coord, loss_conf, loss_cls, loss)))
if math.isnan(loss.item()):
print(conf, tconf)
sys.exit(0)
return loss |
def get_iou_dataset(model_id, edge_length_threshold=0.1, filled=False, recalc=False):
manager = IouAutoSavingManager(model_id=model_id, edge_length_threshold=edge_length_threshold, filled=filled)
if (not recalc):
if (not os.path.isfile(manager.path)):
recalc = True
else:
with manager.get_saving_dataset() as ds:
if (len(ds) == 0):
recalc = True
if recalc:
manager.save_all()
return manager.get_saving_dataset('r') |
def save_rouge_scores(str_scores):
with open('rouge_scores.txt', 'w') as output:
output.write(str_scores) |
def compute_target(answers, ans2label):
answer_count = {}
if (len(answers) == 1):
answer_ = preprocess_answer(answers[0]['answer'])
answer_count[answer_] = 10
else:
for answer in answers:
answer_ = preprocess_answer(answer['answer'])
answer_count[answer_] = (answer_count.get(answer_, 0) + 1)
labels = []
scores = []
for answer in answer_count:
if (answer not in ans2label):
continue
labels.append(ans2label[answer])
score = get_score(answer_count[answer])
scores.append(score)
target = {'labels': labels, 'scores': scores}
return target |
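# Hypothetical walk-through of compute_target, stubbing the two helpers it
# relies on: preprocess_answer as lowercasing and get_score as the usual VQA
# soft score min(count / 3, 1). Neither stub is taken from this codebase.
preprocess_answer = lambda a: a.lower().strip()
get_score = lambda count: min(count / 3.0, 1.0)
ans2label = {'cat': 0, 'dog': 1}
answers = ([{'answer': 'cat'}] * 4) + ([{'answer': 'dog'}] * 2) + ([{'answer': 'bird'}] * 4)
print(compute_target(answers, ans2label))
# -> {'labels': [0, 1], 'scores': [1.0, 0.666...]}; 'bird' has no label and is skipped |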
def sin2_cos2_width_3(width=None, sin_theta=None, cos_theta=None, features=None):
sin_cos_height_width = []
if (features is not None):
sin_cos_height_width = [features['bbox/sin_theta'], features['bbox/cos_theta'], features['bbox/width']]
else:
sin_cos_height_width = [sin_theta, cos_theta, width]
return K.concatenate(sin_cos_height_width) |
class Evaluator():
def __init__(self, dataset, tokenizer, batch_size=1, pad_val=1, pad_max=512):
self.dataset = dataset
self.tokenizer = tokenizer
self.batch_size = batch_size
self.pad_val = pad_val
self.pad_max = pad_max
self.dataset = self.dataset.map(self.tokenize_function, batched=True)
self.dataset.set_format(type='torch', columns=['input_ids'])
@torch.no_grad()
def tokenize_function(self, examples):
if ('prompt' in examples):
example = self.tokenizer(examples['prompt'])
elif ('text' in examples):
example = self.tokenizer(examples['text'])
elif ('code' in examples):
example = self.tokenizer(examples['code'])
return example
@torch.no_grad()
def collate_batch(self, batch):
position_ids_padded = []
input_ids_padded = []
last_ind = []
attention_mask_padded = []
for text in batch:
input_ids = text['input_ids']
if (not args.padding):
input_ids = (input_ids[:int(self.pad_max)] if (len(input_ids) > int(self.pad_max)) else input_ids)
else:
pad_len = (self.pad_max - input_ids.shape[0])
input_ids = pad(input_ids, (0, pad_len), value=self.pad_val)
last_ind.append((input_ids.shape[0] - 1))
attention_mask = torch.ones(len(input_ids))
position_ids = torch.arange(len(input_ids))
input_ids_padded.append(input_ids)
attention_mask_padded.append(attention_mask)
position_ids_padded.append(position_ids)
return ((torch.vstack(input_ids_padded), torch.vstack(attention_mask_padded), torch.vstack(position_ids_padded), tuple(global_past_key_value)), torch.tensor(last_ind)) |
def evaluate(dataset, split, time_data):
print('Evaluate dataset {} in split {} for single stamp supervision'.format(dataset, split))
bz_stages = ('/margin_map_both' + time_data)
recog_path = ((((('./results/' + dataset) + bz_stages) + '_split_') + split) + '/')
ground_truth_path = (('./data/' + dataset) + '/groundTruth/')
file_list = (((('./data/' + dataset) + '/splits/test.split') + split) + '.bundle')
list_of_videos = read_file(file_list).split('\n')[:(- 1)]
overlap = [0.1, 0.25, 0.5]
(tp, fp, fn) = (np.zeros(3), np.zeros(3), np.zeros(3))
file_name = (('./result/' + time_data) + '.xlsx')
workbook = xlsxwriter.Workbook(file_name)
worksheet = workbook.add_worksheet()
metrics = ['F1@0.10', 'F1@0.25', 'F1@0.50', 'Edit', 'Acc']
row = 0
col = 0
for m in range(len(metrics)):
worksheet.write(row, col, metrics[m])
col += 1
row += 1
col = 0
correct = 0
total = 0
edit = 0
for vid in list_of_videos:
gt_file = (ground_truth_path + vid)
gt_content = read_file(gt_file).split('\n')[0:(- 1)]
recog_file = (recog_path + vid.split('.')[0])
recog_content = read_file(recog_file).split('\n')[1].split()
for i in range(len(gt_content)):
total += 1
if (gt_content[i] == recog_content[i]):
correct += 1
edit += edit_score(recog_content, gt_content)
for s in range(len(overlap)):
(tp1, fp1, fn1) = f_score(recog_content, gt_content, overlap[s])
tp[s] += tp1
fp[s] += fp1
fn[s] += fn1
for s in range(len(overlap)):
precision = (tp[s] / float((tp[s] + fp[s])))
recall = (tp[s] / float((tp[s] + fn[s])))
f1 = ((2.0 * (precision * recall)) / (precision + recall))
f1 = (np.nan_to_num(f1) * 100)
print(('%0.2f: %.4f' % (overlap[s], f1)))
worksheet.write(row, col, round(f1, 4))
col += 1
edit = ((1.0 * edit) / len(list_of_videos))
acc = ((100 * float(correct)) / total)
worksheet.write(row, col, round(edit, 4))
worksheet.write(row, (col + 1), round(acc, 4))
print(('Edit: %.4f' % edit))
print(('Acc: %.4f' % acc))
workbook.close() |
class ProgressBar(object):
def __init__(self, task_num=0, bar_width=50, start=True):
self.task_num = task_num
max_bar_width = self._get_max_bar_width()
self.bar_width = (bar_width if (bar_width <= max_bar_width) else max_bar_width)
self.completed = 0
if start:
self.start()
def _get_max_bar_width(self):
if (sys.version_info > (3, 3)):
from shutil import get_terminal_size
else:
from backports.shutil_get_terminal_size import get_terminal_size
(terminal_width, _) = get_terminal_size()
max_bar_width = min(int((terminal_width * 0.6)), (terminal_width - 50))
if (max_bar_width < 10):
print('terminal width is too small ({}), please consider widening the terminal for better progressbar visualization'.format(terminal_width))
max_bar_width = 10
return max_bar_width
def start(self):
if (self.task_num > 0):
sys.stdout.write('[{}] 0/{}, elapsed: 0s, ETA:'.format((' ' * self.bar_width), self.task_num))
else:
sys.stdout.write('completed: 0, elapsed: 0s')
sys.stdout.flush()
self.timer = Timer()
def update(self):
self.completed += 1
elapsed = self.timer.since_start()
fps = (self.completed / elapsed)
if (self.task_num > 0):
percentage = (self.completed / float(self.task_num))
eta = int((((elapsed * (1 - percentage)) / percentage) + 0.5))
mark_width = int((self.bar_width * percentage))
bar_chars = (('>' * mark_width) + (' ' * (self.bar_width - mark_width)))
sys.stdout.write('\r[{}] {}/{}, {:.1f} task/s, elapsed: {}s, ETA: {:5}s'.format(bar_chars, self.completed, self.task_num, fps, int((elapsed + 0.5)), eta))
else:
sys.stdout.write('completed: {}, elapsed: {}s, {:.1f} tasks/s'.format(self.completed, int((elapsed + 0.5)), fps))
sys.stdout.flush() |
class Searcher(BaseSearcher):
def __init__(self):
super().__init__(name=Path(__file__).stem)
self._repo = config.INSTANCE['publish'][self.name]
def _search(self, query: str) -> SearchResults:
dfs = []
num_results = 0
validator = SqliteFTS5Matcher(query)
paginated_results = self._github.search_code(query, sort='indexed', highlight=True, repo=self._repo)
for result in paginated_results:
path = Path(result.path)
content = result.decoded_content.decode()
assert content.startswith(_EXPECTED_FIRST_LINE_OF_CONTENT), f"Content of {path} is expected to start with {_EXPECTED_FIRST_LINE_OF_CONTENT.strip()!r} but it doesn't."
for text_match in result.text_matches:
fragment = text_match['fragment']
fragment_index_in_content = content.find(fragment)
if (fragment_index_in_content == (- 1)):
continue
for match in text_match['matches']:
match_indices_in_fragment = match['indices']
match_indices_in_content = [(fragment_index_in_content + i) for i in match_indices_in_fragment]
line_indices_in_content = [content[:match_indices_in_content[0]].rfind('\n'), (match_indices_in_content[1] + content[match_indices_in_content[1]:].find('\n'))]
line_csv = (content[:content.find('\n')] + content[slice(*line_indices_in_content)])
df = pd.read_csv(io.StringIO(line_csv), dtype='string')
searchable_full_text = ' '.join((df.at[(0, c)] for c in ('feed', 'title', 'long_url')))
if (not validator.is_match(searchable_full_text)):
continue
df = cast(pd.DataFrame, df)
df.insert(0, 'channel', path.parts[0])
df.insert(0, 'datetime', datetime.datetime.strptime((str(Path(*path.parts[1:])) + ' +0000'), '%Y/%m%d/%H%M%S.csv %z'))
dfs.append(df)
num_results += 1
if (num_results == _MAX_RESULTS):
self._concat_results_dfs(dfs)
df = dfs[0]
num_results = len(df)
if (num_results == _MAX_RESULTS):
return {'results': df, 'truncated': True}
if dfs:
self._concat_results_dfs(dfs)
return {'results': dfs[0], 'truncated': False}
return {'results': None, 'truncated': None} |
def test_sample_nyu_rgbd_image():
gt_prefix = 'SampleNYURGBDImage'
(gt_data_root, gt_download_dir, gt_extract_dir) = get_test_data_dirs(gt_prefix)
rgbd_image_nyu = o3d.data.SampleNYURGBDImage()
assert Path(gt_download_dir).is_dir()
assert (Path(rgbd_image_nyu.color_path) == (gt_extract_dir / 'NYU_color.ppm'))
assert Path(rgbd_image_nyu.color_path).is_file()
assert (Path(rgbd_image_nyu.depth_path) == (gt_extract_dir / 'NYU_depth.pgm'))
assert Path(rgbd_image_nyu.depth_path).is_file()
assert (rgbd_image_nyu.prefix == gt_prefix)
assert (Path(rgbd_image_nyu.data_root) == gt_data_root)
assert (Path(rgbd_image_nyu.download_dir) == gt_download_dir)
assert (Path(rgbd_image_nyu.extract_dir) == gt_extract_dir) |
def accuracy(output, target, topk=(1,)):
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
(_, pred) = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, (- 1)).expand_as(pred)).contiguous()
res = []
for k in topk:
correct_k = correct[:k].view((- 1)).float().sum(0, keepdim=True)
res.append(correct_k.mul_((100.0 / batch_size)))
return res |
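# Usage sketch for accuracy (assumes torch imported): a two-sample batch where
# both top-1 predictions are correct.
logits = torch.tensor([[0.1, 0.9], [0.8, 0.2]])
target = torch.tensor([1, 0])
(top1,) = accuracy(logits, target, topk=(1,))
print(top1)  # tensor([100.]) |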
@register_model
def tf_efficientnet_lite1(pretrained=False, **kwargs):
kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
model = _gen_efficientnet_lite('tf_efficientnet_lite1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs)
return model |
@register_model
def efficientnet_cc_b0_8e(pretrained=False, **kwargs):
model = _gen_efficientnet_condconv('efficientnet_cc_b0_8e', channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=2, pretrained=pretrained, **kwargs)
return model |
def int4a2nbr(data, verbose=False):
if verbose:
print('-> int4a2nbr, data :', data)
dim = len(data)
szd = (4 * dim)
if verbose:
print('-> int4a2nbr size of result :', szd)
result = create_string_buffer(b'', szd)
for k in range(dim):
if (data[k] < 256):
result[(4 * k)] = data[k]
elif (data[k] < 65536):
(result[((4 * k) + 1)], result[(4 * k)]) = divmod(data[k], 256)
elif (data[k] < 16777216):
(result[((4 * k) + 2)], rest) = divmod(data[k], 65536)
(result[((4 * k) + 1)], result[(4 * k)]) = divmod(rest, 256)
else:
(result[((4 * k) + 3)], rest) = divmod(data[k], 16777216)
(result[((4 * k) + 2)], rest) = divmod(rest, 65536)
(result[((4 * k) + 1)], result[(4 * k)]) = divmod(rest, 256)
if verbose:
print('-> int4a2nbr returns', result)
return result |
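# Sanity check for int4a2nbr (assumes the 16777216 thresholds reconstructed
# above): each int should be packed as 4 little-endian bytes, matching
# struct.pack for unsigned 32-bit values.
import struct
buf = int4a2nbr([1, 300, 70000, 16777300])
print(buf.raw == struct.pack('<4I', 1, 300, 70000, 16777300))  # True |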
def _create_dummy_loader():
loader = dict(type='HardDiskLoader', repeat=1, parser=dict(type='LineStrParser', keys=['file_name', 'text']))
return loader |
def get_ind(data, start):
array_ = []
for i in range(len(data)):
array_.append((i + start))
return array_ |
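# get_ind is equivalent to list(range(start, start + len(data))); for example:
print(get_ind(['a', 'b', 'c'], 5))  # [5, 6, 7] |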
def eval_train(model, train_loader):
model.eval()
train_loss = 0
correct = 0
with torch.no_grad():
for (data, target) in train_loader:
(data, target) = (data.cuda(), target.cuda())
output = model(data)
train_loss += F.cross_entropy(output, target, reduction='sum').item()
pred = output.max(1, keepdim=True)[1]
correct += pred.eq(target.view_as(pred)).sum().item()
train_loss /= len(train_loader.dataset)
print('Training: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)'.format(train_loss, correct, len(train_loader.dataset), ((100.0 * correct) / len(train_loader.dataset))))
training_accuracy = (correct / len(train_loader.dataset))
return (train_loss, training_accuracy) |
class LTOCF1(BasicModel):
def __init__(self, config: dict, dataset: BasicDataset):
super(LTOCF1, self).__init__()
self.config = config
self.dataset: dataloader.BasicDataset = dataset
self.__init_weight()
self.__init_ode()
def __init_weight(self):
self.num_users = self.dataset.n_users
self.num_items = self.dataset.m_items
self.latent_dim = self.config['latent_dim_rec']
self.n_layers = self.config['lightGCN_n_layers']
self.keep_prob = self.config['keep_prob']
self.A_split = self.config['A_split']
self.embedding_user = torch.nn.Embedding(num_embeddings=self.num_users, embedding_dim=self.latent_dim)
self.embedding_item = torch.nn.Embedding(num_embeddings=self.num_items, embedding_dim=self.latent_dim)
if (self.config['pretrain'] == 0):
nn.init.normal_(self.embedding_user.weight, std=0.1)
nn.init.normal_(self.embedding_item.weight, std=0.1)
world.cprint('use NORMAL distribution initializer')
else:
print('use pretrained data')
self.f = nn.Sigmoid()
self.Graph = self.dataset.getSparseGraph()
print(f"lgn is already to go(dropout:{self.config['dropout']})")
def __init_ode(self):
if (world.config['learnable_time'] == True):
self.time_split = self.config['time_split']
self.odetimes = ode.ODETimeSetter(self.time_split, self.config['K'])
self.odetime_1 = [self.odetimes[0]]
self.ode_block_test_1 = ode.ODEBlockTimeFirst(ode.ODEFunction(self.Graph), self.time_split, self.config['solver'])
self.ode_block_test_2 = ode.ODEBlockTimeLastK(ode.ODEFunction(self.Graph), self.time_split, self.config['solver'], self.config['K'])
else:
self.ode_block_1 = ode.ODEBlock(ode.ODEFunction(self.Graph), self.config['solver'], 0, (self.config['K'] / 2))
self.ode_block_2 = ode.ODEBlock(ode.ODEFunction(self.Graph), self.config['solver'], (self.config['K'] / 2), self.config['K'])
def get_time(self):
ode_times = list(self.odetime_1)
return ode_times
def __dropout_x(self, x, keep_prob):
size = x.size()
index = x.indices().t()
values = x.values()
random_index = (torch.rand(len(values)) + keep_prob)
random_index = random_index.int().bool()
index = index[random_index]
values = (values[random_index] / keep_prob)
g = torch.sparse.FloatTensor(index.t(), values, size)
return g
def __dropout(self, keep_prob):
if self.A_split:
graph = []
for g in self.Graph:
graph.append(self.__dropout_x(g, keep_prob))
else:
graph = self.__dropout_x(self.Graph, keep_prob)
return graph
def computer(self):
users_emb = self.embedding_user.weight
items_emb = self.embedding_item.weight
all_emb = torch.cat([users_emb, items_emb])
embs = [all_emb]
if self.config['dropout']:
if self.training:
g_droped = self.__dropout(self.keep_prob)
else:
g_droped = self.Graph
else:
g_droped = self.Graph
# layers
if (world.config['learnable_time'] == True):
out_1 = self.ode_block_test_1(all_emb, self.odetime_1)
if (world.config['dual_res'] == False):
out_1 = (out_1 - all_emb)
embs.append(out_1)
out_2 = self.ode_block_test_2(out_1, self.odetime_1)
if (world.config['dual_res'] == False):
out_2 = (out_2 - out_1)
embs.append(out_2)
elif (world.config['learnable_time'] == False):
all_emb_1 = self.ode_block_1(all_emb)
all_emb_1 = (all_emb_1 - all_emb)
embs.append(all_emb_1)
all_emb_2 = self.ode_block_2(all_emb_1)
all_emb_2 = (all_emb_2 - all_emb_1)
embs.append(all_emb_2)
embs = torch.stack(embs, dim=1)
light_out = torch.mean(embs, dim=1)
(users, items) = torch.split(light_out, [self.num_users, self.num_items])
return (users, items)
def getUsersRating(self, users):
(all_users, all_items) = self.computer()
users_emb = all_users[users.long()]
items_emb = all_items
rating = self.f(torch.matmul(users_emb, items_emb.t()))
return rating
def getEmbedding(self, users, pos_items, neg_items):
(all_users, all_items) = self.computer()
users_emb = all_users[users]
pos_emb = all_items[pos_items]
neg_emb = all_items[neg_items]
users_emb_ego = self.embedding_user(users)
pos_emb_ego = self.embedding_item(pos_items)
neg_emb_ego = self.embedding_item(neg_items)
return (users_emb, pos_emb, neg_emb, users_emb_ego, pos_emb_ego, neg_emb_ego)
def bpr_loss(self, users, pos, neg):
(users_emb, pos_emb, neg_emb, userEmb0, posEmb0, negEmb0) = self.getEmbedding(users.long(), pos.long(), neg.long())
reg_loss = (((1 / 2) * ((userEmb0.norm(2).pow(2) + posEmb0.norm(2).pow(2)) + negEmb0.norm(2).pow(2))) / float(len(users)))
pos_scores = torch.mul(users_emb, pos_emb)
pos_scores = torch.sum(pos_scores, dim=1)
neg_scores = torch.mul(users_emb, neg_emb)
neg_scores = torch.sum(neg_scores, dim=1)
loss = torch.mean(torch.nn.functional.softplus((neg_scores - pos_scores)))
return (loss, reg_loss)
def forward(self, users, items):
(all_users, all_items) = self.computer()
users_emb = all_users[users]
items_emb = all_items[items]
inner_pro = torch.mul(users_emb, items_emb)
gamma = torch.sum(inner_pro, dim=1)
return gamma |
def check_free_port(host, port, verbose=True):
sock = socket.socket()
try:
sock.bind((host, port))
sock.close()
print('host {} on port {} is AVAIL'.format(host, port))
return True
except OSError:
print('host {} on port {} is BUSY'.format(host, port))
sock.close()
return False |
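# Usage sketch for check_free_port (assumes the module-level socket import):
# hold a port with one socket, then probe it.
srv = socket.socket()
srv.bind(('127.0.0.1', 0))
srv.listen(1)
port = srv.getsockname()[1]
print(check_free_port('127.0.0.1', port))  # False while srv holds the port
srv.close() |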
def _start_():
seed = args.seed
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
np.set_printoptions(precision=4)
torch.set_printoptions(precision=4)
print()
print('[ARGUMENTS]')
print(args)
print()
return |
def ResNet152Body(net, from_layer, use_pool5=True, use_dilation_conv5=False, **bn_param):
conv_prefix = ''
conv_postfix = ''
bn_prefix = 'bn_'
bn_postfix = ''
scale_prefix = 'scale_'
scale_postfix = ''
ConvBNLayer(net, from_layer, 'conv1', use_bn=True, use_relu=True, num_output=64, kernel_size=7, pad=3, stride=2, conv_prefix=conv_prefix, conv_postfix=conv_postfix, bn_prefix=bn_prefix, bn_postfix=bn_postfix, scale_prefix=scale_prefix, scale_postfix=scale_postfix, **bn_param)
net.pool1 = L.Pooling(net.conv1, pool=P.Pooling.MAX, kernel_size=3, stride=2)
ResBody(net, 'pool1', '2a', out2a=64, out2b=64, out2c=256, stride=1, use_branch1=True, **bn_param)
ResBody(net, 'res2a', '2b', out2a=64, out2b=64, out2c=256, stride=1, use_branch1=False, **bn_param)
ResBody(net, 'res2b', '2c', out2a=64, out2b=64, out2c=256, stride=1, use_branch1=False, **bn_param)
ResBody(net, 'res2c', '3a', out2a=128, out2b=128, out2c=512, stride=2, use_branch1=True, **bn_param)
from_layer = 'res3a'
for i in range(1, 8):
block_name = '3b{}'.format(i)
ResBody(net, from_layer, block_name, out2a=128, out2b=128, out2c=512, stride=1, use_branch1=False, **bn_param)
from_layer = 'res{}'.format(block_name)
ResBody(net, from_layer, '4a', out2a=256, out2b=256, out2c=1024, stride=2, use_branch1=True, **bn_param)
from_layer = 'res4a'
for i in range(1, 36):
block_name = '4b{}'.format(i)
ResBody(net, from_layer, block_name, out2a=256, out2b=256, out2c=1024, stride=1, use_branch1=False, **bn_param)
from_layer = 'res{}'.format(block_name)
stride = 2
dilation = 1
if use_dilation_conv5:
stride = 1
dilation = 2
ResBody(net, from_layer, '5a', out2a=512, out2b=512, out2c=2048, stride=stride, use_branch1=True, dilation=dilation, **bn_param)
ResBody(net, 'res5a', '5b', out2a=512, out2b=512, out2c=2048, stride=1, use_branch1=False, dilation=dilation, **bn_param)
ResBody(net, 'res5b', '5c', out2a=512, out2b=512, out2c=2048, stride=1, use_branch1=False, dilation=dilation, **bn_param)
if use_pool5:
net.pool5 = L.Pooling(net.res5c, pool=P.Pooling.AVE, global_pooling=True)
return net |
def test_fixing_values(conf_scope):
cfg = conf_scope({'a': 100})
assert (cfg['a'] == 100)
assert (cfg['composit1'] == 102.0) |
def semnasnet_100(pretrained=False, **kwargs):
model = _gen_mnasnet_a1('semnasnet_100', 1.0, pretrained=pretrained, **kwargs)
return model |
def add_special_tokens_to_vocab(model_dir: Path, separate_vocab=False) -> None:
if separate_vocab:
vocab = load_yaml(find_src_vocab_file(model_dir))
vocab = {k: int(v) for (k, v) in vocab.items()}
num_added = add_to_vocab_(vocab, ['<pad>'])
save_json(vocab, (model_dir / 'vocab.json'))
vocab = load_yaml(find_tgt_vocab_file(model_dir))
vocab = {k: int(v) for (k, v) in vocab.items()}
num_added = add_to_vocab_(vocab, ['<pad>'])
save_json(vocab, (model_dir / 'target_vocab.json'))
save_tokenizer_config(model_dir, separate_vocabs=separate_vocab)
else:
vocab = load_yaml(find_vocab_file(model_dir))
vocab = {k: int(v) for (k, v) in vocab.items()}
num_added = add_to_vocab_(vocab, ['<pad>'])
print(f'added {num_added} tokens to vocab')
save_json(vocab, (model_dir / 'vocab.json'))
save_tokenizer_config(model_dir) |
def tokenize(refs, cands, no_op=False):
tokenizer = PTBTokenizer()
if no_op:
refs = {idx: [r for r in c_refs] for (idx, c_refs) in enumerate(refs)}
cands = {idx: [c] for (idx, c) in enumerate(cands)}
else:
refs = {idx: [{'caption': r} for r in c_refs] for (idx, c_refs) in enumerate(refs)}
cands = {idx: [{'caption': c}] for (idx, c) in enumerate(cands)}
refs = tokenizer.tokenize(refs)
cands = tokenizer.tokenize(cands)
return (refs, cands) |
class CityscapesSemSegEvaluator(CityscapesEvaluator):
def process(self, inputs, outputs):
from cityscapesscripts.helpers.labels import trainId2label
for (input, output) in zip(inputs, outputs):
file_name = input['file_name']
basename = os.path.splitext(os.path.basename(file_name))[0]
pred_filename = os.path.join(self._temp_dir, (basename + '_pred.png'))
output = output['sem_seg'].argmax(dim=0).to(self._cpu_device).numpy()
pred = (255 * np.ones(output.shape, dtype=np.uint8))
for (train_id, label) in trainId2label.items():
if label.ignoreInEval:
continue
pred[(output == train_id)] = label.id
Image.fromarray(pred).save(pred_filename)
def evaluate(self):
comm.synchronize()
if (comm.get_rank() > 0):
return
import cityscapesscripts.evaluation.evalPixelLevelSemanticLabeling as cityscapes_eval
self._logger.info('Evaluating results under {} ...'.format(self._temp_dir))
cityscapes_eval.args.predictionPath = os.path.abspath(self._temp_dir)
cityscapes_eval.args.predictionWalk = None
cityscapes_eval.args.JSONOutput = False
cityscapes_eval.args.colorized = False
gt_dir = PathManager.get_local_path(self._metadata.gt_dir)
groundTruthImgList = glob.glob(os.path.join(gt_dir, '*', '*_gtFine_labelIds.png'))
assert len(groundTruthImgList), 'Cannot find any ground truth images to use for evaluation. Searched for: {}'.format(cityscapes_eval.args.groundTruthSearch)
predictionImgList = []
for gt in groundTruthImgList:
predictionImgList.append(cityscapes_eval.getPrediction(cityscapes_eval.args, gt))
results = cityscapes_eval.evaluateImgLists(predictionImgList, groundTruthImgList, cityscapes_eval.args)
ret = OrderedDict()
ret['sem_seg'] = {'IoU': (100.0 * results['averageScoreClasses']), 'iIoU': (100.0 * results['averageScoreInstClasses']), 'IoU_sup': (100.0 * results['averageScoreCategories']), 'iIoU_sup': (100.0 * results['averageScoreInstCategories'])}
self._working_dir.cleanup()
return ret |
def ibn_resnet152(**kwargs):
return get_ibnresnet(blocks=152, model_name='ibn_resnet152', **kwargs) |
def _res_shortcut_D_dec(block, layers, **kwargs):
model = ResShortCut_D_Dec(block, layers, **kwargs)
return model |
def read_data(config, data_type, ref, data_filter=None):
data_path = os.path.join(config.data_dir, 'data_{}.json'.format(data_type))
shared_path = os.path.join(config.data_dir, 'shared_{}.json'.format(data_type))
with open(data_path, 'r') as fh:
data = json.load(fh)
with open(shared_path, 'r') as fh:
shared = json.load(fh)
num_examples = len(next(iter(data.values())))
if (data_filter is None):
valid_idxs = range(num_examples)
else:
mask = []
keys = data.keys()
values = data.values()
for vals in zip(*values):
each = {key: val for (key, val) in zip(keys, vals)}
mask.append(data_filter(each, shared))
valid_idxs = [idx for idx in range(len(mask)) if mask[idx]]
print('Loaded {}/{} examples from {}'.format(len(valid_idxs), num_examples, data_type))
shared_path = (config.shared_path or os.path.join(config.out_dir, 'shared.json'))
if (not ref):
word2vec_dict = (shared['lower_word2vec'] if config.lower_word else shared['word2vec'])
word_counter = (shared['lower_word_counter'] if config.lower_word else shared['word_counter'])
char_counter = shared['char_counter']
if config.finetune:
shared['word2idx'] = {word: (idx + 2) for (idx, word) in enumerate((word for (word, count) in word_counter.items() if ((count > config.word_count_th) or (config.known_if_glove and (word in word2vec_dict)))))}
else:
assert config.known_if_glove
assert config.use_glove_for_unk
shared['word2idx'] = {word: (idx + 2) for (idx, word) in enumerate((word for (word, count) in word_counter.items() if ((count > config.word_count_th) and (word not in word2vec_dict))))}
shared['char2idx'] = {char: (idx + 2) for (idx, char) in enumerate((char for (char, count) in char_counter.items() if (count > config.char_count_th)))}
NULL = '-NULL-'
UNK = '-UNK-'
shared['word2idx'][NULL] = 0
shared['word2idx'][UNK] = 1
shared['char2idx'][NULL] = 0
shared['char2idx'][UNK] = 1
json.dump({'word2idx': shared['word2idx'], 'char2idx': shared['char2idx']}, open(shared_path, 'w'))
else:
new_shared = json.load(open(shared_path, 'r'))
for (key, val) in new_shared.items():
shared[key] = val
if config.use_glove_for_unk:
word2vec_dict = (shared['lower_word2vec'] if config.lower_word else shared['word2vec'])
new_word2idx_dict = {word: idx for (idx, word) in enumerate((word for word in word2vec_dict.keys() if (word not in shared['word2idx'])))}
shared['new_word2idx'] = new_word2idx_dict
offset = len(shared['word2idx'])
word2vec_dict = (shared['lower_word2vec'] if config.lower_word else shared['word2vec'])
new_word2idx_dict = shared['new_word2idx']
idx2vec_dict = {idx: word2vec_dict[word] for (word, idx) in new_word2idx_dict.items()}
new_emb_mat = np.array([idx2vec_dict[idx] for idx in range(len(idx2vec_dict))], dtype='float32')
shared['new_emb_mat'] = new_emb_mat
data_set = DataSet(data, data_type, shared=shared, valid_idxs=valid_idxs)
return data_set |
def _add_categories_metadata(dataset_name: str, categories: Dict[(str, Any)]):
meta = MetadataCatalog.get(dataset_name)
meta.categories = {c['id']: c['name'] for c in categories}
logger = logging.getLogger(__name__)
logger.info('Dataset {} categories: {}'.format(dataset_name, categories)) |
class DIV2K(srdata.SRData):
def __init__(self, args, train=True):
super(DIV2K, self).__init__(args, train)
self.repeat = (args.test_every // (args.n_train // args.batch_size))
def _scan(self):
list_hr = []
if self.train:
idx_begin = 0
idx_end = self.args.n_train
else:
idx_begin = self.args.n_train
idx_end = (self.args.offset_val + self.args.n_val)
for i in range((idx_begin + 1), (idx_end + 1)):
filename = '{:0>4}'.format(i)
list_hr.append(os.path.join(self.dir_hr, (filename + self.ext)))
return list_hr
def _set_filesystem(self, dir_data):
self.apath = (dir_data + '/DIV2K')
self.dir_hr = os.path.join(self.apath, 'DIV2K_HQ')
self.ext = '.png'
def _name_hrbin(self):
return os.path.join(self.apath, 'bin', '{}_bin_HR.npy'.format(self.split))
def __len__(self):
if self.train:
return (len(self.images_hr) * self.repeat)
else:
return len(self.images_hr)
def _get_index(self, idx):
if self.train:
return (idx % len(self.images_hr))
else:
return idx |
def test_nrtr_encoder():
tf_encoder = NRTREncoder()
tf_encoder.init_weights()
tf_encoder.train()
feat = torch.randn(1, 512, 1, 25)
out_enc = tf_encoder(feat)
print('hello', out_enc.size())
assert (out_enc.shape == torch.Size([1, 25, 512])) |
def get_maybe_sharded_checkpoint_filename(filename: str, suffix: str, shard_idx: int, num_shards: int) -> str:
orig_filename = filename
filename = filename.replace('.pt', (suffix + '.pt'))
fsdp_filename = (filename[:(- 3)] + f'-shard{shard_idx}.pt')
model_parallel_filename = (orig_filename[:(- 3)] + f'_part{shard_idx}.pt')
if PathManager.exists(fsdp_filename):
return fsdp_filename
elif (num_shards > 1):
return model_parallel_filename
else:
return filename |
def main():
top_widgets = []
for i in range(num_top):
top_widgets.append(ORCWidget(('HF_' + str(i)), [top_button_width_min, top_button_width_pref, top_button_width_max, top_button_height_min, top_button_height_pref, top_button_height_max]))
column = ORCColumn('column', None, window_width, window_height)
horizonalflow = HorizontalFlow('HF', top_widgets, column, True)
textbox = ORCWidget('textbox', [textbox_min, textbox_pref, textbox_max, textbox_min, textbox_pref, textbox_max], horizonalflow)
textbox.set_weight(1e-05)
column.define_sublayouts([horizonalflow, textbox])
start = time.time()
column.solve()
print(('Time: ' + str((time.time() - start))))
if show_window:
time_result.insert(0, str((time.time() - start)))
(best_leaf, best_leaf_result, best_leaf_loss) = column.get_best()
if (best_leaf is None):
print('No Solution!')
exit()
horizontalflow_row_height = best_leaf.parent.best_row_height
horizontalflow_row_width = best_leaf.parent.best_row_width
horizontalflow_result_index = best_leaf.parent.best_result_index
HF_l = best_leaf_result['HF_l']
HF_r = best_leaf_result['HF_r']
HF_t = best_leaf_result['HF_t']
HF_b = best_leaf_result['HF_b']
textbox_l = best_leaf_result['textbox_l']
textbox_r = best_leaf_result['textbox_r']
textbox_t = best_leaf_result['textbox_t']
textbox_b = best_leaf_result['textbox_b']
left = HF_l
top = HF_t
index = 0
for i in range(len(horizontalflow_result_index)):
for j in range(len(horizontalflow_result_index[i])):
widget_width = horizontalflow_row_width[i][j]
widget_height = horizontalflow_row_height[i]
if show_window:
widgets[index][0].place(x=left, y=top, width=widget_width, height=widget_height)
left += widget_width
index += 1
left = HF_l
top += widget_height
if show_window:
widgets[(- 1)][0].place(x=textbox_l, y=textbox_t, width=(textbox_r - textbox_l), height=(textbox_b - textbox_t))
if show_window:
mainloop() |
def resnet152_eca(k_size=[5, 5, 5, 7]):
print('Constructing resnet152_eca......')
model = ResNet(Bottleneck, [3, 8, 36, 3], ECA=k_size)
return model |
class DescriptorCollection(list):
def val_to_description(self, val):
d = self[0]
for d in self:
if d.contains_value(val):
break
return d.adj
def sample(self):
return random.choice(self).sample() |
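# Illustration of DescriptorCollection with a hypothetical descriptor stub that
# only mimics the contains_value/adj/sample interface the collection expects
# (assumes the module-level random import used by sample()).
class _ToyDescriptor:
    def __init__(self, lo, hi, adj):
        (self.lo, self.hi, self.adj) = (lo, hi, adj)
    def contains_value(self, val):
        return (self.lo <= val < self.hi)
    def sample(self):
        return random.uniform(self.lo, self.hi)
sizes = DescriptorCollection([_ToyDescriptor(0, 10, 'small'), _ToyDescriptor(10, 100, 'large')])
print(sizes.val_to_description(3))   # 'small'
print(sizes.val_to_description(42))  # 'large' |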
def _create_input_ids_from_token_ids(token_ids_a, token_ids_b, tokenizer, max_seq_length):
pair = (len(token_ids_b) != 0)
num_special_tokens_to_add = tokenizer.num_special_tokens_to_add(pair=pair)
while ((len(token_ids_a) + len(token_ids_b)) > (max_seq_length - num_special_tokens_to_add)):
if (len(token_ids_b) > 0):
token_ids_b = token_ids_b[:(- 1)]
else:
token_ids_a = token_ids_a[:(- 1)]
input_ids = tokenizer.build_inputs_with_special_tokens(token_ids_a, (token_ids_b if pair else None))
attention_mask = ([1] * len(input_ids))
token_type_ids = tokenizer.create_token_type_ids_from_sequences(token_ids_a, (token_ids_b if pair else None))
padding_length = (max_seq_length - len(input_ids))
if (tokenizer.padding_side == 'right'):
input_ids = (input_ids + ([tokenizer.pad_token_id] * padding_length))
attention_mask = (attention_mask + ([0] * padding_length))
token_type_ids = (token_type_ids + ([tokenizer.pad_token_type_id] * padding_length))
else:
input_ids = (([tokenizer.pad_token_id] * padding_length) + input_ids)
attention_mask = (([0] * padding_length) + attention_mask)
token_type_ids = (([tokenizer.pad_token_type_id] * padding_length) + token_type_ids)
assert (len(input_ids) == max_seq_length)
assert (len(attention_mask) == max_seq_length)
assert (len(token_type_ids) == max_seq_length)
return (input_ids, attention_mask, token_type_ids) |
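# Hedged usage sketch with a Hugging Face tokenizer (assumes transformers is
# installed; the checkpoint name is illustrative): truncate a sentence pair to
# max_seq_length 12 and pad.
from transformers import AutoTokenizer
tok = AutoTokenizer.from_pretrained('bert-base-uncased')
ids_a = tok.convert_tokens_to_ids(tok.tokenize('hello world'))
ids_b = tok.convert_tokens_to_ids(tok.tokenize('goodbye'))
(input_ids, attention_mask, token_type_ids) = _create_input_ids_from_token_ids(ids_a, ids_b, tok, 12)
print(len(input_ids), len(attention_mask), len(token_type_ids))  # 12 12 12 |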
def crps_minimum(yHat_2d, y_2d):
avg = []
for (i, (yHat_val, y_2d_val)) in enumerate(zip(yHat_2d.flatten(), y_2d.flatten())):
optimal_tau_pll = ((yHat_val - y_2d_val) ** (- 2.0))
result = optimize.minimize(crps_minimization, np.sqrt((1.0 / optimal_tau_pll)), method='L-BFGS-B', args=(y_2d_val, yHat_val), bounds=[(None, .0)], options={'maxiter': 100})
crps_min = crps(y_2d_val, yHat_val, (result.x[0] ** 2.0))
avg.append(crps_min)
return np.mean(avg) |
def test_runningmeanstd():
import subprocess
subprocess.check_call(['mpirun', '-np', '3', 'python', '-c', 'from baselines.common.mpi_moments import _helper_runningmeanstd; _helper_runningmeanstd()']) |
class ViTMSNConfig(PretrainedConfig):
model_type = 'vit_msn'
def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, **kwargs):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.qkv_bias = qkv_bias |
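# Minimal sketch: instantiate ViTMSNConfig with defaults, overriding only the
# input resolution, as one might before building the model.
config = ViTMSNConfig(image_size=192)
print(config.hidden_size, config.num_hidden_layers, config.image_size)  # 768 12 192 |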
def test_nonref_iterators():
pairs = m.IntPairs([(1, 2), (3, 4), (0, 5)])
assert (list(pairs.nonref()) == [(1, 2), (3, 4), (0, 5)])
assert (list(pairs.nonref_keys()) == [1, 3, 0])
assert (list(pairs.nonref_values()) == [2, 4, 5]) |
def load_dobldobl_system():
from phcpy.phcpy2c3 import py2c_syscon_number_of_dobldobl_polynomials
from phcpy.phcpy2c3 import py2c_syscon_load_dobldobl_polynomial
dim = py2c_syscon_number_of_dobldobl_polynomials()
result = []
for ind in range(1, (dim + 1)):
result.append(py2c_syscon_load_dobldobl_polynomial(ind))
return result |
def header(text, color=0.4, hpad=20, vpad=15):
if isinstance(vpad, (float, int)):
vpad = [vpad, vpad]
if isinstance(hpad, (float, int)):
hpad = [hpad, hpad]
line_height = imgui.core.get_text_line_height()
if isinstance(color, (float, int)):
color = [color, color, color, 1]
imgui.push_style_color(imgui.COLOR_TEXT, *color)
(cx, cy) = imgui.get_cursor_position()
imgui.set_cursor_position([(cx + hpad[0]), (cy + vpad[0])])
imgui.text(text)
imgui.pop_style_color(1)
(yield)
imgui.set_cursor_position([(cx + hpad[1]), (((cy + line_height) + vpad[0]) + vpad[1])])
imgui.separator() |
def main():
parser = argparse.ArgumentParser(description='Singing separation Trainer')
parser.add_argument('--use_wandb', type=str2bool, default=False)
parser.add_argument('--entity', type=str, default='your_entity_id')
parser.add_argument('--project', type=str, default='your_project_name')
parser.add_argument('--sweep', type=str2bool, default=False)
parser.add_argument('--target', type=str, default='vocals', help='target source (will be passed to the dataset)')
parser.add_argument('--mixed_precision', type=str2bool, default=False, help='use mixed precision training?')
parser.add_argument('--gradient_clip', type=float, default=None, help='grad_clip max_norm parameter. None if you do not want to use grad_clip.')
parser.add_argument('--architecture', type=str, default='conv_tasnet_stft', help='network architecture')
parser.add_argument('--mask_act', type=str, default='linear', help='relu, linear')
parser.add_argument('--ff_activation', type=str, default='relu', help='relu, linear, gelu, etc.')
parser.add_argument('--encoder_activation', type=str, default=None, help='relu, linear')
parser.add_argument('--no_mask', type=str2bool, default=False, help='use masking method? Default:False')
parser.add_argument('--no_mask_residual', type=str2bool, default=False, help='use masking method? Default:False')
parser.add_argument('--mixture_consistency', type=str, default=None, help="use mixture consistency training? or SFSRNet training? ['residual', 'mixture_consistency', 'sfsrnet']")
parser.add_argument('--srnet', type=str, default='orig', help="use orig srnet or ConvNext style srnet? ['orig', 'convnext']")
parser.add_argument('--db_normalize', type=str2bool, default=False, help='when using sfsrnet, use db_normalize of SFSRNet input?')
parser.add_argument('--sr_input_res', type=str2bool, default=False, help='when using sfsrnet, use output residual connection? recommended when using original style SRNet')
parser.add_argument('--sr_out_mix_consistency', type=str2bool, default=False, help='when using sfsrnet, apply mixture consistency constraint on srnet output?')
parser.add_argument('--n_blocks', type=int, default=6, help='Number of convolutional blocks in each repeat. Defaults to 6.')
parser.add_argument('--n_repeats', type=int, default=4, help='Number of repeats. Defaults to 4.')
parser.add_argument('--bn_chan', type=int, default=256, help='Number of channels after the bottleneck.')
parser.add_argument('--skip_chan', type=int, default=256, help='Number of channels of skip connection outputs.')
parser.add_argument('--hid_chan', type=int, default=1024, help='Number of channels in the convolutional blocks.')
parser.add_argument('--dataset', type=str, default='singing_librispeech', choices=['singing_librispeech', 'multi_singing_librispeech'], help='Name of the dataset. Duet for singing_librispeech, Main vs. rest for multi_singing_librispeech')
parser.add_argument('--sing_sing_ratio', type=float, default=0.15, help='singing+singing train dataset portion')
parser.add_argument('--sing_speech_ratio', type=float, default=0.15, help='singing+speech train dataset portion')
parser.add_argument('--same_song_ratio', type=float, default=0.2, help='same-song train dataset portion')
parser.add_argument('--same_singer_ratio', type=float, default=0.2, help='same-singer train dataset portion')
parser.add_argument('--same_speaker_ratio', type=float, default=0.15, help='same-speaker train dataset portion')
parser.add_argument('--reduced_training_data_ratio', type=float, default=1.0, help='since SFSRNet takes a long time to train, reduce the train data size. Different from --part_of_data')
parser.add_argument('--unison_prob', type=float, default=0.3, help='unison augmentation probability. If 0., no augmentation')
parser.add_argument('--pitch_formant_augment_prob', type=float, default=0.4, help='pitch shift + formant augmentation. If 0., no augmentation')
parser.add_argument('--train_root', nargs='+', default=['/path/to/data/24k/CSD', '/path/to/data/24k/NUS', '/path/to/data/24k/TONAS', '/path/to/data/24k/VocalSet', '/path/to/data/24k/jsut-song_ver1', '/path/to/data/24k/jvs_music_ver1', '/path/to/data/24k/kiritan_revised', '/path/to/data/24k/vocadito', '/path/to/data/24k/musdb_a_train', '/path/to/data/24k/OpenSinger', '/path/to/data/24k/medleyDB_v1_in_musdb', '/path/to/data/24k/k_multisinger', '/path/to/data/24k/k_multitimbre'], help='root path list of dataset')
parser.add_argument('--speech_train_root', nargs='+', default=['/path/to/data/24k/LibriSpeech_train-clean-360', '/path/to/data/24k/LibriSpeech_train-clean-100'], help='root path list of dataset')
parser.add_argument('--same_song_dict_path', nargs='+', action='append', default=[['/path/to/data/24k/k_multisinger', './svs/preprocess/make_same_song_dict/same_song_k_multisinger.json', 'k_multisinger']], help='For making the dataloader that outputs source1 and source2 from SAME song. list of [[data_root,data_dict_path, data_name], ...]')
parser.add_argument('--same_singer_dict_path', nargs='+', action='append', default=[['/path/to/data/24k/OpenSinger', './svs/preprocess/make_same_singer_dict/same_singer_OpenSinger.json', 'OpenSinger'], ['/path/to/data/24k/k_multisinger', './svs/preprocess/make_same_singer_dict/same_singer_k_multisinger.json', 'k_multisinger'], ['/path/to/data/24k/CSD', './svs/preprocess/make_same_singer_dict/same_singer_CSD.json', 'CSD'], ['/path/to/data/24k/jsut-song_ver1', './svs/preprocess/make_same_singer_dict/same_singer_jsut-song_ver1.json', 'jsut-song_ver1'], ['/path/to/data/24k/jvs_music_ver1', './svs/preprocess/make_same_singer_dict/same_singer_jvs_music_ver1.json', 'jvs_music_ver1'], ['/path/to/data/24k/k_multitimbre', './svs/preprocess/make_same_singer_dict/same_singer_k_multitimbre.json', 'k_multitimbre'], ['/path/to/data/24k/kiritan_revised', './svs/preprocess/make_same_singer_dict/same_singer_kiritan.json', 'kiritan'], ['/path/to/data/24k/musdb_a_train', './svs/preprocess/make_same_singer_dict/same_singer_musdb_a_train.json', 'musdb_a_train'], ['/path/to/data/24k/NUS', './svs/preprocess/make_same_singer_dict/same_singer_NUS.json', 'NUS'], ['/path/to/data/24k/VocalSet', './svs/preprocess/make_same_singer_dict/same_singer_VocalSet.json', 'VocalSet']], help='For making the dataloader that outputs source1 and source2 from SAME singer. list of [[data_root,data_dict_path, data_name], ...]')
parser.add_argument('--same_speaker_dict_path', nargs='+', action='append', default=[['/path/to/data/24k/LibriSpeech_train-clean-100', './svs/preprocess/make_same_speaker_dict/same_singer_LibriSpeech_train-clean-100.json', 'LibriSpeech_train-clean-100'], ['/path/to/data/24k/LibriSpeech_train-clean-360', './svs/preprocess/make_same_speaker_dict/same_singer_LibriSpeech_train-clean-360.json', 'LibriSpeech_train-clean-360']], help='For making the dataloader that outputs source1 and source2 from SAME speaker. list of [[data_root,data_dict_path, data_name], ...]')
parser.add_argument('--valid_root', nargs='+', action='append', default=[['/path/to/data/24k/musdb_a_test', '/path/to/data/24k/musdb_a_test', './svs/preprocess/make_validation_dict/for_2_srcs/valid_regions_dict_singing_singing.json', 'sing_sing_diff'], ['/path/to/data/24k/musdb_a_test', '/path/to/data/24k/musdb_a_test', './svs/preprocess/make_validation_dict/for_2_srcs/valid_regions_dict_singing_unison.json', 'sing_sing_unison'], ['/path/to/data/24k/musdb_a_test', '/path/to/data/24k/musdb_a_test', './svs/preprocess/make_validation_dict/for_2_srcs/valid_regions_dict_singing_singing_same_singer.json', 'sing_sing_same_singer'], ['/path/to/data/24k/LibriSpeech_dev-clean', '/path/to/data/24k/LibriSpeech_dev-clean', './svs/preprocess/make_validation_dict/for_2_srcs/valid_regions_dict_speech_speech.json', 'speech_speech_diff'], ['/path/to/data/24k/LibriSpeech_dev-clean', '/path/to/data/24k/LibriSpeech_dev-clean', './svs/preprocess/make_validation_dict/for_2_srcs/valid_regions_dict_speech_unison.json', 'speech_speech_unison'], ['/path/to/data/24k/LibriSpeech_dev-clean', '/path/to/data/24k/LibriSpeech_dev-clean', './svs/preprocess/make_validation_dict/for_2_srcs/valid_regions_dict_speech_speech_same_speaker.json', 'speech_speech_same_speaker'], ['/path/to/data/24k/musdb_a_test', '/path/to/data/24k/LibriSpeech_dev-clean', './svs/preprocess/make_validation_dict/for_2_srcs/valid_regions_dict_singing_speech.json', 'singing_speech']], help='root path list of dataset. list of [source1 data_dir, source2 data_dir, data_region_info_dict]')
parser.add_argument('--valid_root_orpit', nargs='+', action='append', default=[['/path/to/data/24k/musdb_a_test', '/path/to/data/24k/musdb_a_test', './svs/preprocess/make_validation_dict/for_n_srcs/valid_regions_dict_singing_singing_n_srcs.json', 'sing_sing_diff'], ['/path/to/data/24k/musdb_a_test', '/path/to/data/24k/musdb_a_test', './svs/preprocess/make_validation_dict/for_n_srcs/valid_regions_dict_singing_unison_n_srcs.json', 'sing_sing_unison'], ['/path/to/data/24k/musdb_a_test', '/path/to/data/24k/musdb_a_test', './svs/preprocess/make_validation_dict/for_n_srcs/valid_regions_dict_singing_singing_same_singer_n_srcs.json', 'sing_sing_same_singer'], ['/path/to/data/24k/LibriSpeech_dev-clean', '/path/to/data/24k/LibriSpeech_dev-clean', './svs/preprocess/make_validation_dict/for_n_srcs/valid_regions_dict_speech_speech_n_srcs.json', 'speech_speech_diff'], ['/path/to/data/24k/LibriSpeech_dev-clean', '/path/to/data/24k/LibriSpeech_dev-clean', './svs/preprocess/make_validation_dict/for_n_srcs/valid_regions_dict_speech_unison_n_srcs.json', 'speech_speech_unison'], ['/path/to/data/24k/LibriSpeech_dev-clean', '/path/to/data/24k/LibriSpeech_dev-clean', './svs/preprocess/make_validation_dict/for_n_srcs/valid_regions_dict_speech_speech_same_speaker_n_srcs.json', 'speech_speech_same_speaker'], ['/path/to/data/24k/musdb_a_test', '/path/to/data/24k/LibriSpeech_dev-clean', './svs/preprocess/make_validation_dict/for_n_srcs/valid_regions_dict_singing_speech_n_srcs.json', 'singing_speech']], help='root path list of dataset. list of [source1 data_dir, source2 data_dir, data_region_info_dict]')
parser.add_argument('--song_length_dict_path', type=str, default='./svs/preprocess/song_length_dict_24k.json', help='path of json file that contains the lengths of data')
parser.add_argument('--min_n_src', type=int, default=2, help='minimum number of sources in mixture during OR-PIT training')
parser.add_argument('--max_n_src', type=int, default=4, help='maximum number of sources in mixture during OR-PIT training')
parser.add_argument('--valid_regions_dict_path', type=str, default='./svs/preprocess/valid_regions_dict_singing_singing.json', help='path of json file that contains the valid regions of data')
parser.add_argument('--output_directory', type=str, default='/path/to/results/singing_sep')
parser.add_argument('--exp_name', type=str)
parser.add_argument('--part_of_data', type=float, default=None, help='to check the effect of data amount')
parser.add_argument('--ema', type=str2bool, default=True, help='use model ema?')
parser.add_argument('--optimizer', type=str, default='adam', help='which optimizer do you want to use?')
parser.add_argument('--resume', type=str, help='path to checkpoint folder')
parser.add_argument('--continual_train', type=str2bool, default=False, help='continue training from the pre-trained checkpoints')
parser.add_argument('--load_ema_online_model', type=str2bool, default=False, help='load the online model from an EMA pre-trained checkpoint and continue training. Requires --ema=False')
parser.add_argument('--start_from_best', type=str2bool, default=False, help='with --continual_train, start from the previously best-performing weights')
parser.add_argument('--epochs', type=int, default=200)
parser.add_argument('--batch_size', type=int, default=64)
parser.add_argument('--lr', type=float, default=0.0002, help='learning rate, defaults to 2e-4')
parser.add_argument('--beta1', type=float, default=0.5, help='adam optimizer beta1')
parser.add_argument('--beta2', type=float, default=0.9, help='adam optimizer beta2')
parser.add_argument('--eps', type=float, default=1e-08, help='optimizer eps parameter')
parser.add_argument('--patience', type=int, default=50, help='early-stopping patience in epochs')
parser.add_argument('--lr_decay_patience', type=int, default=20, help='lr decay patience for the plateau scheduler')
parser.add_argument('--lr_decay_gamma', type=float, default=0.5, help='gamma of learning rate scheduler decay')
parser.add_argument('--lr_scheduler', type=str, default='step_lr', help='step_lr, cos_warmup')
parser.add_argument('--train_loss_func', nargs='+', default=['pit_snr', 'multi_spectral_l1'], help='mse, L1, pit_si_sdr, pit_sd_sdr, pit_sdr, multi_spectral, multi_spectral_l1')
parser.add_argument('--valid_loss_func', nargs='+', default=['pit_si_sdr'], help='keep this unchanged for validation loss check.')
parser.add_argument('--above_freq', type=float, default=300.0, help='compute the spectral loss only above this frequency (Hz). Applies when training with multi_spectral_l1_above_freq, and also when using the iSRNet heuristic calculation.')
parser.add_argument('--multi_spec_loss_log_scale', type=str2bool, default=False, help='use log to increase multispectral loss scale')
parser.add_argument('--weight_decay', type=float, default=1e-06, help='weight decay')
parser.add_argument('--seed', type=int, default=777, metavar='S', help='random seed (default: 777)')
parser.add_argument('--sample_rate', type=int, default=24000, help='sampling rate of the audio data in Hz')
parser.add_argument('--seq_dur', type=float, default=3.0, help='sequence duration in seconds; a value <= 0.0 uses full/variable length')
parser.add_argument('--n_src', type=int, default=2, help='number of estimating sources')
parser.add_argument('--nfft', type=int, default=2048, help='STFT fft size and window size')
parser.add_argument('--nhop', type=int, default=512, help='STFT hop size')
parser.add_argument('--n_filter', type=int, default=512, help='learnable basis filter size')
parser.add_argument('--n_kernel', type=int, default=512, help='learnable basis kernel size')
parser.add_argument('--nb_workers', type=int, default=4, help='Number of workers for dataloader.')
parser.add_argument('--quiet', action='store_true', default=False, help='less verbose during training')
parser.add_argument('--no-cuda', action='store_true', default=False, help='disables CUDA training')
parser.add_argument('--port', default=None, type=str, help='port')
parser.add_argument('--rank', default=0, type=int)
parser.add_argument('--world_size', default=4, type=int)
parser.add_argument('--n_nodes', default=1, type=int)
(args, _) = parser.parse_known_args()
args.output = f'{args.output_directory}/checkpoint/{args.exp_name}'
os.environ['MASTER_ADDR'] = '127.0.0.1'
# Use the user-supplied --port when given; otherwise pick a random unprivileged port
# (ports below 1024 would require root and could collide with system services).
os.environ['MASTER_PORT'] = args.port if args.port else str(random.randint(20000, 65535))
os.makedirs(args.output, exist_ok=True)
torch.manual_seed(args.seed)
random.seed(args.seed)
print(args)
train(args) |
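A hedged usage sketch for the parser above: assuming the script is saved as train.py (a placeholder name, not from the source), a launch might look like the comment below; every flag shown is defined by the parser.

# Hypothetical launch command (script name and paths are placeholders):
#   python train.py --exp_name baseline_2src \
#       --output_directory /path/to/results/singing_sep \
#       --batch_size 32 --lr 2e-4 --epochs 200 --n_src 2
# Note that parse_known_args() returns unrecognized flags instead of erroring,
# so extra launcher-injected arguments do not crash the script.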
class DistilBertTokenizationTest(BertTokenizationTest):
tokenizer_class = DistilBertTokenizer
def get_tokenizer(self, **kwargs):
return DistilBertTokenizer.from_pretrained(self.tmpdirname, **kwargs)
def test_sequence_builders(self):
tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')
text = tokenizer.encode('sequence builders', add_special_tokens=False)
text_2 = tokenizer.encode('multi-sequence build', add_special_tokens=False)
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
assert (encoded_sentence == (([tokenizer.cls_token_id] + text) + [tokenizer.sep_token_id]))
assert (encoded_pair == (((([tokenizer.cls_token_id] + text) + [tokenizer.sep_token_id]) + text_2) + [tokenizer.sep_token_id])) |
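A minimal sketch of the contract the test above asserts, assuming the transformers package is installed; it checks only the special-token positions rather than exact ids.

from transformers import DistilBertTokenizer

tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')
ids = tokenizer.encode('sequence builders', add_special_tokens=False)
# A single sequence is wrapped as [CLS] tokens [SEP].
single = tokenizer.build_inputs_with_special_tokens(ids)
assert single[0] == tokenizer.cls_token_id
assert single[-1] == tokenizer.sep_token_id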
def BatchNormClassifier(inputs, labels, scope=None, reuse=None):
with tf.variable_scope(scope, 'BatchNormClassifier', [inputs, labels], reuse=reuse):
inputs = slim.batch_norm(inputs, decay=0.1)
predictions = slim.fully_connected(inputs, 1, activation_fn=tf.sigmoid, scope='fully_connected')
slim.losses.log_loss(predictions, labels)
return predictions |
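A minimal TF1-style training sketch around BatchNormClassifier, assuming TensorFlow 1.x with tf.contrib.slim available; the feature width and optimizer are illustrative, not from the source.

import tensorflow as tf
slim = tf.contrib.slim

inputs = tf.placeholder(tf.float32, [None, 4])   # illustrative feature width
labels = tf.placeholder(tf.float32, [None, 1])
predictions = BatchNormClassifier(inputs, labels)
# log_loss inside the function registered itself in the slim losses collection.
total_loss = slim.losses.get_total_loss()
# create_train_op also wires in the batch-norm moving-average update ops.
train_op = slim.learning.create_train_op(total_loss, tf.train.GradientDescentOptimizer(0.1))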
@register_model
def resnetblur50(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
default_cfg = default_cfgs['resnetblur50']
model = ResNet(Bottleneck, [3, 4, 6, 3], num_classes=num_classes, in_chans=in_chans, aa_layer=BlurPool2d, **kwargs)
model.default_cfg = default_cfg
if pretrained:
load_pretrained(model, default_cfg, num_classes, in_chans)
return model |
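A quick smoke test for the constructor above, assuming the surrounding ResNet, Bottleneck, and BlurPool2d definitions (timm-style) are importable; the input resolution is illustrative.

import torch

model = resnetblur50(pretrained=False, num_classes=10)
model.eval()
with torch.no_grad():
    logits = model(torch.randn(1, 3, 224, 224))
assert logits.shape == (1, 10)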
def sortTable(gtruth, pred, scrres, truthlist):
    # Number of rows; all four input lists are assumed to be the same length.
    number = len(gtruth)
    total = list(zip(gtruth, pred, scrres, truthlist))
    # Stable double sort: order by truth label first, then by score, so rows
    # with equal scores keep a truth-descending order.
    total = sorted(total, key=(lambda x: x[3]), reverse=True)
    srt = sorted(total, key=(lambda x: x[2]), reverse=True)
    # Prepend a rank index to each sorted row.
    srt = [(i, srt[i][0], srt[i][1], srt[i][2], srt[i][3]) for i in range(number)]
    return srt |
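A tiny worked example of the double sort above; the rows are made up. The first (stable) sort by the truth column makes it the tie-breaker for the final sort by score.

gtruth = ['a', 'b', 'c']
pred = ['a', 'c', 'c']
scores = [0.9, 0.2, 0.9]
truth = [1, 0, 1]
print(sortTable(gtruth, pred, scores, truth))
# [(0, 'a', 'a', 0.9, 1), (1, 'c', 'c', 0.9, 1), (2, 'b', 'c', 0.2, 0)]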
class Fourrooms():
def __init__(self):
layout = 'wwwwwwwwwwwww\nw w w\nw w w\nw w\nw w w\nw w w\nww wwww w\nw www www\nw w w\nw w w\nw w\nw w w\nwwwwwwwwwwwww\n'
self.occupancy = np.array([list(map((lambda c: (1 if (c == 'w') else 0)), line)) for line in layout.splitlines()])
self.action_space = spaces.Discrete(4)
self.observation_space = spaces.Discrete(np.sum((self.occupancy == 0)))
self.directions = [np.array(((- 1), 0)), np.array((1, 0)), np.array((0, (- 1))), np.array((0, 1))]
self.rng = np.random.RandomState(1234)
self.tostate = {}
statenum = 0
for i in range(13):
for j in range(13):
if (self.occupancy[(i, j)] == 0):
self.tostate[(i, j)] = statenum
statenum += 1
self.tocell = {v: k for (k, v) in self.tostate.items()}
self.goal = 62
self.init_states = list(range(self.observation_space.n))
self.init_states.remove(self.goal)
def empty_around(self, cell):
avail = []
for action in range(self.action_space.n):
nextcell = tuple((cell + self.directions[action]))
if (not self.occupancy[nextcell]):
avail.append(nextcell)
return avail
def reset(self):
state = self.rng.choice(self.init_states)
self.currentcell = self.tocell[state]
return state
    def step(self, action):
        # Try to move one cell in the chosen direction; walls leave the agent in place.
        nextcell = tuple((self.currentcell + self.directions[action]))
        if (not self.occupancy[nextcell]):
            self.currentcell = nextcell
            # With probability 1/3 the move "slips" to a random empty neighboring cell.
            if (self.rng.uniform() < (1 / 3.0)):
                empty_cells = self.empty_around(self.currentcell)
                self.currentcell = empty_cells[self.rng.randint(len(empty_cells))]
        state = self.tostate[self.currentcell]
        done = (state == self.goal)
        # Reward is 1.0 on reaching the goal cell, 0.0 otherwise.
        return (state, float(done), done, None) |
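A short random-walk sketch in the environment above; it assumes numpy and gym.spaces are imported, as the class itself already requires.

env = Fourrooms()
state = env.reset()
for t in range(500):
    action = env.action_space.sample()
    state, reward, done, _ = env.step(action)
    if done:
        print(f'reached goal state {state} after {t + 1} steps')
        break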