code stringlengths 101 5.91M |
|---|
def train_one_epoch(epoch, model, loader, optimizer, loss_fn, args, lr_scheduler=None, saver=None, output_dir='', amp_autocast=suppress, loss_scaler=None, model_ema=None, mixup_fn=None, optimizers=None):
    """Run one epoch of adversarial training.

    A PGD-style inner loop perturbs the input with ``delta`` while adding a
    symmetric-KL consistency term between the clean (detached) and adversarial
    predictions. Gradients accumulate across the inner iterations and a single
    optimizer step is taken per batch.

    Returns an OrderedDict with the epoch-average training loss.
    NOTE(review): this path requires NVIDIA Apex AMP (``amp.scale_loss``);
    the assert below rejects any other loss scaler.
    """
    assert isinstance(loss_scaler, ApexScaler)
    # Optionally turn mixup off once the configured epoch is reached.
    if (args.mixup_off_epoch and (epoch >= args.mixup_off_epoch)):
        if (args.prefetcher and loader.mixup_enabled):
            loader.mixup_enabled = False
        elif (mixup_fn is not None):
            mixup_fn.mixup_enabled = False
    batch_time_m = AverageMeter()
    data_time_m = AverageMeter()
    losses_m = AverageMeter()
    model.train()
    end = time.time()
    last_idx = (len(loader) - 1)
    num_updates = (epoch * len(loader))
    for (batch_idx, (input, target)) in enumerate(loader):
        last_batch = (batch_idx == last_idx)
        data_time_m.update((time.time() - end))
        if (not args.prefetcher):
            # Without the prefetcher, host->device transfer and mixup happen here.
            (input, target) = (input.cuda(), target.cuda())
            if (mixup_fn is not None):
                (input, target) = mixup_fn(input, target)
        if args.channels_last:
            input = input.contiguous(memory_format=torch.channels_last)
        # Clean forward pass, detached: serves as the fixed KL anchor below.
        delta = torch.zeros_like(input)
        (output_cle, output_cle_kd) = model(input)
        output_cle = output_cle.detach()
        output_cle_kd = output_cle_kd.detach()
        output_cle_prob = F.softmax(output_cle, dim=1)
        output_cle_logprob = F.log_softmax(output_cle, dim=1)
        # Inner adversarial loop: each iteration backprops (accumulating grads
        # into the model) and then takes a signed-gradient step on delta.
        for a_iter in range(args.adv_iters):
            delta.requires_grad_()
            with amp_autocast():
                (output_adv, output_adv_kd) = model((input + delta))
                loss_ce = loss_fn((input + delta), [output_adv, output_adv_kd], target)
                output_adv_prob = F.softmax(output_adv, dim=1)
                output_adv_logprob = F.log_softmax(output_adv, dim=1)
                # Symmetric KL between clean and adversarial predictions.
                # NOTE(review): `reduce=False` is deprecated in recent torch;
                # `reduction='none'` is the modern spelling.
                loss_kl = ((F.kl_div(output_adv_logprob, output_cle_prob, reduce=False).sum(dim=1) + F.kl_div(output_cle_logprob, output_adv_prob, reduce=False).sum(dim=1)).mean() / 2)
                if (a_iter == 0):
                    loss = (loss_ce + (args.adv_kl_weight * loss_kl))
                else:
                    # Later iterations share an averaged, re-weighted loss.
                    loss = (((args.adv_ce_weight * loss_ce) + (args.adv_kl_weight * loss_kl)) / (args.adv_iters - 1))
            with amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward(retain_graph=False)
            # FGSM-style step on delta, then projection onto the eps-ball.
            delta_grad = delta.grad.clone().detach().float()
            delta = (delta + (args.adv_lr * torch.sign(delta_grad))).detach()
            delta = torch.clamp(delta, (- args.adv_eps), args.adv_eps).detach()
        if (not args.distributed):
            losses_m.update(loss.item(), input.size(0))
        # One optimizer step per batch, over gradients accumulated above.
        optimizer.step()
        optimizer.zero_grad()
        if (model_ema is not None):
            model_ema.update(model)
        torch.cuda.synchronize()
        num_updates += 1
        batch_time_m.update((time.time() - end))
        if (last_batch or ((batch_idx % args.log_interval) == 0)):
            lrl = [param_group['lr'] for param_group in optimizer.param_groups]
            lr = (sum(lrl) / len(lrl))
            if args.distributed:
                # All-reduce the last loss so every rank logs the same value.
                reduced_loss = reduce_tensor(loss.data, args.world_size)
                losses_m.update(reduced_loss.item(), input.size(0))
            if (args.rank == 0):
                _logger.info('Train: {} [{:>4d}/{} ({:>3.0f}%)] Loss: {loss.val:>9.6f} ({loss.avg:>6.4f}) Time: {batch_time.val:.3f}s, {rate:>7.2f}/s ({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) LR: {lr:.3e} Data: {data_time.val:.3f} ({data_time.avg:.3f})'.format(epoch, batch_idx, len(loader), ((100.0 * batch_idx) / last_idx), loss=losses_m, batch_time=batch_time_m, rate=((input.size(0) * args.world_size) / batch_time_m.val), rate_avg=((input.size(0) * args.world_size) / batch_time_m.avg), lr=lr, data_time=data_time_m))
                if (args.save_images and output_dir):
                    torchvision.utils.save_image(input, os.path.join(output_dir, ('train-batch-%d.jpg' % batch_idx)), padding=0, normalize=True)
        if ((saver is not None) and args.recovery_interval and (last_batch or (((batch_idx + 1) % args.recovery_interval) == 0))):
            saver.save_recovery(epoch, batch_idx=batch_idx)
        if (lr_scheduler is not None):
            lr_scheduler.step_update(num_updates=num_updates, metric=losses_m.avg)
        end = time.time()
    if hasattr(optimizer, 'sync_lookahead'):
        optimizer.sync_lookahead()
    return OrderedDict([('loss', losses_m.avg)])
def traveltime(origin_id, destination_id, meters_per_minute, locations):
    """Travel time in whole minutes between two locations.

    Computes the Euclidean distance from the 'x'/'y' columns of ``locations``
    (indexed by location id) and rounds up to the next full minute.
    """
    dx = locations.at[(destination_id, 'x')] - locations.at[(origin_id, 'x')]
    dy = locations.at[(destination_id, 'y')] - locations.at[(origin_id, 'y')]
    distance = np.sqrt(dx ** 2 + dy ** 2)
    return np.ceil(distance / meters_per_minute)
def learn(buffer, agent, actor_optimizer, critic_optimizer, target_entropy, critic_target_improvement, max_critic_updates_per_step, batch_size, gamma, critic_clip, actor_clip):
    """One GRAC-style learning step: CEM-guided critic regression plus a
    CEM-advantage-weighted actor update.

    Supports prioritized replay (importance weights + priority refresh).
    FIX: the PER branch previously multiplied an undefined name
    ``elementwise_loss``; it now correctly scales ``elementwise_critic_loss``.
    """
    per = isinstance(buffer, replay.PrioritizedReplayBuffer)
    if per:
        (batch, imp_weights, priority_idxs) = buffer.sample(batch_size)
        imp_weights = imp_weights.to(device)
    else:
        batch = buffer.sample(batch_size)
    (state_batch, action_batch, reward_batch, next_state_batch, done_batch) = batch
    state_batch = state_batch.to(device)
    next_state_batch = next_state_batch.to(device)
    action_batch = action_batch.to(device)
    reward_batch = reward_batch.to(device)
    done_batch = done_batch.to(device)
    agent.train()

    def min_and_argmin(x, y, x_args, y_args):
        # Elementwise min of (x, y) plus the action that achieved it.
        min_ = torch.min(x, y)
        use_x_mask = (x <= y).squeeze(1)
        argmin = y_args.clone()
        argmin[use_x_mask] = x_args[use_x_mask]
        return (min_, argmin)

    def max_and_argmax(x, y, x_args, y_args):
        # Elementwise max of (x, y) plus the action that achieved it.
        max_ = torch.max(x, y)
        use_x_mask = (x >= y).squeeze(1)
        argmax = y_args.clone()
        argmax[use_x_mask] = x_args[use_x_mask]
        return (max_, argmax)

    with torch.no_grad():
        # Next-state action: best of the sampled actor action and the
        # CEM-refined action, per critic, then clipped double-Q across critics.
        action_dist_s1 = agent.actor(next_state_batch)
        action_s1 = action_dist_s1.sample()
        action_value_s1_q1 = agent.critic1(next_state_batch, action_s1)
        action_value_s1_q2 = agent.critic2(next_state_batch, action_s1)
        cem_actions_s1_q1 = agent.cem.search(next_state_batch, action_s1, agent.critic1)
        cem_action_value_s1_q1 = agent.critic1(next_state_batch, cem_actions_s1_q1)
        cem_actions_s1_q2 = agent.cem.search(next_state_batch, action_s1, agent.critic2)
        cem_action_value_s1_q2 = agent.critic2(next_state_batch, cem_actions_s1_q2)
        (best_q1, best_actions_q1) = max_and_argmax(action_value_s1_q1, cem_action_value_s1_q1, action_s1, cem_actions_s1_q1)
        (best_q2, best_actions_q2) = max_and_argmax(action_value_s1_q2, cem_action_value_s1_q2, action_s1, cem_actions_s1_q2)
        (clipped_double_q_s1, final_actions_s1) = min_and_argmin(best_q1, best_q2, best_actions_q1, best_actions_q2)
        td_target = (reward_batch + ((gamma * (1.0 - done_batch)) * clipped_double_q_s1))
        # Frozen targets for the self-regularization constraint terms below.
        y1 = agent.critic1(next_state_batch, final_actions_s1)
        y2 = agent.critic2(next_state_batch, final_actions_s1)
    learning_info = {'td_target': td_target.mean(), 'clip_double_q_s1_mean': clipped_double_q_s1.mean()}

    # Repeat critic regression until the loss improves past the target ratio
    # (GRAC's replacement for target networks) or the update budget runs out.
    critic_loss_initial = None
    for critic_update in range(max_critic_updates_per_step):
        a_critic1_pred = agent.critic1(state_batch, action_batch)
        a_critic2_pred = agent.critic2(state_batch, action_batch)
        td_error1 = (td_target - a_critic1_pred)
        td_error2 = (td_target - a_critic2_pred)
        a1_critic1_pred = agent.critic1(next_state_batch, final_actions_s1)
        a1_critic2_pred = agent.critic2(next_state_batch, final_actions_s1)
        a1_constraint1 = (y1 - a1_critic1_pred)
        a1_constraint2 = (y2 - a1_critic2_pred)
        elementwise_critic_loss = ((((td_error1 ** 2) + (td_error2 ** 2)) + (a1_constraint1 ** 2)) + (a1_constraint2 ** 2))
        if per:
            # FIX: was `elementwise_loss *= imp_weights` (NameError).
            elementwise_critic_loss = elementwise_critic_loss * imp_weights
        critic_loss = (0.5 * elementwise_critic_loss.mean())
        critic_optimizer.zero_grad()
        critic_loss.backward()
        if critic_clip:
            torch.nn.utils.clip_grad_norm_(chain(agent.critic1.parameters(), agent.critic2.parameters()), critic_clip)
        critic_optimizer.step()
        if (critic_update == 0):
            critic_loss_initial = critic_loss
        elif (critic_loss <= (critic_target_improvement * critic_loss_initial)):
            break

    # Actor update: reinforce the actor toward CEM actions in proportion to
    # the (non-negative) advantage the CEM action has over the actor's own.
    dist = agent.actor(state_batch)
    agent_actions = dist.rsample()
    logp_a = dist.log_prob(agent_actions).sum((- 1), keepdim=True)
    with torch.no_grad():
        agent_action_value = torch.min(agent.critic1(state_batch, agent_actions), agent.critic2(state_batch, agent_actions))
        cem_actions_q1 = agent.cem.search(state_batch, agent_actions, agent.critic1)
        cem_action_value_q1 = agent.critic1(state_batch, cem_actions_q1)
        cem_actions_q2 = agent.cem.search(state_batch, agent_actions, agent.critic2)
        cem_action_value_q2 = agent.critic2(state_batch, cem_actions_q2)
        (cem_action_value, cem_actions) = min_and_argmin(cem_action_value_q1, cem_action_value_q2, cem_actions_q1, cem_actions_q2)
    logp_cema = dist.log_prob(cem_actions).sum((- 1), keepdim=True)
    cem_advantage = F.relu((cem_action_value - agent_action_value)).detach()
    actor_loss = (- (cem_advantage * logp_cema).mean())
    actor_optimizer.zero_grad()
    actor_loss.backward()
    if actor_clip:
        torch.nn.utils.clip_grad_norm_(agent.actor.parameters(), actor_clip)
    actor_optimizer.step()
    learning_info.update({'cem_adv': cem_advantage.mean(), 'actor_loss': actor_loss, 'logp_a': logp_a.mean(), 'logp_cema': logp_cema.mean(), 'agent_action_value': agent_action_value.mean(), 'cem_action_value': cem_action_value.mean()})
    if per:
        # Refresh priorities from the final TD error of critic 1.
        new_priorities = (abs(td_error1) + 1e-05).cpu().detach().squeeze(1).numpy()
        buffer.update_priorities(priority_idxs, new_priorities)
    return learning_info
class PersistentValue(Command):
    """Pulse command that holds a constant (persistent) complex output value."""

    def __init__(self, value):
        """Create a persistent-value command.

        Raises PulseError when |value| exceeds 1.
        """
        super().__init__(duration=0)
        if (abs(value) > 1):
            raise PulseError('Absolute value of PV amplitude exceeds 1.')
        self._value = complex(value)

    @property
    def value(self):
        """Complex amplitude held by this command.

        FIX: was a plain method, but __eq__ and __repr__ access ``self.value``
        as an attribute — without @property they compared/printed bound methods.
        """
        return self._value

    def __eq__(self, other):
        if ((type(self) is type(other)) and (self.value == other.value)):
            return True
        return False

    def __repr__(self):
        return ('%s(%s, value=%s)' % (self.__class__.__name__, self.name, self.value))

    def to_instruction(self, channel: 'PulseChannel', name=None) -> 'PersistentValueInstruction':
        """Wrap this command into an instruction on the given channel."""
        return PersistentValueInstruction(self, channel, name=name)
class FixedLengthBatchSampler(Sampler):
    """Batch sampler that groups dataset items by sequence length.

    Each emitted batch contains only items of one length; batch size may vary
    per length via ``length_to_size``. Partial (surplus) batches are emitted
    only when ``include_partial`` is set.
    """

    def __init__(self, data_source, batch_size, include_partial=False, rng=None, maxlen=None, length_to_size=None):
        self.data_source = data_source
        self.active = False
        if (rng is None):
            rng = np.random.RandomState(seed=11)  # fixed seed for reproducibility
        self.rng = rng
        self.batch_size = batch_size
        self.maxlen = maxlen
        self.include_partial = include_partial
        self.length_to_size = length_to_size
        # Memoizes get_batch_size results; key 0 seeds the default size.
        self._batch_size_cache = {0: self.batch_size}
        self.logger = get_logger()

    def get_batch_size(self, length):
        """Batch size for a given sequence length (last length_to_size entry
        at or below ``length`` wins); results are cached incrementally."""
        if (self.length_to_size is None):
            return self.batch_size
        if (length in self._batch_size_cache):
            return self._batch_size_cache[length]
        # Extend the cache from the largest known length up to `length`.
        start = max(self._batch_size_cache.keys())
        batch_size = self._batch_size_cache[start]
        for n in range((start + 1), (length + 1)):
            if (n in self.length_to_size):
                batch_size = self.length_to_size[n]
            self._batch_size_cache[n] = batch_size
        return batch_size

    def reset(self):
        """Rebuild the length buckets and a shuffled schedule of batch lengths.

        Creates a map of length -> example ids, shuffles ids within each
        bucket, then builds `order`: one entry per batch, telling which length
        each successive batch is drawn from.
        """
        length_map = {}
        for i in range(len(self.data_source)):
            x = self.data_source.dataset[i]
            length = len(x)
            if ((self.maxlen is not None) and (self.maxlen > 0) and (length > self.maxlen)):
                continue  # drop over-long examples entirely
            length_map.setdefault(length, []).append(i)
        for length in length_map.keys():
            self.rng.shuffle(length_map[length])
        # Per-length bookkeeping: how many full batches, whether a partial
        # batch remains, and the cursor position.
        state = {}
        for (length, arr) in length_map.items():
            batch_size = self.get_batch_size(length)
            nbatches = (len(arr) // batch_size)
            surplus = ((nbatches * batch_size) < len(arr))
            state[length] = dict(nbatches=nbatches, surplus=surplus, position=(- 1))
        order = []
        for (length, v) in state.items():
            order += ([length] * v['nbatches'])
        if self.include_partial:
            for (length, v) in state.items():
                if v['surplus']:
                    order += [length]
        self.rng.shuffle(order)
        self.length_map = length_map
        self.state = state
        self.order = order
        self.index = (- 1)

    def get_next_batch(self):
        """Return the next batch of example indices, advancing the cursors."""
        index = (self.index + 1)
        length = self.order[index]
        batch_size = self.get_batch_size(length)
        position = (self.state[length]['position'] + 1)
        start = (position * batch_size)
        batch_index = self.length_map[length][start:(start + batch_size)]
        self.state[length]['position'] = position
        self.index = index
        return batch_index

    def __iter__(self):
        # A fresh shuffle every epoch.
        self.reset()
        for _ in range(len(self)):
            (yield self.get_next_batch())

    def __len__(self):
        return len(self.order)
def add_our_config(cfg):
    """Register this project's extra config keys on a detectron2-style CfgNode.

    Mutates ``cfg`` in place: pseudo-labeling flags, sliding-window test
    options, CLIP-adapter settings, and segmentation-head hyperparameters.
    """
    cfg.ORACLE = False
    cfg.PSEUDO = False
    cfg.PSEUDO_WITH_PRIOR = True
    cfg.PSEUDO_REJECT_THRESHOLD = 0.0
    cfg.TEST.SLIDING_WINDOW = False
    cfg.TEST.SLIDING_TILE_SIZE = 224
    cfg.TEST.SLIDING_OVERLAP = (2 / 3.0)
    cfg.PSEUDO_FLAG_NAME = 'trainable_flag'
    cfg.SOLVER.TEST_IMS_PER_BATCH = 1
    cfg.DATASETS.SAMPLE_PER_CLASS = (- 1)  # -1 = use all samples
    cfg.DATASETS.SAMPLE_SEED = 0
    cfg.TEST.OPTIM = CN()
    cfg.TEST.OPTIM.LR = 0.001
    cfg.TEST.DENSE_CRF = False
    cfg.MODEL.SEM_SEG_HEAD.EMBEDDING_DIM = 512
    cfg.MODEL.SEM_SEG_HEAD.EMBED_HIDDEN_DIM = 1024
    cfg.MODEL.SEM_SEG_HEAD.EMBED_LAYERS = 2
    cfg.MODEL.CLIP_ADAPTER = CN()
    cfg.MODEL.CLIP_ADAPTER.PROMPT_LEARNER = 'learnable'
    cfg.MODEL.CLIP_ADAPTER.PREDEFINED_PROMPT_TEMPLATES = ['a sculpture of a {}.']
    cfg.MODEL.CLIP_ADAPTER.PROMPT_DIM = 512
    cfg.MODEL.CLIP_ADAPTER.PROMPT_SHAPE = (16, 0)
    cfg.MODEL.CLIP_ADAPTER.TASK_PROMPT_SHAPE = 8
    cfg.MODEL.CLIP_ADAPTER.PROMPT_CHECKPOINT = ''
    cfg.MODEL.CLIP_ADAPTER.CLIP_MODEL_NAME = 'ViT-B/16'
    cfg.MODEL.CLIP_ADAPTER.MASK_FILL = 'mean'
    cfg.MODEL.CLIP_ADAPTER.MASK_EXPAND_RATIO = 1.0
    cfg.MODEL.CLIP_ADAPTER.MASK_THR = 0.5
    cfg.MODEL.CLIP_ADAPTER.MASK_MATTING = False
    cfg.MODEL.CLIP_ADAPTER.REGION_RESIZED = True
    cfg.MODEL.CLIP_ADAPTER.CLIP_ENSEMBLE = True
    cfg.MODEL.CLIP_ADAPTER.CLIP_ENSEMBLE_WEIGHT = 0.8
    cfg.MODEL.CLIP_ADAPTER.SEPERATE_ADAPTER = False
    cfg.MODEL.CLIP_ADAPTER.REGION_CLIP_ADAPTER = CN()
    cfg.MODEL.CLIP_ADAPTER.REGION_CLIP_ADAPTER.CLIP_MODEL_NAME = 'ViT-B/16'
    cfg.MODEL.CLIP_ADAPTER.REGION_CLIP_ADAPTER.PROMPT_LEARNER = 'predefined'
    cfg.MODEL.CLIP_ADAPTER.REGION_CLIP_ADAPTER.PREDEFINED_PROMPT_TEMPLATES = ['a photo of a {}.']
    cfg.MODEL.CLIP_ADAPTER.REGION_CLIP_ADAPTER.PROMPT_DIM = 512
    cfg.MODEL.CLIP_ADAPTER.REGION_CLIP_ADAPTER.PROMPT_SHAPE = (16, 0)
    cfg.MODEL.CLIP_ADAPTER.REGION_CLIP_ADAPTER.PROMPT_CHECKPOINT = ''
    cfg.MODEL.SEM_SEG_HEAD.EMB_SIZE = 256
    cfg.MODEL.SEM_SEG_HEAD.EMBED_DIM = 2048
    cfg.MODEL.SEM_SEG_HEAD.NUM_HEADS = 8
    cfg.MODEL.SEM_SEG_HEAD.USE_LAYER_SCALE = True
    # FIX: the original literal was garbled ([, , 104.]). Values restored as
    # the standard CLIP normalization mean scaled to 0-255 (RGB), matching the
    # CLIP std on the next line (0.2686.../0.2613.../0.2757... * 255).
    cfg.MODEL.CLIP_PIXEL_MEAN = [122.7709383, 116.7460125, 104.09373615]
    cfg.MODEL.CLIP_PIXEL_STD = [68.5005327, 66.6321579, 70.323163]
    cfg.MODEL.START_LAYERS = 8
    cfg.MODEL.dis_weight = 0.1
class Timeslot():
    """A named (interval, channel) pair occupied on a pulse schedule."""

    def __init__(self, interval: 'Interval', channel: 'Channel'):
        self._interval = interval
        self._channel = channel

    @property
    def interval(self):
        """Interval of this timeslot.

        FIX: was a plain method; ``shift`` and ``__eq__`` access it as an
        attribute (``self.interval.shift(...)``), which previously operated on
        the bound method object instead of the interval.
        """
        return self._interval

    @property
    def channel(self):
        """Channel of this timeslot (see FIX note on ``interval``)."""
        return self._channel

    def shift(self, time: int) -> 'Timeslot':
        """Return a new Timeslot with the interval shifted by ``time``."""
        return Timeslot(self.interval.shift(time), self.channel)

    def __eq__(self, other) -> bool:
        if ((self.interval == other.interval) and (self.channel == other.channel)):
            return True
        return False
def show_parameters(vrblvl=0):
    """Invoke phc job 194 to display the current parameter settings.

    ``vrblvl`` > 0 enables diagnostic prints. Returns the phc return code.
    """
    if vrblvl > 0:
        print('in show_parameters ...')
    phc = get_phcfun()
    first = pointer(c_int32(0))
    second = pointer(c_int32(0))
    third = pointer(c_double(0.0))
    verbose = c_int32(vrblvl)
    if vrblvl > 0:
        print('-> show_parameters calls phc', end='')
    retval = phc(194, first, second, third, verbose)
    if vrblvl > 0:
        print(', return value :', retval)
    return retval
class BoxPredictor(object):
    """Abstract base for box predictors.

    Subclasses implement ``_predict``; ``predict`` wraps it in a TF variable
    scope so subclass code need not manage scoping itself.
    """

    def __init__(self, is_training, num_classes):
        self._is_training = is_training
        self._num_classes = num_classes

    def num_classes(self):
        """Number of (non-background) classes this predictor targets."""
        return self._num_classes

    def predict(self, image_features, num_predictions_per_location, scope, **params):
        """Run ``_predict`` inside a variable scope named ``scope``."""
        with tf.variable_scope(scope):
            prediction = self._predict(image_features, num_predictions_per_location, **params)
        return prediction

    def _predict(self, image_features, num_predictions_per_location, **params):
        # To be overridden by concrete predictors.
        pass
def calculate_bleu(tgt, logits, vocab):
    """BLEU-4 between target captions and greedy-decoded predictions.

    Special tokens (<start>/<end>/<pad>) are stripped from both sides.
    FIX: the hypothesis loop referenced an undefined name ``seq``
    (NameError); it now iterates the decoded prediction sequences.
    NOTE(review): references/hypotheses nesting assumes the shapes that
    nltk's sentence_bleu expects here — confirm against the caller.
    """
    word_map = vocab.word2idx
    # Greedy decode: argmax over the vocabulary dimension.
    pred = logits.max(2)[1]
    references = list()
    hypotheses = list()
    img_caps = tgt.tolist()
    special = {word_map['<start>'], word_map['<end>'], word_map['<pad>']}
    img_captions = list(map((lambda c: [w for w in c if (w not in special)]), img_caps))
    references.append(img_captions)
    for seq in pred.tolist():
        hypotheses.append([w for w in seq if (w not in special)])
    bleu4 = sentence_bleu(references, hypotheses)
    return bleu4
def _extract_images(filename, num_images):
    """Load an MNIST-style gzipped image file.

    Skips the 16-byte header, then returns a uint8 array of shape
    (num_images, _IMAGE_SIZE, _IMAGE_SIZE, _NUM_CHANNELS).
    """
    print('Extracting images from: ', filename)
    with gzip.open(filename) as stream:
        stream.read(16)  # discard the IDX header
        payload = (_IMAGE_SIZE * _IMAGE_SIZE) * num_images * _NUM_CHANNELS
        raw = np.frombuffer(stream.read(payload), dtype=np.uint8)
    return raw.reshape(num_images, _IMAGE_SIZE, _IMAGE_SIZE, _NUM_CHANNELS)
class AverageMeter():
    """Accumulates intersection/union counts per class for few-shot
    segmentation evaluation (mIoU and FB-IoU) plus a running loss buffer.

    NOTE(review): allocates CUDA tensors in __init__ — requires a GPU.
    """

    def __init__(self, dataset):
        self.benchmark = dataset.benchmark
        self.class_ids_interest = dataset.class_ids
        self.class_ids_interest = torch.tensor(self.class_ids_interest).cuda()
        # Class count per benchmark; unknown benchmarks leave nclass unset.
        if (self.benchmark == 'pascal'):
            self.nclass = 20
        elif (self.benchmark == 'coco'):
            self.nclass = 80
        elif (self.benchmark == 'fss'):
            self.nclass = 1000
        # Row 0 = background, row 1 = foreground; one column per class.
        self.intersection_buf = torch.zeros([2, self.nclass]).float().cuda()
        self.union_buf = torch.zeros([2, self.nclass]).float().cuda()
        self.ones = torch.ones_like(self.union_buf)
        self.loss_buf = []

    def update(self, inter_b, union_b, class_id, loss):
        """Accumulate a batch's intersection/union counts and its loss."""
        self.intersection_buf.index_add_(1, class_id, inter_b.float())
        self.union_buf.index_add_(1, class_id, union_b.float())
        if (loss is None):
            loss = torch.tensor(0.0)
        self.loss_buf.append(loss)

    def compute_iou(self):
        """Return (mIoU, FB-IoU) in percent over the classes of interest."""
        # max(union, 1) guards against division by zero for unseen classes.
        iou = (self.intersection_buf.float() / torch.max(torch.stack([self.union_buf, self.ones]), dim=0)[0])
        iou = iou.index_select(1, self.class_ids_interest)
        miou = (iou[1].mean() * 100)
        fb_iou = ((self.intersection_buf.index_select(1, self.class_ids_interest).sum(dim=1) / self.union_buf.index_select(1, self.class_ids_interest).sum(dim=1)).mean() * 100)
        return (miou, fb_iou)

    def write_result(self, split, epoch):
        """Log the epoch summary (average loss, mIoU, FB-IoU)."""
        (iou, fb_iou) = self.compute_iou()
        loss_buf = torch.stack(self.loss_buf)
        msg = ('\n*** %s ' % split)
        msg += ('[ %02d] ' % epoch)
        msg += ('Avg L: %6.5f ' % loss_buf.mean())
        msg += ('mIoU: %5.2f ' % iou)
        msg += ('FB-IoU: %5.2f ' % fb_iou)
        msg += '***\n'
        Logger.info(msg)

    def write_process(self, batch_idx, datalen, epoch, write_batch_idx=20):
        """Log intermediate metrics every ``write_batch_idx`` batches.

        Returns (mIoU, FB-IoU) on logging batches, otherwise None.
        epoch == -1 signals test mode (no loss terms in the message).
        """
        if ((batch_idx % write_batch_idx) == 0):
            msg = (('[Epoch: %02d] ' % epoch) if (epoch != (- 1)) else '')
            msg += ('[Batch: %04d/%04d] ' % ((batch_idx + 1), datalen))
            (iou, fb_iou) = self.compute_iou()
            if (epoch != (- 1)):
                loss_buf = torch.stack(self.loss_buf)
                msg += ('L: %6.5f ' % loss_buf[(- 1)])
                msg += ('Avg L: %6.5f ' % loss_buf.mean())
            msg += ('mIoU: %5.2f | ' % iou)
            msg += ('FB-IoU: %5.2f' % fb_iou)
            Logger.info(msg)
            return (iou, fb_iou)
def get_type(element):
    """Return the value of an element's 'type' tag, or None when absent."""
    values = (tag.get('v') for tag in element.findall('tag') if tag.get('k') == 'type')
    return next(values, None)
def save_args_txt(args, acc=None):
    """Persist run arguments to <log_dir>/args.txt.

    When ``acc`` is given (and this is the main process), append the final
    best accuracy instead of rewriting the file.
    """
    log_path = os.path.join(args.log_dir, 'args.txt')
    if acc and is_main_process():
        with open(log_path, 'a') as fh:
            fh.write('\n')
            fh.write(f'Final Best Acc: {acc:.2f}%')
        return
    with open(log_path, 'w') as fh:
        for key, value in vars(args).items():
            fh.write('%s: %s\n' % (key, value))
    print(f'Save config to: {log_path}')
class AttentionStore(AttentionControl):
    """Collects UNet cross/self attention maps per diffusion step and keeps a
    running sum across steps for later averaging."""

    def __init__(self):
        super(AttentionStore, self).__init__()
        self.step_store = self.get_empty_store()
        self.attention_store = {}

    @staticmethod
    def get_empty_store():
        """Fresh empty buckets keyed by UNet location and attention kind.

        FIX: was defined without ``self`` yet called as
        ``self.get_empty_store()`` — a TypeError; now a @staticmethod.
        """
        return {'down_cross': [], 'mid_cross': [], 'up_cross': [], 'down_self': [], 'mid_self': [], 'up_self': []}

    def forward(self, attn, is_cross: bool, place_in_unet: str):
        key = f"{place_in_unet}_{('cross' if is_cross else 'self')}"
        # Only store maps up to 32x32 tokens to bound memory usage.
        if (attn.shape[1] <= (32 ** 2)):
            self.step_store[key].append(attn)
        return attn

    def between_steps(self):
        # Fold the per-step store into the running sum, then reset it.
        if (len(self.attention_store) == 0):
            self.attention_store = self.step_store
        else:
            for key in self.attention_store:
                for i in range(len(self.attention_store[key])):
                    self.attention_store[key][i] += self.step_store[key][i]
        self.step_store = self.get_empty_store()

    def get_average_attention(self):
        """Average accumulated attention maps over the steps seen so far."""
        average_attention = {key: [(item / self.cur_step) for item in self.attention_store[key]] for key in self.attention_store}
        return average_attention

    def reset(self):
        super(AttentionStore, self).reset()
        self.step_store = self.get_empty_store()
        self.attention_store = {}
def download_diagnostic(data_dir):
    """Download the GLUE diagnostic TSV into <data_dir>/diagnostic/."""
    print('Downloading and extracting diagnostic...')
    target_dir = os.path.join(data_dir, 'diagnostic')
    if not os.path.isdir(target_dir):
        os.mkdir(target_dir)
    destination = os.path.join(target_dir, 'diagnostic.tsv')
    urllib.request.urlretrieve(TASK2PATH['diagnostic'], destination)
    print('\tCompleted!')
    return
class Pytorch1_11():
    """Tests behavior specific to torch 1.11: bf16 quantization must be refused."""

    def test_bf16_pytorch_1_11(self):
        # quantize() with precision='bf16' must raise on torch<1.12.
        # NOTE(review): `x` and `bf16_model` are assigned but unused — possibly
        # copied from a sibling test that runs inference; confirm intent.
        model = resnet18(num_classes=10)
        x = torch.rand((10, 3, 256, 256))
        with pytest.raises(RuntimeError, match='Require torch>=1.12 to obtain bfloat16 acceleration.'):
            bf16_model = InferenceOptimizer.quantize(model, precision='bf16')
def caltech256():
    """Build the download-config collection for the Caltech256 dataset."""
    def _factory():
        return datasets.Caltech256(ROOT, download=True)
    return collect_download_configs(_factory, name='Caltech256')
def get_batch(data_iterator, timers):
    """Fetch one BERT pretraining batch and broadcast it across model-parallel
    ranks.

    Returns (tokens, types, next_sentence, loss_mask, lm_labels, padding_mask).
    """
    keys = ['text', 'types', 'is_random', 'mask', 'mask_labels', 'pad_mask']
    datatype = torch.int64
    # Only ranks that own a data iterator actually read; others receive via
    # the broadcast below.
    timers('data loader').start()
    data = next(data_iterator) if data_iterator is not None else None
    timers('data loader').stop()
    data_b = mpu.broadcast_data(keys, data, datatype)
    tokens = data_b['text'].long()
    types = data_b['types'].long()
    next_sentence = data_b['is_random'].long()
    loss_mask = data_b['mask'].float()
    lm_labels = data_b['mask_labels'].long()
    padding_mask = data_b['pad_mask'].byte()
    return (tokens, types, next_sentence, loss_mask, lm_labels, padding_mask)
def plot_embedding(X, Y):
    """Min-max normalize a 2-D embedding X and plot each point as its label.

    Colors come from the Set1 colormap keyed by label; saves to 'a.jpg'.
    """
    (x_min, x_max) = (np.min(X, 0), np.max(X, 0))
    X = ((X - x_min) / (x_max - x_min))
    plt.figure(figsize=(10, 10))
    # FIX: Python 2 `xrange` replaced with `range` (file targets Python 3).
    for i in range(X.shape[0]):
        plt.text(X[(i, 0)], X[(i, 1)], str(Y[i]), color=plt.cm.Set1((Y[i] / 10.0)), fontdict={'weight': 'bold', 'size': 12})
    plt.savefig('a.jpg')
class Operation():
    """One Turing-machine transition: print a symbol, move, change state."""

    def __init__(self, print_symbol, target_state, verbosity):
        self.print_symbol = print_symbol
        self.target_state = target_state
        self.verbosity = verbosity

    def execute(self, tape):
        """Write the symbol, shift the head per the target state's direction,
        and return the target state.

        Raises HaltException when there is no target state (machine halts).
        """
        tape.write(self.print_symbol)
        # Guard clause; also removes the unused local `r` the original carried.
        if not self.target_state:
            raise HaltException()
        if (self.target_state.direction == 'R'):
            tape.shift_right()
        elif (self.target_state.direction == 'L'):
            tape.shift_left()
        return self.target_state
def create_dag_metadata() -> Dict[(int, Dict[(str, Union[(List[int], List[str], Dict[(str, Dict[(str, str)])])])])]:
    """Build a per-cell dependency DAG from the notebook flow's symbol usage.

    For each cell number, records imports used, input/output symbols (with
    type annotations), and parent/child cell edges derived from cross-cell
    symbol timestamps.
    """
    flow_ = flow()
    cell_num_to_used_imports: Dict[(int, Set[Symbol])] = defaultdict(set)
    cell_num_to_inputs: Dict[(int, Set[Symbol])] = defaultdict(set)
    cell_num_to_outputs: Dict[(int, Set[Symbol])] = defaultdict(set)
    cell_num_to_cell_parents: Dict[(int, Set[int])] = defaultdict(set)
    cell_num_to_cell_children: Dict[(int, Set[int])] = defaultdict(set)
    for sym in flow_.all_data_symbols():
        top_level_sym = sym.get_top_level()
        # Skip symbols that are not globally visible or are throwaways.
        if ((top_level_sym is None) or (not top_level_sym.is_globally_accessible) or top_level_sym.is_anonymous or (top_level_sym.name == '_')):
            continue
        # Skip plain `import module` aliases (tracked separately as imports).
        if (top_level_sym.is_module and any(((alias.is_import and (alias.name == top_level_sym.name)) for alias in top_level_sym.aliases))):
            continue
        for (used_time, sym_timestamp_when_used) in itertools.chain(sym.timestamp_by_used_time.items(), sym.timestamp_by_liveness_time.items()):
            if top_level_sym.is_import:
                cell_num_to_used_imports[used_time.cell_num].add(top_level_sym)
            elif (used_time.cell_num != sym_timestamp_when_used.cell_num):
                # Cross-cell use: producing cell is a parent of the using cell.
                cell_num_to_cell_parents[used_time.cell_num].add(sym_timestamp_when_used.cell_num)
                cell_num_to_cell_children[sym_timestamp_when_used.cell_num].add(used_time.cell_num)
                cell_num_to_inputs[used_time.cell_num].add(top_level_sym)
                cell_num_to_outputs[sym_timestamp_when_used.cell_num].add(top_level_sym)
        if (not top_level_sym.is_import):
            # Every update also marks the symbol as an output of that cell.
            for updated_time in sym.updated_timestamps:
                cell_num_to_outputs[updated_time.cell_num].add(top_level_sym)
    cell_metadata: Dict[(int, Dict[(str, Union[(List[int], List[str], Dict[(str, Dict[(str, str)])])])])] = {}
    all_relevant_cells = ((((cell_num_to_used_imports.keys() | cell_num_to_inputs.keys()) | cell_num_to_outputs.keys()) | cell_num_to_cell_parents.keys()) | cell_num_to_cell_children.keys())
    for cell_num in all_relevant_cells:
        cell_imports = [sym.get_import_string() for sym in cell_num_to_used_imports[cell_num]]
        input_symbols = {str(sym): {'type': sym.get_type_annotation_string()} for sym in cell_num_to_inputs[cell_num]}
        output_symbols = {str(sym): {'type': sym.get_type_annotation_string()} for sym in cell_num_to_outputs[cell_num]}
        parent_cells = list(cell_num_to_cell_parents[cell_num])
        child_cells = list(cell_num_to_cell_children[cell_num])
        cell_metadata[cell_num] = {'cell_imports': cell_imports, 'input_symbols': input_symbols, 'output_symbols': output_symbols, 'parent_cells': parent_cells, 'child_cells': child_cells}
    return cell_metadata
def decompositCommand(command_string):
    """Split a command string into [count, command-char] pairs.

    Digits accumulate into the count for the next non-digit character, e.g.
    '12a3b' -> [['12', 'a'], ['3', 'b']]; a command without a preceding count
    gets ''. Trailing digits with no command are dropped (original behavior).
    """
    command_list = []
    num_select = ''
    # Iterate characters directly instead of the original C-style index loop.
    for ch in command_string:
        if ch.isdigit():
            num_select += ch
        else:
            command_list.append([num_select, ch])
            num_select = ''
    return command_list
def title2anchor(name):
    """Convert a section title into a URL-anchor slug.

    Lowercases, maps every non-alphanumeric run to a single '-', and trims
    leading/trailing dashes.
    """
    slug = re.sub('[^a-zA-Z0-9]', '-', name.strip().lower())
    slug = re.sub('-+', '-', slug)
    return slug.strip('-')
class TestWrappers(unittest.TestCase):
    """Round-trip tests: matrix stubs written to Excel must read back equal.

    NOTE(review): `tmp_path` is used as a module-level name here, but it is a
    pytest fixture that is not injectable into unittest.TestCase methods —
    confirm it is defined elsewhere in this module, otherwise these tests
    raise NameError.
    """

    def test_A_matrix_stub(self):
        model_labels = {'observations': {'grass_observation': ['wet', 'dry'], 'weather_observation': ['clear', 'rainy', 'cloudy']}, 'states': {'weather_state': ['raining', 'clear'], 'sprinkler_state': ['on', 'off']}}
        num_hidden_state_factors = len(model_labels['states'])
        expected_A_matrix_stub = create_A_matrix_stub(model_labels)
        # Write the stub to a temporary Excel file and read it back.
        temporary_file_path = (tmp_path / 'A_matrix_stub.xlsx').resolve()
        expected_A_matrix_stub.to_excel(temporary_file_path)
        actual_A_matrix_stub = read_A_matrix(temporary_file_path, num_hidden_state_factors)
        os.remove(temporary_file_path)
        # assert_frame_equal returns None on success (raises otherwise).
        frames_are_equal = (assert_frame_equal(expected_A_matrix_stub, actual_A_matrix_stub) is None)
        self.assertTrue(frames_are_equal)

    def test_B_matrix_stub(self):
        model_labels = {'observations': {'reward outcome': ['win', 'loss']}, 'states': {'location': ['start', 'arm1', 'arm2'], 'bandit_state': ['high_rew', 'low_rew']}, 'actions': {'arm_play': ['play_arm1', 'play_arm2'], 'bandit_state_control': ['null']}}
        B_stubs = create_B_matrix_stubs(model_labels)
        xls_path = (tmp_path / 'B_matrix_stubs.xlsx').resolve()
        # One sheet per hidden-state factor.
        with pd.ExcelWriter(xls_path) as writer:
            for (factor_name, B_stub_f) in B_stubs.items():
                B_stub_f.to_excel(writer, ('%s' % factor_name))
        read_in_B_stubs = read_B_matrices(xls_path)
        os.remove(xls_path)
        all_stub_compares = [assert_frame_equal(stub_og, stub_read_in) for (stub_og, stub_read_in) in zip(*[B_stubs.values(), read_in_B_stubs.values()])]
        self.assertTrue(all(((stub_compare is None) for stub_compare in all_stub_compares)))
class BertForMaskedLM():
    """Placeholder for transformers' BertForMaskedLM when PyTorch is missing.

    Any use raises via requires_pytorch(...).
    NOTE(review): upstream dummy objects usually declare from_pretrained as a
    classmethod — confirm this plain-method form is intended.
    """

    def __init__(self, *args, **kwargs):
        requires_pytorch(self)

    def from_pretrained(self, *args, **kwargs):
        requires_pytorch(self)
def _aatype_to_str_sequence(aatype):
    """Decode residue-type indices into a one-letter amino-acid string."""
    return ''.join(residue_constants.restypes_with_x[idx] for idx in aatype)
def download_pretrained_weights():
    """Download and unpack ImageNet-pretrained TF-slim weights for the
    configured architecture.

    FIX: the download URL and target path literals were garbled (syntax
    error). Reconstructed from the tf-slim 2017_04_14 release naming that the
    surrounding code already uses — confirm against the original repository.
    """
    import urllib.request
    import tarfile
    logging.info(f'Downloading ImageNet pretrained weights for {FLAGS.architecture}')
    filename = f'{FLAGS.architecture}_2017_04_14.tar.gz'
    target_path = f'{paths.DATA_ROOT}/pretrained/{FLAGS.architecture}_2017_04_14/{filename}'
    util.ensure_path_exists(target_path)
    urllib.request.urlretrieve(f'http://download.tensorflow.org/models/{filename}', target_path)
    with tarfile.open(target_path) as f:
        f.extractall(f'{paths.DATA_ROOT}/pretrained/{FLAGS.architecture}_2017_04_14')
    os.remove(target_path)
def gen_iterator(out_path, dataset, gen_p):
    """Generate and export dense point clouds for every item in ``dataset``.

    Writes one .npz and one .off per (example, num_steps) under
    <out_path>/generation/<class>/<item>/; already-exported items are skipped.
    NOTE(review): stores the generator in a module-level global ``gen`` —
    presumably so multiprocessing workers can reach it; confirm.
    """
    global gen
    gen = gen_p
    if (not os.path.exists(out_path)):
        os.makedirs(out_path)
    print(out_path)
    loader = dataset.get_loader(shuffle=True)
    for (i, data) in tqdm(enumerate(loader)):
        path = os.path.normpath(data['path'][0])
        # Export path mirrors the last two components of the source path.
        export_path = (out_path + '/generation/{}/{}/'.format(path.split(os.sep)[(- 2)], path.split(os.sep)[(- 1)]))
        if os.path.exists(export_path):
            # Resumable: skip anything already generated.
            print('Path exists - skip! {}'.format(export_path))
            continue
        else:
            os.makedirs(export_path)
        for num_steps in [7]:
            (point_cloud, duration) = gen.generate_point_cloud(data, num_steps)
            np.savez((export_path + 'dense_point_cloud_{}'.format(num_steps)), point_cloud=point_cloud, duration=duration)
            print('num_steps', num_steps, 'duration', duration)
            # Also export as a faceless mesh for viewers that expect .off.
            trimesh.Trimesh(vertices=point_cloud, faces=[]).export((export_path + 'dense_point_cloud_{}.off'.format(num_steps)))
def train(P, opt, train_fn, models, optimizers, train_loader, logger):
    """Main GAN training loop with EMA generator, lazy R1 regularization,
    warmup/decaying LR, periodic FID evaluation, and checkpointing.

    ``train_fn`` supplies the per-step G and D loss functions; ``models`` is
    (generator, discriminator, g_ema); ``optimizers`` is (opt_G, opt_D).
    NOTE(review): `target`-less loop — labels from the loader are unused.
    """
    (generator, discriminator, g_ema) = models
    (opt_G, opt_D) = optimizers
    losses = {'G_loss': [], 'D_loss': [], 'D_penalty': [], 'D_real': [], 'D_gen': [], 'D_r1': []}
    metrics = {}
    metrics['image_grid'] = ImageGrid(volatile=P.no_gif)
    metrics['fixed_gen'] = FixedSampleGeneration(g_ema, volatile=P.no_gif)
    if (not P.no_fid):
        metrics['fid_score'] = FIDScore(opt['dataset'], opt['fid_size'], P.n_eval_avg)
    logger.log_dirname('Steps {}'.format(P.starting_step))
    for step in range(P.starting_step, (opt['max_steps'] + 1)):
        # Lazy R1: only regularize every d_reg_every steps.
        d_regularize = (((step % P.d_reg_every) == 0) and (P.lbd_r1 > 0))
        if P.use_warmup:
            _update_warmup(opt_G, step, opt['warmup'], opt['lr'])
            _update_warmup(opt_D, step, opt['warmup'], opt['lr_d'])
        if ((not P.use_warmup) or (step > opt['warmup'])):
            # Post-warmup LR decay (halflife schedule).
            cur_lr_g = _update_lr(opt_G, step, opt['batch_size'], P.halflife_lr, opt['lr'])
            cur_lr_d = _update_lr(opt_D, step, opt['batch_size'], P.halflife_lr, opt['lr_d'])
            if (cur_lr_d and cur_lr_g):
                logger.log(('LR Updated: [G %.5f] [D %.5f]' % (cur_lr_g, cur_lr_d)))
        # EMA of generator weights starts after ema_start_k thousand images.
        do_ema = ((step * opt['batch_size']) > (P.ema_start_k * 1000))
        accum = (P.accum if do_ema else 0)
        accumulate(g_ema, generator, accum)
        generator.train()
        discriminator.train()
        (images, labels) = next(train_loader)
        images = images.cuda()
        # ---- Generator step (D frozen) ----
        set_grad(generator, True)
        set_grad(discriminator, False)
        gen_images = _sample_generator(generator, images.size(0), style_mix=P.style_mix, enable_grad=True)
        g_loss = train_fn['G'](P, discriminator, opt, images, gen_images)
        opt_G.zero_grad()
        g_loss.backward()
        opt_G.step()
        losses['G_loss'].append(g_loss.item())
        # ---- Discriminator step (G frozen) ----
        set_grad(generator, False)
        set_grad(discriminator, True)
        if d_regularize:
            # R1 needs gradients w.r.t. real images.
            images.requires_grad = True
        (d_loss, aux) = train_fn['D'](P, discriminator, opt, images, gen_images)
        loss = (d_loss + aux['penalty'])
        if d_regularize:
            r1 = r1_loss(discriminator, images, P.augment_fn)
            # Scale by d_reg_every to keep the effective strength constant.
            lazy_r1 = (((0.5 * P.lbd_r1) * r1) * P.d_reg_every)
            loss = (loss + lazy_r1)
            losses['D_r1'].append(r1.item())
        opt_D.zero_grad()
        loss.backward()
        opt_D.step()
        losses['D_loss'].append(d_loss.item())
        losses['D_real'].append(aux['d_real'].item())
        losses['D_gen'].append(aux['d_gen'].item())
        losses['D_penalty'].append(aux['penalty'].item())
        # Extra D updates when n_critic > 1.
        for i in range((opt['n_critic'] - 1)):
            (images, labels) = next(train_loader)
            images = images.cuda()
            gen_images = _sample_generator(generator, images.size(0), style_mix=P.style_mix, enable_grad=False)
            (d_loss, aux) = train_fn['D'](P, discriminator, opt, images, gen_images)
            loss = (d_loss + aux['penalty'])
            opt_D.zero_grad()
            loss.backward()
            opt_D.step()
        generator.eval()
        discriminator.eval()
        if ((step % P.print_every) == 0):
            logger.log(('[Steps %7d] [G %.3f] [D %.3f]' % (step, losses['G_loss'][(- 1)], losses['D_loss'][(- 1)])))
            for name in losses:
                values = losses[name]
                if (len(values) > 0):
                    logger.scalar_summary(('gan/train/' + name), values[(- 1)], step)
        if ((step % P.evaluate_every) == 0):
            # Periodic evaluation: FID, sample GIF, augmented-real grid,
            # and checkpoints (latest + best-FID).
            logger.log_dirname('Steps {}'.format((step + 1)))
            fid_score = metrics.get('fid_score')
            fixed_gen = metrics.get('fixed_gen')
            image_grid = metrics.get('image_grid')
            if fid_score:
                fid_avg = fid_score.update(step, g_ema)
                fid_score.save((logger.logdir + f'/results_fid_{P.eval_seed}.csv'))
                logger.scalar_summary('gan/test/fid', fid_avg, step)
                logger.scalar_summary('gan/test/fid/best', fid_score.best, step)
            if (not P.no_gif):
                _ = fixed_gen.update(step)
                imageio.mimsave((logger.logdir + f'/training_progress_{P.eval_seed}.gif'), fixed_gen.summary())
            aug_grid = image_grid.update(step, P.augment_fn(images))
            imageio.imsave((logger.logdir + f'/real_augment_{P.eval_seed}.jpg'), aug_grid)
            # .module: models are assumed DataParallel/DDP-wrapped here.
            G_state_dict = generator.module.state_dict()
            D_state_dict = discriminator.module.state_dict()
            Ge_state_dict = g_ema.state_dict()
            torch.save(G_state_dict, (logger.logdir + '/gen.pt'))
            torch.save(D_state_dict, (logger.logdir + '/dis.pt'))
            torch.save(Ge_state_dict, (logger.logdir + '/gen_ema.pt'))
            if (fid_score and fid_score.is_best):
                torch.save(G_state_dict, (logger.logdir + '/gen_best.pt'))
                torch.save(D_state_dict, (logger.logdir + '/dis_best.pt'))
                torch.save(Ge_state_dict, (logger.logdir + '/gen_ema_best.pt'))
            if ((step % P.save_every) == 0):
                torch.save(G_state_dict, (logger.logdir + f'/gen_{step}.pt'))
                torch.save(D_state_dict, (logger.logdir + f'/dis_{step}.pt'))
                torch.save(Ge_state_dict, (logger.logdir + f'/gen_ema_{step}.pt'))
            torch.save({'epoch': step, 'optim_G': opt_G.state_dict(), 'optim_D': opt_D.state_dict()}, (logger.logdir + '/optim.pt'))
_model
def ese_vovnet39b(pretrained=False, **kwargs):
    """Construct an ESE-VoVNet-39b model via the shared `_vovnet` factory.

    Extra keyword arguments are forwarded unchanged to the factory.
    """
    variant = 'ese_vovnet39b'
    return _vovnet(variant, pretrained=pretrained, **kwargs)
class AttributeDatasetArgs():
    """CLI argument group describing the dataset used for attribution.

    Each attribute is a `dataclasses.field` with an `alias` (short CLI flag)
    and `help` text in its metadata.
    NOTE(review): bare annotated fields with `field(...)` only work under a
    @dataclass decorator — presumably applied above this view; confirm.
    """
    # Required: which dataset type to load.
    dataset_name: str = field(metadata={'alias': '-d', 'help': 'The type of dataset to be loaded for attribution.'})
    # Field holding the source texts to attribute.
    input_text_field: Optional[str] = field(metadata={'alias': '-f', 'help': 'Name of the field containing the input texts used for attribution.'})
    # Optional field with pre-generated texts for constrained decoding.
    generated_text_field: Optional[str] = field(default=None, metadata={'alias': '-fgen', 'help': 'Name of the field containing the generated texts used for constrained decoding.'})
    dataset_config: Optional[str] = field(default=None, metadata={'alias': '-dconf', 'help': 'The name of the Huggingface dataset configuration.'})
    dataset_dir: Optional[str] = field(default=None, metadata={'alias': '-ddir', 'help': 'Path to the directory containing the data files.'})
    dataset_files: Optional[List[str]] = field(default=None, metadata={'alias': '-dfiles', 'help': 'Path to the dataset files.'})
    dataset_split: Optional[str] = field(default='train', metadata={'alias': '-dsplit', 'help': 'Dataset split.'})
    dataset_revision: Optional[str] = field(default=None, metadata={'alias': '-drev', 'help': 'The Huggingface dataset revision.'})
    dataset_auth_token: Optional[str] = field(default=None, metadata={'alias': '-dauth', 'help': 'The auth token for the Huggingface dataset.'})
class ProjectionUpdater(nn.Module):
    """Wraps a module and periodically redraws the random projection
    matrices of every FastAttention submodule while training.

    The call count is kept in a registered buffer so it round-trips through
    state_dict save/load.
    """

    def __init__(self, instance, feature_redraw_interval):
        super().__init__()
        self.instance = instance
        self.feature_redraw_interval = feature_redraw_interval
        # Buffer, not a parameter: calls since projections were last redrawn.
        self.register_buffer('calls_since_last_redraw', torch.tensor(0))

    def fix_projections_(self):
        """Permanently freeze the current projections (disable redrawing)."""
        self.feature_redraw_interval = None

    def redraw_projections(self):
        """Redraw all FastAttention projections every `feature_redraw_interval`
        calls; no-op when not in training mode."""
        model = self.instance
        if (not self.training):
            return
        if (exists(self.feature_redraw_interval) and (self.calls_since_last_redraw >= self.feature_redraw_interval)):
            device = get_module_device(model)
            fast_attentions = find_modules(model, FastAttention)
            for fast_attention in fast_attentions:
                fast_attention.redraw_projection_matrix(device)
            self.calls_since_last_redraw.zero_()
            return
        self.calls_since_last_redraw += 1

    def forward(self, x):
        # Fix: the original `raise NotImplemented` raised a TypeError at call
        # time (NotImplemented is a sentinel value, not an exception class).
        # This wrapper is not meant to be called directly.
        raise NotImplementedError
class VQADataset():
    """Container for one VQA split: questions, answers and image features.

    Questions/answers come from JSON files, visual features from a .mat file
    of ImageNet Fisher vectors, and text is tokenized with a pickled
    Keras-style Tokenizer shared across splits.

    NOTE(review): this class uses Python 2 idioms throughout
    (dict.iteritems, text-mode pickle of a tokenizer, lazy `map`); it will
    not run unmodified under Python 3.
    """

    def __init__(self, dataset_type, questions_path, answers_path, images_path, tokenizer_path, vocab_size=20000, question_max_len=None):
        """Validate paths, pick the split's feature file and load/create the tokenizer.

        dataset_type must be a DatasetType enum member; answers_path may be
        falsy only for TEST/EVAL splits.
        """
        if isinstance(dataset_type, DatasetType):
            self.dataset_type = dataset_type
        else:
            raise TypeError('dataset_type has to be one of the DatasetType enum values')
        if os.path.isfile(questions_path):
            self.questions_path = questions_path
        else:
            raise ValueError((('The file ' + questions_path) + 'does not exists'))
        if os.path.isdir(images_path):
            self.images_path = images_path
        else:
            raise ValueError((('The directory ' + images_path) + ' does not exists'))
        # Select the precomputed visual-features archive matching this split.
        if (self.dataset_type == DatasetType.TRAIN):
            self.features_path = (images_path + 'train_ImageNet_FisherVectors.mat')
        elif ((self.dataset_type == DatasetType.VALIDATION) or (self.dataset_type == DatasetType.EVAL)):
            self.features_path = (images_path + 'val_ImageNet_FisherVectors.mat')
        else:
            self.features_path = (images_path + 'test_ImageNet_FisherVectors.mat')
        self.answers_path = answers_path
        # NOTE(review): the message below reports images_path although the
        # check is on answers_path — the error text looks wrong; confirm.
        if (answers_path and (not os.path.isfile(answers_path))):
            raise ValueError((('The directory ' + images_path) + ' does not exists'))
        elif ((not answers_path) and ((dataset_type != DatasetType.TEST) and (dataset_type != DatasetType.EVAL))):
            raise ValueError('You have to provide an answers path')
        self.vocab_size = vocab_size
        self.tokenizer_path = tokenizer_path
        tokenizer_dir = os.path.dirname(os.path.abspath(self.tokenizer_path))
        if (not os.path.isdir(tokenizer_dir)):
            os.mkdir(tokenizer_dir)
        # Reuse a previously fitted tokenizer when present; otherwise a fresh
        # one is fitted later in _init_tokenizer().
        if os.path.isfile(self.tokenizer_path):
            self.tokenizer = pickle.load(open(self.tokenizer_path, 'r'))
        else:
            self.tokenizer = Tokenizer(nb_words=self.vocab_size)
        self.question_max_len = question_max_len
        self.samples = []

    def prepare(self):
        """Build question/answer/image dicts, fit the tokenizer and create samples."""
        questions = self._create_questions_dict(self.questions_path)
        print('Questions dict created')
        answers = self._create_answers_dict(self.answers_path)
        print('Answers dict created')
        image_ids = self._get_image_ids(self.images_path)
        images = self._create_images_dict(image_ids)
        print('Images dict created')
        self._init_tokenizer(questions, answers)
        aux_len = 0
        # Tokenize every question, tracking the longest token sequence seen.
        for (_, question) in questions.iteritems():
            question.tokenize(self.tokenizer)
            if (question.get_tokens_length() > aux_len):
                aux_len = question.get_tokens_length()
        # Default the pad length to the longest question in this split.
        if (not self.question_max_len):
            self.question_max_len = aux_len
        for (_, answer) in answers.iteritems():
            answer.tokenize(self.tokenizer)
        print('Tokenizer created')
        self._create_samples(images, questions, answers)

    def batch_generator(self, batch_size):
        """Infinite generator yielding ([image_feats, question_tokens], answer_one_hot).

        Loads the full feature matrix once, then cycles over self.samples,
        reshuffling them after each pass.
        """
        print('Loading visual features...')
        features = scipy.io.loadmat(self.features_path)['features']
        for sample in self.samples:
            sample.image.load(features, True)
        print('Visual features loaded')
        num_samples = len(self.samples)
        batch_start = 0
        batch_end = batch_size
        while True:
            # I: 1024-d Fisher vectors, Q: padded token ids, A: one-hot answers.
            I = np.zeros((batch_size, 1024), dtype=np.float16)
            Q = np.zeros((batch_size, self.question_max_len), dtype=np.int32)
            A = np.zeros((batch_size, self.vocab_size), dtype=np.bool_)
            for (idx, sample) in enumerate(self.samples[batch_start:batch_end]):
                (I[idx], Q[idx]) = sample.get_input(self.question_max_len)
                A[idx] = sample.get_output()
            (yield ([I, Q], A))
            batch_start += batch_size
            # Wrap around and reshuffle once the epoch is exhausted.
            if (batch_start >= num_samples):
                batch_start = 0
                random.shuffle(self.samples)
            batch_end = (batch_start + batch_size)
            if (batch_end > num_samples):
                batch_end = num_samples

    def get_dataset_input(self):
        """Return (images, questions) arrays for the whole split at once."""
        features = scipy.io.loadmat(self.features_path)['features']
        for sample in self.samples:
            sample.image.load(features, True)
        images_list = []
        questions_list = []
        for sample in self.samples:
            images_list.append(sample.get_input(self.question_max_len)[0])
            questions_list.append(sample.get_input(self.question_max_len)[1])
        return (np.array(images_list), np.array(questions_list))

    def get_dataset_output(self):
        """Return the one-hot answer matrix for the whole split as bool array."""
        output_array = [sample.get_output() for sample in self.samples]
        print('output_array list created')
        return np.array(output_array).astype(np.bool_)

    def size(self):
        """Number of samples in this split."""
        return len(self.samples)

    def _create_questions_dict(self, questions_json_path):
        """Map question_id -> Question parsed from the VQA questions JSON."""
        questions_json = json.load(open(questions_json_path))
        questions = {question['question_id']: Question(question['question_id'], question['question'].encode('utf8'), question['image_id'], self.vocab_size) for question in questions_json['questions']}
        return questions

    def _create_answers_dict(self, answers_json_path):
        """Map a synthetic id -> Answer from the annotations JSON (empty for TEST/EVAL).

        The key (question_id * 10 + answer_id - 1) assumes at most 10
        one-based answers per question — TODO confirm against the data.
        """
        if ((self.dataset_type == DatasetType.TEST) or (self.dataset_type == DatasetType.EVAL)):
            return {}
        answers_json = json.load(open(answers_json_path))
        answers = {((annotation['question_id'] * 10) + (answer['answer_id'] - 1)): Answer(answer['answer_id'], answer['answer'].encode('utf8'), annotation['question_id'], annotation['image_id'], self.vocab_size) for annotation in answers_json['annotations'] for answer in annotation['answers']}
        return answers

    def _create_images_dict(self, image_ids):
        """Map image_id -> Image holding its row index into the feature matrix."""
        images = {image_id: Image(image_id, features_idx) for (image_id, features_idx) in image_ids.iteritems()}
        return images

    def _create_samples(self, images, questions, answers):
        """Populate self.samples: one per answer (train/val) or per question (test/eval)."""
        if ((self.dataset_type != DatasetType.TEST) and (self.dataset_type != DatasetType.EVAL)):
            for (answer_id, answer) in answers.iteritems():
                question = questions[answer.question_id]
                image_id = question.image_id
                image = images[image_id]
                self.samples.append(VQASample(question, image, answer, self.dataset_type))
        else:
            for (question_id, question) in questions.iteritems():
                image_id = question.image_id
                image = images[image_id]
                self.samples.append(VQASample(question, image, dataset_type=self.dataset_type))

    def _init_tokenizer(self, questions, answers):
        """Fit the tokenizer on all question/answer texts if not already fitted, then persist it."""
        if (not hasattr(self.tokenizer, 'word_index')):
            questions_list = [question.question for (_, question) in questions.iteritems()]
            answers_list = [answer.answer for (_, answer) in answers.iteritems()]
            self.tokenizer.fit_on_texts((questions_list + answers_list))
            pickle.dump(self.tokenizer, open(self.tokenizer_path, 'w'))

    def _get_image_ids(self, images_path):
        """Map COCO image id -> row index in the feature matrix, from the split's list file.

        The numeric id is parsed from the fixed-width (12-digit) suffix of
        each 'COCO_<split>_' file name.
        """
        if (self.dataset_type == DatasetType.TRAIN):
            id_start = len('COCO_train2014_')
            image_ids_path = (images_path + 'train_list.txt')
        elif ((self.dataset_type == DatasetType.VALIDATION) or (self.dataset_type == DatasetType.EVAL)):
            id_start = len('COCO_val2014_')
            image_ids_path = (images_path + 'val_list.txt')
        else:
            id_start = len('COCO_test2015_')
            image_ids_path = (images_path + 'test_list.txt')
        id_end = (id_start + 12)
        with open(image_ids_path, 'r') as f:
            tmp = f.read()
            image_ids = tmp.split('\n')
        image_ids.remove('')
        image_ids = map((lambda x: int(x[id_start:id_end])), image_ids)
        image_ids_dict = {}
        # Row order in the list file defines the feature-matrix row index.
        for (idx, image_id) in enumerate(image_ids):
            image_ids_dict[image_id] = idx
        return image_ids_dict
def linear_flops_counter_hook(module, input, output):
    """Forward hook accumulating FLOPs of a Linear layer into module.__flops__.

    Counts batch_size * in_features * out_features multiply-adds.
    """
    x = input[0]
    batch = x.shape[0]
    module.__flops__ += batch * x.shape[1] * output.shape[1]
def _get_p_r_f1(tp, fp, fn):
p = round(((tp / (tp + fp)) if ((tp > 0) or (fp > 0)) else 0.0), ndigits=4)
r = round(((tp / (tp + fn)) if ((tp > 0) or (fn > 0)) else 0.0), ndigits=4)
f1 = round(((((2 * p) * r) / (p + r)) if ((p > 0) or (r > 0)) else 0.0), ndigits=4)
return (p, r, f1) |
def load_state_dict_flexible(model, state_dict):
    """Load `state_dict` into `model`; fall back to tensor-by-tensor partial loading.

    If the strict full load fails (missing/unexpected keys, shape mismatch),
    each tensor is copied individually: unknown names are skipped and
    per-tensor failures are reported but do not abort the load.

    Fix: the original used bare `except:` clauses, which also swallow
    SystemExit/KeyboardInterrupt; narrowed to `except Exception`.
    """
    try:
        model.load_state_dict(state_dict)
    except Exception:
        print('Full loading failed!! Try partial loading!!')
        own_state = model.state_dict()
        for (name, param) in state_dict.items():
            if (name not in own_state):
                print(('Skipped: ' + name))
                continue
            if isinstance(param, torch.nn.Parameter):
                # Unwrap Parameter so copy_ gets a plain tensor.
                param = param.data
            try:
                own_state[name].copy_(param)
                print(('Successfully loaded: ' + name))
            except Exception:
                # Typically a shape mismatch; keep the model's own tensor.
                print(('Part load failed: ' + name))
    print('\n\n')
def train_atari(args):
    """Train a Super-SAC agent on one Atari game, configured via a gin file.

    Builds parallel uint8-wrapped train/test environments, an agent with an
    Atari CNN encoder, a 1M-transition replay buffer, and runs training with
    DrQ-v2 image augmentation.
    """
    gin.parse_config_file(args.config)

    def build_env():
        return super_sac.wrappers.load_atari(args.game, frame_skip=4)

    train_env = super_sac.wrappers.Uint8Wrapper(
        super_sac.wrappers.ParallelActors(build_env, args.parallel_actors)
    )
    test_env = super_sac.wrappers.Uint8Wrapper(build_env())
    obs_shape = train_env.observation_space.shape
    agent = super_sac.Agent(
        act_space_size=train_env.action_space.n,
        encoder=AtariEncoder(obs_shape, emb_dim=128),
    )
    replay = super_sac.replay.ReplayBuffer(size=1000000)
    super_sac.super_sac(
        agent=agent,
        train_env=train_env,
        test_env=test_env,
        buffer=replay,
        name=args.name,
        logging_method=args.logging,
        augmenter=AugmentationSequence([Drqv2Aug(128)]),
    )
def generate_aug_list(merged_list, excluded_list):
    """Return the elements of merged_list not in excluded_list.

    Result is deduplicated and in arbitrary (set) order.
    """
    remaining = set(merged_list)
    remaining.difference_update(excluded_list)
    return list(remaining)
class TestInsertInputOuputData(unittest.TestCase):
def setUpClass(self):
pass
def tearDownClass(self):
pass
def test_input_output_data(self):
graph = Graph()
graph.framework_modeling_config['framework'] = 'onnxruntime'
input_data_node = OPERATORS['ONNXINPUT']()
input_tensors = []
output_tensors = [Tensor()]
input_data_node.construct('input_data', 'ONNXINPUT', input_tensors=input_tensors, output_tensors=output_tensors)
graph.insert_nodes(0, [input_data_node])
graph = InputData()(graph)
graph = OutputData()(graph)
self.assertEqual(2, len(graph.nodes))
self.assertEqual('Input', graph.nodes[0].op_type)
self.assertEqual('Output', graph.nodes[1].op_type) |
def expand_minimum_ndim(a, target_dim, axis=(- 1)):
    """Insert singleton dims along `axis` until `a` has at least `target_dim` dims.

    Handles TensorFlow tensors (via tf.expand_dims) and numpy/array-likes;
    non-ndarray inputs are first converted to float32 ndarrays.
    """
    if is_tf_data(a):
        result = a
        for _ in range(len(a.get_shape()), target_dim):
            result = tf.expand_dims(result, axis=axis)
        return result
    result = a if isinstance(a, np.ndarray) else np.array(a).astype(np.float32)
    for _ in range(len(result.shape), target_dim):
        result = np.expand_dims(result, axis=axis)
    return result
class ElectraConfig(PretrainedConfig):
    """Configuration holder for ELECTRA models.

    Pure hyperparameter container; the defaults here match the "small"
    ELECTRA discriminator (hidden_size=256, 12 layers, 4 heads).
    """
    model_type = 'electra'

    def __init__(self, vocab_size=30522, embedding_size=128, hidden_size=256, num_hidden_layers=12, num_attention_heads=4, intermediate_size=1024, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, summary_type='first', summary_use_proj=True, summary_activation='gelu', summary_last_dropout=0.1, pad_token_id=0, **kwargs):
        """Store hyperparameters; extra kwargs go to PretrainedConfig."""
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        # Embedding/encoder dimensions.
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        # Regularization.
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Sequence-summary head settings (pooling for classification heads).
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
class FlaxPNDMScheduler(FlaxSchedulerMixin, ConfigMixin):
    """Flax/JAX PNDM (pseudo numerical methods for diffusion models) scheduler.

    Combines Runge-Kutta warm-up steps (PRK) with linear multistep steps
    (PLMS). All mutable quantities live in an immutable PNDMSchedulerState,
    keeping the scheduler itself stateless and jit/vmap-friendly; branching
    inside `step` uses jax.lax selects rather than Python `if`.
    """
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]
    # dtype for every array this scheduler creates
    dtype: jnp.dtype
    # PNDM is a 4th-order multistep method
    pndm_order: int

    def has_state(self):
        """This scheduler carries explicit state (PNDMSchedulerState)."""
        return True
    # NOTE(review): the bare name below looks like a mangled decorator
    # (presumably @register_to_config) — confirm against the original file.
    _to_config
    def __init__(self, num_train_timesteps: int=1000, beta_start: float=0.0001, beta_end: float=0.02, beta_schedule: str='linear', trained_betas: Optional[jnp.ndarray]=None, skip_prk_steps: bool=False, set_alpha_to_one: bool=False, steps_offset: int=0, prediction_type: str='epsilon', dtype: jnp.dtype=jnp.float32):
        self.dtype = dtype
        # Order of the multistep method; fixed to 4 for PNDM.
        self.pndm_order = 4

    def create_state(self, common: Optional[CommonSchedulerState]=None) -> PNDMSchedulerState:
        """Build the initial immutable scheduler state."""
        if (common is None):
            common = CommonSchedulerState.create(self)
        # "Final" alpha-bar used when stepping past t=0.
        final_alpha_cumprod = (jnp.array(1.0, dtype=self.dtype) if self.config.set_alpha_to_one else common.alphas_cumprod[0])
        # PNDM does not rescale the initial noise.
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)
        # Training timesteps in descending order.
        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::(- 1)]
        return PNDMSchedulerState.create(common=common, final_alpha_cumprod=final_alpha_cumprod, init_noise_sigma=init_noise_sigma, timesteps=timesteps)

    def set_timesteps(self, state: PNDMSchedulerState, num_inference_steps: int, shape: Tuple) -> PNDMSchedulerState:
        """Compute the PRK/PLMS timestep schedules and reset working buffers.

        `shape` is the latent sample shape, needed to pre-allocate the
        fixed-shape scratch arrays required by jit.
        """
        step_ratio = (self.config.num_train_timesteps // num_inference_steps)
        _timesteps = ((jnp.arange(0, num_inference_steps) * step_ratio).round() + self.config.steps_offset)
        if self.config.skip_prk_steps:
            # No Runge-Kutta warm-up: duplicate the second-to-last timestep
            # so PLMS has enough history, then reverse.
            prk_timesteps = jnp.array([], dtype=jnp.int32)
            plms_timesteps = jnp.concatenate([_timesteps[:(- 1)], _timesteps[(- 2):(- 1)], _timesteps[(- 1):]])[::(- 1)]
        else:
            # Runge-Kutta warm-up over the last `pndm_order` timesteps, with
            # half-steps interleaved; remaining timesteps use PLMS.
            prk_timesteps = (_timesteps[(- self.pndm_order):].repeat(2) + jnp.tile(jnp.array([0, ((self.config.num_train_timesteps // num_inference_steps) // 2)], dtype=jnp.int32), self.pndm_order))
            prk_timesteps = prk_timesteps[:(- 1)].repeat(2)[1:(- 1)][::(- 1)]
            plms_timesteps = _timesteps[:(- 3)][::(- 1)]
        timesteps = jnp.concatenate([prk_timesteps, plms_timesteps])
        # Fixed-shape scratch state (required for jit): running model output,
        # step counter, cached sample, and the last 4 epsilon estimates.
        cur_model_output = jnp.zeros(shape, dtype=self.dtype)
        counter = jnp.int32(0)
        cur_sample = jnp.zeros(shape, dtype=self.dtype)
        ets = jnp.zeros(((4,) + shape), dtype=self.dtype)
        return state.replace(timesteps=timesteps, num_inference_steps=num_inference_steps, prk_timesteps=prk_timesteps, plms_timesteps=plms_timesteps, cur_model_output=cur_model_output, counter=counter, cur_sample=cur_sample, ets=ets)

    def scale_model_input(self, state: PNDMSchedulerState, sample: jnp.ndarray, timestep: Optional[int]=None) -> jnp.ndarray:
        """PNDM needs no input scaling; returns the sample unchanged."""
        return sample

    def step(self, state: PNDMSchedulerState, model_output: jnp.ndarray, timestep: int, sample: jnp.ndarray, return_dict: bool=True) -> Union[(FlaxPNDMSchedulerOutput, Tuple)]:
        """One denoising step; dispatches between PRK and PLMS branches.

        Both branches are evaluated and the result is chosen with
        jax.lax.select so the function stays traceable under jit.
        """
        if (state.num_inference_steps is None):
            raise ValueError("Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler")
        if self.config.skip_prk_steps:
            (prev_sample, state) = self.step_plms(state, model_output, timestep, sample)
        else:
            (prk_prev_sample, prk_state) = self.step_prk(state, model_output, timestep, sample)
            (plms_prev_sample, plms_state) = self.step_plms(state, model_output, timestep, sample)
            # Still inside the Runge-Kutta warm-up window?
            cond = (state.counter < len(state.prk_timesteps))
            prev_sample = jax.lax.select(cond, prk_prev_sample, plms_prev_sample)
            state = state.replace(cur_model_output=jax.lax.select(cond, prk_state.cur_model_output, plms_state.cur_model_output), ets=jax.lax.select(cond, prk_state.ets, plms_state.ets), cur_sample=jax.lax.select(cond, prk_state.cur_sample, plms_state.cur_sample), counter=jax.lax.select(cond, prk_state.counter, plms_state.counter))
        if (not return_dict):
            return (prev_sample, state)
        return FlaxPNDMSchedulerOutput(prev_sample=prev_sample, state=state)

    def step_prk(self, state: PNDMSchedulerState, model_output: jnp.ndarray, timestep: int, sample: jnp.ndarray) -> Union[(FlaxPNDMSchedulerOutput, Tuple)]:
        """One Runge-Kutta sub-step (4 sub-steps form one RK4 update)."""
        if (state.num_inference_steps is None):
            raise ValueError("Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler")
        # Even sub-steps advance by half a stride, odd sub-steps stay put.
        diff_to_prev = jnp.where((state.counter % 2), 0, ((self.config.num_train_timesteps // state.num_inference_steps) // 2))
        prev_timestep = (timestep - diff_to_prev)
        timestep = state.prk_timesteps[((state.counter // 4) * 4)]
        model_output = jax.lax.select(((state.counter % 4) != 3), model_output, (state.cur_model_output + ((1 / 6) * model_output)))
        # RK4 weight accumulation (1/6, 1/3, 1/3, then reset); the ets history
        # and cached sample are refreshed on the first sub-step.
        state = state.replace(cur_model_output=jax.lax.select_n((state.counter % 4), (state.cur_model_output + ((1 / 6) * model_output)), (state.cur_model_output + ((1 / 3) * model_output)), (state.cur_model_output + ((1 / 3) * model_output)), jnp.zeros_like(state.cur_model_output)), ets=jax.lax.select(((state.counter % 4) == 0), state.ets.at[0:3].set(state.ets[1:4]).at[3].set(model_output), state.ets), cur_sample=jax.lax.select(((state.counter % 4) == 0), sample, state.cur_sample))
        cur_sample = state.cur_sample
        prev_sample = self._get_prev_sample(state, cur_sample, timestep, prev_timestep, model_output)
        state = state.replace(counter=(state.counter + 1))
        return (prev_sample, state)

    def step_plms(self, state: PNDMSchedulerState, model_output: jnp.ndarray, timestep: int, sample: jnp.ndarray) -> Union[(FlaxPNDMSchedulerOutput, Tuple)]:
        """One linear-multistep (Adams-Bashforth-style) sub-step.

        The multistep order ramps up with the counter: Euler, 2nd, 3rd, then
        the full 4th-order formula once 4 epsilon estimates are available.
        """
        if (state.num_inference_steps is None):
            raise ValueError("Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler")
        prev_timestep = (timestep - (self.config.num_train_timesteps // state.num_inference_steps))
        prev_timestep = jnp.where((prev_timestep > 0), prev_timestep, 0)
        # counter == 1 replays the first timestep pair (bootstrap step).
        prev_timestep = jnp.where((state.counter == 1), timestep, prev_timestep)
        timestep = jnp.where((state.counter == 1), (timestep + (self.config.num_train_timesteps // state.num_inference_steps)), timestep)
        state = state.replace(ets=jax.lax.select((state.counter != 1), state.ets.at[0:3].set(state.ets[1:4]).at[3].set(model_output), state.ets), cur_sample=jax.lax.select((state.counter != 1), sample, state.cur_sample))
        # Select the multistep formula by (clipped) counter: orders 1..4.
        state = state.replace(cur_model_output=jax.lax.select_n(jnp.clip(state.counter, 0, 4), model_output, ((model_output + state.ets[(- 1)]) / 2), (((3 * state.ets[(- 1)]) - state.ets[(- 2)]) / 2), ((((23 * state.ets[(- 1)]) - (16 * state.ets[(- 2)])) + (5 * state.ets[(- 3)])) / 12), ((1 / 24) * ((((55 * state.ets[(- 1)]) - (59 * state.ets[(- 2)])) + (37 * state.ets[(- 3)])) - (9 * state.ets[(- 4)])))))
        sample = state.cur_sample
        model_output = state.cur_model_output
        prev_sample = self._get_prev_sample(state, sample, timestep, prev_timestep, model_output)
        state = state.replace(counter=(state.counter + 1))
        return (prev_sample, state)

    def _get_prev_sample(self, state: PNDMSchedulerState, sample, timestep, prev_timestep, model_output):
        """Apply the PNDM transfer formula (eq. 12 of the PNDM paper) to get x_{t-delta}."""
        alpha_prod_t = state.common.alphas_cumprod[timestep]
        alpha_prod_t_prev = jnp.where((prev_timestep >= 0), state.common.alphas_cumprod[prev_timestep], state.final_alpha_cumprod)
        beta_prod_t = (1 - alpha_prod_t)
        beta_prod_t_prev = (1 - alpha_prod_t_prev)
        if (self.config.prediction_type == 'v_prediction'):
            # Convert a v-prediction into an epsilon prediction.
            model_output = (((alpha_prod_t ** 0.5) * model_output) + ((beta_prod_t ** 0.5) * sample))
        elif (self.config.prediction_type != 'epsilon'):
            raise ValueError(f'prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `v_prediction`')
        sample_coeff = ((alpha_prod_t_prev / alpha_prod_t) ** 0.5)
        model_output_denom_coeff = ((alpha_prod_t * (beta_prod_t_prev ** 0.5)) + (((alpha_prod_t * beta_prod_t) * alpha_prod_t_prev) ** 0.5))
        prev_sample = ((sample_coeff * sample) - (((alpha_prod_t_prev - alpha_prod_t) * model_output) / model_output_denom_coeff))
        return prev_sample

    def add_noise(self, state: PNDMSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray) -> jnp.ndarray:
        """Standard forward-diffusion noising, shared with the other schedulers."""
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def __len__(self):
        return self.config.num_train_timesteps
def args(mode):
    """Build and parse command-line arguments for the given run mode.

    mode must be one of 'train', 'test', 'debug'; only 'train' registers
    options — the other modes parse an empty argument set.
    """
    assert mode in ['train', 'test', 'debug']
    cli = argparse.ArgumentParser()
    if mode == 'train':
        # Optimization settings.
        cli.add_argument('--optim', default='SGD', help='set the optimizer of model [Adadelta, Adagrad, Adam, SparseAdam, Adamax, ASGD, LBFGS, RMSprop, Rprop, SGD]')
        cli.add_argument('--trset', default='DUTS-TR', help='set the traing set')
        cli.add_argument('--scheduler', default='StepLR', help='set the scheduler')
        cli.add_argument('--lr', default=0.004, type=float, help='set base learning rate')
        # Model / data settings.
        cli.add_argument('--model', default='resnet', help='Set the model')
        cli.add_argument('--batch', default=8, type=int, help='Batch Size')
        cli.add_argument('--size', default=288, type=int, help='Image Size')
        cli.add_argument('--vals', default='', help='Validation sets')
        cli.add_argument('--ids', default='0', help='Set the cuda devices')
        cli.add_argument('--sub', default='baseline', help='The name of network')
        # Flags (note: --save defaults to True, passing it disables saving).
        cli.add_argument('--cpu', action='store_true')
        cli.add_argument('--debug', action='store_true')
        cli.add_argument('--save', action='store_false')
        cli.add_argument('--supervised', action='store_true')
        # Output paths.
        cli.add_argument('--spath', default='save', help='model path')
        cli.add_argument('--rpath', default='result', help='visualization path')
    return cli.parse_args()
def clarin_corpora_sorted_by_size(base_directory: Path) -> List[GermanClarinCorpus]:
    """Return the German CLARIN speech corpora, ordered from smallest to largest."""
    corpora = [
        sc1(base_directory),
        pd2(base_directory),
        ziptel(base_directory),
        sc10(base_directory),
        GermanClarinCorpus('all.HEMPEL.4.cmdi.11610.', base_directory),
        GermanClarinCorpus('all.PD1.3.cmdi.16312.', base_directory),
        # VM1/VM2 are filtered to their German utterances and used for training only.
        GermanClarinCorpus('all.VM1.3.cmdi.1508.', base_directory, id_filter_regex=vm1_id_german_filter_regex, training_test_split=TrainingTestSplit.training_only),
        GermanClarinCorpus('all.RVG-J.1.cmdi.18181.', base_directory),
        # ALC is split by speaker-id prefix to avoid speaker overlap.
        GermanClarinCorpus('all.ALC.4.cmdi.16602.', base_directory, training_test_split=TrainingTestSplit.randomly_grouped_by((lambda e: e.id[:3]))),
        GermanClarinCorpus('all.VM2.3.cmdi.4260.', base_directory, id_filter_regex=vm2_id_german_filter_regex, training_test_split=TrainingTestSplit.training_only),
    ]
    return corpora
def append_embedding_input_for_ranking(column_name, input_tensors):
    """Register `input_tensors` as the 'input' of `column_name` in the
    ranking-service embedding collection (thin wrapper over
    append_tensor_to_collection)."""
    append_tensor_to_collection(RANKING_SERVICE_EMBEDDING, column_name, 'input', input_tensors)
def main_validation(default_evaluation_params_fn, validate_data_fn):
    """Validate ground-truth/submission files given on the command line.

    Expects arguments of the form -g=<gt> -s=<submission> and optionally
    -p=<params> (a dict or a JSON string wrapped in quotes). Exits with
    status 0 on success and 101 on any failure, printing the error.
    """
    try:
        cli_params = dict(s[1:].split('=') for s in sys.argv[1:])
        eval_params = default_evaluation_params_fn()
        if 'p' in cli_params:
            override = cli_params['p']
            eval_params.update(override if isinstance(override, dict) else json.loads(override[1:(- 1)]))
        validate_data_fn(cli_params['g'], cli_params['s'], eval_params)
        print('SUCCESS')
        sys.exit(0)
    except Exception as e:
        print(str(e))
        sys.exit(101)
class GPT2TokenizerFast(metaclass=DummyObject):
    """Import-time placeholder used when the `tokenizers` backend is missing.

    Any attempt to instantiate it raises an informative ImportError via
    requires_backends instead of a bare NameError.
    """
    _backends = ['tokenizers']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tokenizers'])
def operator_getitem(a, b):
    """Shape-tracing variant of `operator.getitem`.

    For tensor inputs, indexes an empty tensor like `a` with concretized
    indices (tensor indices become int64 ones-like tensors) on _DEVICE; for
    non-tensor inputs it is plain `operator.getitem`.
    """
    def _concretize(t):
        if not isinstance(t, torch.Tensor):
            return t
        concrete = torch.ones_like(t, device=_DEVICE)
        # Floating/int32 index tensors are promoted to int64 so indexing works.
        if concrete.dtype in [torch.float16, torch.float32, torch.float64, torch.int32]:
            concrete = concrete.to(torch.int64)
        return concrete

    if not isinstance(a, torch.Tensor):
        return operator.getitem(a, b)
    index = tuple(map(_concretize, b)) if isinstance(b, tuple) else _concretize(b)
    return operator.getitem(torch.empty_like(a, device=_DEVICE), index).to(_DEVICE)
def _compute_all_nbb(img_dir, conf_th, max_bb, min_bb, nproc):
    """Compute the usable bounding-box count for every .npz feature file in
    `img_dir`, in parallel over `nproc` worker processes.

    Returns a dict mapping file name -> number of boxes.
    NOTE(review): assumes _compute_item(conf_th, max_bb, min_bb) returns a
    picklable callable yielding (fname, nbb) pairs — confirm its definition.
    """
    files = glob.glob(f'{img_dir}/*.npz')
    with mp.Pool(nproc) as pool:
        # Order does not matter since results go into a dict; large chunksize
        # amortizes IPC overhead over the many small tasks.
        fname2nbb = dict(pool.imap_unordered(_compute_item(conf_th, max_bb, min_bb), tqdm(files), chunksize=2048))
    return fname2nbb
class ClassificationHead(nn.Sequential):
    """Classification head: global pool -> flatten -> dropout -> linear -> activation."""

    def __init__(self, in_channels, classes, pooling='avg', dropout=0.2, activation=None):
        if pooling not in ('max', 'avg'):
            raise ValueError("Pooling should be one of ('max', 'avg'), got {}.".format(pooling))
        if pooling == 'avg':
            pool_layer = nn.AdaptiveAvgPool2d(1)
        else:
            pool_layer = nn.AdaptiveMaxPool2d(1)
        # Falsy dropout (0 or None) disables the dropout layer entirely.
        drop_layer = nn.Dropout(p=dropout, inplace=True) if dropout else nn.Identity()
        layers = (
            pool_layer,
            Flatten(),
            drop_layer,
            nn.Linear(in_channels, classes, bias=True),
            Activation(activation),
        )
        super().__init__(*layers)
_ingredient.named_config
def cars():
    # Sacred named config for the Stanford CARS-196 dataset.
    # NOTE: the local variable names/values below ARE the config entries
    # captured by sacred — do not rename or remove them.
    name = 'cars'
    data_path = 'data/CARS_196'
    resize = (256, 256)
    # Brightness/contrast/saturation/hue jitter strengths.
    color_jitter = (0.3, 0.3, 0.3, 0.1)
    ratio = (1, 1)
def generate_data_fn2(rows, cnt, x_low, x_high, fn):
    """Sample training rows for an n-ary function `fn`.

    Each row repeats ONE uniform draw from [x_low, x_high) `cnt` times and
    keeps the row only when fn(*row) neither raises (ValueError /
    ZeroDivisionError) nor returns NaN. Returns (x, y) float32 arrays of
    shapes (rows, cnt) and (rows,).
    """
    xs = []
    ys = []
    while len(xs) < rows:
        value = np.random.uniform(x_low, x_high)
        sample = [value] * cnt
        try:
            result = fn(*sample)
            if not math.isnan(result):
                xs.append(sample)
                ys.append(result)
        except (ValueError, ZeroDivisionError):
            # Out-of-domain draw; just resample.
            pass
    return (np.array(xs, dtype=np.float32), np.array(ys, dtype=np.float32))
class BertGenerationDecoder(metaclass=DummyObject):
    """Import-time placeholder used when the `torch` backend is missing.

    Instantiation raises an informative ImportError via requires_backends.
    """
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class MuLBertTrainer(BaseTrainer):
def __init__(self, config, logger):
super().__init__(config, logger)
self.bert_config = self.config.model_config.bert
self.load()
self.scaler = torch.cuda.amp.GradScaler()
def load_dataset(self):
self.logger.write('Loading dataset')
dataset_name = self.config.dataset_config.dataset_name
atm_task = (self.bert_config.multimodal_objective == 'atm')
if (dataset_name == 'audiocaption'):
train_dataset = AudioCaptionDataset(self.config.dataset_config, atm_task=atm_task)
val_dataset = AudioCaptionDataset(self.config.dataset_config, dataset_type='val', atm_task=atm_task)
else:
raise ValueError('{} dataset is not supported.'.format(dataset_name))
self.train_loader = DataLoader(dataset=train_dataset, batch_size=self.config.training.batch_size, shuffle=self.config.training.shuffle, num_workers=self.config.training.num_workers, pin_memory=self.config.training.pin_memory, drop_last=True)
self.val_loader = DataLoader(dataset=val_dataset, batch_size=self.config.training.batch_size, shuffle=self.config.training.shuffle, num_workers=self.config.training.num_workers, pin_memory=self.config.training.pin_memory)
self.logger.write('Number of training samples: {}'.format(train_dataset.__len__()))
def build_model(self):
self.logger.write('Building model')
config = MultimodalBertConfig(self.bert_config)
if (self.config.model_config.pretrained_bert is not None):
self.logger.write('Initializing BERT weights with pretrained {}'.format(self.config.model_config.pretrained_bert))
self.model = MuLBertForPretraining.from_pretrained(pretrained_model_name_or_path=self.config.model_config.pretrained_bert, config=config, audio_config=self.config.model_config.audio)
else:
self.logger.write('No pretrained BERT initialisation. The text branch will be trained from scratch.')
self.model = MuLBertForPretraining(config, self.config.model_config.audio)
if self.model.audio_backbone.pretrained_version:
state_dict = torch.load(self.config.model_config.audio.feature_extractor_path)
self.model.audio_backbone.feature_extractor.load_state_dict(state_dict, strict=False)
if (not self.config.model_config.audio.finetune):
for param in self.model.audio_backbone.feature_extractor.parameters():
param.requires_grad = False
if self.config.model_config.freeze_text_branch:
self.logger.write('Freezing text branch')
for (name, param) in self.model.named_parameters():
text_branch = ['bert.embeddings', 'bert.encoder.layer', 'bert.t_pooler', 'cls.text_predictions']
if any(((s in name) for s in text_branch)):
param.requires_grad = False
self.model.to(self.device)
def build_optimizer(self):
self.logger.write('Building optimizer')
optimizer_name = self.config.training.optimizer
if (optimizer_name == 'adam'):
self.optimizer = Adam(self.model.parameters(), lr=self.config.training.lr)
elif (optimizer_name == 'adamw'):
self.optimizer = AdamW(self.model.parameters(), lr=self.config.training.lr, eps=self.config.training.adam_epsilon, betas=self.config.training.adam_betas)
num_train_optimization_steps = (int(((self.train_loader.dataset.__len__() / self.config.training.batch_size) / self.config.training.grad_acc_steps)) * self.config.training.epochs)
warmup_steps = (self.config.training.warmup_proportion * num_train_optimization_steps)
self.scheduler = get_linear_schedule_with_warmup(self.optimizer, num_warmup_steps=warmup_steps, num_training_steps=num_train_optimization_steps)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [{'params': [p for (n, p) in self.model.named_parameters() if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': self.config.training.weight_decay}, {'params': [p for (n, p) in self.model.named_parameters() if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0}]
elif (optimizer_name == 'adadelta'):
self.optimizer = Adadelta(self.model.parameters(), lr=self.config.training.lr)
else:
raise ValueError('{} optimizer is not supported.'.format(optimizer_name))
def train(self):
best_val_loss = np.inf
if os.path.exists(self.logger.checkpoint_path):
self.logger.write('Resumed training experiment with id {}'.format(self.logger.experiment_id))
self.load_ckp(self.logger.checkpoint_path)
else:
self.logger.write('Started training experiment with id {}'.format(self.logger.experiment_id))
self.start_epoch = 0
for epoch in range(self.start_epoch, self.config.training.epochs):
epoch_start_time = time.time()
train_loss = self.train_epoch(self.train_loader, is_training=True)
val_loss = self.train_epoch_val(self.val_loader)
epoch_time = (time.time() - epoch_start_time)
self.logger.update_training_log((epoch + 1), train_loss, val_loss, epoch_time, self.scheduler.get_last_lr()[0])
checkpoint = {'epoch': (epoch + 1), 'state_dict': self.model.state_dict(), 'optimizer': self.optimizer.state_dict()}
is_best = (val_loss < best_val_loss)
if is_best:
best_val_loss = val_loss
self.logger.save_checkpoint(state=checkpoint, is_best=is_best)
def load_ckp(self, checkpoint_path):
checkpoint = torch.load(checkpoint_path)
self.model.load_state_dict(checkpoint['state_dict'])
self.optimizer.load_state_dict(checkpoint['optimizer'])
self.start_epoch = checkpoint['epoch']
def train_epoch(self, data_loader, is_training):
running_loss = 0.0
n_batches = 0
if is_training:
self.model.train()
if (self.model.audio_backbone.pretrained_version is not None):
for module in self.model.audio_backbone.feature_extractor.modules():
if (isinstance(module, nn.BatchNorm2d) or isinstance(module, nn.BatchNorm1d)):
module.eval()
else:
self.model.eval()
for (i, batch) in enumerate(data_loader):
batch = tuple((t.to(device=self.device) for t in batch))
(audio_id, input_audio, audio_attention_mask, text_input_ids, text_input_type_ids, text_attention_mask, mlm_labels, atm_label) = batch
if (self.config.model_config.atm_loss_weight != 0):
mlm_labels = (mlm_labels * (atm_label == 0).long().unsqueeze(1))
mlm_labels[(mlm_labels == 0)] = (- 1)
with torch.cuda.amp.autocast(enabled=self.config.training.amp):
(masked_loss_t, atm_loss, masked_loss_a) = self.model(input_audio=input_audio, text_input_ids=text_input_ids, text_input_type_ids=text_input_type_ids, text_attention_mask=text_attention_mask, mlm_labels=mlm_labels, atm_label=atm_label)
masked_loss_t = (self.config.model_config.mlm_loss_weight * masked_loss_t)
masked_loss_a = (self.config.model_config.audio_loss_weight * masked_loss_a)
atm_loss = (self.config.model_config.atm_loss_weight * atm_loss)
loss = ((masked_loss_t + atm_loss) + masked_loss_a)
if is_training:
if self.config.training.amp:
if (self.config.training.grad_acc_steps > 1):
loss = (loss / self.config.training.grad_acc_steps)
self.scaler.scale(loss).backward()
self.scaler.step(self.optimizer)
self.scaler.update()
else:
loss.backward()
self.optimizer.step()
self.scheduler.step()
self.optimizer.zero_grad()
running_loss += loss.item()
n_batches += 1
return (running_loss / n_batches)
def train_epoch_val(self, data_loader):
    """Validation pass: run `train_epoch` in eval mode with gradients disabled."""
    with torch.no_grad():
        return self.train_epoch(data_loader, is_training=False)
def stable_cumsum(arr, rtol=1e-05, atol=1e-08):
    """Cumulative sum in float64 with a stability check.

    Raises RuntimeError when the final cumulative value disagrees with the
    direct sum beyond the given tolerances (a sign of float instability).
    """
    total = np.sum(arr, dtype=np.float64)
    cumulative = np.cumsum(arr, dtype=np.float64)
    if not np.allclose(cumulative[-1], total, rtol=rtol, atol=atol):
        raise RuntimeError('cumsum was found to be unstable: its last element does not correspond to sum')
    return cumulative
class DBLP4k(BaseData):
    """DBLP-4k heterogeneous graph dataset descriptor.

    Registers the dataset's files (features, labels, three edge sets and
    three relation dicts) with their md5 checksums, loaders and preprocessing
    pipelines; `BaseData` handles download/verification/loading.
    """

    def __init__(self, data_root: Optional[str]=None):
        super().__init__('dblp_4k', data_root)
        # Manifest consumed by BaseData: per-item file names, md5s, loader
        # callables and optional preprocess pipelines.
        self._content = {'num_classes': 4, 'num_vertices': 4057, 'num_paper_edges': 14328, 'num_term_edges': 7723, 'num_conf_edges': 20, 'dim_features': 334, 'features': {'upon': [{'filename': 'features.pkl', 'md5': '7f8e6c3219026c284342d45c01e16406'}], 'loader': load_from_pickle, 'preprocess': [to_tensor, partial(norm_ft, ord=1)]}, 'labels': {'upon': [{'filename': 'labels.pkl', 'md5': '6ffe5ab8c5670d8b5df595b5c4c63184'}], 'loader': load_from_pickle, 'preprocess': [to_long_tensor]}, 'edge_by_paper': {'upon': [{'filename': 'edge_by_paper.pkl', 'md5': 'e473eddeb4692f732bc1e47ae94d62c2'}], 'loader': load_from_pickle}, 'edge_by_term': {'upon': [{'filename': 'edge_by_term.pkl', 'md5': '1ca7cfbf46a7f5fc743818c65392a0ed'}], 'loader': load_from_pickle}, 'edge_by_conf': {'upon': [{'filename': 'edge_by_conf.pkl', 'md5': '890d683b7d8f943ac6d7e87043e0355e'}], 'loader': load_from_pickle}, 'paper_author_dict': {'upon': [{'filename': 'paper_author_dict.pkl', 'md5': 'eb2922e010a78961b5b66e77f9bdf950'}], 'loader': load_from_pickle}, 'term_paper_dict': {'upon': [{'filename': 'term_paper_dict.pkl', 'md5': '1d71f988b52b0e1da9d12f1d3fe24350'}], 'loader': load_from_pickle}, 'conf_paper_dict': {'upon': [{'filename': 'conf_paper_dict.pkl', 'md5': 'cbf87d64dce4ef40d2ab8406e1ee10e1'}], 'loader': load_from_pickle}}
def predict(audio_path, question):
    """Generate an answer to `question`, optionally conditioned on an audio file.

    Relies on module-level globals: `prompter`, `tokenizer`, `model`, `device`,
    sampling parameters (`temp`, `top_p`, `top_k`), `eval_log` and
    `log_save_path`. Appends the result to the JSON eval log and returns the
    trimmed generated text.
    """
    print('audio path, ', audio_path)
    begin_time = time.time()
    # NOTE(review): when audio_path is None, cur_audio_input/cur_input are
    # never assigned and the calls below raise NameError — confirm callers
    # always pass a valid path.
    if audio_path is not None:
        (cur_audio_input, cur_input) = load_audio_trans(audio_path)
        # fp16 audio features on GPU; on CPU the features are left untouched.
        if torch.cuda.is_available():
            cur_audio_input = cur_audio_input.unsqueeze(0).half().to(device)
    instruction = question
    prompt = prompter.generate_prompt(instruction, cur_input)
    print('Input prompt: ', prompt)
    inputs = tokenizer(prompt, return_tensors='pt')
    input_ids = inputs['input_ids'].to(device)
    generation_config = GenerationConfig(do_sample=True, temperature=temp, top_p=top_p, top_k=top_k, repetition_penalty=1.1, max_new_tokens=500, bos_token_id=model.config.bos_token_id, eos_token_id=model.config.eos_token_id, pad_token_id=model.config.pad_token_id, num_return_sequences=1)
    with torch.no_grad():
        generation_output = model.generate(input_ids=input_ids, audio_input=cur_audio_input, generation_config=generation_config, return_dict_in_generate=True, output_scores=True, max_new_tokens=500)
    s = generation_output.sequences[0]
    output = tokenizer.decode(s)
    # Strip leading/trailing special-token text — presumably '<s> ' and
    # '</s>'; verify against the tokenizer in use.
    output = output[5:-4]
    end_time = time.time()
    print(trim_string(output))
    cur_res = {'audio_id': audio_path, 'instruction': instruction, 'input': cur_input, 'output': trim_string(output)}
    eval_log.append(cur_res)
    with open(log_save_path, 'w') as outfile:
        json.dump(eval_log, outfile, indent=1)
    print('eclipse time: ', (end_time - begin_time), ' seconds.')
    return trim_string(output)
# NOTE(review): bare `_module()` call with no visible definition — almost
# certainly the residue of a stripped decorator/registration line mangled by
# the tool that flattened this file; confirm the original and restore it.
_module()
class GANLoss(nn.Module):
    """Adversarial loss supporting vanilla / lsgan / wgan / hinge / smgan.

    Args:
        gan_type: One of 'vanilla', 'lsgan', 'wgan', 'hinge', 'smgan'.
        real_label_val: Target value used for real samples.
        fake_label_val: Target value used for fake samples.
        loss_weight: Multiplier applied to generator-side losses only
            (discriminator losses are returned unweighted).
    """

    def __init__(self, gan_type, real_label_val=1.0, fake_label_val=0.0, loss_weight=1.0):
        super().__init__()
        self.gan_type = gan_type
        self.real_label_val = real_label_val
        self.fake_label_val = fake_label_val
        self.loss_weight = loss_weight
        if self.gan_type == 'smgan':
            self.gaussian_blur = GaussianBlur()
        if self.gan_type == 'vanilla':
            self.loss = nn.BCEWithLogitsLoss()
        elif self.gan_type in ('lsgan', 'smgan'):
            self.loss = nn.MSELoss()
        elif self.gan_type == 'wgan':
            self.loss = self._wgan_loss
        elif self.gan_type == 'hinge':
            self.loss = nn.ReLU()
        else:
            raise NotImplementedError(f'GAN type {self.gan_type} is not implemented.')

    def _wgan_loss(self, input, target):
        # WGAN critic objective: maximize scores on real, minimize on fake.
        mean_score = input.mean()
        return -mean_score if target else mean_score

    def get_target_label(self, input, target_is_real):
        # WGAN consumes the boolean directly inside _wgan_loss.
        if self.gan_type == 'wgan':
            return target_is_real
        value = self.real_label_val if target_is_real else self.fake_label_val
        return input.new_ones(input.size()) * value

    def forward(self, input, target_is_real, is_disc=False, mask=None):
        target_label = self.get_target_label(input, target_is_real)
        if self.gan_type == 'hinge':
            if is_disc:
                # Discriminator hinge: relu(1 - D(real)) / relu(1 + D(fake)).
                if target_is_real:
                    input = -input
                loss = self.loss(1 + input).mean()
            else:
                loss = -input.mean()
        elif self.gan_type == 'smgan':
            input_height, input_width = input.shape[2:]
            mask_height, mask_width = mask.shape[2:]
            if (input_height, input_width) != (mask_height, mask_width):
                input = F.interpolate(input, size=(mask_height, mask_width), mode='bilinear', align_corners=True)
                target_label = self.get_target_label(input, target_is_real)
            if is_disc:
                if not target_is_real:
                    # Soft-mask target for fakes: blurred inpainting mask.
                    blurred = self.gaussian_blur(mask).detach()
                    target_label = blurred.cuda() if mask.is_cuda else blurred.cpu()
                loss = self.loss(input, target_label)
            else:
                loss = (self.loss(input, target_label) * mask) / mask.mean()
                loss = loss.mean()
        else:
            loss = self.loss(input, target_label)
        return loss if is_disc else (loss * self.loss_weight)
class LogF1PrecRecHeatmap(Callback):
    """Lightning callback that logs a per-class F1/precision/recall heatmap to W&B.

    Predictions and targets are buffered on every validation batch and the
    heatmap is rendered once per validation epoch; sanity-check runs are
    skipped via the `ready` flag.
    """

    def __init__(self, class_names: List[str]=None):
        self.preds = []
        self.targets = []
        self.ready = True

    def on_sanity_check_start(self, trainer, pl_module):
        self.ready = False

    def on_sanity_check_end(self, trainer, pl_module):
        self.ready = True

    def on_validation_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
        if not self.ready:
            return
        self.preds.append(outputs['preds'])
        self.targets.append(outputs['targets'])

    def on_validation_epoch_end(self, trainer, pl_module):
        if not self.ready:
            return
        experiment = get_wandb_logger(trainer=trainer).experiment
        preds = torch.cat(self.preds).cpu().numpy()
        targets = torch.cat(self.targets).cpu().numpy()
        # Rows of the heatmap, in display order: F1, precision, recall.
        scores = [
            f1_score(targets, preds, average=None),
            precision_score(targets, preds, average=None),
            recall_score(targets, preds, average=None),
        ]
        plt.figure(figsize=(14, 3))
        sn.set(font_scale=1.2)
        sn.heatmap(scores, annot=True, annot_kws={'size': 10}, fmt='.3f', yticklabels=['F1', 'Precision', 'Recall'])
        experiment.log({f'f1_p_r_heatmap/{experiment.name}': wandb.Image(plt)}, commit=False)
        plt.clf()
        self.preds.clear()
        self.targets.clear()
def calculate_fid_given_paths(paths, batch_size, cuda, dims):
    """Compute the FID between the image folders `paths[0]` and `paths[1]`."""
    missing = [p for p in paths if not os.path.exists(p)]
    if missing:
        raise RuntimeError('Invalid path: %s' % missing[0])
    block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims]
    model = InceptionV3([block_idx])
    if cuda:
        model.cuda()
    stats = []
    for idx, path in enumerate(paths[:2], start=1):
        print('calculate path%d statistics...' % idx)
        stats.append(_compute_statistics_of_path(path, model, batch_size, dims, cuda))
    (m1, s1), (m2, s2) = stats
    print('calculate frechet distance...')
    return calculate_frechet_distance(m1, s1, m2, s2)
class VectorizedGP(VectorizedModel):
    """GP regression model whose parameters live in a flat OrderedDict.

    The mean is either a constant or an NN; the covariance is an SE kernel,
    optionally applied to NN-learned features. Parameters are registered via
    `_param`/`_param_module` into `self._params` (instead of nn.Module
    parameters) so they can be batched/functionalized externally.
    """

    def __init__(self, input_dim, feature_dim=2, covar_module_str='SE', mean_module_str='constant', mean_nn_layers=(32, 32), kernel_nn_layers=(32, 32), nonlinearlity=torch.tanh):
        super().__init__(input_dim, 1)
        self._params = OrderedDict()
        self.mean_module_str = mean_module_str
        self.covar_module_str = covar_module_str
        if (mean_module_str == 'NN'):
            self.mean_nn = self._param_module('mean_nn', NeuralNetworkVectorized(input_dim, 1, layer_sizes=mean_nn_layers, nonlinearlity=nonlinearlity))
        elif (mean_module_str == 'constant'):
            self.constant_mean = self._param('constant_mean', torch.zeros(1, 1))
        else:
            raise NotImplementedError
        if (covar_module_str == 'NN'):
            # Kernel acts on NN-extracted features, so the lengthscale is
            # sized by feature_dim rather than input_dim.
            self.kernel_nn = self._param_module('kernel_nn', NeuralNetworkVectorized(input_dim, feature_dim, layer_sizes=kernel_nn_layers, nonlinearlity=nonlinearlity))
            self.lengthscale_raw = self._param('lengthscale_raw', torch.zeros(1, feature_dim))
        elif (covar_module_str == 'SE'):
            self.lengthscale_raw = self._param('lengthscale_raw', torch.zeros(1, input_dim))
        else:
            raise NotImplementedError
        # Raw (pre-softplus) observation-noise parameter.
        self.noise_raw = self._param('noise_raw', torch.zeros(1, 1))

    def forward(self, x_data, y_data, train=True, prior=False):
        """Build the GP for batched data; return (gp, likelihood) or (pred, mll).

        x_data is expected batched: (task_batch, n_points, input_dim).
        """
        assert (x_data.ndim == 3)
        if (self.mean_module_str == 'NN'):
            learned_mean = self.mean_nn
            mean_module = None
        else:
            learned_mean = None
            mean_module = ConstantMeanLight(self.constant_mean)
        if (self.covar_module_str == 'NN'):
            learned_kernel = self.kernel_nn
        else:
            learned_kernel = None
        # Softplus keeps lengthscale/noise positive.
        lengthscale = F.softplus(self.lengthscale_raw)
        lengthscale = lengthscale.view(lengthscale.shape[0], 1, lengthscale.shape[1])
        covar_module = SEKernelLight(lengthscale)
        noise = F.softplus(self.noise_raw)
        likelihood = GaussianLikelihoodLight(noise)
        gp = LearnedGPRegressionModel(x_data, y_data, likelihood, mean_module=mean_module, covar_module=covar_module, learned_mean=learned_mean, learned_kernel=learned_kernel)
        if prior:
            # Prior mode: return the untrained modules in train() state.
            gp.train()
            likelihood.train()
            return (gp, likelihood)
        elif train:
            # Training mode: return predictive distribution and marginal LL.
            mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp)
            output = gp(x_data)
            return (likelihood(output), mll(output, y_data))
        else:
            # Inference mode.
            gp.eval()
            likelihood.eval()
            return (gp, likelihood)

    def parameter_shapes(self):
        """Map each registered parameter name to its tensor shape."""
        return OrderedDict([(name, param.shape) for (name, param) in self.named_parameters().items()])

    def named_parameters(self):
        # NOTE: unlike nn.Module, this returns the dict itself (not an iterator).
        return self._params

    def _param_module(self, name, module):
        """Register every parameter of `module` under the prefix `name.`."""
        assert (type(name) == str)
        assert hasattr(module, 'named_parameters')
        for (param_name, param) in module.named_parameters().items():
            self._param(((name + '.') + param_name), param)
        return module

    def _param(self, name, tensor):
        """Register a single tensor parameter, moving it to the global device."""
        assert (type(name) == str)
        assert isinstance(tensor, torch.Tensor)
        assert (name not in list(self._params.keys()))
        # `device` here is a module-level global, not an attribute.
        if (not (device.type == tensor.device.type)):
            tensor = tensor.to(device)
        self._params[name] = tensor
        return tensor

    def __call__(self, *args, **kwargs):
        return self.forward(*args, **kwargs)
def make_keras_picklable():
    """Monkey-patch keras.models.Model so instances can be pickled.

    Pickles by saving the model to a temporary HDF5 file and embedding the raw
    bytes in the pickle state; unpickles by writing the bytes back and
    reloading. NOTE(review): relies on old Keras APIs
    (keras.applications.mobilenet.relu6, keras.utils.generic_utils) — confirm
    the pinned Keras version still exposes them.
    """
    import keras.models

    def __getstate__(self):
        model_str = ''
        with tempfile.NamedTemporaryFile(suffix='.hdf5', delete=True) as fd:
            keras.models.save_model(self, fd.name, overwrite=True)
            model_str = fd.read()
        return {'model_str': model_str}

    def __setstate__(self, state):
        with tempfile.NamedTemporaryFile(suffix='.hdf5', delete=True) as fd:
            fd.write(state['model_str'])
            fd.flush()
            try:
                model = keras.models.load_model(fd.name)
            except ValueError:
                # Older MobileNet models need their custom objects registered
                # before they can be deserialized.
                from keras.applications import mobilenet
                from keras.utils.generic_utils import CustomObjectScope
                scope = {'relu6': mobilenet.relu6, 'DepthwiseConv2D': mobilenet.DepthwiseConv2D}
                with CustomObjectScope(scope):
                    model = keras.models.load_model(fd.name)
            self.__dict__ = model.__dict__

    cls = keras.models.Model
    cls.__getstate__ = __getstate__
    cls.__setstate__ = __setstate__
class PPOTrainer(Trainer):
    """PPO trainer that runs several copies of a Phantom env in lockstep.

    Exactly one policy is trained; every other agent acts through its fixed
    policy from `policy_instances`. Rollouts are collected across all env
    copies and optimized with clipped-surrogate PPO.
    """
    # Policy class instantiated for the trained agent.
    policy_class = PPOPolicy

    def __init__(self, tensorboard_log_dir: Optional[str]=None, ppo_epoch: int=4, num_mini_batch: int=32, clip_param: float=0.2, use_clipped_value_loss: bool=True, use_linear_lr_decay: bool=False, lr: float=0.0007, eps: float=1e-05, value_loss_coef: float=0.5, entropy_coef: float=0.01, max_grad_norm: float=0.5, use_gae: bool=False, gamma: float=0.99, gae_lambda: float=0.95, use_proper_time_limits: bool=False) -> None:
        """Store standard PPO hyper-parameters (Schulman et al., 2017)."""
        super().__init__(tensorboard_log_dir)
        self.ppo_epoch = ppo_epoch
        self.num_mini_batch = num_mini_batch
        self.clip_param = clip_param
        self.use_clipped_value_loss = use_clipped_value_loss
        self.use_linear_lr_decay = use_linear_lr_decay
        self.lr = lr
        self.eps = eps
        self.value_loss_coef = value_loss_coef
        self.entropy_coef = entropy_coef
        self.max_grad_norm = max_grad_norm
        self.use_gae = use_gae
        self.gamma = gamma
        self.gae_lambda = gae_lambda
        self.use_proper_time_limits = use_proper_time_limits

    def train(self, env_class: Type[PhantomEnv], num_iterations: int, policies: PolicyMapping, policies_to_train: Sequence[PolicyID], env_config: Optional[Mapping[(str, Any)]]=None, metrics: Optional[Mapping[(str, Metric)]]=None) -> TrainingResults:
        """Run `num_iterations` PPO updates; returns the trained policies.

        Exactly one entry in `policies_to_train` is supported.
        """
        env_config = (env_config or {})
        self.metrics = (metrics or {})
        check_env_config(env_config)
        # NOTE(review): number of parallel env copies is hard-coded.
        num_envs = 10
        envs = []
        observations = []
        for _ in range(num_envs):
            env = env_class(**env_config)
            observations.append(env.reset())
            envs.append(env)
        (policy_mapping, policy_instances) = self.setup_policy_specs_and_mapping(env, policies)
        assert (len(policies_to_train) == 1)
        policy_to_train = policies_to_train[0]
        training_policy = policy_instances[policy_to_train]
        # The (single) agent that maps to the trained policy.
        training_agent = next((a for (a, p) in policy_mapping.items() if (p == policy_to_train)))
        assert isinstance(training_policy, self.policy_class)
        device = torch.device('cpu')
        self.actor_critic = PPOPolicy(training_policy.observation_space, training_policy.action_space, base_kwargs={'recurrent': False})
        self.actor_critic.to(device)
        self.optimizer = torch.optim.Adam(self.actor_critic.parameters(), lr=self.lr, eps=self.eps)
        rollouts = RolloutStorage(envs[0].num_steps, num_envs, training_policy.observation_space, training_policy.action_space, self.actor_critic.recurrent_hidden_state_size)
        agent_obs = np.array([obs[training_agent] for obs in observations])
        rollouts.obs[0].copy_(torch.FloatTensor(agent_obs))
        rollouts.to(device)
        for i in rich.progress.track(range(num_iterations), description='Training...'):
            if self.use_linear_lr_decay:
                update_linear_schedule(self.optimizer, i, num_iterations, self.lr)
            # NOTE(review): accumulated but never read after the loop — confirm.
            episode_rewards = defaultdict(list)
            # NOTE(review): `env` here is the last env built above — presumably
            # all copies share the same num_steps; verify.
            for step in range(env.num_steps):
                with torch.no_grad():
                    # NOTE(review): reshape((-1, 1)) assumes flat/scalar-per-row
                    # observations — confirm against the observation space.
                    (value, trained_policy_actions, action_log_prob, recurrent_hidden_states) = self.actor_critic.act(rollouts.obs[step].reshape(((- 1), 1)), rollouts.recurrent_hidden_states[step], rollouts.masks[step])
                new_observations: List[Dict[(AgentID, Any)]] = []
                rewards: List[Dict[(AgentID, float)]] = []
                terminations: List[Dict[(AgentID, bool)]] = []
                truncations: List[Dict[(AgentID, bool)]] = []
                infos: List[Dict[(AgentID, Any)]] = []
                # Step every env copy with its own slice of the batched actions.
                for (env, obs, tpa) in zip(envs, observations, trained_policy_actions):
                    actions: Dict[(AgentID, Any)] = {}
                    for (agent_id, agent_obs) in obs.items():
                        policy_name = policy_mapping[agent_id]
                        policy = policy_instances[policy_name]
                        if (policy_name == policy_to_train):
                            # Unwrap scalar actions; keep vectors as arrays.
                            if (len(tpa) == 1):
                                actions[agent_id] = tpa[0]
                            else:
                                actions[agent_id] = np.array(tpa)
                        else:
                            actions[agent_id] = policy.compute_action(agent_obs)
                    (o, r, te, tr, i_) = env.step(actions)
                    new_observations.append(o)
                    rewards.append(r)
                    terminations.append(te)
                    truncations.append(tr)
                    infos.append(i_)
                observations = new_observations
                for agent_id in rewards[0].keys():
                    episode_rewards[agent_id].append(np.mean([r[agent_id] for r in rewards]))
                # Mask 0.0 where the training agent's episode ended.
                masks = torch.FloatTensor([([0.0] if (te[training_agent] or tr[training_agent]) else [1.0]) for (te, tr) in zip(terminations, truncations)])
                bad_masks = torch.FloatTensor([([0.0] if ('bad_transition' in info[training_agent].keys()) else [1.0]) for info in infos])
                training_observations = torch.FloatTensor([obs[training_agent] for obs in observations])
                training_rewards = torch.FloatTensor([[rwd[training_agent]] for rwd in rewards])
                rollouts.insert(training_observations, recurrent_hidden_states, trained_policy_actions, action_log_prob, value, training_rewards, masks, bad_masks)
                self.log_vec_rewards(rewards)
            self.log_vec_metrics(envs)
            with torch.no_grad():
                next_value = self.actor_critic.get_value(rollouts.obs[(- 1)].reshape(((- 1), 1)), rollouts.recurrent_hidden_states[(- 1)], rollouts.masks[(- 1)]).detach()
            rollouts.compute_returns(next_value, self.use_gae, self.gamma, self.gae_lambda, self.use_proper_time_limits)
            self.update(rollouts)
            rollouts.after_update()
            self.tbx_write_values(i)
        return TrainingResults(policy_instances)

    def update(self, rollouts: RolloutStorage) -> Tuple[(float, float, float)]:
        """Run `ppo_epoch` clipped-PPO passes over the rollout buffer.

        Returns mean (value_loss, action_loss, dist_entropy) per update.
        """
        advantages = (rollouts.returns[:(- 1)] - rollouts.value_preds[:(- 1)])
        # Normalize advantages for stability.
        advantages = ((advantages - advantages.mean()) / (advantages.std() + 1e-05))
        value_loss_epoch = 0.0
        action_loss_epoch = 0.0
        dist_entropy_epoch = 0.0
        for _ in range(self.ppo_epoch):
            if self.actor_critic.is_recurrent:
                data_generator = rollouts.recurrent_generator(advantages, self.num_mini_batch)
            else:
                data_generator = rollouts.feed_forward_generator(advantages, self.num_mini_batch)
            for sample in data_generator:
                (obs_batch, recurrent_hidden_states_batch, actions_batch, value_preds_batch, return_batch, masks_batch, old_action_log_probs_batch, adv_targ) = sample
                (values, action_log_probs, dist_entropy, _) = self.actor_critic.evaluate_actions(obs_batch.reshape(((- 1), 1)), recurrent_hidden_states_batch, masks_batch, actions_batch)
                # Clipped surrogate objective.
                ratio = torch.exp((action_log_probs - old_action_log_probs_batch))
                surr1 = (ratio * adv_targ)
                surr2 = (torch.clamp(ratio, (1.0 - self.clip_param), (1.0 + self.clip_param)) * adv_targ)
                action_loss = (- torch.min(surr1, surr2).mean())
                if self.use_clipped_value_loss:
                    # Clip value updates around the old value predictions.
                    value_pred_clipped = (value_preds_batch + (values - value_preds_batch).clamp((- self.clip_param), self.clip_param))
                    value_losses = (values - return_batch).pow(2)
                    value_losses_clipped = (value_pred_clipped - return_batch).pow(2)
                    value_loss = (0.5 * torch.max(value_losses, value_losses_clipped).mean())
                else:
                    value_loss = (0.5 * (return_batch - values).pow(2).mean())
                self.optimizer.zero_grad()
                (((value_loss * self.value_loss_coef) + action_loss) - (dist_entropy * self.entropy_coef)).backward()
                torch.nn.utils.clip_grad_norm_(self.actor_critic.parameters(), self.max_grad_norm)
                self.optimizer.step()
                value_loss_epoch += value_loss.item()
                action_loss_epoch += action_loss.item()
                dist_entropy_epoch += dist_entropy.item()
        num_updates = (self.ppo_epoch * self.num_mini_batch)
        value_loss_epoch /= num_updates
        action_loss_epoch /= num_updates
        dist_entropy_epoch /= num_updates
        return (value_loss_epoch, action_loss_epoch, dist_entropy_epoch)
class ColorJitter(_BasicTransform):
    """Randomly jitter brightness / contrast / saturation / hue of an image.

    Each non-zero argument enables the corresponding jitter with a factor
    sampled uniformly around 1 (hue is sampled in [-hue, hue]). Masks are
    passed through untouched.
    """

    def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
        self.brightness = brightness
        self.contrast = contrast
        self.saturation = saturation
        self.hue = hue

    # BUG FIX: this was a plain instance method but was invoked as
    # `self.get_params(b, c, s, h)`, which also passed `self` as `brightness`
    # and raised TypeError (5 args for 4 params). It uses no instance state,
    # so it is a static method.
    @staticmethod
    def get_params(brightness, contrast, saturation, hue):
        """Build a randomly-ordered composition of the enabled jitters."""
        transforms = []
        if (brightness > 0):
            brightness_factor = np.random.uniform(max(0, (1 - brightness)), (1 + brightness))
            transforms.append(torch_tr.Lambda((lambda img: random_adjust_brightness(img, brightness_factor))))
        if (contrast > 0):
            contrast_factor = np.random.uniform(max(0, (1 - contrast)), (1 + contrast))
            transforms.append(torch_tr.Lambda((lambda img: random_adjust_contrast(img, contrast_factor))))
        if (saturation > 0):
            saturation_factor = np.random.uniform(max(0, (1 - saturation)), (1 + saturation))
            transforms.append(torch_tr.Lambda((lambda img: random_adjust_saturation(img, saturation_factor))))
        if (hue > 0):
            hue_factor = np.random.uniform((- hue), hue)
            transforms.append(torch_tr.Lambda((lambda img: random_adjust_hue(img, hue_factor))))
        # Apply the enabled jitters in a random order.
        np.random.shuffle(transforms)
        transform = ComposeSingleInput(transforms)
        return transform

    def __call__(self, img, mask1=None, mask2=None):
        # Skip jitter with probability PROB_THRESHOLD (module-level global).
        if (random.random() < PROB_THRESHOLD):
            return (img, mask1, mask2)
        transform = self.get_params(self.brightness, self.contrast, self.saturation, self.hue)
        return (transform(img), mask1, mask2)
def _merge_a_into_b(a, b, stack=None):
    """Recursively merge AttrDict `a` into AttrDict `b`, mutating `b`.

    `stack` tracks the key path for error messages; unknown keys raise
    KeyError with the full dotted path.
    """
    assert isinstance(a, AttrDict), 'Argument `a` must be an AttrDict'
    assert isinstance(b, AttrDict), 'Argument `b` must be an AttrDict'
    for k, v_ in a.items():
        full_key = k if stack is None else '.'.join(stack) + '.' + k
        if k not in b:
            raise KeyError('Non-existent config key: {}'.format(full_key))
        # Decode the (deep-copied) value and coerce it to b's declared type.
        v = copy.deepcopy(v_)
        v = _decode_cfg_value(v)
        v = _check_and_coerce_cfg_value_type(v, b[k], k, full_key)
        if isinstance(v, AttrDict):
            child_stack = [k] if stack is None else stack + [k]
            _merge_a_into_b(v, b[k], stack=child_stack)
        else:
            b[k] = v
def rotate_batch(batch, label):
    """Rotate a batch for self-supervised rotation prediction.

    `label` may be 'rand' (random rotation class per sample), 'expand'
    (stack all four rotations of the batch), or a fixed int rotation class.
    Returns (rotated_batch, rotation_labels).
    """
    n = len(batch)
    if label == 'rand':
        labels = torch.randint(4, (n,), dtype=torch.long)
    elif label == 'expand':
        # Four copies of the batch, labelled 0..3 block-wise.
        labels = torch.cat([torch.full((n,), k, dtype=torch.long) for k in range(4)])
        batch = batch.repeat((4, 1, 1, 1))
    else:
        assert isinstance(label, int)
        labels = torch.full((n,), label, dtype=torch.long)
    return (rotate_batch_with_labels(batch, labels), labels)
class RoIAlignAvg(Module):
    """RoIAlign at (h+1, w+1) followed by a 2x2 stride-1 average pool."""

    def __init__(self, aligned_height, aligned_width, spatial_scale, sampling_ratio):
        super(RoIAlignAvg, self).__init__()
        self.aligned_width = int(aligned_width)
        self.aligned_height = int(aligned_height)
        self.spatial_scale = float(spatial_scale)
        self.sampling_ratio = int(sampling_ratio)

    def forward(self, features, rois):
        align = RoIAlignFunction(self.aligned_height + 1, self.aligned_width + 1, self.spatial_scale, self.sampling_ratio)
        aligned = align(features, rois)
        # 2x2 stride-1 average pooling reduces (h+1, w+1) back to (h, w).
        return avg_pool2d(aligned, kernel_size=2, stride=1)
class ComponentEncoder(nn.Module):
    """Applies `body` layers sequentially and exposes two intermediate features.

    Returns a dict with the activation after layer index 2 ('mid') and after
    layer index 6 ('last'); optionally squashes both through a sigmoid.
    """

    # Layer indices whose outputs are exposed, and their feature names.
    _TAPS = {2: 'mid', 6: 'last'}

    def __init__(self, body, final_shape, sigmoid=False):
        super().__init__()
        self.body = nn.ModuleList(body)
        self.final_shape = final_shape
        self.sigmoid = sigmoid

    def forward(self, x):
        feats = {}
        for idx, layer in enumerate(self.body):
            x = layer(x)
            tap = self._TAPS.get(idx)
            if tap is not None:
                feats[tap] = x
        if self.sigmoid:
            feats = {name: torch.sigmoid(t) for name, t in feats.items()}
        return feats
from dataclasses import dataclass


# NOTE(review): `field(...)` only takes effect under @dataclass — without it
# these class attributes are bare Field objects and no __init__ is generated.
# The decorator appears to have been stripped by the tool that flattened this
# file; restored here.
@dataclass
class ScriptArguments:
    """Script arguments for model selection (HfArgumentParser-style)."""
    model_name: Optional[str] = field(default='./output_threat_type', metadata={'help': 'the model name'})
    output_name: Optional[str] = field(default=None, metadata={'help': 'the model name'})
def parse_pgb_tree(bins, nodes_idx, nodes_split_bin, nodes_split_feature, leaves_idx, leaves_mu, learning_rate, lt_op=0, is_float32=False):
    """Convert one PGBM tree (flat arrays) into a `Tree` structure.

    Nodes are numbered heap-style (children of `n` are `2n` and `2n+1`).
    `nodes_idx` holds internal-node ids with their split feature/bin;
    `leaves_idx`/`leaves_mu` hold leaf ids and values. Leaf outputs are
    negated and scaled by `learning_rate`; -1 marks "no value" slots.
    """
    children_left = []
    children_right = []
    feature = []
    threshold = []
    leaf_vals = []
    if (np.sum(nodes_idx) == 0):
        # Degenerate tree: the root itself is a leaf.
        leaf_vals.append(((- leaves_mu[0]) * learning_rate))
        feature.append((- 1))
        threshold.append((- 1))
    else:
        # Root is an internal node; record its split, then BFS the children.
        leaf_vals.append((- 1))
        feature.append(nodes_split_feature[0])
        threshold.append(bins[(nodes_split_feature[0], nodes_split_bin[0])])
        node_id = 1
        # Queue of (heap_index, is_left) starting with the root's children;
        # None entries are placeholders below leaves to keep siblings paired.
        stack = [((node_id * 2), 0), (((node_id * 2) + 1), 1)]
        while (len(stack) > 0):
            (node, is_left) = stack.pop(0)
            if (node is None):
                # No child on this side (parent was a leaf).
                if is_left:
                    children_left.append((- 1))
                else:
                    children_right.append((- 1))
            else:
                if is_left:
                    children_left.append(node_id)
                else:
                    children_right.append(node_id)
                if (node in nodes_idx):
                    # Internal node: record its split and enqueue both children.
                    node_idx_arr = np.where((nodes_idx == node))[0]
                    assert (len(node_idx_arr) == 1)
                    node_idx = node_idx_arr[0]
                    feature.append(nodes_split_feature[node_idx])
                    threshold.append(bins[(nodes_split_feature[node_idx], nodes_split_bin[node_idx])])
                    leaf_vals.append((- 1))
                    stack.append(((node * 2), 0))
                    stack.append((((node * 2) + 1), 1))
                else:
                    # Leaf: record its (negated, scaled) value and enqueue
                    # placeholders so the bookkeeping stays aligned.
                    leaf_idx_arr = np.where((leaves_idx == node))[0]
                    assert (len(leaf_idx_arr) == 1)
                    leaf_idx = leaf_idx_arr[0]
                    feature.append((- 1))
                    threshold.append((- 1))
                    leaf_vals.append(((- leaves_mu[leaf_idx]) * learning_rate))
                    stack.append((None, 0))
                    stack.append((None, 1))
                node_id += 1
    return Tree(children_left, children_right, feature, threshold, leaf_vals, lt_op, is_float32)
# NOTE(review): stray `('slow_tv')` expression — likely the argument of a
# stripped registration decorator for the class below (e.g. `@register('slow_tv')`);
# confirm the original and restore it.
('slow_tv')
class SlowTvDataset(MdeBaseDataset):
    """SlowTV monocular-depth dataset: images, temporal support frames and K.

    Provides no stereo pairs and no ground-truth depth (those loaders raise);
    support frames are temporal neighbours whose offset can be randomized per
    scene category.
    """
    # Data items this dataset can provide.
    VALID_DATUM = 'image support K'
    # Native (height, width) of the stored frames.
    SHAPE = (720, 1280)

    def __init__(self, split: str, mode: str, **kwargs):
        super().__init__(**kwargs)
        self.split = split
        self.mode = mode
        (self.split_file, self.items_data) = self.parse_items()
        self.cats = self.parse_cats()
        # Max random support-frame offset per scene category.
        self._max_offset_per_cat = {'natural': 5, 'driving': 1, 'underwater': 5}

    def log_args(self):
        """Log split/mode before the base-class argument summary."""
        self.logger.info(f"Split: '{self.split}' - Mode: '{self.mode}'")
        super().log_args()

    def validate_args(self) -> None:
        """Reject configurations that request stereo support (index 0)."""
        super().validate_args()
        if (0 in self.supp_idxs):
            raise ValueError('SlowTV does not provide stereo pairs.')

    def parse_items(self) -> tuple[(Path, ty.S[stv.Item])]:
        """Load the split file and its items for this mode/split."""
        (file, items) = stv.load_split(self.mode, self.split)
        return (file, items)

    def parse_cats(self) -> dict[(str, str)]:
        """Map each sequence name to its scene category."""
        return {seq: c for (seq, c) in zip(stv.get_seqs(), stv.load_categories(subcats=False))}

    def _load_image(self, data: stv.Item, offset: int=0) -> Image:
        """Load the frame at `data.stem + offset`, resizing if configured."""
        file = stv.get_img_file(seq=data.seq, stem=(int(data.stem) + offset))
        if (not file.is_file()):
            # Missing target frame vs missing support frame raise differently.
            exc = (FileNotFoundError if (offset == 0) else ty.SuppImageNotFoundError)
            raise exc(f'Could not find specified file "{file}" with "offset={offset!r}"')
        img = Image.open(file)
        if self.should_resize:
            img = img.resize(self.size, resample=Image.Resampling.BILINEAR)
        return img

    def get_supp_scale(self, data: stv.Item) -> int:
        """Support-frame offset: 1, or random up to the per-category max."""
        if (not self.randomize_supp):
            return 1
        cat = self.cats[data.seq]
        k = random.randint(1, self._max_offset_per_cat[cat])
        return k

    def _load_K(self, data: stv.Item) -> ty.A:
        """Load camera intrinsics, rescaled to the working resolution."""
        K = stv.load_intrinsics(data[0])
        if self.should_resize:
            K = geo.resize_K(K, self.shape, self.SHAPE)
        return K

    def _load_stereo_image(self, data: stv.Item) -> None:
        raise NotImplementedError('SlowTV does not contain stereo pairs.')

    def _load_stereo_T(self, data: stv.Item) -> None:
        raise NotImplementedError('SlowTV does not contain stereo pairs.')

    def _load_depth(self, data: stv.Item) -> None:
        raise NotImplementedError('SlowTV does not contain ground-truth depth.')
def hypertree_model(images=None, vectors=None, image_shapes=None, vector_shapes=None, dropout_rate=None, activation='sigmoid', final_pooling=None, include_top=True, top='segmentation', top_block_filters=64, classes=1, output_shape=None, create_image_tree_roots_fn=None, create_vector_tree_roots_fn=None, create_tree_trunk_fn=None, top_block_dense_layers=0, top_block_hidden_activation='relu', top_block_fn=top_block, verbose=0):
    """Assemble a Keras "hypertree" model from image and vector branches.

    Image/vector inputs (or their shapes) feed per-branch root subnetworks,
    get concatenated (vectors are tiled across image features), optionally
    pass through a trunk, and end in one top block per output head.
    """
    if ((images is None) and (image_shapes is None)):
        raise ValueError('There must be at least one entry in the images parameter or the image_shapes parameter.')
    if (output_shape is None):
        # Default the output shape to the first image's spatial shape.
        if (images is not None):
            output_shape = keras.backend.int_shape(images[0])[1:]
        elif (image_shapes is not None):
            output_shape = image_shapes[0]
    (image_inputs, image_logits) = create_tree_roots(images, image_shapes, make_layer_fn=create_image_tree_roots_fn)
    (vector_inputs, vector_logits) = create_tree_roots(vectors, vector_shapes, make_layer_fn=create_vector_tree_roots_fn)
    if ((vector_logits is None) and isinstance(image_logits, list)):
        # Image-only model: merge the image branches directly.
        if (len(image_logits) > 1):
            x = Concatenate(axis=(- 1))(image_logits)
        else:
            [x] = image_logits
    else:
        v = vector_logits
        # NOTE(review): if vector_logits is None here, len() raises; and if v
        # ends up None, `x` is never assigned before use below — confirm which
        # caller configurations are actually supported.
        if (len(vector_logits) > 1):
            v = Concatenate(axis=(- 1))(v)
        elif isinstance(v, list):
            [v] = v
        else:
            raise ValueError(('Unknown configuration of input vectors, you will need to look at the code and see what went wrong with v: ' + str(v)))
        if (v is not None):
            # Tile the vector across image features and concatenate.
            x = concat_images_with_tiled_vector_layer(image_logits, v)
    if (create_tree_trunk_fn is not None):
        x = create_tree_trunk_fn(x)
    if (not isinstance(x, list)):
        x = [x]
    # One top block per trunk output; heads get numeric name suffixes when
    # there is more than one.
    xs = []
    name = ''
    for (i, xi) in enumerate(x):
        if (len(x) > 1):
            name = str(i)
        xi = top_block_fn(xi, output_shape, top, dropout_rate, include_top, classes, activation, final_pooling, top_block_filters, dense_layers=top_block_dense_layers, hidden_activation=top_block_hidden_activation, name=name, verbose=verbose)
        xs += [xi]
    if (len(xs) == 1):
        [x] = xs
    else:
        x = xs
    inputs = (image_inputs + vector_inputs)
    print(('hypertree_model x: ' + str(x)))
    model = keras.models.Model(inputs=inputs, outputs=x)
    return model
class DistilBertOnnxConfig(OnnxConfig):
    """ONNX export configuration for DistilBERT (dynamic input axes only)."""

    # NOTE(review): in `transformers`, `OnnxConfig.inputs` is a @property; the
    # decorator looks stripped by the tool that flattened this file — restored.
    @property
    def inputs(self) -> Mapping[(str, Mapping[(int, str)])]:
        # Multiple-choice adds a per-choice axis between batch and sequence.
        if (self.task == 'multiple-choice'):
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)])
class MPNetModel():
    """Placeholder for the real MPNetModel when PyTorch is unavailable.

    Mirrors the `transformers` dummy-object pattern: every entry point simply
    raises through `requires_pytorch`.
    """

    def __init__(self, *args, **kwargs):
        requires_pytorch(self)

    # NOTE(review): upstream this is a @classmethod; the decorator may have
    # been stripped when the file was flattened — confirm.
    def from_pretrained(self, *args, **kwargs):
        requires_pytorch(self)
def fmeasure_from_file(golden_file, predict_file, label_type='BMES'):
    """Print NER precision/recall/F1 comparing predictions against gold labels.

    Args:
        golden_file: Path to the gold-standard sentence/label file.
        predict_file: Path to the predicted sentence/label file.
        label_type: Tagging scheme passed to `get_ner_fmeasure` (e.g. 'BMES').
    """
    print('Get f measure from file:', golden_file, predict_file)
    print('Label format:', label_type)
    (golden_sent, golden_labels) = readSentence(golden_file)
    (predict_sent, predict_labels) = readSentence(predict_file)
    (P, R, F) = get_ner_fmeasure(golden_labels, predict_labels, label_type)
    # Fixed format-string typo: was 'P:%sm R:%s, F:%s'.
    print(('P:%s, R:%s, F:%s' % (P, R, F)))
def convert_to_skopt_space(method, space):
    """Convert the hyper-parameter definitions in `space[method]` to skopt dimensions."""
    params = space[method]
    dimensions = []
    for name, param in params.items():
        dimensions.append(convert_param_to_skopt(param, name=name))
    return dimensions
def get_model_tuning_dict_results():
    """Parse the tuning log into reporting dicts.

    Returns (tuning_result_dict, benchmark_accuracy_result_dict); both empty
    when the log file does not exist. Relies on module-level globals:
    `args`, `tuning_log`, `OS`, `PLATFORM`, `URL` and `parse_tuning_line`.
    """
    tuning_result_dict = {}
    framework_version = get_framework_version(shlex.quote(args.framework))
    if os.path.exists(tuning_log):
        print('tuning log found')
        # Accumulator filled line-by-line by parse_tuning_line.
        tmp = {'fp32_acc': 0, 'int8_acc': 0, 'tuning_trials': 0}
        with open(tuning_log, 'r') as f:
            for line in f:
                parse_tuning_line(line, tmp)
        tuning_result_dict = {'OS': OS, 'Platform': PLATFORM, 'Framework': shlex.quote(args.framework), 'Version': framework_version, 'Model': shlex.quote(args.model), 'Strategy': tmp.get('strategy', 'basic'), 'Tune_time': tmp.get('tune_time')}
        benchmark_accuracy_result_dict = {'int8': {'OS': OS, 'Platform': PLATFORM, 'Framework': shlex.quote(args.framework), 'Version': framework_version, 'Model': shlex.quote(args.model), 'Mode': 'Inference', 'Type': 'Accuracy', 'BS': 1, 'Value': tmp.get('int8_acc'), 'Url': URL}, 'fp32': {'OS': OS, 'Platform': PLATFORM, 'Framework': shlex.quote(args.framework), 'Version': framework_version, 'Model': shlex.quote(args.model), 'Mode': 'Inference', 'Type': 'Accuracy', 'BS': 1, 'Value': tmp.get('fp32_acc'), 'Url': URL}}
        return (tuning_result_dict, benchmark_accuracy_result_dict)
    else:
        return ({}, {})
def get_session_classes(args, session):
    """Class indices seen up to `session`: base classes plus `way` per session."""
    num_classes = args.base_class + session * args.way
    return np.arange(num_classes)
class Logger(object):
    """Combined stdout / text-file / TensorBoard logger rooted under ./logs/.

    (The original source carried an unterminated string literal `'Reference:`
    here — a syntax error, presumably a citation truncated when the file was
    flattened — replaced by this docstring.)
    """

    def __init__(self, fn, subdir=None, resume=None):
        if (not os.path.exists('./logs/')):
            os.mkdir('./logs/')
        # Resume into an existing directory, or create a fresh one.
        if resume:
            logdir = resume
        else:
            logdir = self._make_dir(fn, subdir)
        if (not os.path.exists(logdir)):
            os.makedirs(logdir)
        if (len(os.listdir(logdir)) != 0):
            # Refuse to silently clobber an existing run without confirmation.
            ans = input('log_dir is not empty. All data inside log_dir will be deleted. Will you proceed [y/N]? ')
            if (ans in ['y', 'Y']):
                shutil.rmtree(logdir)
            else:
                exit(1)
        self.set_dir(logdir)

    def _make_dir(self, fn, subdir):
        """Build 'logs/<subdir>/<fn>/<rand>'; subdir defaults to today's date."""
        if (subdir is None):
            subdir = datetime.today().strftime('%y%m%d')
        logdir = f'logs/{subdir}/{fn}/{np.random.randint(10000)}'
        return logdir

    def set_dir(self, logdir, log_fn='log.txt'):
        """Attach the TensorBoard writer and open the text log (append mode)."""
        self.logdir = logdir
        if (not os.path.exists(logdir)):
            raise OSError(('logdir does not exist: %s' % logdir))
        self.writer = SummaryWriter(logdir)
        self.log_file = open(os.path.join(logdir, log_fn), 'a')

    def log(self, string):
        """Write a timestamped line to the log file and stdout."""
        self.log_file.write((('[%s] %s' % (datetime.now(), string)) + '\n'))
        self.log_file.flush()
        print(('[%s] %s' % (datetime.now(), string)))
        sys.stdout.flush()

    def log_dirname(self, string):
        """Log a message annotated with the current log directory."""
        self.log_file.write((('%s (%s)' % (string, self.logdir)) + '\n'))
        self.log_file.flush()
        print(('%s (%s)' % (string, self.logdir)))
        sys.stdout.flush()

    def scalar_summary(self, tag, value, step):
        """Log a scalar to TensorBoard."""
        self.writer.add_scalar(tag, value, step)

    def image_summary(self, tag, image, step, dataformats='HWC'):
        """Log an image to TensorBoard."""
        self.writer.add_image(tag, image, step, dataformats=dataformats)

    def histo_summary(self, tag, values, step):
        """Log a histogram of `values` to TensorBoard."""
        self.writer.add_histogram(tag, values, step, bins='auto')
def densenet201(pretrained=False, **kwargs):
    """DenseNet-201 constructor; optionally loads pretrained weights (non-strict)."""
    net = DenseNet(num_init_features=64, growth_rate=32, block_config=(6, 12, 48, 32), **kwargs)
    if pretrained:
        state = model_zoo.load_url(model_urls['densenet201'])
        net.load_state_dict(state, strict=False)
    return net
def display_alongside_source_image(images):
    """Horizontally concatenate a sequence of PIL images into one PIL image."""
    arrays = [np.array(img) for img in images]
    stacked = np.concatenate(arrays, axis=1)
    return Image.fromarray(stacked)
def query_cluster(db_path: str):
    """Pretty-print every row of the `cluster` table from the given SQLite DB.

    Args:
        db_path: Path to the SQLite database file.
    """
    conn = sqlite3.connect(db_path)
    try:
        cursor = conn.cursor()
        cursor.execute('select * from cluster')
        # Read-only query: the original committed here pointlessly.
        results = cursor.fetchall()
        table = PrettyTable()
        table.field_names = [col[0] for col in cursor.description]
        for row in results:
            table.add_row(row)
        table.title = 'Neural Solution Cluster Management System'
        logger.info(table)
        cursor.close()
    finally:
        # Always release the connection, even if the query fails.
        conn.close()
# NOTE(review): bare `_model` expression — almost certainly the residue of a
# stripped decorator (e.g. timm's `@register_model`) mangled by the flattening
# tool; as written it raises NameError at import time. Confirm and restore.
_model
def ecaresnext50t_32x4d(pretrained=False, **kwargs):
    """ECA-ResNeXt, 32x4d cardinality, tiered deep stem, avg-down shortcuts.

    NOTE(review): layers=[2, 2, 2, 2] is a 26-layer-style configuration even
    though the name says "50t" (a ResNet-50 would be [3, 4, 6, 3]) — confirm
    against the upstream timm definition before changing.
    """
    model_args = dict(block=Bottleneck, layers=[2, 2, 2, 2], cardinality=32, base_width=4, stem_width=32, stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca'), **kwargs)
    return _create_resnet('ecaresnext50t_32x4d', pretrained, **model_args)
class HDF5Datamodule_2d(pl.LightningDataModule):
    """LightningDataModule serving 2-D snapshot datasets from HDF5 files,
    with independent path, time length (nt) and spatial resolution (res)
    per split."""

    def __init__(self, name='h5_datamodule_2d', train_path='/content/drive/MyDrive/MILA/snapshots.h5', val_path='/content/drive/MyDrive/MILA/snapshots.h5', test_path='/content/drive/MyDrive/MILA/snapshots.h5', nt_train=128, res_train=256, nt_val=128, res_val=256, nt_test=256, res_test=256, num_workers=2, batch_size=32):
        super().__init__()
        self.save_hyperparameters()
        self.name = name
        self.train_path = train_path
        self.val_path = val_path
        self.test_path = test_path
        self.nt_train = nt_train
        self.res_train = res_train
        self.nt_val = nt_val
        self.res_val = res_val
        self.nt_test = nt_test
        self.res_test = res_test
        self.batch_size = batch_size
        self.num_workers = num_workers

    def setup(self, stage=None):
        """Instantiate the three HDF5Dataset splits (val/test use mode='test')."""
        self.train_dataset = HDF5Dataset(path=self.train_path, mode='train', nt=self.nt_train, res=self.res_train, dtype=torch.float32)
        self.val_dataset = HDF5Dataset(path=self.val_path, mode='test', nt=self.nt_val, res=self.res_val, dtype=torch.float32)
        self.test_dataset = HDF5Dataset(path=self.test_path, mode='test', nt=self.nt_test, res=self.res_test, dtype=torch.float32)

    def train_dataloader(self):
        return DataLoader(self.train_dataset, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=True, pin_memory=True)

    def val_dataloader(self):
        # BUG FIX: validation data was shuffled; evaluation loaders should be
        # deterministic so per-batch metrics/logging are reproducible.
        return DataLoader(self.val_dataset, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=False, pin_memory=True)

    def test_dataloader(self):
        # BUG FIX: same as val_dataloader — no shuffling at test time.
        return DataLoader(self.test_dataset, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=False, pin_memory=True)
def create_regularization_fns(args):
    """Collect the regularizers enabled in `args`.

    For every key in the module-level REGULARIZATION_FNS mapping whose value
    in `args` is not None, the corresponding function and its coefficient are
    collected. Returns (regularization_fns, regularization_coeffs), two
    parallel lists.

    Fixes: removed two no-op self-assignments and replaced six.iteritems with
    the native dict.items() (the file already uses Python-3-only syntax).
    """
    regularization_fns = []
    regularization_coeffs = []
    for arg_key, reg_fn in REGULARIZATION_FNS.items():
        if args[arg_key] is not None:
            regularization_fns.append(reg_fn)
            regularization_coeffs.append(args[arg_key])
    return (regularization_fns, regularization_coeffs)
class GELU(nn.Module):
    """Gaussian Error Linear Unit, tanh approximation:
    0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))."""

    def forward(self, x):
        # Hendrycks & Gimpel (2016) tanh approximation of the Gaussian CDF.
        cubic = x + 0.044715 * torch.pow(x, 3)
        gate = 1 + torch.tanh(math.sqrt(2 / math.pi) * cubic)
        return 0.5 * x * gate
class MXNetModel(BaseModel):
    """Wrapper holding an MXNet model — either a Gluon HybridBlock or a
    (symbol, arg_params, aux_params) triple — for neural-compressor.

    NOTE(review): the original chunk had two bare `def model` methods (the
    second silently shadowing the first) and a bare `def framework`; these
    are plainly property/setter definitions whose decorators were lost —
    restored @property / @model.setter here. Confirm against callers.
    """

    def __init__(self, model, **kwargs):
        """Store the wrapped model; quantization config and calibration
        cache start empty."""
        self.q_config = None
        self._model = model
        self.calib_cache = {}

    @property
    def framework(self):
        """Framework identifier used for neural-compressor dispatch."""
        return 'mxnet'

    @property
    def model(self):
        """The wrapped MXNet model object."""
        return self._model

    @model.setter
    def model(self, model):
        self._model = model

    def save(self, root=None):
        """Export the model under `root` (defaults to the configured
        workspace). HybridBlocks export via .export(); symbolic triples go
        through mx.model.save_checkpoint()."""
        if root is None:
            from neural_compressor import config as cfg
            root = cfg.default_workspace
        root = os.path.abspath(os.path.expanduser(root))
        os.makedirs(os.path.dirname(root), exist_ok=True)
        if isinstance(self._model, mx.gluon.HybridBlock):
            self._model.export(root, remove_amp_cast=False)
            logger.info('Save quantized hybrid block model to {}.'.format(root))
        else:
            (symnet, args, auxs) = self._model
            # Convert numpy-compatible ndarrays back to legacy NDArray,
            # which save_checkpoint expects.
            symnet = symnet.as_nd_ndarray()
            args = {k: v.as_nd_ndarray() for (k, v) in args.items()}
            auxs = {k: v.as_nd_ndarray() for (k, v) in auxs.items()}
            mx.model.save_checkpoint(root, 0, symnet, args, auxs, remove_amp_cast=False)
            logger.info('Save quantized symbol model to {}.'.format(root))
class Proposal(Layer):
    """BigDL wrapper for a region-proposal layer (Faster R-CNN style).

    All arguments are forwarded to the JVM-side layer constructor:
    pre/post-NMS top-N at inference, anchor ratios and scales, and separate
    pre/post-NMS top-N values used during training.
    """
    def __init__(self, pre_nms_topn, post_nms_topn, ratios, scales, rpn_pre_nms_topn_train=12000, rpn_post_nms_topn_train=2000, bigdl_type='float'):
        super(Proposal, self).__init__(None, bigdl_type, pre_nms_topn, post_nms_topn, ratios, scales, rpn_pre_nms_topn_train, rpn_post_nms_topn_train)
def parse_numpy_printoption(kv_str):
    """Parse one 'key=value' string and apply it via np.set_printoptions.

    The value is coerced to the type of the option's current value; booleans
    go through flags.BooleanParser since bool('False') would be True.

    Raises argparse.ArgumentTypeError for malformed input, unknown keys,
    keys whose current value is None (type cannot be inferred), or values
    that fail to parse.
    """
    k_v_str = kv_str.split('=', 1)
    if (len(k_v_str) != 2) or (not k_v_str[0]):
        raise argparse.ArgumentTypeError("'%s' is not in the form k=v." % kv_str)
    (k, v_str) = k_v_str
    printoptions = np.get_printoptions()
    if k not in printoptions:
        raise argparse.ArgumentTypeError("'%s' is not a valid printoption." % k)
    v_type = type(printoptions[k])
    if v_type is type(None):
        raise argparse.ArgumentTypeError("Setting '%s' from the command line is not supported." % k)
    try:
        v = (v_type(v_str) if (v_type is not bool) else flags.BooleanParser().Parse(v_str))
    except ValueError as e:
        # BUG FIX: exceptions have no `.message` attribute in Python 3;
        # `e.message` raised AttributeError and masked the real error.
        raise argparse.ArgumentTypeError(str(e))
    np.set_printoptions(**{k: v})
@Kuu.register(InducingImages, Conv2d)
def _Kuu_conv2d(feat: InducingImages, kern: Conv2d, jitter: float=0.0):
    """Kuu for convolutional GPs: the base-kernel Gram matrix over the
    inducing patches, with `jitter` added to the diagonal for numerical
    stability.

    NOTE(review): the preceding bare tuple line `(InducingImages, Conv2d)`
    was the argument residue of a stripped multiple-dispatch registration;
    restored as `@Kuu.register(...)` (GPflow dispatcher convention) —
    confirm the dispatcher name against the file's imports.
    """
    _Kuu = kern.kernel.K(feat.as_patches)
    return tf.linalg.set_diag(_Kuu, (tf.linalg.diag_part(_Kuu) + jitter))
def build_model(vocab, embed_dim: int=100, hid_dim: int=100, min_dec_step: int=2, max_decoding_steps: int=3, fix_edu_num: int=(- 1), use_elmo: bool=False, dropout=0.5, dropout_emb=0.2, span_encoder_type='self_attentive', attn_type='dot', schedule_ratio_from_ground_truth=0.7, pretrain_embedding=None, nenc_lay: int=1, mult_orac_sampling: bool=True, compression: bool=True, word_token_indexers=None, alpha: float=1.0, dbg: bool=False, dec_avd_trigram_rep: bool=True, aggressive_compression: int=(- 1), keep_threshold: float=0.5, weight_alpha=0.0, bias_alpha=0.0, abs_board_file: str='/home/cc/exComp/board.txt', compress_leadn=(- 1), gather='mean', abs_dir_root: str='/scratch/cluster/jcxu', serilization_name='', load_save_model: str=None):
    """Build a Seq2IdxSum summarization model and move it to the active device.

    Nearly every argument is forwarded verbatim to the Seq2IdxSum
    constructor (note the renames: pretrain_embedding ->
    pretrain_embedding_file). An L2 regularizer on 'weight' parameters
    (coefficient weight_alpha) and an L1 regularizer on 'bias' parameters
    (coefficient bias_alpha) are attached via a RegularizerApplicator.

    If `load_save_model` is a path, a saved state dict is loaded into the
    model (mapped onto the current device) before the final .to(device).
    Returns the model on the device reported by get_device().
    """
    model = Seq2IdxSum(vocab=vocab, word_embedding_dim=embed_dim, hidden_dim=hid_dim, min_dec_step=min_dec_step, max_decoding_steps=max_decoding_steps, fix_edu_num=fix_edu_num, use_elmo=use_elmo, span_encoder_type=span_encoder_type, dropout=dropout, dropout_emb=dropout_emb, attn_type=attn_type, schedule_ratio_from_ground_truth=schedule_ratio_from_ground_truth, pretrain_embedding_file=pretrain_embedding, nenc_lay=nenc_lay, mult_orac_sampling=mult_orac_sampling, word_token_indexers=word_token_indexers, compression=compression, alpha=alpha, dbg=dbg, dec_avd_trigram_rep=dec_avd_trigram_rep, aggressive_compression=aggressive_compression, keep_threshold=keep_threshold, regularizer=RegularizerApplicator([('weight', L2Regularizer(weight_alpha)), ('bias', L1Regularizer(bias_alpha))]), abs_board_file=abs_board_file, gather=gather, compress_leadn=compress_leadn, abs_dir_root=abs_dir_root, serilization_name=serilization_name)
    if load_save_model:
        model.load_state_dict(torch.load(load_save_model, map_location=get_device()))
    device = get_device()
    model = model.to(device)
    return model
def test_single_char_arguments():
    """pybind11 single-character conversion tests (char / char16 / char32 /
    wchar / char8).

    BUG FIX: non-ASCII character literals had been stripped to u'' or plain
    ASCII by an encoding mishap; restored from the code points asserted next
    to them (233=é, 234=ê, 256=Ā, 8253=‽, 9829=♥, 9825=♡, 127874=🎂).
    """
    def toobig_message(r):
        return 'Character code point not in range({0:#x})'.format(r)
    toolong_message = 'Expected a character, but multi-character string found'
    assert (m.ord_char(u'a') == 97)
    assert (m.ord_char_lv(u'b') == 98)
    assert (m.ord_char(u'é') == 233)
    with pytest.raises(ValueError) as excinfo:
        assert (m.ord_char(u'Ā') == 256)
    assert (str(excinfo.value) == toobig_message(256))
    with pytest.raises(ValueError) as excinfo:
        assert m.ord_char(u'ab')
    assert (str(excinfo.value) == toolong_message)
    assert (m.ord_char16(u'a') == 97)
    assert (m.ord_char16(u'é') == 233)
    assert (m.ord_char16_lv(u'ê') == 234)
    assert (m.ord_char16(u'Ā') == 256)
    assert (m.ord_char16(u'‽') == 8253)
    assert (m.ord_char16(u'♥') == 9829)
    assert (m.ord_char16_lv(u'♡') == 9825)
    with pytest.raises(ValueError) as excinfo:
        assert (m.ord_char16(u'🎂') == 127874)
    assert (str(excinfo.value) == toobig_message(65536))
    with pytest.raises(ValueError) as excinfo:
        assert m.ord_char16(u'aa')
    assert (str(excinfo.value) == toolong_message)
    assert (m.ord_char32(u'a') == 97)
    assert (m.ord_char32(u'é') == 233)
    assert (m.ord_char32(u'Ā') == 256)
    assert (m.ord_char32(u'‽') == 8253)
    assert (m.ord_char32(u'♥') == 9829)
    assert (m.ord_char32(u'🎂') == 127874)
    with pytest.raises(ValueError) as excinfo:
        assert m.ord_char32(u'aa')
    assert (str(excinfo.value) == toolong_message)
    assert (m.ord_wchar(u'a') == 97)
    assert (m.ord_wchar(u'é') == 233)
    assert (m.ord_wchar(u'Ā') == 256)
    assert (m.ord_wchar(u'‽') == 8253)
    assert (m.ord_wchar(u'♥') == 9829)
    if (m.wchar_size == 2):
        # 16-bit wchar_t cannot hold astral-plane code points.
        with pytest.raises(ValueError) as excinfo:
            assert (m.ord_wchar(u'🎂') == 127874)
        assert (str(excinfo.value) == toobig_message(65536))
    else:
        assert (m.ord_wchar(u'🎂') == 127874)
    with pytest.raises(ValueError) as excinfo:
        assert m.ord_wchar(u'aa')
    assert (str(excinfo.value) == toolong_message)
    if hasattr(m, 'has_u8string'):
        assert (m.ord_char8(u'a') == 97)
        assert (m.ord_char8_lv(u'b') == 98)
        assert (m.ord_char8(u'é') == 233)
        with pytest.raises(ValueError) as excinfo:
            assert (m.ord_char8(u'Ā') == 256)
        assert (str(excinfo.value) == toobig_message(256))
        with pytest.raises(ValueError) as excinfo:
            assert m.ord_char8(u'ab')
        assert (str(excinfo.value) == toolong_message)
class FLANN():
    """ctypes wrapper around the FLANN approximate nearest-neighbour library.

    Holds at most one built index at a time; building or loading a new index
    frees the previous one.
    """

    # Shared RNG used to draw default random seeds for the C library.
    __rn_gen = _rn.RandomState()

    # Lets a FLANN instance be passed directly to ctypes calls expecting
    # the raw index pointer.
    _as_parameter_ = property((lambda self: self.__curindex))

    def __init__(self, **kwargs):
        """Create an empty FLANN object; kwargs pre-populate the FLANN
        parameter struct (algorithm, checks, trees, ...)."""
        self.__rn_gen.seed()
        self.__curindex = None        # C pointer to the current index
        self.__curindex_data = None   # ndarray the index was built over
        self.__curindex_type = None   # dtype.type of the indexed data
        self.__flann_parameters = FLANNParameters()
        self.__flann_parameters.update(kwargs)

    def __del__(self):
        self.delete_index()

    def nn(self, pts, qpts, num_neighbors=1, **kwargs):
        """One-shot search: build a temporary index over `pts` and return
        (indices, dists) of the `num_neighbors` nearest points for each row
        of `qpts`. 1-D results are returned when num_neighbors == 1."""
        if not (pts.dtype.type in allowed_types):
            raise FLANNException('Cannot handle type: %s' % pts.dtype)
        if not (qpts.dtype.type in allowed_types):
            # BUG FIX: the message previously reported pts.dtype for a qpts error.
            raise FLANNException('Cannot handle type: %s' % qpts.dtype)
        if pts.dtype != qpts.dtype:
            raise FLANNException('Data and query must have the same type')
        pts = ensure_2d_array(pts, default_flags)
        qpts = ensure_2d_array(qpts, default_flags)
        (npts, dim) = pts.shape
        nqpts = qpts.shape[0]
        assert (qpts.shape[1] == dim)
        assert (npts >= num_neighbors)
        result = empty((nqpts, num_neighbors), dtype=index_type)
        # Distances come back in the input precision (float32 unless float64).
        if pts.dtype == float64:
            dists = empty((nqpts, num_neighbors), dtype=float64)
        else:
            dists = empty((nqpts, num_neighbors), dtype=float32)
        self.__flann_parameters.update(kwargs)
        flann.find_nearest_neighbors[pts.dtype.type](pts, npts, dim, qpts, nqpts, result, dists, num_neighbors, pointer(self.__flann_parameters))
        if num_neighbors == 1:
            return (result.reshape(nqpts), dists.reshape(nqpts))
        else:
            return (result, dists)

    def build_index(self, pts, **kwargs):
        """Build (and keep) an index over `pts`. Returns the effective
        parameter dict, including the estimated 'speedup' over brute force."""
        if not (pts.dtype.type in allowed_types):
            raise FLANNException('Cannot handle type: %s' % pts.dtype)
        pts = ensure_2d_array(pts, default_flags)
        (npts, dim) = pts.shape
        self.__ensureRandomSeed(kwargs)
        self.__flann_parameters.update(kwargs)
        if self.__curindex is not None:
            flann.free_index[self.__curindex_type](self.__curindex, pointer(self.__flann_parameters))
            self.__curindex = None
        speedup = c_float(0)
        self.__curindex = flann.build_index[pts.dtype.type](pts, npts, dim, byref(speedup), pointer(self.__flann_parameters))
        self.__curindex_data = pts
        self.__curindex_type = pts.dtype.type
        params = dict(self.__flann_parameters)
        params['speedup'] = speedup.value
        return params

    def save_index(self, filename):
        """Persist the current index to `filename` (no-op if none is built)."""
        if self.__curindex is not None:
            flann.save_index[self.__curindex_type](self.__curindex, c_char_p(to_bytes(filename)))

    def load_index(self, filename, pts):
        """Load an index from `filename`, built over dataset `pts`; any
        existing index is freed first."""
        if not (pts.dtype.type in allowed_types):
            raise FLANNException('Cannot handle type: %s' % pts.dtype)
        pts = ensure_2d_array(pts, default_flags)
        (npts, dim) = pts.shape
        if self.__curindex is not None:
            flann.free_index[self.__curindex_type](self.__curindex, pointer(self.__flann_parameters))
            self.__curindex = None
            self.__curindex_data = None
            self.__curindex_type = None
        self.__curindex = flann.load_index[pts.dtype.type](c_char_p(to_bytes(filename)), pts, npts, dim)
        self.__curindex_data = pts
        self.__curindex_type = pts.dtype.type

    def nn_index(self, qpts, num_neighbors=1, **kwargs):
        """Query the prebuilt index for the `num_neighbors` nearest points of
        each row of `qpts`; returns (indices, dists)."""
        if self.__curindex is None:
            raise FLANNException('build_index(...) method not called first or current index deleted.')
        if not (qpts.dtype.type in allowed_types):
            raise FLANNException('Cannot handle type: %s' % qpts.dtype)
        if self.__curindex_type != qpts.dtype.type:
            raise FLANNException('Index and query must have the same type')
        qpts = ensure_2d_array(qpts, default_flags)
        (npts, dim) = self.__curindex_data.shape
        if qpts.size == dim:
            # BUG FIX: reshape() returns a new array; the result was being
            # discarded, so a flat length-`dim` query was never promoted to
            # shape (1, dim) and the subsequent shape assert could fail.
            qpts = qpts.reshape(1, dim)
        nqpts = qpts.shape[0]
        assert (qpts.shape[1] == dim)
        assert (npts >= num_neighbors)
        result = empty((nqpts, num_neighbors), dtype=index_type)
        if self.__curindex_type == float64:
            dists = empty((nqpts, num_neighbors), dtype=float64)
        else:
            dists = empty((nqpts, num_neighbors), dtype=float32)
        self.__flann_parameters.update(kwargs)
        flann.find_nearest_neighbors_index[self.__curindex_type](self.__curindex, qpts, nqpts, result, dists, num_neighbors, pointer(self.__flann_parameters))
        if num_neighbors == 1:
            return (result.reshape(nqpts), dists.reshape(nqpts))
        else:
            return (result, dists)

    def nn_radius(self, query, radius, **kwargs):
        """Return (indices, dists) of all indexed points within `radius` of
        the single point `query` (a 1-D array of length dim)."""
        if self.__curindex is None:
            raise FLANNException('build_index(...) method not called first or current index deleted.')
        if not (query.dtype.type in allowed_types):
            raise FLANNException('Cannot handle type: %s' % query.dtype)
        if self.__curindex_type != query.dtype.type:
            raise FLANNException('Index and query must have the same type')
        (npts, dim) = self.__curindex_data.shape
        assert (query.shape[0] == dim)
        result = empty(npts, dtype=index_type)
        if self.__curindex_type == float64:
            dists = empty(npts, dtype=float64)
        else:
            dists = empty(npts, dtype=float32)
        self.__flann_parameters.update(kwargs)
        nn = flann.radius_search[self.__curindex_type](self.__curindex, query, result, dists, npts, radius, pointer(self.__flann_parameters))
        return (result[0:nn], dists[0:nn])

    def delete_index(self, **kwargs):
        """Free the current index and forget its data (safe to call when no
        index exists)."""
        self.__flann_parameters.update(kwargs)
        if self.__curindex is not None:
            flann.free_index[self.__curindex_type](self.__curindex, pointer(self.__flann_parameters))
            self.__curindex = None
            self.__curindex_data = None

    def kmeans(self, pts, num_clusters, max_iterations=None, dtype=None, **kwargs):
        """Cluster `pts` into `num_clusters` clusters; returns the cluster
        centers, cast to `dtype` if given."""
        if (int(num_clusters) != num_clusters) or (num_clusters < 1):
            raise FLANNException('num_clusters must be an integer >= 1')
        if num_clusters == 1:
            # Trivial case: the single center is the mean of the data.
            if (dtype is None) or (dtype == pts.dtype):
                return mean(pts, 0).reshape(1, pts.shape[1])
            else:
                return dtype(mean(pts, 0).reshape(1, pts.shape[1]))
        return self.hierarchical_kmeans(pts, int(num_clusters), 1, max_iterations, dtype, **kwargs)

    def hierarchical_kmeans(self, pts, branch_size, num_branches, max_iterations=None, dtype=None, **kwargs):
        """Run FLANN's hierarchical k-means; produces
        (branch_size - 1) * num_branches + 1 cluster centers."""
        if not (pts.dtype.type in allowed_types):
            raise FLANNException('Cannot handle type: %s' % pts.dtype)
        if (int(branch_size) != branch_size) or (branch_size < 2):
            raise FLANNException('branch_size must be an integer >= 2.')
        branch_size = int(branch_size)
        if (int(num_branches) != num_branches) or (num_branches < 1):
            raise FLANNException('num_branches must be an integer >= 1.')
        num_branches = int(num_branches)
        if max_iterations is None:
            max_iterations = -1  # -1 tells FLANN to iterate until convergence
        else:
            max_iterations = int(max_iterations)
        pts = ensure_2d_array(pts, default_flags)
        (npts, dim) = pts.shape
        num_clusters = ((branch_size - 1) * num_branches) + 1
        if pts.dtype.type == float64:
            result = empty((num_clusters, dim), dtype=float64)
        else:
            result = empty((num_clusters, dim), dtype=float32)
        self.__ensureRandomSeed(kwargs)
        params = {'iterations': max_iterations, 'algorithm': 'kmeans', 'branching': branch_size, 'random_seed': kwargs['random_seed']}
        self.__flann_parameters.update(params)
        numclusters = flann.compute_cluster_centers[pts.dtype.type](pts, npts, dim, num_clusters, result, pointer(self.__flann_parameters))
        if numclusters <= 0:
            raise FLANNException('Error occured during clustering procedure.')
        if dtype is None:
            return result
        else:
            return dtype(result)

    def __ensureRandomSeed(self, kwargs):
        """Populate kwargs['random_seed'] from the shared RNG if absent."""
        if 'random_seed' not in kwargs:
            kwargs['random_seed'] = self.__rn_gen.randint(2 ** 30)
def ycbcr2bgr(img):
    """Convert a YCbCr image to BGR (ITU-R BT.601, "limited range" YCbCr).

    The input is normalized to float32 in [0, 1] by
    _convert_input_type_range, transformed, then converted back to the
    original dtype/range by _convert_output_type_range.

    BUG FIX: the conversion matrix had been zeroed out (only signs
    survived); restored the standard BT.601 inverse-transform coefficients
    (as used in BasicSR), which match the surviving offset vector
    [-276.836, 135.576, -222.921].
    """
    img_type = img.dtype
    img = _convert_input_type_range(img) * 255
    out_img = (np.matmul(img, [[0.00456621, 0.00456621, 0.00456621],
                               [0.00791071, -0.00153632, 0],
                               [0, -0.00318811, 0.00625893]]) * 255.0
               + [-276.836, 135.576, -222.921])
    out_img = _convert_output_type_range(out_img, img_type)
    return out_img
def smart_round(x, base=None):
    """Round `x` to the nearest multiple of a rounding base, never below
    the base itself.

    When `base` is None the base adapts to the magnitude of x:
    32 for x > 256, 16 for x > 128, otherwise 8.
    """
    if base is not None:
        round_base = base
    elif x > 256:
        round_base = 32
    elif x > 128:
        round_base = 16
    else:
        round_base = 8
    nearest_multiple = round(x / float(round_base)) * round_base
    return max(round_base, nearest_multiple)
def calculate_FrameAccuracy(pred, true):
    """Fraction of frames (rows) where `pred` equals `true` on every element
    of the last axis."""
    frame_match = np.all((pred - true) == 0, axis=-1)
    hit_count = np.sum(frame_match.astype(int))
    sample_count = true.shape[0]
    return (hit_count * 1.0) / sample_count
def test_vector(doc):
    """Exercise pybind11 std::vector casting: Python list round-trips,
    tuples as input, bool vectors, generated docstrings, and vectors of
    pointers to static lvalues."""
    vec = m.cast_vector()
    assert vec == [1]
    # A cast vector is an independent Python list; mutating it is fine.
    vec.append(2)
    assert m.load_vector(vec)
    # Any sequence should load, not just lists.
    assert m.load_vector(tuple(vec))
    assert m.cast_bool_vector() == [True, False]
    assert m.load_bool_vector([True, False])
    assert doc(m.cast_vector) == 'cast_vector() -> List[int]'
    assert doc(m.load_vector) == 'load_vector(arg0: List[int]) -> bool'
    assert m.cast_ptr_vector() == ['lvalue', 'lvalue']
def add_head(code_split_dir, source_path, new_split_path):
    """For every split file `<idx>.txt` in `code_split_dir`, prepend the
    function head — source line `idx` truncated just after its first ')' and
    terminated with ';' — to the split's first line, writing one output line
    per split file into `new_split_path`.

    BUG FIX: the per-iteration split files were opened but never closed
    (handle leak over large directories); the source and output files are
    now closed deterministically as well.

    NOTE(review): this changes the process CWD to `code_split_dir` and does
    not restore it — preserved from the original; confirm callers rely on it.
    """
    code_split_list = os.listdir(code_split_dir)
    with open(source_path, encoding='utf-8') as source_file:
        source_lines = source_file.readlines()
    # Open the output before chdir so a relative new_split_path still
    # resolves against the original working directory (original order).
    new_file = open(new_split_path, 'w', encoding='utf-8')
    try:
        os.chdir(code_split_dir)
        for fname in tqdm(code_split_list):
            with open(fname, encoding='utf-8') as split_file:
                lines = split_file.readlines()
            idx = fname.replace('.txt', '')
            body = lines[0] if lines else ''
            s_line = source_lines[int(idx) - 1]
            head = s_line[0:s_line.find(')') + 1] + ';'
            new_file.write(head + body + '\n')
    finally:
        new_file.close()
class DenseModel(nn.Module):
    """Feed-forward network that parameterizes a distribution over a tensor
    of shape `output_shape`: an Independent Normal (unit scale) when
    dist='normal', or an Independent Bernoulli (from logits) when
    dist='binary'."""

    def __init__(self, feature_size: int, output_shape: tuple, layers: int, hidden_size: int, dist='normal', activation=nn.ELU):
        super().__init__()
        # Attribute assignment order kept identical to preserve nn.Module
        # registration order.
        self._output_shape = output_shape
        self._layers = layers
        self._hidden_size = hidden_size
        self._dist = dist
        self.activation = activation
        self._feature_size = feature_size
        self.model = self.build_model()

    def build_model(self):
        """Assemble Linear/activation stack ending in a flat projection to
        prod(output_shape) units."""
        stack = [nn.Linear(self._feature_size, self._hidden_size), self.activation()]
        for _ in range(self._layers - 1):
            stack.append(nn.Linear(self._hidden_size, self._hidden_size))
            stack.append(self.activation())
        stack.append(nn.Linear(self._hidden_size, int(np.prod(self._output_shape))))
        return nn.Sequential(*stack)

    def forward(self, features):
        """Map `features` (..., feature_size) to a distribution whose event
        shape is `output_shape` and batch shape is features.shape[:-1]."""
        flat_params = self.model(features)
        shaped_params = torch.reshape(flat_params, features.shape[:-1] + self._output_shape)
        if self._dist == 'normal':
            return td.independent.Independent(td.Normal(shaped_params, 1), len(self._output_shape))
        if self._dist == 'binary':
            return td.independent.Independent(td.Bernoulli(logits=shaped_params), len(self._output_shape))
        raise NotImplementedError(self._dist)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.