code stringlengths 101 5.91M |
|---|
class BaseOptions():
    """Base option registry shared by the train and test phases.

    Registers the command-line options common to every phase, lets the
    selected model and dataset classes extend the parser, and post-processes
    the parsed options (suffix expansion, GPU id parsing, dump to disk).
    Subclasses are expected to define ``self.isTrain``.
    """

    def __init__(self):
        # True once ``initialize`` has registered the base options.
        self.initialized = False

    def initialize(self, parser):
        """Register the phase-independent options on *parser* and return it."""
        parser.add_argument('--dataroot', required=True, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
        parser.add_argument('--batch_size', type=int, default=1, help='input batch size')
        parser.add_argument('--loadSizeW', type=int, default=220, help='scale images to this size (width)')
        parser.add_argument('--loadSizeH', type=int, default=220, help='scale images to this size (height)')
        parser.add_argument('--fineSizeW', type=int, default=200, help='then crop to this size (width)')
        parser.add_argument('--fineSizeH', type=int, default=200, help='then crop to this size (height)')
        parser.add_argument('--display_winsize', type=int, default=256, help='display window size for both visdom and HTML')
        parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels')
        parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels')
        parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in first conv layer')
        parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in first conv layer')
        parser.add_argument('--netD', type=str, default='set', help='selects model to use for netD')
        parser.add_argument('--netG', type=str, default='set', help='selects model to use for netG')
        parser.add_argument('--n_layers_D', type=int, default=3, help='only used if netD==n_layers')
        parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
        parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
        parser.add_argument('--dataset_mode', type=str, default='unaligned_seg', help='chooses how datasets are loaded. [unaligned | aligned | single]')
        parser.add_argument('--model', type=str, default='insta_gan', help='chooses which model to use. cycle_gan, pix2pix, test')
        parser.add_argument('--direction', type=str, default='AtoB', help='AtoB or BtoA')
        parser.add_argument('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
        # Default was the string '0'; a plain int is equivalent (argparse
        # applies ``type`` to string defaults) and clearer.
        parser.add_argument('--load_iter', type=int, default=0, help='which iteration to load? if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]')
        parser.add_argument('--num_threads', default=4, type=int, help='# threads for loading data')
        parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
        parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization')
        parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
        parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator')
        parser.add_argument('--max_dataset_size', type=int, default=float('inf'), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
        parser.add_argument('--resize_or_crop', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop|crop|scale_width|scale_width_and_crop|none]')
        parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
        parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]')
        parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
        parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information')
        parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{loadSize}')
        self.initialized = True
        return parser

    def gather_options(self):
        """Build the full parser (base + model + dataset options) and parse argv.

        The model and dataset are chosen from a preliminary parse so that
        their option setters can extend / re-default the parser before the
        final ``parse_args`` call.
        """
        if (not self.initialized):
            parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
            parser = self.initialize(parser)
        else:
            # Bug fix: on a repeated call the original left ``parser`` unbound
            # (NameError); reuse the parser built by the first call instead.
            parser = self.parser
        (opt, _) = parser.parse_known_args()
        # Let the chosen model add its phase-specific options.
        model_name = opt.model
        model_option_setter = models.get_option_setter(model_name)
        parser = model_option_setter(parser, self.isTrain)
        (opt, _) = parser.parse_known_args()
        # Let the chosen dataset mode add its options as well.
        dataset_name = opt.dataset_mode
        dataset_option_setter = data.get_option_setter(dataset_name)
        parser = dataset_option_setter(parser, self.isTrain)
        self.parser = parser
        return parser.parse_args()

    def print_options(self, opt):
        """Pretty-print all options (flagging non-defaults) and save them to
        ``<checkpoints_dir>/<name>/opt.txt``."""
        message = ''
        message += ' Options \n'
        for (k, v) in sorted(vars(opt).items()):
            comment = ''
            default = self.parser.get_default(k)
            if (v != default):
                comment = ('\t[default: %s]' % str(default))
            message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
        message += ' End '
        print(message)
        expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
        util.mkdirs(expr_dir)
        file_name = os.path.join(expr_dir, 'opt.txt')
        with open(file_name, 'wt') as opt_file:
            opt_file.write(message)
            opt_file.write('\n')

    def parse(self):
        """Parse options, expand the name suffix, set up GPUs, and cache opt."""
        opt = self.gather_options()
        opt.isTrain = self.isTrain
        if opt.suffix:
            # e.g. --suffix {model}_{netG} is expanded from the parsed values.
            # (The original re-tested ``opt.suffix != ''`` inside this branch,
            # which is always true here; the redundant ternary is removed.)
            suffix = ('_' + opt.suffix.format(**vars(opt)))
            opt.name = (opt.name + suffix)
        self.print_options(opt)
        # Convert the comma-separated gpu_ids string into a list of ints;
        # negative ids (e.g. -1 for CPU) are dropped.
        str_ids = opt.gpu_ids.split(',')
        opt.gpu_ids = []
        for str_id in str_ids:
            gpu_id = int(str_id)
            if (gpu_id >= 0):
                opt.gpu_ids.append(gpu_id)
        if (len(opt.gpu_ids) > 0):
            torch.cuda.set_device(opt.gpu_ids[0])
        self.opt = opt
        return self.opt
def urlsafe_b64decode(data):
    """Decode URL-safe base64 *data* (bytes), tolerating stripped '=' padding.

    Restores the padding with ``-len(data) % 4`` so already-padded or
    block-aligned input gets no extra '=' characters (the original appended
    four '=' in that case and relied on the decoder's leniency toward
    excess padding).
    """
    pad = b'=' * (-len(data) % 4)
    return base64.urlsafe_b64decode(data + pad)
# NOTE(review): the two ``_start_docstrings*`` calls below look like stripped
# ``@add_start_docstrings`` / ``@add_start_docstrings_to_model_forward``
# decorators from HuggingFace transformers -- confirm against the original
# DeeBERT source before relying on this at import time.
_start_docstrings('RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ', ROBERTA_START_DOCSTRING)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    """RoBERTa sequence classifier with early-exit "highway" branches (DeeRoBERTa).

    The backbone may abort the forward pass early by raising
    ``HighwayException``; in that case the exiting layer's outputs replace the
    final classifier's.
    """
    config_class = RobertaConfig
    base_model_prefix = 'roberta'

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers
        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # Final (last-layer) classification head.
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    _start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_layer=(- 1), train_highway=False):
        """Run the classifier, handling early exits and optional highway training.

        When ``labels`` is given, the returned tuple starts with a loss:
        either the sum of all-but-the-last highway losses
        (``train_highway=True``) or the final-layer loss. In eval mode, the
        (original_entropy, highway_entropy) pair and the exit layer index are
        appended, and ``output_layer >= 0`` swaps in that highway's logits.
        """
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds)
            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = ((logits,) + outputs[2:])
        except HighwayException as e:
            # An intermediate layer decided to exit early; reuse its outputs.
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]
        if (not self.training):
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if (labels is not None):
            # MSE for regression (single label), cross-entropy otherwise.
            if (self.num_labels == 1):
                loss_fct = MSELoss()
                loss = loss_fct(logits.view((- 1)), labels.view((- 1)))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view((- 1), self.num_labels), labels.view((- 1)))
            highway_losses = []
            # outputs[-1] holds the per-layer highway exit tuples.
            for highway_exit in outputs[(- 1)]:
                highway_logits = highway_exit[0]
                if (not self.training):
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if (self.num_labels == 1):
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view((- 1)), labels.view((- 1)))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view((- 1), self.num_labels), labels.view((- 1)))
                highway_losses.append(highway_loss)
            if train_highway:
                # Train the highway heads only; the last one coincides with the
                # final layer and is excluded from the sum.
                outputs = ((sum(highway_losses[:(- 1)]),) + outputs)
            else:
                outputs = ((loss,) + outputs)
        if (not self.training):
            outputs = (outputs + ((original_entropy, highway_entropy), exit_layer))
            if (output_layer >= 0):
                # Report a specific highway layer's logits instead of the final ones.
                outputs = (((outputs[0],) + (highway_logits_all[output_layer],)) + outputs[2:])
        return outputs
def val_meta(args, model, val_loader, device):
    """Evaluate a meta-trained NeRF with per-view test-time optimization.

    Each validation image is split vertically: the left half drives a few
    inner-loop adaptation steps starting from the meta-trained weights, and
    PSNR is then measured on the right half. Returns the mean PSNR over the
    whole loader.
    """
    base_state = model.state_dict()
    eval_model = copy.deepcopy(model)
    psnrs = []
    for img, pose, kinv, bound in val_loader:
        img = img.to(device).squeeze()
        pose = pose.to(device).squeeze()
        kinv = kinv.to(device).squeeze()
        bound = bound.to(device).squeeze()
        rays_o, rays_d = get_rays_tourism(img.shape[0], img.shape[1], kinv, pose)
        # Vertical split: left columns for test-time optimization (tto),
        # right columns held out for evaluation.
        left = img.shape[1] // 2
        widths = [left, img.shape[1] - left]
        tto_img, test_img = torch.split(img, widths, dim=1)
        tto_rays_o, test_rays_o = torch.split(rays_o, widths, dim=1)
        tto_rays_d, test_rays_d = torch.split(rays_d, widths, dim=1)
        # Restart from the meta-trained weights for every view.
        eval_model.load_state_dict(base_state)
        optimizer = torch.optim.SGD(eval_model.parameters(), args.inner_lr)
        inner_loop(eval_model, optimizer, tto_img, tto_rays_o, tto_rays_d,
                   bound, args.num_samples, args.train_batchsize, args.inner_steps)
        psnrs.append(report_result(eval_model, test_img, test_rays_o, test_rays_d,
                                   bound, args.num_samples, args.test_batchsize))
    return torch.stack(psnrs).mean()
def word_ids_to_sentence(word_ids, vocabulary):
    """Look each id up in *vocabulary* and join the words with single spaces."""
    return ' '.join(vocabulary[wid] for wid in word_ids)
# NOTE(review): ``_module()`` looks like a stripped registration decorator
# (e.g. ``@HEADS.register_module()`` in mmseg-style code) -- confirm against
# the original source.
_module()
class DAFormerHeadPanopticShared(BaseDecodeHeadPanoptic):
    """DAFormer panoptic decode head where all sub-tasks share one fused trunk.

    Multi-level backbone features are embedded and fused once; the semantic,
    center and offset predictions are all computed from that fused feature.
    """

    def __init__(self, **kwargs):
        super(DAFormerHeadPanopticShared, self).__init__(input_transform='multiple_select', **kwargs)
        assert (not self.align_corners)
        decoder_params = kwargs['decoder_params']
        embed_dims = decoder_params['embed_dims']
        # A single int means the same embedding width for every input level.
        if isinstance(embed_dims, int):
            embed_dims = ([embed_dims] * len(self.in_index))
        embed_cfg = decoder_params['embed_cfg']
        embed_neck_cfg = decoder_params['embed_neck_cfg']
        if (embed_neck_cfg == 'same_as_embed_cfg'):
            embed_neck_cfg = embed_cfg
        fusion_cfg = decoder_params['fusion_cfg']
        # ASPP-style modules must agree with the head's align_corners setting.
        for cfg in [embed_cfg, embed_neck_cfg, fusion_cfg]:
            if ((cfg is not None) and ('aspp' in cfg['type'])):
                cfg['align_corners'] = self.align_corners
        (self.embed_layers_semantic, self.fuse_layer_semantic) = get_layers(self.in_index, self.in_channels, embed_dims, self.channels, embed_neck_cfg, embed_cfg, fusion_cfg)
        self.act_panop = kwargs['activate_panoptic']
        self.debug = kwargs['debug']
        # Filled with detached predictions on every forward (for debugging).
        self.debug_output = {}

    def forward(self, inputs):
        """Fuse multi-level features and emit (semantic, center, offset, depth).

        ``depth`` is a dummy zero tensor here (depth prediction disabled).
        NOTE(review): ``self.reg_cnt`` / ``self.reg_ofs`` are not created in
        this ``__init__`` -- presumably defined by ``BaseDecodeHeadPanoptic``;
        verify.
        """
        x = head_forward(inputs, self.in_index, self.embed_layers_semantic, self.fuse_layer_semantic, self.align_corners)
        semantic_pred = self.cls_seg(x)
        self.debug_output.update({'semantic': semantic_pred.detach()})
        center_pred = self.reg_cnt(x)
        offset_pred = self.reg_ofs(x)
        self.debug_output.update({'center': center_pred.detach()})
        self.debug_output.update({'offset': offset_pred.detach()})
        # Depth is not predicted by this head; keep a CUDA zero placeholder.
        depth_pred = torch.zeros(1).cuda()
        self.debug_output.update({'depth': depth_pred.detach()})
        return (semantic_pred, center_pred, offset_pred, depth_pred)
def segment_window_all(x_train, y_train, window_size, n_sensor_val):
    """Slide a *window_size*-row window over *x_train*, one segment per row.

    Segment ``i`` covers rows ``[i, i + window_size)``. Near the end of the
    series the window is shifted back so it stays inside the data, and the
    label falls back to the last available one; otherwise the label is the
    one at the window's final row. Returns ``(segments, labels)`` of shapes
    ``(len(x_train), window_size, n_sensor_val)`` and ``(len(y_train),)``.
    """
    n = len(x_train)
    segments = np.zeros((n, window_size, n_sensor_val))
    labels = np.zeros((len(y_train),))
    for start in range(n):
        stop = start + window_size
        if stop <= n:
            segments[start] = x_train[start:stop]
            labels[start] = y_train[stop - 1]
        else:
            # Window would overrun the series: anchor it to the final rows.
            overflow = stop - n
            segments[start] = x_train[start - overflow:stop]
            labels[start] = y_train[n - 1]
    return (segments, labels)
def _return_inverse(input, sorted=True, return_inverse=False, return_counts=False, dim=None):
    """Wrapper over ``_unique_impl`` returning ``(output, inverse_indices)``.

    Mirrors torch's override dispatch: outside TorchScript, a tensor-like
    argument implementing ``__torch_function__`` is handed straight to
    ``_unique_impl`` (which honours the override); otherwise the 3-tuple
    result is unpacked and the counts are dropped. ``Tensor``,
    ``has_torch_function`` and ``_unique_impl`` come from the enclosing
    torch module.
    """
    if (not torch.jit.is_scripting()):
        # __torch_function__ override path (skipped under TorchScript).
        if ((type(input) is not Tensor) and has_torch_function((input,))):
            return _unique_impl(input, sorted, return_inverse, return_counts, dim)
    (output, inverse_indices, _) = _unique_impl(input, sorted, return_inverse, return_counts, dim)
    return (output, inverse_indices)
def validate_base_url(ctx: click.core.Context, param: click.core.Parameter, raw_value: str) -> str:
    """Click option callback: require an empty value or a URL with a netloc.

    Raises ``click.UsageError`` when the value cannot be parsed as a URL, or
    parses without a network location; otherwise returns it unchanged.
    """
    try:
        parsed = urlparse(raw_value)
    except ValueError as exc:
        raise click.UsageError(INVALID_BASE_URL_MESSAGE) from exc
    if raw_value and not parsed.netloc:
        raise click.UsageError(INVALID_BASE_URL_MESSAGE)
    return raw_value
def get_args():
    """Assemble the CLI parser (MassFormer training + hyperopt options) and parse argv."""
    arg_parser = argparse.ArgumentParser()
    for register in (massformer_train.add_massformer_train_args, nn_utils.add_hyperopt_args):
        register(arg_parser)
    return arg_parser.parse_args()
def clean_il_hp(df: Union[(pd.DataFrame, dd.DataFrame)], column: str, output_format: str='standard', inplace: bool=False, errors: str='coerce', progress: bool=True) -> pd.DataFrame:
    """Clean and standardize a column of IL HP codes in a DataFrame.

    Parameters
    ----------
    df : pandas or dask DataFrame containing the column to clean.
    column : name of the column holding the codes.
    output_format : target representation, 'compact' or 'standard'.
    inplace : replace the original column with the cleaned values.
    errors : error-handling mode forwarded to the per-value ``_format``
        helper (defined elsewhere; presumably coerce/raise/ignore semantics
        -- verify against the helper).
    progress : show a dask progress bar while computing.

    Returns the cleaned DataFrame, always computed back to pandas.
    """
    if (output_format not in {'compact', 'standard'}):
        raise ValueError(f'output_format {output_format} is invalid. It needs to be "compact" or "standard".')
    # Work in dask regardless of input type; results are computed at the end.
    df = to_dask(df)
    # ``_format`` returns a tuple per value; only its first element is kept.
    df['clean_code_tup'] = df[column].map_partitions((lambda srs: [_format(x, output_format, errors) for x in srs]), meta=object)
    df = df.assign(_temp_=df['clean_code_tup'].map(itemgetter(0)))
    df = df.rename(columns={'_temp_': f'{column}_clean'})
    df = df.drop(columns=['clean_code_tup'])
    if inplace:
        # Overwrite the source column, then reuse the ``_clean`` name for it.
        df[column] = df[f'{column}_clean']
        df = df.drop(columns=f'{column}_clean')
        df = df.rename(columns={column: f'{column}_clean'})
    with ProgressBar(minimum=1, disable=(not progress)):
        df = df.compute()
    return df
class FixedPolicy(Policy):
    """Policy that replays a predetermined list of actions.

    Intended for testing: each :meth:`get_action` call returns the next
    scripted action (with its agent info) in order. Only one environment at
    a time is supported.
    """

    def __init__(self, env_spec, scripted_actions, agent_infos=None):
        super().__init__(env_spec)
        # Default to one empty info dict per scripted action.
        if agent_infos is None:
            agent_infos = [{}] * len(scripted_actions)
        self._scripted_actions = scripted_actions
        self._agent_infos = agent_infos
        # One-element list so the cursor can be mutated in place.
        self._indices = [0]

    def reset(self, dones=None):
        """Rewind the action cursor; *dones* must describe at most one env."""
        dones = [True] if dones is None else dones
        if len(dones) > 1:
            raise ValueError('FixedPolicy does not support more than one action at a time.')
        self._indices[0] = 0

    def set_param_values(self, params):
        """Fixed policies have no parameters; *params* is discarded."""
        del params

    def get_param_values(self):
        """Return an empty parameter tuple (nothing to learn)."""
        return ()

    def get_action(self, observation):
        """Return the next scripted (action, agent_info) pair, ignoring *observation*."""
        del observation
        cursor = self._indices[0]
        action = self._scripted_actions[cursor]
        info = self._agent_infos[cursor]
        self._indices[0] = cursor + 1
        return (action, info)

    def get_actions(self, observations):
        """Single-observation wrapper around :meth:`get_action`."""
        if len(observations) != 1:
            raise ValueError('FixedPolicy does not support more than one observation at a time.')
        return self.get_action(observations[0])
def segment_to_example(segment, label):
    """Serialize one (segment, label) pair as a ``tf.train.Example``.

    The segment is flattened to raw float32 bytes and the label to raw uint8
    bytes, each stored under a bytes feature.
    """
    # ``tobytes`` replaces the long-deprecated ``tostring`` (removed in
    # NumPy 2.0); the produced byte payload is identical.
    raw_segment = np.array(segment, dtype=np.float32).reshape(-1).tobytes()
    raw_label = np.array(label, dtype=np.uint8).reshape(-1).tobytes()
    example = tf.train.Example(features=tf.train.Features(feature={
        'label': bytes_feature(raw_label),
        'segment': bytes_feature(raw_segment),
    }))
    return example
def _isomorphisms(E, F):
    """Yield Weierstrass transformation data (u, r, s, t) between the
    isomorphic elliptic curves E and F.

    A generator over the coefficient changes identifying the two curves.
    Yields nothing when the j-invariants differ (no isomorphism exists);
    characteristics 2 and 3 get dedicated case analysis, and the generic
    branch solves for the scaling ``u`` via the c-invariants.

    Raises
    ------
    ValueError
        If either argument is not an elliptic curve.

    NOTE(review): the orientation of the transformation (E -> F vs F -> E)
    is not evident from this chunk alone; confirm against Sage's
    ``WeierstrassIsomorphism`` documentation.
    """
    from .ell_generic import is_EllipticCurve
    if ((not is_EllipticCurve(E)) or (not is_EllipticCurve(F))):
        raise ValueError('arguments are not elliptic curves')
    j = E.j_invariant()
    if (j != F.j_invariant()):
        # Different j-invariants: the curves cannot be isomorphic.
        return
    K = E.base_ring()
    from sage.rings.polynomial.polynomial_ring import polygen
    x = polygen(K, 'x')
    (a1E, a2E, a3E, a4E, a6E) = E.ainvs()
    (a1F, a2F, a3F, a4F, a6F) = F.ainvs()
    char = K.characteristic()
    if (char == 2):
        # Characteristic 2, split on j = 0 (supersingular) vs j != 0.
        if (j == 0):
            ulist = ((x ** 3) - (a3E / a3F)).roots(multiplicities=False)
            for u in ulist:
                slist = (((((x ** 4) + (a3E * x)) + (((a2F ** 2) + a4F) * (u ** 4))) + (a2E ** 2)) + a4E).roots(multiplicities=False)
                for s in slist:
                    r = (((s ** 2) + a2E) + (a2F * (u ** 2)))
                    tlist = (((((((x ** 2) + (a3E * x)) + (r ** 3)) + (a2E * (r ** 2))) + (a4E * r)) + a6E) + (a6F * (u ** 6))).roots(multiplicities=False)
                    for t in tlist:
                        (yield (u, r, s, t))
        else:
            # j != 0 in characteristic 2: u is forced by the a1 coefficients.
            u = (a1E / a1F)
            r = ((a3E + (a3F * (u ** 3))) / a1E)
            slist = (((((x ** 2) + (a1E * x)) + r) + a2E) + (a2F * (u ** 2))).roots(multiplicities=False)
            for s in slist:
                t = (((((a4E + (a4F * (u ** 4))) + (s * a3E)) + ((r * s) * a1E)) + (r ** 2)) / a1E)
                (yield (u, r, s, t))
        return
    (b2E, b4E, b6E, b8E) = E.b_invariants()
    (b2F, b4F, b6F, b8F) = F.b_invariants()
    if (char == 3):
        # Characteristic 3, again split on j = 0.
        if (j == 0):
            ulist = ((x ** 4) - (b4E / b4F)).roots(multiplicities=False)
            for u in ulist:
                s = (a1E - (a1F * u))
                t = (a3E - (a3F * (u ** 3)))
                rlist = ((((x ** 3) - (b4E * x)) + b6E) - (b6F * (u ** 6))).roots(multiplicities=False)
                for r in rlist:
                    (yield (u, r, s, (t + (r * a1E))))
        else:
            ulist = ((x ** 2) - (b2E / b2F)).roots(multiplicities=False)
            for u in ulist:
                r = (((b4F * (u ** 4)) - b4E) / b2E)
                s = (a1E - (a1F * u))
                t = ((a3E - (a3F * (u ** 3))) + (a1E * r))
                (yield (u, r, s, t))
        return
    # Generic characteristic (neither 2 nor 3): solve x^m = um for the
    # scaling u; the exponent m depends on whether j is 0, 1728, or generic.
    (c4E, c6E) = E.c_invariants()
    (c4F, c6F) = F.c_invariants()
    if (j == 0):
        (m, um) = (6, (c6E / c6F))
    elif (j == 1728):
        (m, um) = (4, (c4E / c4F))
    else:
        (m, um) = (2, ((c6E * c4F) / (c6F * c4E)))
    ulist = ((x ** m) - um).roots(multiplicities=False)
    for u in ulist:
        # Recover r, s, t from u via the standard change-of-variables formulas
        # (divisions by 2 and 3 are valid since char is not 2 or 3).
        s = (((a1F * u) - a1E) / 2)
        r = (((((a2F * (u ** 2)) + (a1E * s)) + (s ** 2)) - a2E) / 3)
        t = ((((a3F * (u ** 3)) - (a1E * r)) - a3E) / 2)
        (yield (u, r, s, t))
def add_sampler_FID_args(parser):
    """Attach the sampler/FID command-line options to *parser* in place (returns None)."""
    parser.add_argument('--n_samples', required=True, type=int)
    parser.add_argument('--latents_path', type=str)
def main():
    """Download and preprocess the WritingPrompts dataset into pickled splits.

    NOTE(review): the curl command below has lost its URL (extraction
    artifact) -- as written it downloads nothing; restore the dataset URL
    before running.
    """
    os.system('curl | tar xvzf -')
    # The upstream release names the dev split "valid"; rename for consistency.
    os.rename('writingPrompts/valid.wp_source', 'writingPrompts/dev.wp_source')
    os.rename('writingPrompts/valid.wp_target', 'writingPrompts/dev.wp_target')
    save_dir = 'data/wp'
    os.makedirs(save_dir, exist_ok=True)
    tokenizer = GPT2Tokenizer.from_pretrained('gpt2-large')
    # Cap on the number of kept examples per split.
    split_size = {'train': 10000, 'dev': 5000, 'test': 1000}
    for split in ['train', 'dev', 'test']:
        src_lines = open(f'writingPrompts/{split}.wp_source').readlines()
        tgt_lines = open(f'writingPrompts/{split}.wp_target').readlines()
        examples = []
        for (src, tgt) in tqdm(zip(src_lines, tgt_lines), desc=split, total=len(tgt_lines)):
            src = src.strip().replace('<newline>', '\n')
            tgt = tgt.strip().replace('<newline>', '\n')
            # Drop pairs that would exceed GPT-2's 1024-token context window
            # once prompt and story are joined.
            if (len(tokenizer.tokenize(f'{src} [SEP] {tgt} <|endoftext|>')) > 1024):
                continue
            examples.append({'condition': src, 'text': tgt})
            if (len(examples) >= split_size[split]):
                break
        print(f'#{split}: {len(examples)}')
        pickle.dump(examples, open(f'{save_dir}/{split}.pickle', 'wb'))
class HallucinationGenerator():
    """Generate 'hallucinated' variants of text by masking a constituency
    subtree per sentence and letting a BART infiller rewrite the masked span.

    Pipeline: spaCy sentence split -> benepar constituency parse -> random
    subtree masking (DFS) -> BART infilling -> regex alignment of the
    generated text against the mask template.
    """

    def __init__(self, device):
        self._device = device
        # NOTE(review): spacy.load('en') is the legacy shortcut-link form
        # (pre-spaCy-3); modern installs need e.g. 'en_core_web_sm'.
        self._tokenizer = spacy.load('en')
        self._parser = spacy.load('en')
        self._parser.add_pipe(BeneparComponent('benepar_en3_large'))
        self._infiller = BART(init='bart.large').to(self._device)

    def parse(self, text):
        """Split *text* into benepar-parsed sentence spans."""
        return list(self._parser(text).sents)

    def depth_first_search(self, root, sent_length, depth=0):
        """Recursively build a mask template for the constituency subtree *root*.

        Returns ``{'template', 'layer', 'answers'}``: the (possibly masked)
        surface string, the subtree height, and the texts replaced by
        '<mask>'. Masking probability grows with span length and shrinks
        with subtree height; stopword/punctuation-only nodes are never masked.
        """
        children = list(root._.children)
        template = ('' if (len(children) != 0) else root.text)
        layer = 0
        answers = []
        for child in children:
            child_result = self.depth_first_search(root=child, sent_length=sent_length, depth=(depth + 1))
            template = ((template + ' ') + child_result['template'])
            layer = max(layer, (child_result['layer'] + 1))
            answers.extend(child_result['answers'])
        if ((root.text.lower() in en_stopwords) or (not any([ch.isalnum() for ch in root.text]))):
            p_mask = 0.0
        else:
            p_mask = ((len(root.text.split()) / sent_length) / (layer + 1))
        if (random.random() < p_mask):
            # Mask this whole subtree, discarding any deeper masks it carried.
            template = '<mask>'
            answers = [root.text]
        return {'template': template.strip(), 'layer': layer, 'answers': answers}

    def hallucinate_sent(self, root):
        """Mask-and-infill one sentence; returns a result dict or None.

        Retries the random masking up to 5 times until the template is usable
        (not fully masked, no adjacent masks, at least one answer), then
        aligns the generated text against the template to extract fillings.
        """
        for _ in range(5):
            result = self.depth_first_search(root=root, sent_length=len(root.text.split()))
            if ((result['template'] != '<mask>') and ('<mask> <mask>' not in result['template']) and (len(result['answers']) > 0)):
                break
        else:
            result = None
        if (result is None):
            return None
        gen_text = self._infiller.generate(src_texts=[result['template']], sampling=True, topp=SAMPLING_TOPP, max_len=MAX_LENGTH)[0]
        gen_text = self.cleantext(gen_text)
        pattern = result['template']
        # Escape regex metacharacters in the template, then turn each mask
        # into a capture group. NOTE(review): f'\{special_token}' prefixes a
        # literal backslash; ``re.escape`` would be the safer choice -- verify
        # that ``re_special_tokens`` covers every metacharacter needed.
        for special_token in re_special_tokens:
            pattern = pattern.replace(special_token, f'\{special_token}')
        pattern = pattern.replace('<mask>', '(.*)')
        pattern = (pattern + '$')
        try:
            matching = re.match(pattern=pattern, string=gen_text, flags=re.I)
        except:
            # A malformed pattern is treated the same as a failed match.
            matching = None
        if (matching is None):
            return None
        else:
            fillings = list(matching.groups())
            result['original_text'] = self.cleantext(root.text)
            result['gen_text'] = gen_text
            result['fillings'] = fillings
            return result

    def cleantext(self, text):
        """Re-tokenize *text* with spaCy and join the tokens with single spaces."""
        return ' '.join([token.text for token in self._tokenizer(text)])

    def hallucinate(self, input_text):
        """Hallucinate every sentence of *input_text*; None if any sentence fails.

        String fields ('template', 'original_text', 'gen_text') are joined
        across sentences; 'answers' and 'fillings' are flattened lists.
        """
        roots = self.parse(input_text)
        result = {key: [] for key in ['template', 'original_text', 'gen_text', 'answers', 'fillings']}
        for root in roots:
            sent_result = self.hallucinate_sent(root=root)
            if (sent_result is None):
                return None
            for key in result:
                if (key in ['answers', 'fillings']):
                    result[key].extend(sent_result[key])
                else:
                    result[key].append(sent_result[key])
        for key in ['template', 'original_text', 'gen_text']:
            result[key] = ' '.join(result[key])
        return result
# NOTE(review): ``_utils.test()`` looks like a stripped ``@test_utils.test()``
# decorator from Taichi's test harness; confirm against the original file.
_utils.test()
def test_remove_rwtexture_ndim():
    """The texture ``shape`` argument was removed in Taichi v1.7.0: building an
    RWTEXTURE graph arg with ``shape=`` must raise with the removal message."""
    with pytest.raises(ti.TaichiRuntimeError, match='The shape argument for texture is deprecated in v1.6.0, and it is removed in v1.7.0. Please use ndim instead. \\(Note that you no longer need the exact texture size.\\)'):
        ti.graph.Arg(ti.graph.ArgKind.RWTEXTURE, 'x', shape=(128, 128), fmt=ti.Format.r32f)
def dict_to_query(d=None, **kwargs):
    """Build a DataFrame-style query string from *d* merged with *kwargs*.

    Each pair becomes `` `key`=="value" `` and pairs are joined with '&';
    keyword arguments override keys already present in *d*.

    The original signature used a mutable default (``d=dict()``); ``None``
    is used as the sentinel instead, with identical call-site behavior
    (``d`` was never mutated, but mutable defaults are a standing hazard).
    """
    merged = {**(d if d is not None else {}), **kwargs}
    return '&'.join(f'`{k}`=="{v}"' for k, v in merged.items())
def cuda_dummy_step(function_manager: PyCUDAFunctionManager, data_manager: PyCUDADataManager, env_resetter: PyCUDAEnvironmentReset, target: int, step: int):
    """Run one step of the 'testkernel' CUDA function (WarpDrive test helper).

    Resets any environments flagged done, then launches the kernel with the
    device arrays X, Y, _done_, the actions buffer and 'multiplier', plus the
    scalars ``target``/``step`` and the episode length from meta info.
    """
    env_resetter.reset_when_done(data_manager)
    # The kernel expects 32-bit ints, not Python ints.
    step = np.int32(step)
    target = np.int32(target)
    test_step = function_manager.get_function('testkernel')
    # Launch with the manager-configured block/grid dimensions.
    test_step(data_manager.device_data('X'), data_manager.device_data('Y'), data_manager.device_data('_done_'), data_manager.device_data(f'{_ACTIONS}'), data_manager.device_data('multiplier'), target, step, data_manager.meta_info('episode_length'), block=function_manager.block, grid=function_manager.grid)
def register_Ns3TcpHybla_methods(root_module, cls):
    """Register the ``ns3::TcpHybla`` Python bindings (constructors + methods).

    Auto-generated pybindgen registration code -- keep in sync with the
    binding generator rather than editing by hand.
    """
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor([param('ns3::TcpHybla const &', 'sock')])
    cls.add_method('Fork', 'ns3::Ptr< ns3::TcpCongestionOps >', [], is_virtual=True)
    cls.add_method('GetName', 'std::string', [], is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('PktsAcked', 'void', [param('ns3::Ptr< ns3::TcpSocketState >', 'tcb'), param('uint32_t', 'segmentsAcked'), param('ns3::Time const &', 'rtt')], is_virtual=True)
    # Protected virtuals of the congestion-control interface.
    cls.add_method('CongestionAvoidance', 'void', [param('ns3::Ptr< ns3::TcpSocketState >', 'tcb'), param('uint32_t', 'segmentsAcked')], visibility='protected', is_virtual=True)
    cls.add_method('SlowStart', 'uint32_t', [param('ns3::Ptr< ns3::TcpSocketState >', 'tcb'), param('uint32_t', 'segmentsAcked')], visibility='protected', is_virtual=True)
    return
class DatasetMapper():
    """Map a Detectron2-style dataset dict into the format used by the model.

    Loads the image, applies the configured augmentations, and (in training)
    converts the annotations into ``Instances``.
    """

    def __init__(self, is_train: bool, *, augmentations: List[Union[(T.Augmentation, T.Transform)]], image_format: str, use_instance_mask: bool=False, use_keypoint: bool=False, instance_mask_format: str='polygon', keypoint_hflip_indices: Optional[np.ndarray]=None, precomputed_proposal_topk: Optional[int]=None, recompute_boxes: bool=False):
        # Recomputing boxes from masks only makes sense when masks are loaded.
        if recompute_boxes:
            assert use_instance_mask, 'recompute_boxes requires instance masks'
        self.is_train = is_train
        self.augmentations = augmentations
        self.image_format = image_format
        self.use_instance_mask = use_instance_mask
        self.instance_mask_format = instance_mask_format
        self.use_keypoint = use_keypoint
        self.keypoint_hflip_indices = keypoint_hflip_indices
        self.proposal_topk = precomputed_proposal_topk
        self.recompute_boxes = recompute_boxes
        logger = logging.getLogger(__name__)
        logger.info(('Augmentations used in training: ' + str(augmentations)))

    # NOTE(review): takes ``cls`` but carries no ``@classmethod`` decorator --
    # likely stripped by extraction (upstream detectron2 uses
    # ``@classmethod``/``@configurable``); verify before calling.
    def from_config(cls, cfg, is_train: bool=True):
        """Translate a config node into the kwargs accepted by ``__init__``."""
        augs = utils.build_augmentation(cfg, is_train)
        if (cfg.INPUT.CROP.ENABLED and is_train):
            # Random crop goes first; boxes must then be recomputed from masks.
            augs.insert(0, T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE))
            recompute_boxes = cfg.MODEL.MASK_ON
        else:
            recompute_boxes = False
        ret = {'is_train': is_train, 'augmentations': augs, 'image_format': cfg.INPUT.FORMAT, 'use_instance_mask': cfg.MODEL.MASK_ON, 'instance_mask_format': cfg.INPUT.MASK_FORMAT, 'use_keypoint': cfg.MODEL.KEYPOINT_ON, 'recompute_boxes': recompute_boxes}
        if cfg.MODEL.KEYPOINT_ON:
            ret['keypoint_hflip_indices'] = utils.create_keypoint_hflip_indices(cfg.DATASETS.TRAIN)
        if cfg.MODEL.LOAD_PROPOSALS:
            ret['precomputed_proposal_topk'] = (cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN if is_train else cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST)
        return ret

    def __call__(self, dataset_dict):
        """Transform one dataset dict in Detectron2 format into model input."""
        # Deep copy: the dict will be mutated (keys popped, tensors added).
        dataset_dict = copy.deepcopy(dataset_dict)
        image = utils.read_image(dataset_dict['file_name'], format=self.image_format)
        utils.check_image_size(dataset_dict, image)
        if ('sem_seg_file_name' in dataset_dict):
            sem_seg_gt = utils.read_image(dataset_dict.pop('sem_seg_file_name'), 'L').squeeze(2)
        else:
            sem_seg_gt = None
        # Apply the augmentation pipeline to image and (optional) sem-seg GT.
        aug_input = T.StandardAugInput(image, sem_seg=sem_seg_gt)
        transforms = aug_input.apply_augmentations(self.augmentations)
        (image, sem_seg_gt) = (aug_input.image, aug_input.sem_seg)
        image_shape = image.shape[:2]
        # HWC -> CHW tensor; contiguous copy avoids negative-stride issues.
        dataset_dict['image'] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
        if (sem_seg_gt is not None):
            dataset_dict['sem_seg'] = torch.as_tensor(sem_seg_gt.astype('long'))
        if (self.proposal_topk is not None):
            utils.transform_proposals(dataset_dict, image_shape, transforms, proposal_topk=self.proposal_topk)
        if (not self.is_train):
            # Inference needs no ground truth.
            dataset_dict.pop('annotations', None)
            dataset_dict.pop('sem_seg_file_name', None)
            return dataset_dict
        if ('annotations' in dataset_dict):
            for anno in dataset_dict['annotations']:
                if (not self.use_instance_mask):
                    anno.pop('segmentation', None)
                if (not self.use_keypoint):
                    anno.pop('keypoints', None)
            # Transform annotations to the augmented image; drop crowd regions.
            annos = [utils.transform_instance_annotations(obj, transforms, image_shape, keypoint_hflip_indices=self.keypoint_hflip_indices) for obj in dataset_dict.pop('annotations') if (obj.get('iscrowd', 0) == 0)]
            instances = utils.annotations_to_instances(annos, image_shape, mask_format=self.instance_mask_format)
            if self.recompute_boxes:
                # After cropping, derive tight boxes from the cropped masks.
                instances.gt_boxes = instances.gt_masks.get_bounding_boxes()
            dataset_dict['instances'] = utils.filter_empty_instances(instances)
        return dataset_dict
class CommonSenseQAScenario(Scenario):
    """HELM scenario for the CommonsenseQA multiple-choice benchmark.

    NOTE(review): the ``description`` and ``base_url`` string literals below
    were truncated by extraction (unterminated quotes; ``base_url`` has lost
    the download URL entirely). This block does not parse as-is -- restore
    the original values from the upstream source.
    """
    name = 'commonsenseqa'
    description = 'Benchmark from
    tags = ['knowledge', 'multiple_choice']

    def get_instances(self, output_path: str) -> List[Instance]:
        """Download the train/dev JSONL files and convert each line to an Instance."""
        data_path = os.path.join(output_path, 'data')
        ensure_directory_exists(data_path)
        instances = []
        base_url = '
        # HELM calls the dev set 'val'; the upstream files name it 'dev'.
        split_mapping = {'train': 'train', 'val': 'dev'}
        for split in ['train', 'val']:
            file_path = os.path.join(data_path, f'commonsenseqa_{split}.jsonl')
            ensure_file_downloaded(source_url=base_url.format(split_mapping[split]), target_path=file_path)
            hlog(f'Reading {file_path}')
            with open(file_path) as f:
                for line in f:
                    item = json.loads(line)
                    instances.append(self.json_to_instance(item, split))
        return instances

    # NOTE(review): missing ``self`` (or a ``@staticmethod`` decorator) -- the
    # call site above uses ``self.json_to_instance(item, split)``, so a
    # decorator was probably stripped; verify against the original.
    def json_to_instance(item, split) -> Instance:
        """Convert one CommonsenseQA JSON record into an Instance."""
        letter2idx = {'A': 0, 'B': 1, 'C': 2, 'D': 3, 'E': 4}
        question = item['question']['stem']
        answers = [answer['text'] for answer in item['question']['choices']]
        correct_choice = letter2idx[item['answerKey']]
        correct_answer = answers[correct_choice]
        # Every CommonsenseQA question has exactly five choices labeled A-E.
        assert (len(answers) == 5)
        assert (item['question']['choices'][correct_choice]['label'] == item['answerKey'])
        return _make_instance(question, answers, correct_answer, split)
def splint(a, b, tck, full_output=0):
    """Evaluate the definite integral of a spline between two points.

    Parameters
    ----------
    a, b : float
        Limits of integration.
    tck : BSpline instance or (t, c, k) tuple
        The spline to integrate.
    full_output : int, optional
        Non-zero requests extra fitpack output; this is only honoured on the
        tuple path and is ignored (now with an emitted warning) for
        ``BSpline`` input.

    Returns
    -------
    The integral value (the legacy tuple path may return extra output when
    ``full_output`` is non-zero).
    """
    if isinstance(tck, BSpline):
        if (tck.c.ndim > 1):
            mesg = 'Calling splint() with BSpline objects with c.ndim > 1 is not recommended. Use BSpline.integrate() instead.'
            warnings.warn(mesg, DeprecationWarning)
        if (full_output != 0):
            mesg = ('full_output = %s is not supported. Proceeding as if full_output = 0' % full_output)
            # Bug fix: the message above was constructed but never emitted,
            # silently discarding the caller's full_output request.
            warnings.warn(mesg)
        return tck.integrate(a, b, extrapolate=False)
    else:
        return _impl.splint(a, b, tck, full_output)
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-06):
    """Frechet distance between the Gaussians N(mu1, sigma1) and N(mu2, sigma2).

    Computes ``||mu1 - mu2||^2 + Tr(C1 + C2 - 2*sqrt(C1 @ C2))`` (the FID
    formula). *eps* is the ridge added to both covariances if the matrix
    square root of the product is numerically singular.
    """
    mu1, mu2 = np.atleast_1d(mu1), np.atleast_1d(mu2)
    sigma1, sigma2 = np.atleast_2d(sigma1), np.atleast_2d(sigma2)
    assert mu1.shape == mu2.shape, 'Training and test mean vectors have different lengths'
    assert sigma1.shape == sigma2.shape, 'Training and test covariances have different dimensions'

    diff = mu1 - mu2
    covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
    if not np.isfinite(covmean).all():
        # Numerical blow-up: retry with a small ridge on both covariances.
        warnings.warn('fid calculation produces singular product; adding %s to diagonal of cov estimates' % eps)
        offset = np.eye(sigma1.shape[0]) * eps
        covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
    if np.iscomplexobj(covmean):
        # Tolerate tiny imaginary noise from sqrtm; anything larger is an error.
        if not np.allclose(np.diagonal(covmean).imag, 0, atol=0.001):
            raise ValueError('Imaginary component {}'.format(np.max(np.abs(covmean.imag))))
        covmean = covmean.real
    return diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * np.trace(covmean)
# NOTE(review): the bare ``_model`` below looks like a stripped
# ``@register_model`` decorator from timm; confirm against the original file.
_model
def legacy_seresnext101_32x4d(pretrained=False, **kwargs):
    """Build the legacy SE-ResNeXt-101 (32x4d) model.

    32 groups, layers [3, 4, 23, 3], SE reduction 16; extra keyword
    arguments are forwarded to the SENet constructor.
    """
    model_args = dict(block=SEResNeXtBottleneck, layers=[3, 4, 23, 3], groups=32, reduction=16, **kwargs)
    return _create_senet('legacy_seresnext101_32x4d', pretrained, **model_args)
class EventWriter():
    """Minimal interface for event-writing backends.

    Concrete writers must implement :meth:`write`; :meth:`close` is an
    optional cleanup hook that defaults to a no-op.
    """

    def write(self, **kwargs):
        """Record an event; concrete subclasses must override this."""
        raise NotImplementedError

    def close(self):
        """Release any held resources; no-op by default."""
        pass
_REGISTRY.register()
class Kinetics(torch.utils.data.Dataset):
    """Kinetics video dataset.

    For ``train``/``val`` one clip is sampled per video (random temporal and
    spatial sampling); for ``test`` every video is expanded into
    ``NUM_ENSEMBLE_VIEWS * NUM_SPATIAL_CROPS`` clips enumerating temporal
    positions and spatial crops.
    """

    def __init__(self, cfg, mode, num_retries=10):
        """
        Args:
            cfg: config object with DATA / TEST / AUG / MULTIGRID sections.
            mode: one of 'train', 'val', 'test'.
            num_retries: number of attempts to load/decode a video before failing.
        """
        assert (mode in ['train', 'val', 'test']), "Split '{}' not supported for Kinetics".format(mode)
        self.mode = mode
        self.cfg = cfg
        self._video_meta = {}
        self._num_retries = num_retries
        # train/val draw a single clip per video; test enumerates all views/crops.
        if (self.mode in ['train', 'val']):
            self._num_clips = 1
        elif (self.mode in ['test']):
            self._num_clips = (cfg.TEST.NUM_ENSEMBLE_VIEWS * cfg.TEST.NUM_SPATIAL_CROPS)
        logger.info('Constructing Kinetics {}...'.format(mode))
        self._construct_loader()
        self.aug = False
        self.rand_erase = False
        self.use_temporal_gradient = False
        self.temporal_gradient_rate = 0.0
        # RandAugment / random erasing only apply to the training split.
        if ((self.mode == 'train') and self.cfg.AUG.ENABLE):
            self.aug = True
            if (self.cfg.AUG.RE_PROB > 0):
                self.rand_erase = True

    def _construct_loader(self):
        """Parse ``<mode>.csv`` (rows of ``path<sep>label``) and expand each video into clips."""
        path_to_file = os.path.join(self.cfg.DATA.PATH_TO_DATA_DIR, '{}.csv'.format(self.mode))
        assert g_pathmgr.exists(path_to_file), '{} dir not found'.format(path_to_file)
        self._path_to_videos = []
        self._labels = []
        self._spatial_temporal_idx = []
        with g_pathmgr.open(path_to_file, 'r') as f:
            for (clip_idx, path_label) in enumerate(f.read().splitlines()):
                assert (len(path_label.split(self.cfg.DATA.PATH_LABEL_SEPARATOR)) == 2)
                (path, label) = path_label.split(self.cfg.DATA.PATH_LABEL_SEPARATOR)
                # Duplicate the entry once per clip so __getitem__ indexing is flat.
                for idx in range(self._num_clips):
                    self._path_to_videos.append(os.path.join(self.cfg.DATA.PATH_PREFIX, path))
                    self._labels.append(int(label))
                    self._spatial_temporal_idx.append(idx)
                    self._video_meta[((clip_idx * self._num_clips) + idx)] = {}
        # BUG FIX: the original formatted ``self._split_idx``, an attribute that is
        # never defined, so a failing assert raised AttributeError instead of this
        # message. Use the split name itself.
        assert (len(self._path_to_videos) > 0), 'Failed to load Kinetics split {} from {}'.format(self.mode, path_to_file)
        logger.info('Constructing kinetics dataloader (size: {}) from {}'.format(len(self._path_to_videos), path_to_file))

    def __getitem__(self, index):
        """Return (frames, label, index, {}) for the clip at ``index``.

        Retries decoding up to ``self._num_retries`` times; outside test mode
        a different random video is drawn after half the retries fail.
        """
        short_cycle_idx = None
        # Multigrid short-cycle training passes (index, short_cycle_idx) tuples.
        if isinstance(index, tuple):
            (index, short_cycle_idx) = index
        if (self.mode in ['train', 'val']):
            # -1 means random temporal/spatial sampling downstream.
            temporal_sample_index = (- 1)
            spatial_sample_index = (- 1)
            min_scale = self.cfg.DATA.TRAIN_JITTER_SCALES[0]
            max_scale = self.cfg.DATA.TRAIN_JITTER_SCALES[1]
            crop_size = self.cfg.DATA.TRAIN_CROP_SIZE
            if (short_cycle_idx in [0, 1]):
                # Shrink the crop for the short-cycle stages of multigrid.
                crop_size = int(round((self.cfg.MULTIGRID.SHORT_CYCLE_FACTORS[short_cycle_idx] * self.cfg.MULTIGRID.DEFAULT_S)))
            if (self.cfg.MULTIGRID.DEFAULT_S > 0):
                # Keep the scale/crop ratio fixed while the crop size changes.
                min_scale = int(round(((float(min_scale) * crop_size) / self.cfg.MULTIGRID.DEFAULT_S)))
        elif (self.mode in ['test']):
            # Decode the clip's assigned temporal view and spatial crop.
            temporal_sample_index = (self._spatial_temporal_idx[index] // self.cfg.TEST.NUM_SPATIAL_CROPS)
            spatial_sample_index = ((self._spatial_temporal_idx[index] % self.cfg.TEST.NUM_SPATIAL_CROPS) if (self.cfg.TEST.NUM_SPATIAL_CROPS > 1) else 1)
            (min_scale, max_scale, crop_size) = (([self.cfg.DATA.TEST_CROP_SIZE] * 3) if (self.cfg.TEST.NUM_SPATIAL_CROPS > 1) else (([self.cfg.DATA.TRAIN_JITTER_SCALES[0]] * 2) + [self.cfg.DATA.TEST_CROP_SIZE]))
            # Test-time resizing must be deterministic: no scale jitter allowed.
            assert (len({min_scale, max_scale}) == 1)
        else:
            raise NotImplementedError('Does not support {} mode'.format(self.mode))
        sampling_rate = utils.get_random_sampling_rate(self.cfg.MULTIGRID.LONG_CYCLE_SAMPLING_RATE, self.cfg.DATA.SAMPLING_RATE)
        for i_try in range(self._num_retries):
            video_container = None
            try:
                video_container = container.get_video_container(self._path_to_videos[index], self.cfg.DATA_LOADER.ENABLE_MULTI_THREAD_DECODE, self.cfg.DATA.DECODING_BACKEND)
            except Exception as e:
                logger.info('Failed to load video from {} with error {}'.format(self._path_to_videos[index], e))
            if (video_container is None):
                logger.warning('Failed to meta load video idx {} from {}; trial {}'.format(index, self._path_to_videos[index], i_try))
                # After half the retries, give up on this video and resample.
                if ((self.mode not in ['test']) and (i_try > (self._num_retries // 2))):
                    index = random.randint(0, (len(self._path_to_videos) - 1))
                continue
            frames = decoder.decode(video_container, sampling_rate, self.cfg.DATA.NUM_FRAMES, temporal_sample_index, self.cfg.TEST.NUM_ENSEMBLE_VIEWS, video_meta=self._video_meta[index], target_fps=self.cfg.DATA.TARGET_FPS, backend=self.cfg.DATA.DECODING_BACKEND, max_spatial_scale=min_scale, use_offset=self.cfg.DATA.USE_OFFSET_SAMPLING)
            if (frames is None):
                logger.warning('Failed to decode video idx {} from {}; trial {}'.format(index, self._path_to_videos[index], i_try))
                if ((self.mode not in ['test']) and (i_try > (self._num_retries // 2))):
                    index = random.randint(0, (len(self._path_to_videos) - 1))
                continue
            if self.aug:
                if (self.cfg.AUG.NUM_SAMPLE > 1):
                    # Repeated augmentation: several independent views of one clip.
                    frame_list = []
                    label_list = []
                    index_list = []
                    for _ in range(self.cfg.AUG.NUM_SAMPLE):
                        new_frames = self._aug_frame(frames, spatial_sample_index, min_scale, max_scale, crop_size)
                        label = self._labels[index]
                        new_frames = utils.pack_pathway_output(self.cfg, new_frames)
                        frame_list.append(new_frames)
                        label_list.append(label)
                        index_list.append(index)
                    return (frame_list, label_list, index_list, {})
                else:
                    frames = self._aug_frame(frames, spatial_sample_index, min_scale, max_scale, crop_size)
            else:
                frames = utils.tensor_normalize(frames, self.cfg.DATA.MEAN, self.cfg.DATA.STD)
                # T H W C -> C T H W for the model.
                frames = frames.permute(3, 0, 1, 2)
                frames = utils.spatial_sampling(frames, spatial_idx=spatial_sample_index, min_scale=min_scale, max_scale=max_scale, crop_size=crop_size, random_horizontal_flip=self.cfg.DATA.RANDOM_FLIP, inverse_uniform_sampling=self.cfg.DATA.INV_UNIFORM_SAMPLE)
            label = self._labels[index]
            frames = utils.pack_pathway_output(self.cfg, frames)
            return (frames, label, index, {})
        else:
            # Loop exhausted without a successful decode.
            raise RuntimeError('Failed to fetch video after {} retries.'.format(self._num_retries))

    def _aug_frame(self, frames, spatial_sample_index, min_scale, max_scale, crop_size):
        """Apply RandAugment, normalization, spatial sampling and optional erasing.

        Expects decoded frames in T H W C layout; returns a C T H W tensor.
        """
        aug_transform = create_random_augment(input_size=(frames.size(1), frames.size(2)), auto_augment=self.cfg.AUG.AA_TYPE, interpolation=self.cfg.AUG.INTERPOLATION)
        # T H W C -> T C H W for PIL-based augmentation, then back.
        frames = frames.permute(0, 3, 1, 2)
        list_img = self._frame_to_list_img(frames)
        list_img = aug_transform(list_img)
        frames = self._list_img_to_frames(list_img)
        frames = frames.permute(0, 2, 3, 1)
        frames = utils.tensor_normalize(frames, self.cfg.DATA.MEAN, self.cfg.DATA.STD)
        frames = frames.permute(3, 0, 1, 2)
        (scl, asp) = (self.cfg.DATA.TRAIN_JITTER_SCALES_RELATIVE, self.cfg.DATA.TRAIN_JITTER_ASPECT_RELATIVE)
        relative_scales = (None if ((self.mode not in ['train']) or (len(scl) == 0)) else scl)
        relative_aspect = (None if ((self.mode not in ['train']) or (len(asp) == 0)) else asp)
        frames = utils.spatial_sampling(frames, spatial_idx=spatial_sample_index, min_scale=min_scale, max_scale=max_scale, crop_size=crop_size, random_horizontal_flip=self.cfg.DATA.RANDOM_FLIP, inverse_uniform_sampling=self.cfg.DATA.INV_UNIFORM_SAMPLE, aspect_ratio=relative_aspect, scale=relative_scales, motion_shift=(self.cfg.DATA.TRAIN_JITTER_MOTION_SHIFT if (self.mode in ['train']) else False))
        if self.rand_erase:
            erase_transform = RandomErasing(self.cfg.AUG.RE_PROB, mode=self.cfg.AUG.RE_MODE, max_count=self.cfg.AUG.RE_COUNT, num_splits=self.cfg.AUG.RE_COUNT, device='cpu')
            # RandomErasing expects batch-first layout, so swap C and T around it.
            frames = frames.permute(1, 0, 2, 3)
            frames = erase_transform(frames)
            frames = frames.permute(1, 0, 2, 3)
        return frames

    def _frame_to_list_img(self, frames):
        """Convert a T C H W tensor to a list of PIL images (one per frame)."""
        img_list = [transforms.ToPILImage()(frames[i]) for i in range(frames.size(0))]
        return img_list

    def _list_img_to_frames(self, img_list):
        """Stack a list of PIL images back into a T C H W float tensor."""
        img_list = [transforms.ToTensor()(img) for img in img_list]
        return torch.stack(img_list)

    def __len__(self):
        """Number of clips.

        BUG FIX: the original returned ``self.num_videos`` — the bound method
        object itself — which made ``len(dataset)`` raise TypeError. Call it.
        """
        return self.num_videos()

    def num_videos(self):
        """Total number of clips (videos times clips-per-video)."""
        return len(self._path_to_videos)
def main():
    """Print and run every registered benchmark evaluator in order."""
    for idx, evaluator in enumerate(benchmarks):
        print('\nBenchmark', idx, ':')
        print(evaluator)
        evaluator.evaluate()
class Combinations_setk(Combinations_msetk):
    """Combinations of ``k`` distinct elements drawn from ``self.mset``."""

    def _iterator(self, items, n):
        # Delegate to itertools and materialize each tuple as a list.
        for tup in itertools.combinations(items, n):
            yield list(tup)

    def _iterator_zero(self):
        # The unique 0-combination is the empty list.
        yield []

    def __iter__(self):
        """Iterate over all k-subsets, each as a list."""
        return self._iterator_zero() if (self.k == 0) else self._iterator(self.mset, self.k)

    def list(self) -> list:
        """Return every combination, materialized into a list."""
        return list(self)

    def unrank(self, r):
        """Return the combination with rank ``r`` in the canonical ordering."""
        return [self.mset[i] for i in from_rank(r, len(self.mset), self.k)]

    def rank(self, x):
        """Return the rank of combination ``x`` (elements mapped to positions)."""
        positions = [self.mset.index(el) for el in x]
        return rank(positions, len(self.mset))

    def cardinality(self) -> Integer:
        """Return ``binomial(n, k)`` where ``n = len(self.mset)``."""
        return ZZ(binomial(len(self.mset), self.k))
class Data(Clustering, MetricComparisons):
    """Dataset wrapper combining clustering and metric-comparison utilities,
    with iterative intrinsic-dimension (ID) estimators."""

    def __init__(self, coordinates=None, distances=None, maxk=None, verbose=False, njobs=cores, working_memory=1024):
        # working_memory is accepted but not forwarded to the parent here.
        super().__init__(coordinates=coordinates, distances=distances, maxk=maxk, verbose=verbose, njobs=njobs)

    def return_ids_kstar_gride(self, initial_id=None, n_iter=5, Dthr=23., d0=0.001, d1=1000, eps=1e-07):
        """Iteratively estimate the intrinsic dimension with the Gride estimator,
        alternating kstar computation and single-scale ID fitting.

        Returns (ids, ids_err, kstars, log_likelihoods), one entry per iteration.
        Also sets self.intrinsic_dim / _err / _scale to the final values.
        """
        if (initial_id is None):
            # Bootstrap the ID with the two-nearest-neighbour estimator.
            self.compute_id_2NN()
        else:
            self.compute_distances()
            self.set_id(initial_id)
        ids = []
        ids_err = []
        kstars = []
        log_likelihoods = []
        for i in range(n_iter):
            self.compute_kstar(Dthr)
            print('iteration ', i)
            print('id ', self.intrinsic_dim)
            n2s = self.kstar
            # Force every kstar to be even so n1 = n2 / 2 is an integer.
            not_even = ((n2s % 2) != 0)
            n2s[not_even] = (n2s[not_even] + 1)
            assert (sum(((n2s % 2) != 0)) == 0)
            n1s = (n2s / 2).astype(int)
            # NOTE(review): the comprehension's `i` shadows the outer loop index.
            mus = np.array([(self.distances[(i, n2)] / self.distances[(i, n1)]) for (i, (n1, n2)) in enumerate(zip(n1s, n2s))])
            (id, id_err) = self._compute_id_gride_single_scale(d0, d1, mus, n1s, n2s, eps)
            self.set_id(id)
            log_lik = (- ut._neg_loglik(self.dtype, id, mus, n1s, n2s))
            ids.append(id)
            ids_err.append(id_err)
            kstars.append(self.kstar)
            log_likelihoods.append(log_lik)
        ids = np.array(ids)
        ids_err = np.array(ids_err)
        kstars = np.array(kstars)
        log_likelihoods = np.array(log_likelihoods)
        # Characteristic scale: mean of the n1/n2 neighbour distances from the
        # final iteration, averaged over all points.
        id_scale = 0.0
        for (i, (n1, n2)) in enumerate(zip(n1s, n2s)):
            id_scale += self.distances[(i, n1)]
            id_scale += self.distances[(i, n2)]
        id_scale /= (2 * self.N)
        self.intrinsic_dim = id
        self.intrinsic_dim_err = id_err
        self.intrinsic_dim_scale = id_scale
        return (ids, ids_err, kstars, log_likelihoods)

    def return_ids_kstar_binomial(self, initial_id=None, n_iter=5, Dthr=23., r=0.5):
        """Iteratively estimate the intrinsic dimension with the binomial
        estimator at radius ratio ``r``.

        Returns (ids, ids_err, kstars, log_likelihoods), one entry per iteration.
        Also sets self.intrinsic_dim / _err / _scale to the final values.
        """
        if (initial_id is None):
            self.compute_id_binomial_k(k=10, r=r)
            id = self.intrinsic_dim
        else:
            self.compute_distances()
            self.set_id(initial_id)
            id = initial_id
        ids = []
        ids_err = []
        kstars = []
        log_likelihoods = []
        for i in range(n_iter):
            self.compute_kstar(Dthr)
            print('iteration ', i)
            print('id ', id)
            # rk: distance to the kstar-th neighbour; rn: inner radius r * rk.
            rk = np.array([dd[self.kstar[j]] for (j, dd) in enumerate(self.distances)])
            rn = (rk * r)
            # n: number of neighbours inside the inner radius, per point.
            n = np.sum([(dd < rn[j]) for (j, dd) in enumerate(self.distances)], axis=1)
            id = (np.log(((n.mean() - 1) / (self.kstar.mean() - 1))) / np.log(r))
            id_err = ut._compute_binomial_cramerrao(id, self.kstar, r, self.N)
            log_lik = ut.binomial_loglik(id, (self.kstar - 1), (n - 1), r)
            ids.append(id)
            ids_err.append(id_err)
            kstars.append(self.kstar)
            log_likelihoods.append(log_lik)
        ids = np.array(ids)
        ids_err = np.array(ids_err)
        kstars = np.array(kstars)
        log_likelihoods = np.array(log_likelihoods)
        self.intrinsic_dim = id
        self.intrinsic_dim_err = id_err
        # Scale: midpoint of the mean inner and outer radii of the last iteration.
        self.intrinsic_dim_scale = (0.5 * (rn.mean() + rk.mean()))
        return (ids, ids_err, kstars, log_likelihoods)
def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error):
    """Report explicit template arguments on make_pair (C++11 deduces them)."""
    raw_line = clean_lines.elided[linenum]
    if _RE_PATTERN_EXPLICIT_MAKEPAIR.search(raw_line):
        error(filename, linenum, 'build/explicit_make_pair', 4, 'For C++11-compatibility, omit template arguments from make_pair OR use pair directly OR if appropriate, construct a pair directly')
class SplitWordsMapperDefaultArgs(Mapper):
    """Mapper that emits the lower-cased text and its whitespace tokens."""

    def run(self, text: str) -> FieldMap:
        """Return a field map with ``lower`` and ``words`` entries."""
        return {'lower': text.lower(), 'words': text.split()}
def intersect_sphere(line: ti.template(), sphere: ti.template()):
    """Intersect a ray (``line``) with a sphere and shade both hit points.

    Returns a pair of ColorWithDepth: the near and far intersection. Misses are
    reported with zero color and infinite depth.
    NOTE(review): written in Taichi's branch-flag style (no early returns);
    presumably decorated with ``@ti.func`` in the original file — confirm.
    """
    color1 = vec4(0)
    color2 = vec4(0)
    dist1 = inf
    dist2 = inf
    # l: vector from ray origin to sphere center; tp: projection of l on the ray.
    l = (sphere.center - line.pos)
    l2 = l.dot(l)
    r2 = (sphere.radius * sphere.radius)
    tp = l.dot(line.dir)
    out_of_sphere = (l2 > r2)
    may_have_intersection = True
    if ((- eps) < (l2 - r2) < eps):
        # Origin lies (numerically) on the sphere surface.
        if ((- eps) < tp < eps):
            # Ray is tangent at the origin: no further intersection.
            may_have_intersection = False
        # Treat the surface point as outside when the ray points away.
        out_of_sphere = (tp < 0)
    if ((tp < 0) and out_of_sphere):
        # Sphere is entirely behind the ray origin.
        may_have_intersection = False
    if may_have_intersection:
        # d2: squared distance from sphere center to the ray.
        d2 = (l2 - (tp * tp))
        if (d2 <= r2):
            # Half-chord length; hits are at tp -/+ tt along the ray.
            tt = ti.sqrt((r2 - d2))
            t1 = (tp - tt)
            if (t1 > 0):
                hit_pos1 = (line.pos + (line.dir * t1))
                dist1 = t1
                normal1 = normalize((hit_pos1 - sphere.center))
                color1 = shading(sphere.color, normal1)
            t2 = (tp + tt)
            if (t2 > 0):
                hit_pos2 = (line.pos + (line.dir * t2))
                dist2 = t2
                normal2 = normalize((hit_pos2 - sphere.center))
                color2 = shading(sphere.color, normal2)
    return (ColorWithDepth(color=color1, depth=dist1), ColorWithDepth(color=color2, depth=dist2))
def test_case_partial_deepcopy(swagger_20):
    """partial_deepcopy must isolate the copy: mutating it leaves the original intact."""
    operation = APIOperation('/example/path', 'GET', {}, swagger_20)
    media_type = 'application/json'
    case = Case(operation=operation, media_type=media_type, path_parameters={'test': 'test'}, headers={'Content-Type': 'application/json'}, cookies={'TOKEN': 'secret'}, query={'a': 1}, body={'b': 1})
    clone = case.partial_deepcopy()
    # Mutate every container on the copy.
    clone.operation.path = '/overwritten/path'
    clone.path_parameters['test'] = 'overwritten'
    clone.headers['Content-Type'] = 'overwritten'
    clone.cookies['TOKEN'] = 'overwritten'
    clone.query['a'] = 'overwritten'
    clone.body['b'] = 'overwritten'
    # Shared scalar survives; the original's containers are untouched.
    assert clone.media_type == media_type
    assert case.operation.path == '/example/path'
    assert case.path_parameters['test'] == 'test'
    assert case.headers['Content-Type'] == 'application/json'
    assert case.cookies['TOKEN'] == 'secret'
    assert case.query['a'] == 1
    assert case.body['b'] == 1
def add_version_to_conv_bias(net, init_net):
    """Give every reuse of a shared Conv bias blob its own versioned copy.

    When several Conv ops consume the same bias input, each use after the first
    is renamed to ``<bias>_v<k>`` and the init op that fills the bias is
    duplicated in ``init_net`` under the new name, so each Conv owns its bias.
    """
    # Count how many Conv ops consume each bias blob (the third input).
    use_count = defaultdict(int)
    for conv in net._net.op:
        if ('Conv' in conv.type) and (len(conv.input) >= 3):
            use_count[conv.input[2]] += 1
    # Remember the init op that produces each shared bias.
    fill_for_bias = {}
    for fill in init_net._net.op:
        if use_count[fill.output[0]] > 1:
            fill_for_bias[fill.output[0]] = fill
    # Rename every use after the first and clone its fill op.
    next_version = defaultdict(int)
    for conv in net._net.op:
        if ('Conv' not in conv.type) or (len(conv.input) < 3):
            continue
        bias = conv.input[2]
        if use_count[bias] <= 1:
            continue
        version = next_version[bias]
        next_version[bias] += 1
        if version == 0:
            # The first consumer keeps the original blob name.
            continue
        new_bias = bias + '_v' + str(version)
        cloned_fill = copy.deepcopy(fill_for_bias[bias])
        cloned_fill.output[0] = new_bias
        init_net._net.op.extend([cloned_fill])
        conv.input[2] = new_bias
        net._net.external_input.append(new_bias)
.parametrize('front, reference', [(tf.zeros(shape=(0, 2)), [[0.1, (- 0.65)], [(- 0.7), (- 0.1)]]), (tf.zeros(shape=(0, 3)), [4.0, 4.0, 4.0])])
def test_pareto_hypervolume_indicator_raises_for_empty_front(front: tf.Tensor, reference: list[float]) -> None:
    """hypervolume_indicator must reject an empty Pareto front with ValueError."""
    pareto = Pareto(front)
    with pytest.raises(ValueError):
        pareto.hypervolume_indicator(tf.constant(reference))
def test_merge_full():
    """Merging two traces sums predicate counts, keeps minimum branch
    distances, unions the sets, and concatenates instruction/assertion lists."""
    # First trace: code objects {0, 1}, covered line 0, one instruction/assertion.
    instr0 = ExecutedInstruction('foo', 0, 1, 2, 3, 4, 5)
    stmt0 = MagicMock()
    assert0 = ExecutedAssertion(0, 1, 2, stmt0)
    trace0 = ExecutionTrace()
    trace0.executed_code_objects.add(0)
    trace0.executed_code_objects.add(1)
    trace0.executed_predicates[0] = 9
    trace0.executed_predicates[1] = 7
    trace0.true_distances[0] = 6
    trace0.true_distances[1] = 3
    trace0.false_distances[0] = 0
    trace0.false_distances[1] = 1
    trace0.covered_line_ids = {0}
    trace0.executed_instructions = [instr0]
    trace0.executed_assertions = [assert0]
    # Second trace: code objects {1, 2}, covered line 1, overlapping predicate 1.
    instr1 = ExecutedInstruction('bar', 1, 2, 3, 4, 5, 6)
    stmt1 = MagicMock()
    assert1 = ExecutedAssertion(1, 2, 3, stmt1)
    trace1 = ExecutionTrace()
    trace1.executed_code_objects.add(1)
    trace1.executed_code_objects.add(2)
    trace1.executed_predicates[1] = 5
    trace1.executed_predicates[2] = 8
    trace1.true_distances[1] = 19
    trace1.true_distances[2] = 3
    trace1.false_distances[1] = 234
    trace1.false_distances[2] = 0
    trace1.covered_line_ids = {1}
    trace1.executed_instructions = [instr0, instr1]
    trace1.executed_assertions = [assert1]
    # Expected merge: assert1's position is shifted by trace0's instruction count.
    assert2 = ExecutedAssertion(1, 2, 4, stmt1)
    result = ExecutionTrace()
    result.executed_code_objects.add(0)
    result.executed_code_objects.add(1)
    result.executed_code_objects.add(2)
    result.executed_predicates[0] = 9
    result.executed_predicates[1] = 12
    result.executed_predicates[2] = 8
    result.true_distances[0] = 6
    result.true_distances[1] = 3
    result.true_distances[2] = 3
    result.false_distances[0] = 0
    result.false_distances[1] = 1
    result.false_distances[2] = 0
    result.covered_line_ids = {0, 1}
    result.executed_instructions = [instr0, instr0, instr1]
    result.executed_assertions = [assert0, assert2]
    trace0.merge(trace1)
    assert (trace0 == result)
def JH(N, H):
    """Return the modular abelian variety attached to Gamma_H(N, H), memoized.

    Results are cached in the global store via ``_get`` / ``_saved`` under the
    key ``'JH(N,H)'``.
    """
    key = ('JH(%s,%s)' % (N, H))
    try:
        # Cache hit: return the previously computed variety.
        return _get(key)
    except ValueError:
        # Cache miss (_get raises ValueError): compute, store, and return.
        from sage.modular.arithgroup.all import GammaH
        return _saved(key, GammaH(N, H).modular_abelian_variety())
.parametrize('schema, expected', (({'properties': {'a': {'readOnly': True}}}, {'not': {'required': ['a']}}), ({'properties': {'a': {'readOnly': True}}, 'required': ['a']}, {'not': {'required': ['a']}})))
def test_rewrite_read_only(schema, expected):
    """readOnly properties must be rewritten into a ``not required`` constraint in place."""
    rewrite_properties(schema, is_read_only)
    assert (schema == expected)
class ResNet_Block(nn.Module):
    """Residual block: noise/ReLU/conv x2 on the main path, 1x1 conv shortcut
    when the shape changes, with optional Up/Down resampling on both paths."""

    def __init__(self, in_c, in_o, opt, downsample=None):
        """
        Args:
            in_c: input channel count.
            in_o: output channel count.
            opt: options object forwarded to the noise and conv factories.
            downsample: 'Down', 'Up', other truthy (treated like 'Down'), or falsy.
        """
        super().__init__()
        noise_in = LinearNoiseLayer(opt, output_sz=in_c)
        noise_mid = LinearNoiseLayer(opt, output_sz=in_o)
        conv_layer = get_conv_layer(opt)
        # Construction order kept: main-path convs, then the 1x1 shortcut conv.
        conv_main_a = conv_layer(in_c, in_o, 3, 1, 1)
        conv_main_b = conv_layer(in_o, in_o, 3, 1, 1)
        conv_short = conv_layer(in_c, in_o, 1, 0, 1)
        if (downsample == 'Down'):
            resample = nn.AvgPool2d(kernel_size=3, stride=2, padding=1)
        elif (downsample == 'Up'):
            resample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)
        elif downsample:
            # Any other truthy value behaves like 'Down'.
            resample = nn.AvgPool2d(kernel_size=3, stride=2, padding=1)
        else:
            resample = Identity()
        self.ch_a = nn.Sequential(noise_in, nn.ReLU(), conv_main_a, noise_mid, nn.ReLU(), conv_main_b, resample)
        # Shortcut needs a projection whenever channels or resolution change.
        if (downsample or (in_c != in_o)):
            self.ch_b = nn.Sequential(conv_short, resample)
        else:
            self.ch_b = Identity()

    def forward(self, x):
        """Return the sum of the residual (ch_a) and shortcut (ch_b) paths."""
        main = self.ch_a(x)
        shortcut = self.ch_b(x)
        return main + shortcut
class TestGaussian():
    """Sanity checks for ``scipy.signal.windows.gaussian``.

    BUG FIX: the expected arrays had been garbled into 0.0/1.0 placeholders,
    which do not match the actual Gaussian window values and would fail; the
    canonical expected values are restored below.
    """

    def test_basic(self):
        # Even symmetric window: no sample lands exactly on the peak.
        assert_allclose(windows.gaussian(6, 1.0),
                        [0.04393693362340742, 0.3246524673583497,
                         0.8824969025845955, 0.8824969025845955,
                         0.3246524673583497, 0.04393693362340742])
        # Odd symmetric windows: centre sample is exactly 1.0.
        assert_allclose(windows.gaussian(7, 1.2),
                        [0.04393693362340742, 0.2493522087772962,
                         0.7066482778577162, 1.0, 0.7066482778577162,
                         0.2493522087772962, 0.04393693362340742])
        assert_allclose(windows.gaussian(7, 3),
                        [0.6065306597126334, 0.8007374029168081,
                         0.9459594689067654, 1.0, 0.9459594689067654,
                         0.8007374029168081, 0.6065306597126334])
        # Periodic (sym=False): the length-7 symmetric window minus its last sample.
        assert_allclose(windows.gaussian(6, 3, False),
                        [0.6065306597126334, 0.8007374029168081,
                         0.9459594689067654, 1.0, 0.9459594689067654,
                         0.8007374029168081])
class AND(sympy.Function):
    """Symbolic AND over two arguments, evaluated only when both are boolean."""

    def eval(cls, x, y):
        # NOTE(review): sympy invokes Function.eval on the class, and the first
        # parameter here is named ``cls`` — the @classmethod decorator appears
        # to have been stripped; confirm against the original file.
        if (x.is_Boolean and y.is_Boolean):
            # Python ``and``: returns x when x is falsy, otherwise y.
            # Returning None (the implicit fall-through) leaves AND(x, y) unevaluated.
            return (x and y)

    def _eval_is_boolean(self):
        # The result of AND is always boolean.
        return True
.parametrize('checkpoint_path', ['Neural-HMM-Male.ckpt', 'Neural-HMM-Female.ckpt'])
def test_loading_checkpoint(checkpoint_path):
    """Pretrained Neural-HMM checkpoints must load into a LightningModule."""
    model = TrainingModule.load_from_checkpoint(checkpoint_path)
    assert isinstance(model, pl.LightningModule)
def copy_image_u8_to_rgba8(src: ti.template(), dst: ti.types.ndarray(), num_components: ti.template(), gray_scale: ti.template()):
    """Pack a u8 image field into a u32 RGBA8 ndarray (one u32 per pixel).

    Missing channels stay 0 and alpha defaults to 255.
    NOTE(review): presumably decorated with ``@ti.kernel`` in the original —
    the parallel ``ti.ndrange`` loop only makes sense inside a kernel; confirm.
    """
    for (i, j) in ti.ndrange(src.shape[0], src.shape[1]):
        # Default pixel: black, fully opaque.
        px = ti.Vector([0, 0, 0, 255], dt=u32)
        if ti.static(gray_scale):
            # Replicate the single gray value into R, G and B.
            px[0] = px[1] = px[2] = ti.cast(src[(i, j)], u32)
        else:
            for k in ti.static(range(num_components)):
                # 3-D fields index channels with a third coordinate; vector
                # fields index them on the element.
                if ti.static((len(src.shape) == 3)):
                    px[k] = ti.cast(src[(i, j, k)], u32)
                else:
                    px[k] = ti.cast(src[(i, j)][k], u32)
        # Little-endian RGBA8 packing: R in the low byte, A in the high byte.
        pack = ((((px[0] << 0) | (px[1] << 8)) | (px[2] << 16)) | (px[3] << 24))
        dst[(i, j)] = pack
class BatchNormStats2d(nn.Module):
    """Tracks running per-channel mean/variance of NCHW activations.

    ``forward`` returns (mean, var) expanded to the input's shape: batch
    statistics in training mode (also updating the running buffers with an
    exponential moving average), stored running statistics in eval mode.
    """

    def __init__(self, num_features, eps=1e-05, decay=0.1):
        super(BatchNormStats2d, self).__init__()
        self.eps = eps  # added to the variance for numerical stability
        self.register_buffer('running_mean', torch.zeros(num_features))
        self.register_buffer('running_var', torch.ones(num_features))
        self.decay = decay  # EMA step size for the running buffers

    def forward(self, x, training):
        """Return (mean, var), each expanded to x's shape (N, C, H, W)."""
        if training:
            # Collapse all dims except channels, then take per-channel stats.
            channels = x.transpose(0, 1).contiguous().view(x.size(1), (- 1))
            (used_mean, used_var) = (channels.mean((- 1)), channels.var((- 1)))
            (curr_mean, curr_var) = (used_mean, used_var)
            self.running_mean = (self.running_mean - (self.decay * (self.running_mean - curr_mean)))
            self.running_var = (self.running_var - (self.decay * (self.running_var - curr_var)))
        else:
            used_mean = self.running_mean
            used_var = self.running_var
        # BUG FIX: the original did `used_var += self.eps`, an in-place add that
        # in eval mode mutated the `running_var` buffer itself on every call.
        # The out-of-place add leaves the buffer untouched.
        used_var = used_var + self.eps
        used_mean = used_mean.view(1, x.size(1), 1, 1).expand_as(x)
        used_var = used_var.view(1, x.size(1), 1, 1).expand_as(x)
        return (used_mean, used_var)
def main():
    """Entry point: parse CLI args, build the GAN LightningModule and data
    module, and run training (resuming from the latest checkpoint if any)."""
    parser = argparse.ArgumentParser()
    # Dataset / model-size options.
    parser.add_argument('--dataset', default='celeba', choices=['celeba', 'lsun'])
    parser.add_argument('--url', default='datasets/celeba/celeba-{000000..000007}.tar')
    parser.add_argument('--img_size', default=128, type=int)
    parser.add_argument('--n_upsamplings', type=int, default=5)
    parser.add_argument('--z_dim', type=int, default=128)
    parser.add_argument('--dim', type=int, default=32)
    # Adversarial loss and gradient-penalty configuration.
    parser.add_argument('--adversarial_loss_mode', default='wgan', choices=['gan', 'hinge_v1', 'hinge_v2', 'lsgan', 'wgan'])
    parser.add_argument('--gradient_penalty_mode', default='0-gp', choices=['none', '1-gp', '0-gp', 'lp'])
    parser.add_argument('--gradient_penalty_sample_mode', default='line', choices=['line', 'real', 'fake', 'dragan'])
    parser.add_argument('--gradient_penalty_weight', type=float, default=10.0)
    parser.add_argument('--gradient_penalty_d_norm', default='layer_norm', choices=['instance_norm', 'layer_norm'])
    parser.add_argument('--n_d', type=int, default=5)
    # FID evaluation options.
    parser.add_argument('--fid_measure_samples', type=int, default=10000)
    parser.add_argument('--fid_ref_data_dir', default='datasets/celeba/fid_eval_10k/')
    parser.add_argument('--inference_batch_size', type=int, default=2000)
    # Optimization / runtime options.
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--num_workers', type=int, default=8)
    parser.add_argument('--epochs', type=int, default=25)
    parser.add_argument('--lr', type=float, default=0.0002)
    parser.add_argument('--sample_every', type=int, default=100)
    parser.add_argument('--random_seed', type=int, default=2021)
    parser.add_argument('--b1', type=float, default=0.5)
    parser.add_argument('--b2', type=float, default=0.999)
    parser.add_argument('--output_dir', default='output/')
    parser.add_argument('--setup_name', default='BASELINE')
    parser.add_argument('--cudnn_benchmark', type=bool, default=True)
    parser.add_argument('--gpus', type=int, default=1)
    args = parser.parse_args()
    # Output layout: <output_dir>/<loss_mode>/<setup_name>/checkpoints.
    os.makedirs(args.output_dir, exist_ok=True)
    os.makedirs(os.path.join(args.output_dir, args.adversarial_loss_mode), exist_ok=True)
    os.makedirs(os.path.join(args.output_dir, args.adversarial_loss_mode, args.setup_name), exist_ok=True)
    save_dir = os.path.join(args.output_dir, args.adversarial_loss_mode, args.setup_name)
    ckpt_dir = os.path.join(save_dir, 'checkpoints')
    os.makedirs(ckpt_dir, exist_ok=True)
    # Resume from the most recently created checkpoint, if any exist.
    if (os.path.exists(ckpt_dir) and (len(os.listdir(ckpt_dir)) > 0)):
        ckpt_files = glob.glob((ckpt_dir + '/*'))
        latest_ckpt = max(ckpt_files, key=os.path.getctime)
        resume_from_checkpoint = latest_ckpt
    else:
        resume_from_checkpoint = None
    pl.seed_everything(args.random_seed)
    # Normalize to [-1, 1] per channel.
    image_transform = transforms.Compose([transforms.Resize(args.img_size), transforms.ToTensor(), transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])])
    if ((args.dataset == 'celeba') or (args.dataset == 'lsun')):
        data_module = CelebADataModule(args.url, args.img_size, image_transform, NUM_TRAINING_FILES[args.dataset], args.batch_size, args.num_workers)
    model = GAN(latent_dim=args.z_dim, dim=args.dim, n_upsamplings=args.n_upsamplings, lr=args.lr, b1=args.b1, b2=args.b2, batch_size=args.batch_size, setup_name=args.setup_name, adversarial_mode=args.adversarial_loss_mode, gradient_penalty_mode=args.gradient_penalty_mode, gradient_penalty_sample_mode=args.gradient_penalty_sample_mode, gradient_penalty_weight=args.gradient_penalty_weight, gradient_penalty_d_norm=args.gradient_penalty_d_norm, n_d=args.n_d, sample_every=args.sample_every, expected_epochs=args.epochs, expected_dataset=args.dataset, fid_measure_samples=args.fid_measure_samples, fid_ref_data_dir=args.fid_ref_data_dir, inference_batch_size=args.inference_batch_size)
    # Track FID: keep the 5 best checkpoints and stop early when it stalls.
    callbacks = [LearningRateMonitor(logging_interval='epoch'), GPUStatsMonitor(), ModelCheckpoint(dirpath=ckpt_dir, filename='{epoch}-{fid10k:.2f}', save_top_k=5, monitor='fid10k'), EarlyStopping(monitor='fid10k', min_delta=0.0, patience=5, verbose=True, mode='min')]
    tb_logger = pl_loggers.TensorBoardLogger(args.output_dir, name=args.adversarial_loss_mode, version=args.setup_name)
    # Single GPU runs without an accelerator wrapper; multi-GPU uses data parallel.
    accelerator = (None if (args.gpus == 1) else 'dp')
    trainer = pl.Trainer(accelerator=accelerator, gpus=args.gpus, max_epochs=args.epochs, progress_bar_refresh_rate=20, deterministic=True, default_root_dir=save_dir, weights_save_path=save_dir, callbacks=callbacks, resume_from_checkpoint=resume_from_checkpoint, benchmark=True, logger=tb_logger)
    # Persist the full CLI configuration next to the run's logs.
    with open(os.path.join(trainer.log_dir, 'config.txt'), 'w+') as f:
        json.dump(args.__dict__, f, indent=2)
    trainer.fit(model, data_module)
def _invert_dict(d):
preimages = {}
for (k, v) in d.items():
preimages[v] = (preimages.get(v, []) + [k])
return preimages |
def create_optimizer(model, cfg, print_fn=None):
    """Build the optimizer named by ``cfg.OPTIMIZER`` ('sgd' or 'adam').

    Raises ValueError for any other optimizer name; progress messages go to
    ``print_fn`` (defaults to ``print``).
    """
    if print_fn is None:
        print_fn = print
    choice = cfg.OPTIMIZER.lower()
    if choice == 'sgd':
        optimizer = torch.optim.SGD(model.parameters(), cfg.INITIAL_LR, cfg.MOMENTUM, weight_decay=cfg.WEIGHT_DECAY, nesterov=cfg.NESTEROV)
        print_fn('Using SGD optimizer with momentum {} and weight decay {}'.format(cfg.MOMENTUM, cfg.WEIGHT_DECAY))
    elif choice == 'adam':
        optimizer = torch.optim.Adam(model.parameters(), cfg.INITIAL_LR, weight_decay=cfg.WEIGHT_DECAY)
        print_fn('Using Adam optimizer with weight decay {}'.format(cfg.WEIGHT_DECAY))
    else:
        raise ValueError("Invalid optimizer choice: '{}'".format(cfg.OPTIMIZER))
    return optimizer
def largest_fundamental_disc_with_class_number(h):
    """Return ``(B, c)`` for class number ``h``: the largest |fundamental
    discriminant| with class number ``h`` and the count of such discriminants.

    Returns ``(0, 0)`` for non-positive ``h``; raises NotImplementedError when
    ``h`` lies outside the precomputed (Watkins) table.
    """
    h = Integer(h)
    if h <= 0:
        # No fundamental discriminant has a non-positive class number.
        return (Integer(0), Integer(0))
    try:
        B, c = watkins_table[h]
    except KeyError:
        raise NotImplementedError(('largest fundamental discriminant not available for class number %s' % h))
    return (Integer(B), Integer(c))
class DenseAnnotationsReader(object):
    """Read-only view over a VisDial dense-annotations JSON list, keyed by image id."""

    def __init__(self, dense_annotations_jsonpath: str):
        with open(dense_annotations_jsonpath, 'r') as visdial_file:
            self._visdial_data = json.load(visdial_file)
        # Parallel list of image ids, used for id -> index lookups.
        self._image_ids = [entry['image_id'] for entry in self._visdial_data]

    def __len__(self):
        return len(self._image_ids)

    def __getitem__(self, image_id: int) -> Dict[(str, Union[(int, List)])]:
        """Return the annotation entry for ``image_id`` (linear scan of the id list)."""
        return self._visdial_data[self._image_ids.index(image_id)]

    def all_data(self):
        """Return the raw parsed annotation list."""
        return self._visdial_data

    def keys(self) -> List[int]:
        """Return every known image id."""
        return self._image_ids

    def split(self):
        # Dense annotations are only provided for the validation split.
        return 'val'
class Configuration():
    """Validate and normalize KLPT options (script, dialect, numeral, ...)
    against the packaged ``default-options.json``."""

    def __init__(self, config_dict):
        # Load the canonical option values shipped with the package.
        with open(klpt.get_data('data/default-options.json'), encoding='utf-8') as options_file:
            self.options = json.load(options_file)
        self.unknown = None
        # For each supported key: validate when present, else install the default.
        for key, validator, attr, default in (
                ('script', self.validate_script, 'script', None),
                ('dialect', self.validate_dialect, 'dialect', None),
                ('numeral', self.validate_numeral, 'numeral', None),
                ('target_script', self.validate_target_script, 'target_script', None),
                ('unknown', self.validate_unknown, 'user_UNKNOWN', '')):
            if key in config_dict:
                validator(config_dict[key])
            else:
                setattr(self, attr, default)

    def normalize_arguments(self, argument):
        """Canonicalize an option value, e.g. 'ARABIC' -> 'Arabic'."""
        return argument.lower().capitalize()

    def validate_script(self, script):
        """Set ``self.script`` or raise ValueError for an unknown script."""
        normalized = self.normalize_arguments(script)
        if normalized in self.options['scripts']:
            self.script = normalized
        else:
            raise ValueError(f"Unknown script. Available options: {self.options['scripts']}")

    def validate_dialect(self, dialect):
        """Set ``self.dialect``, accepting either a dialect name or its value."""
        normalized = self.normalize_arguments(dialect)
        if normalized in self.options['dialects']:
            self.dialect = normalized
        elif normalized in list(self.options['dialects'].values()):
            # Map a dialect value (e.g. a code) back to its canonical name.
            self.dialect = dict(zip(self.options['dialects'].values(), self.options['dialects'].keys()))[normalized]
        else:
            raise ValueError(f"Unknown dialect. Available options: {self.options['dialects']}")

    def validate_numeral(self, numeral):
        """Set ``self.numeral`` or raise ValueError for an unknown numeral."""
        normalized = self.normalize_arguments(numeral)
        if normalized in self.options['numerals']:
            # normalize_arguments is idempotent, so the value is already canonical.
            self.numeral = normalized
        else:
            raise ValueError(f"Unknown numeral. Available options: {self.options['numerals']}")

    def validate_target_script(self, target_script):
        """Set ``self.target_script`` and the transliteration direction (``mode``)."""
        if target_script not in ('Latin', 'Arabic'):
            raise ValueError(f"Unknown transliteration option. Available options: {self.options['transliterator']}")
        self.target_script = self.normalize_arguments(target_script)
        # Direction follows the *target* script.
        self.mode = 'arabic_to_latin' if (target_script == 'Latin') else 'latin_to_arabic'

    def validate_unknown(self, unknown):
        """Set the user's unknown-token placeholder; it must be non-empty."""
        if not len(unknown):
            raise ValueError(f'Unknown unknown tag. Select a non-empty token (e.g. <UNK>.')
        self.user_UNKNOWN = unknown
def register_Ns3TcpOptionSack_methods(root_module, cls):
    """Register constructors and methods of ns3::TcpOptionSack on the pybindgen
    class wrapper. (Generated-style binding code; kept mechanical on purpose.)"""
    cls.add_output_stream_operator()
    # Copy constructor and default constructor.
    cls.add_constructor([param('ns3::TcpOptionSack const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('AddSackBlock', 'void', [param('std::pair< ns3::SequenceNumber< unsigned int, int >, ns3::SequenceNumber< unsigned int, int > >', 's')])
    cls.add_method('ClearSackList', 'void', [])
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_virtual=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    cls.add_method('GetKind', 'uint8_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetNumSackBlocks', 'uint32_t', [], is_const=True)
    cls.add_method('GetSackList', 'ns3::TcpOptionSack::SackList', [], is_const=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True, is_virtual=True)
    return
class losses_saver():
    """Accumulates per-iteration losses, smooths them over a window, and
    periodically saves curves (npy + png plots) under the checkpoints dir."""

    def __init__(self, opt):
        # Fixed loss channel order; positions must match the `losses` argument
        # passed to __call__.
        self.name_list = ['Generator', 'Vgg', 'D_fake', 'D_real', 'LabelMix']
        self.opt = opt
        self.freq_smooth_loss = opt.freq_smooth_loss
        self.freq_save_loss = opt.freq_save_loss
        self.losses = dict()
        # Running sums for the current smoothing window, one per channel.
        self.cur_estimates = np.zeros(len(self.name_list))
        self.path = os.path.join(self.opt.checkpoints_dir, self.opt.name, 'losses')
        self.is_first = True
        os.makedirs(self.path, exist_ok=True)
        for name in self.name_list:
            if opt.continue_train:
                # Restore previously saved curves when resuming training.
                self.losses[name] = np.load((self.path + '/losses.npy'), allow_pickle=True).item()[name]
            else:
                self.losses[name] = list()

    def __call__(self, epoch, losses):
        """Accumulate one step's losses; flush/plot on the configured frequencies.

        A None loss marks its channel inactive for the window (stored as NaN,
        since cur_estimates is a float array).
        """
        for (i, loss) in enumerate(losses):
            if (loss is None):
                self.cur_estimates[i] = None
            else:
                self.cur_estimates[i] += loss.detach().cpu().numpy()
        if ((epoch % self.freq_smooth_loss) == (self.freq_smooth_loss - 1)):
            # End of smoothing window: append window means to the curves.
            for (i, loss) in enumerate(losses):
                if (not (self.cur_estimates[i] is None)):
                    self.losses[self.name_list[i]].append((self.cur_estimates[i] / self.opt.freq_smooth_loss))
                    self.cur_estimates[i] = 0
        if ((epoch % self.freq_save_loss) == (self.freq_save_loss - 1)):
            self.plot_losses()
            np.save(os.path.join(self.opt.checkpoints_dir, self.opt.name, 'losses', 'losses'), self.losses)

    def plot_losses(self):
        """Write one png per loss curve plus a combined plot (first point skipped)."""
        for curve in self.losses:
            (fig, ax) = plt.subplots(1)
            # x-axis in training steps: curve index times the smoothing window.
            n = (np.array(range(len(self.losses[curve]))) * self.opt.freq_smooth_loss)
            plt.plot(n[1:], self.losses[curve][1:])
            plt.ylabel('loss')
            plt.xlabel('epochs')
            plt.savefig(os.path.join(self.opt.checkpoints_dir, self.opt.name, 'losses', ('%s.png' % curve)), dpi=600)
            plt.close(fig)
        (fig, ax) = plt.subplots(1)
        for curve in self.losses:
            # NOTE(review): probes element [0] for NaN (fails on an empty curve)
            # and reuses `n` from the loop above, which assumes all curves have
            # equal length — confirm both invariants hold for all callers.
            if np.isnan(self.losses[curve][0]):
                continue
            plt.plot(n[1:], self.losses[curve][1:], label=curve)
        plt.ylabel('loss')
        plt.xlabel('epochs')
        plt.legend(loc='upper right')
        plt.savefig(os.path.join(self.opt.checkpoints_dir, self.opt.name, 'losses', 'combined.png'), dpi=600)
        plt.close(fig)
def build_datamanager(cfg):
    """Construct the data manager matching ``cfg.data.type``."""
    if cfg.data.type == 'image':
        return ImageDataManager(**imagedata_kwargs(cfg))
    # Anything other than 'image' is treated as video data.
    return torchreid.data.VideoDataManager(**videodata_kwargs(cfg))
def make_optimizer(cfg, model):
    """Build SGD over trainable parameters with per-parameter overrides.

    Biases use BIAS_LR_FACTOR / WEIGHT_DECAY_BIAS; deformable-conv offset
    parameters additionally scale their lr by DCONV_OFFSETS_LR_FACTOR.
    """
    logger = logging.getLogger('atss_core.trainer')
    params = []
    for name, parameter in model.named_parameters():
        if not parameter.requires_grad:
            continue  # frozen parameters are excluded entirely
        lr = cfg.SOLVER.BASE_LR
        weight_decay = cfg.SOLVER.WEIGHT_DECAY
        if 'bias' in name:
            lr = cfg.SOLVER.BASE_LR * cfg.SOLVER.BIAS_LR_FACTOR
            weight_decay = cfg.SOLVER.WEIGHT_DECAY_BIAS
        if name.endswith('.offset.weight') or name.endswith('.offset.bias'):
            logger.info('set lr factor of {} as {}'.format(name, cfg.SOLVER.DCONV_OFFSETS_LR_FACTOR))
            lr *= cfg.SOLVER.DCONV_OFFSETS_LR_FACTOR
        params.append({'params': [parameter], 'lr': lr, 'weight_decay': weight_decay})
    # Quirk preserved from the original: the positional default lr passed to SGD
    # is whatever `lr` held after the final loop iteration; every group sets its
    # own lr anyway.
    optimizer = torch.optim.SGD(params, lr, momentum=cfg.SOLVER.MOMENTUM)
    return optimizer
class AttrFunctor(object):
    """Bundle parallel lists of inputs and attributes with a mapping function.

    ``inputs`` and ``attrs`` must have equal length; ``func`` defaults to the
    identity function.
    """

    def __init__(self, inputs: list = None, attrs: list = None, func=(lambda x: x)):
        # BUG FIX: the original used mutable default arguments ([]), which are
        # shared across all instances created with the defaults; use None
        # sentinels and build a fresh list per instance instead.
        inputs = [] if inputs is None else inputs
        attrs = [] if attrs is None else attrs
        assert (len(inputs) == len(attrs))
        self.inputs = inputs
        self.attrs = attrs
        self.func = func
def create_evaluate_result_table(datasource, result_table, metrics):
    """(Re)create a table holding evaluation results: a ``loss`` column plus one
    STRING column per metric.

    NOTE(review): the table/column names are interpolated directly into the SQL
    (identifiers cannot be bound parameters) — assumed to come from trusted
    configuration; the connection is not explicitly closed here, matching the
    surrounding codebase's usage.
    """
    table_ops.drop_tables([result_table], datasource)
    column_names = ['loss']
    if isinstance(metrics, list):
        column_names.extend(metrics)
    columns = ','.join(('%s STRING' % name) for name in column_names)
    sql = ('CREATE TABLE IF NOT EXISTS %s (%s);' % (result_table, columns))
    conn = db.connect_with_data_source(datasource)
    conn.execute(sql)
def graph_transform_ps(single_gpu_meta_graph_def, worker_id, config, op_library_path=None):
    """Transform a single-GPU TF meta-graph into a parameter-server training
    graph for this worker: first replicate in-graph across local GPUs, then
    distribute between-graph across the cluster.

    Returns (ps_meta_graph_def, exported tensor/op-name replica mapping).
    """
    cluster_info = config.resource_info
    # replicate_variables caches PS variables locally, which is only coherent
    # with synchronous training.
    if (config.communication_config.ps_config.replicate_variables and (not config.sync)):
        raise ValueError('replicate_variables is only possible with sync')
    # Without dedicated PS processes, host parameters on worker 0's CPU.
    ps_device = ('/job:ps' if ('ps' in cluster_info) else '/job:worker/cpu:0')
    cluster_spec = get_tf_clusterspec(cluster_info)
    worker = cluster_info['worker'][worker_id]
    num_gpus = len(worker['gpus'])
    parallax_log.debug(('Starting graph transformation for PS for worker %d' % worker_id))
    # Track how original tensor/op names map to their per-replica copies.
    tensor_or_op_name_to_replica_names = TensorOrOpNameToReplicaNames(single_gpu_meta_graph_def.meta_info_def.stripped_op_list)
    # Stage 1: replicate the graph across this worker's GPUs (in-graph).
    multi_gpu_meta_graph_def = in_graph_auto_parallel_compute(single_gpu_meta_graph_def, num_gpus, config=config, op_library_path=op_library_path, tensor_or_op_name_to_replica_names=tensor_or_op_name_to_replica_names)
    # Stage 2: place variables on the PS and wire this worker into the cluster.
    ps_meta_graph_def = between_graph_auto_parallel_compute(multi_gpu_meta_graph_def, worker_id=worker_id, ps_device=ps_device, worker_device=('/job:worker/task:%d' % worker_id), merge_devices=True, cluster_spec=cluster_spec, config=config, op_library_path=op_library_path, num_replicas_per_worker=num_gpus, tensor_or_op_name_to_replica_names=tensor_or_op_name_to_replica_names)
    parallax_log.debug(('Finished graph transformation for PS for worker %d' % worker_id))
    return (ps_meta_graph_def, tensor_or_op_name_to_replica_names.export())
def convert_speaker_meta_keys(speaker_meta):
    """Rename speaker-metadata keys via SPEAKER_META_MAP; unmapped keys pass through."""
    renamed = {}
    for key, value in speaker_meta.items():
        renamed[SPEAKER_META_MAP.get(key, key)] = value
    return renamed
@pytest.mark.parametrize('symbol, typ, result', [(sym, dict, dict[(Any, int)]) for sym in InferredSignature._DICT_VALUE_FROM_ARGUMENT_TYPES])
def test_guess_generic_types_dict_value_from_arguments(inferred_signature, symbol, typ, result):
    """Dict value type should be guessed from argument usage traces.

    The bare `.parametrize(...)` line was a decorator whose leading
    `@pytest.mark` prefix was lost (a syntax error as written); restored here.
    """
    config.configuration.test_creation.negate_type = 0.0
    knowledge = UsageTraceNode('ROOT')
    # Record that argument position 1 of `symbol` was called with an int.
    knowledge.children[symbol].arg_types[1].add(int)
    with mock.patch('pynguin.utils.randomness.choice') as choice_mock:
        choice_mock.side_effect = (lambda x: x[0])
        assert (inferred_signature._guess_generic_type_parameters_for_builtins(inferred_signature.type_system.convert_type_hint(typ), knowledge, 0) == inferred_signature.type_system.convert_type_hint(result))
def register_dataset(dataset_data: CocoDatasetInfo, datasets_root: Optional[str]=None):
    """Register a COCO-format DensePose dataset with the Detectron2 catalogs.

    Args:
        dataset_data: dataset name plus annotation/image paths.
        datasets_root: optional base directory prepended to both paths.
    """
    annotations_fpath = maybe_prepend_base_path(datasets_root, dataset_data.annotations_fpath)
    images_root = maybe_prepend_base_path(datasets_root, dataset_data.images_root)

    def load_annotations():
        # Deferred loader: DatasetCatalog invokes this lazily on first access.
        return load_coco_json(annotations_json_file=annotations_fpath, image_root=images_root, dataset_name=dataset_data.name)
    DatasetCatalog.register(dataset_data.name, load_annotations)
    # Attach paths plus the shared DensePose metadata fetched from the URL prefix.
    MetadataCatalog.get(dataset_data.name).set(json_file=annotations_fpath, image_root=images_root, **get_metadata(DENSEPOSE_METADATA_URL_PREFIX))
class Encoder(nn.Module):
def __init__(self, input_size, embedding_size, hidden_size, num_layers, p):
super(Encoder, self).__init__()
self.dropout = nn.Dropout(p)
self.hidden_size = hidden_size
self.num_layers = num_layers
self.embedding = nn.Embedding(input_size, embedding_size)
self.rnn = nn.LSTM(embedding_size, hidden_size, num_layers, dropout=p)
def forward(self, x):
embedding = self.dropout(torch.relu(self.embedding(x)))
(outputs, (hidden, cell)) = self.rnn(embedding)
return (hidden, cell) |
class BarthezTokenizer():
    """Placeholder for the real BarthezTokenizer when sentencepiece is unavailable.

    Any use routes through `requires_sentencepiece`, which presumably raises
    an informative error about the missing backend.
    """

    def __init__(self, *args, **kwargs):
        requires_sentencepiece(self)

    def from_pretrained(self, *args, **kwargs):
        # NOTE(review): in the usual dummy-object pattern this is a
        # @classmethod; here it is an instance method -- confirm upstream.
        requires_sentencepiece(self)
@_utils.test(require=ti.extension.sparse, exclude=[ti.opengl, ti.gles, ti.vulkan, ti.metal])
def test_dense_dynamic():
    """dense x dynamic SNode: append 2*j into row i for j < i, then read back.

    The first line was a decorator whose leading '@' was stripped (a bare
    expression as written); restored. `append` uses ti.append/x.parent(),
    which are only valid inside a Taichi kernel, so @ti.kernel is restored too.
    """
    n = 128
    x = ti.field(ti.i32)
    ti.root.dense(ti.i, n).dynamic(ti.j, n, 128).place(x)

    @ti.kernel
    def append():
        for i in range(n):
            for j in range(i):
                ti.append(x.parent(), i, (j * 2))
    append()
    for i in range(n):
        for j in range(i):
            assert (x[(i, j)] == (j * 2))
def test_aws_singlepart_zero_bytes():
    """A single-part transfer of a zero-byte file on AWS us-east-1 must succeed."""
    bucket_name = f'test-skyplane-{uuid.uuid4()}'
    assert interface_test_framework('aws:us-east-1', bucket_name, False, test_delete_bucket=True, file_size_mb=0)
def add_sssp_edges_for_op(ssspG, vars, op, index, in_vars, out_vars, binding, split_idx, prev_split_idx=None):
    """Add one weighted edge per valid layout configuration of `op` to the SSSP graph.

    Nodes are (f'{split}_{position}', frozen layout config) pairs; each edge
    weight is the op's best measured time for that configuration.

    Args:
        ssspG: graph being built for shortest-path search over layouts.
        vars: layout helper (get_valid_unique_layouts, set_var_binding, ...).
        op: operator whose configurations become edges.
        index: position of the op within the current split.
        in_vars / out_vars: variable-name sets consumed / produced by the op.
        binding: current (partial) variable-to-layout binding.
        split_idx: split id for destination nodes.
        prev_split_idx: split id for source nodes; defaults to split_idx.

    Raises:
        RuntimeError: a non-initial source node is missing from the graph.
    """
    prev_split_idx = (prev_split_idx or split_idx)
    layouts = vars.get_valid_unique_layouts(op.name, (tuple(in_vars) + tuple(out_vars)), binding=freeze_dict(binding))
    num_layouts = layout_len(layouts)
    print(f'{op.name}: Input vars: {in_vars} | output vars {out_vars} | {num_layouts} layouts')
    for cfg_idx in range(num_layouts):
        # Bind every involved variable to its layout for this configuration.
        cfg_binding = dict(binding)
        for var in (in_vars | out_vars):
            cfg_binding = vars.set_var_binding(cfg_binding, var, layouts[var][cfg_idx])
        cfg = vars.get_op_config_from_binding(op.name, cfg_binding)
        in_cfg = freeze_dict({var: cfg[var] for var in in_vars})
        in_node = (f'{prev_split_idx}_{index}', in_cfg)
        # The source node must already exist unless this is the first op (index 0).
        if ((in_node not in ssspG.nodes) and (index > 0)):
            raise RuntimeError(f'{op.name} trying to add edge but source {in_node} does not exist!')
        out_cfg = freeze_dict({var: cfg[var] for var in out_vars})
        out_node = (f'{split_idx}_{(index + 1)}', out_cfg)
        weight = op.get_min_config(cfg).time
        ssspG.add_edge(in_node, out_node, weight=weight, cfg=cfg, op=op)
def adam_init(optimizer):
    """Eagerly populate Adam state (step counter and both moment buffers).

    Only parameters whose state is still empty are initialized, so calling
    this on a partially-stepped optimizer leaves existing state untouched.
    """
    for group in optimizer.param_groups:
        for param in group['params']:
            state = optimizer.state[param]
            if len(state) != 0:
                continue
            state['exp_avg'] = torch.zeros_like(param.data, memory_format=torch.preserve_format)
            state['exp_avg_sq'] = torch.zeros_like(param.data, memory_format=torch.preserve_format)
            state['step'] = 0
class ToyText(nn.Module):
    """Minimal text head: ReLU followed by a 512 -> hidden_size linear projection."""

    def __init__(self, hidden_size):
        super(ToyText, self).__init__()
        self.relu = nn.ReLU()
        self.fc = nn.Linear(512, hidden_size)

    def forward(self, text):
        """Apply ReLU then the linear projection."""
        return self.fc(self.relu(text))
def depth_for_vis(depth, valid_start=0.2, valid_end=1.0):
    """Normalize positive depth values into [valid_start, valid_end] for display.

    Invalid (<= 0) pixels stay at their original value; the input array is
    not modified (astype returns a copy).
    """
    mask = (depth > 0)
    # np.float was removed in NumPy 1.24; use the explicit 64-bit dtype.
    depth_n = depth.astype(np.float64)
    depth_n[mask] -= depth_n[mask].min()
    depth_n[mask] /= (depth_n[mask].max() / (valid_end - valid_start))
    depth_n[mask] += valid_start
    return depth_n
def statistics_avg_duration(all_data, data, dialogue_id='Dialogue_ID', StartTime='StartTime', EndTime='EndTime'):
    """Mean of per-dialogue average utterance durations.

    Dialogue ids come from `data`; start/end times are looked up in
    `all_data` and parsed as integer timestamps.
    """
    per_dialogue_means = []
    for dial in set(data[dialogue_id]):
        rows = all_data[all_data[dialogue_id] == dial]
        starts = [int(s) for s in rows[StartTime].tolist()]
        ends = [int(e) for e in rows[EndTime].tolist()]
        durations = [(e - s) for (s, e) in zip(starts, ends)]
        per_dialogue_means.append(sum(durations) / len(durations))
    return {'avg': (sum(per_dialogue_means) / len(per_dialogue_means))}
def test__setup_report_dir_not_required(tmp_path: Path):
    """The report dir must not be created when no coverage report or statistics backend needs it."""
    report_path = tmp_path / 'foo' / 'bar'
    config.configuration.statistics_output.report_dir = report_path.absolute()
    config.configuration.statistics_output.create_coverage_report = False
    config.configuration.statistics_output.statistics_backend = config.StatisticsBackend.NONE
    assert gen._setup_report_dir()
    assert not report_path.exists()
@pytest.mark.entry
def test_train_lm():
    """Smoke-test train_lm.main: two steps of a tiny GPT-2 on a tiny corpus.

    The bare `.entry` line was a decorator that lost its `@pytest.mark`
    prefix (a syntax error as written); restored here.
    """
    with tempfile.TemporaryDirectory() as tmpdir:
        data_config = tiny_test_corpus.tiny_corpus_config(tmpdir)
        try:
            config = train_lm.TrainLmConfig(data=data_config, model=train_lm.Gpt2Config(num_layers=2, num_heads=2, seq_len=32, hidden_dim=32), trainer=train_lm.TrainerConfig(num_train_steps=2, train_batch_size=len(jax.devices()), max_eval_batches=1, wandb=WandbConfig(mode='disabled'), require_accelerator=False, ray=RayConfig(auto_start_cluster=False)))
            train_lm.main(config)
        finally:
            # Best-effort cleanup of the local wandb artifact; ignore failures.
            try:
                os.unlink('wandb')
            except Exception:
                pass
def worker_function(local_rank, world_size):
    """Fake a single-node OpenMPI environment, then init a torch MPI process group."""
    print('-I- my local_rank is', local_rank)
    import os
    # Environment OpenMPI would normally provide for this rank.
    mpi_env = {
        'OMPI_COMM_WORLD_SIZE': str(world_size),
        'OMPI_COMM_WORLD_RANK': str(local_rank),
        'OMPI_COMM_WORLD_LOCAL_RANK': str(local_rank),
        'OMPI_UNIVERSE_SIZE': str(world_size),
        'OMPI_COMM_WORLD_LOCAL_SIZE': str(world_size),
        'OMPI_COMM_WORLD_NODE_RANK': str(1),
    }
    os.environ.update(mpi_env)
    import torch.distributed as dist
    # Rendezvous settings consumed by init_process_group.
    current_env = os.environ
    current_env['MASTER_ADDR'] = '127.0.0.1'
    current_env['MASTER_PORT'] = str(29500)
    current_env['WORLD_SIZE'] = str(world_size)
    current_env['RANK'] = str(local_rank)
    dist.init_process_group(backend='mpi', world_size=world_size)
    print(dist.get_world_size())
@pytest.mark.parametrize('condition, cls', [pytest.param('maximum_test_executions', MaxTestExecutionsStoppingCondition), pytest.param('maximum_statement_executions', MaxStatementExecutionsStoppingCondition), pytest.param('maximum_search_time', MaxSearchTimeStoppingCondition), pytest.param('maximum_iterations', MaxIterationsStoppingCondition), pytest.param('maximum_coverage', MaxCoverageStoppingCondition), pytest.param('maximum_coverage_plateau', CoveragePlateauStoppingCondition)])
def test_stopping_condition(condition, cls, algorithm_factory):
    """Each configured stopping option must yield its matching condition class.

    The bare `.parametrize(...)` line was a decorator missing its
    `@pytest.mark` prefix (a syntax error as written); restored here.
    """
    setattr(config.configuration.stopping, condition, 5)
    strategy = algorithm_factory.get_search_algorithm()
    assert isinstance(strategy.stopping_conditions[0], cls)
def get_json_path(data_dir: str, data_type: str, split: str='1.0') -> str:
    """Build the path of a VisDial annotation file, e.g. `<dir>/visdial_1.0_train.json`."""
    filename = f'visdial_{split}_{data_type}.json'
    return f'{data_dir}/{filename}'
def IsOperatorWithEngine(op_type, engine):
    """Return True if `op_type` is registered for the given engine.

    Triggers the lazy C-extension import first so the operator registry is
    populated before the lookup.
    """
    TriggerLazyImport()
    return (C.op_registry_key(op_type, engine) in _REGISTERED_OPERATORS)
def softmax_kernel(data, *, projection_matrix, is_query, softmax_temp=None, eps=0.0001):
    """Positive random-feature map (FAVOR+-style) approximating softmax attention.

    Args:
        data: (batch, heads, seq, dim) queries or keys (shape from the unpack below).
        projection_matrix: (num_features, dim) shared random projection.
        is_query: queries subtract a per-position max before exponentiating;
            keys subtract one global max so all key features stay comparable.
        softmax_temp: temperature; defaults to 1/sqrt(dim).
        eps: additive constant keeping the features strictly positive.

    Returns:
        Features of shape (batch, heads, seq, num_features) in data's dtype.
    """
    (b, h, _, d) = data.shape
    if (softmax_temp is None):
        softmax_temp = (1 / math.sqrt(d))
    # sqrt of the temperature lets it be folded into `data` once before the einsum.
    data_normalizer = math.sqrt(softmax_temp)
    ratio = (projection_matrix.shape[0] ** (- 0.5))  # 1/sqrt(num_features)
    # Broadcast the shared projection over batch and heads.
    projection = repeat(projection_matrix, 'j d -> b h j d', b=b, h=h)
    projection = projection.type_as(data)
    data_dash = torch.einsum('...id,...jd->...ij', (data_normalizer * data), projection)
    # ||x||^2 / 2 term of exp(w.x - ||x||^2/2), with the temperature applied.
    diag_data = (data ** 2)
    diag_data = torch.sum(diag_data, dim=(- 1))
    diag_data = ((diag_data / 2.0) * (data_normalizer ** 2))
    diag_data = diag_data.unsqueeze(dim=(- 1))
    if is_query:
        # Per-query max subtraction for numerical stability.
        data_dash = (ratio * (torch.exp(((data_dash - diag_data) - torch.max(data_dash, dim=(- 1), keepdim=True).values)) + eps))
    else:
        # Single global max across all keys.
        data_dash = (ratio * (torch.exp(((data_dash - diag_data) - torch.max(data_dash))) + eps))
    return data_dash.type_as(data)
def maybe_find_symengine_wrapper(build_dir: Path, ext_filename: str) -> T.Optional[Path]:
    """Locate the built symengine extension `ext_filename` under `build_dir`.

    Returns the single glob match, or None when nothing was built; raises
    when the glob is ambiguous (more than one candidate).
    """
    pattern = (
        f'symengine_install/**/lib/python{sys.version_info.major}.{sys.version_info.minor}'
        f'/*-packages/symengine/lib/{ext_filename}'
    )
    candidates = list(build_dir.glob(pattern))
    if len(candidates) > 1:
        raise FileNotFoundError(f'Expected to find exactly one symengine_wrapper.so, but found {len(candidates)}: {candidates}')
    return next(iter(candidates), None)
def build_scheduler(config, optimizer, n_iter_per_epoch):
    """Create the LR scheduler named by config.TRAIN.LR_SCHEDULER.NAME.

    Epoch counts from the config are converted to iteration counts so the
    schedulers run per-step (t_in_epochs=False).

    Returns:
        A cosine/linear/step scheduler, or None for an unknown name.
    """
    num_steps = int((config.TRAIN.EPOCHS * n_iter_per_epoch))
    warmup_steps = int((config.TRAIN.WARMUP_EPOCHS * n_iter_per_epoch))
    decay_steps = int((config.TRAIN.LR_SCHEDULER.DECAY_EPOCHS * n_iter_per_epoch))
    lr_scheduler = None
    if (config.TRAIN.LR_SCHEDULER.NAME == 'cosine'):
        # Single cosine cycle over the whole run (cycle_limit=1).
        lr_scheduler = CosineLRScheduler(optimizer, t_initial=num_steps, t_mul=1.0, lr_min=config.TRAIN.MIN_LR, warmup_lr_init=config.TRAIN.WARMUP_LR, warmup_t=warmup_steps, cycle_limit=1, t_in_epochs=False)
    elif (config.TRAIN.LR_SCHEDULER.NAME == 'linear'):
        # Linear decay down to 1% of the base LR.
        lr_scheduler = LinearLRScheduler(optimizer, t_initial=num_steps, lr_min_rate=0.01, warmup_lr_init=config.TRAIN.WARMUP_LR, warmup_t=warmup_steps, t_in_epochs=False)
    elif (config.TRAIN.LR_SCHEDULER.NAME == 'step'):
        # Step decay every decay_steps iterations by DECAY_RATE.
        lr_scheduler = StepLRScheduler(optimizer, decay_t=decay_steps, decay_rate=config.TRAIN.LR_SCHEDULER.DECAY_RATE, warmup_lr_init=config.TRAIN.WARMUP_LR, warmup_t=warmup_steps, t_in_epochs=False)
    return lr_scheduler
def DM_51_6_1():
    """Construct the matrix for (what the name indicates is) a (51, 6; 1) difference matrix over Z/51Z.

    Starting from a first all-zero row, each column R of the 5x5 seed M
    contributes five cyclic shifts, each appended together with its
    negation; finally every row gains a trailing zero (the sixth column).

    Returns:
        (G, Mb): the additive group Z/51Z and the list of rows.
    """
    from sage.rings.finite_rings.integer_mod_ring import IntegerModRing as AdditiveCyclic
    G = AdditiveCyclic(51)
    M = [[5, 33, 29, 30, 1], [8, 3, 47, 10, 13], [14, 27, 6, 12, 28], [9, 16, 44, 49, 11], [34, 32, 36, 26, 20]]
    Mb = [[0, 0, 0, 0, 0]]
    for R in zip(*M):
        for i in range(5):  # five cyclic shifts of each seed column
            for RR in [list(R), [(- x) for x in R]]:
                Mb.append(RR)
            R = cyclic_shift(R, 1)
    for R in Mb:
        R.append(0)  # extend every row by a zero entry
    return (G, Mb)
def aggregate_ids_with_embeddings(q_ids_w_emb: dict, aggregation_mode: str):
    """Dispatch to the embedding-aggregation strategy named by `aggregation_mode`.

    Returns the aggregated embeddings, or None (after printing a message)
    for an unknown mode.
    """
    # NOTE(review): 'vranks' and 'vscores' both map to aggregate_emb_scores,
    # exactly as in the original code -- confirm 'vranks' is intentional.
    dispatch = {
        'avg': aggregate_emb_avg,
        'sum': aggregate_emb_sum,
        'max': aggregate_emb_max,
        'min': aggregate_emb_min,
        'vrrf': aggregate_emb_vrrf,
        'vscores': aggregate_emb_scores,
        'vranks': aggregate_emb_scores,
    }
    aggregator = dispatch.get(aggregation_mode)
    if aggregator is None:
        print('No valid aggregation mode entered')
        return None
    return aggregator(q_ids_w_emb)
def load_entity_vocab(data_dir, ignore_bad_title=True, min_ent_count=1):
    """Read `entity_vocab.txt` (tab-separated) from `data_dir` into an index-keyed dict.

    Lines with an empty title (when ignore_bad_title is set) or with a count
    below min_ent_count are skipped; both skip counts are reported at the end.
    """
    vocab_path = os.path.join(data_dir, 'entity_vocab.txt')
    entity_vocab = {}
    bad_title = 0
    few_entity = 0
    with open(vocab_path, 'r', encoding='utf-8') as f:
        for line in f:
            (_, entity_id, entity_title, entity_mid, count) = line.strip().split('\t')
            if (ignore_bad_title and (entity_title == '')):
                bad_title += 1
                continue
            if (int(count) < min_ent_count):
                few_entity += 1
                continue
            entity_vocab[len(entity_vocab)] = {'wiki_id': int(entity_id), 'wiki_title': entity_title, 'mid': entity_mid, 'count': count}
    print(('total number of entity: %d\nremove because of empty title: %d\nremove because count<%d: %d' % (len(entity_vocab), bad_title, min_ent_count, few_entity)))
    return entity_vocab
def findNearbyBroker():
    """Query the discovery service for the IoTBroker nearest to this profile's location.

    Reads coordinates and the discovery URL from the module-level `profile`,
    and rebinds the module-level `discoveryURL` as a side effect.

    Returns:
        The provider's URL, or '' when discovery fails or names no provider.
    """
    global profile, discoveryURL
    # Restriction scope: nearest single broker to the configured coordinates.
    nearby = {}
    nearby['latitude'] = profile['location']['latitude']
    nearby['longitude'] = profile['location']['longitude']
    nearby['limit'] = 1
    discoveryReq = {}
    discoveryReq['entities'] = [{'type': 'IoTBroker', 'isPattern': True}]
    discoveryReq['restriction'] = {'scopes': [{'scopeType': 'nearby', 'scopeValue': nearby}]}
    discoveryURL = profile['discoveryURL']
    headers = {'Accept': 'application/json', 'Content-Type': 'application/json'}
    response = requests.post((discoveryURL + '/discoverContextAvailability'), data=json.dumps(discoveryReq), headers=headers)
    if (response.status_code != 200):
        print('failed to find a nearby IoT Broker')
        return ''
    print(response.text)
    registrations = json.loads(response.text)
    # Return the first registration that actually names a providing application.
    for registration in registrations['contextRegistrationResponses']:
        providerURL = registration['contextRegistration']['providingApplication']
        if (providerURL != ''):
            return providerURL
    return ''
# NOTE(review): the two lines below look like CLI decorators whose leading
# '@<module>.' prefixes were lost (as written they are bare expression
# statements, and '()' is a syntax no-op) -- restore against upstream.
()
_option(__version__, '--version', '-v', package_name='showyourwork', message='%(version)s')
def main():
    """CLI entry point; the body is empty (behavior comes from the decorators)."""
    pass
def _variable_with_weight_decay(name, shape, stddev, wd):
    """Create a CPU-resident variable with truncated-normal init and optional L2 decay.

    When `wd` is truthy, wd * l2_loss(var) is added to the 'losses' collection.
    Note: wd=0 (or None) skips the decay term entirely.
    """
    var = _variable_on_cpu(name, shape, tf.truncated_normal_initializer(stddev=stddev))
    if wd:
        # tf.mul was removed in TensorFlow 1.0; tf.multiply is the replacement.
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var
def load_waveglow(waveglow_path):
    """Load a WaveGlow checkpoint onto the GPU in half precision, plus a matched Denoiser.

    Requires CUDA; the model is put in eval mode and cast to fp16, while the
    convinv layers are cast back to float32.
    """
    # NOTE(review): torch.load without map_location/weights_only -- assumes a
    # trusted checkpoint saved with GPU-compatible tensors.
    waveglow = torch.load(waveglow_path)['model']
    waveglow = waveglow.cuda().eval().half()
    for k in waveglow.convinv:
        k.float()  # invertible 1x1 convolutions stay in fp32
    denoiser = Denoiser(waveglow)
    return (waveglow, denoiser)
def extract_result_build(conf):
    """Parse Vivado/xocc build artifacts for `conf` and attach a Consumption record.

    Walks build_dir/<conf>/_x/link/vivado/..., with three early exits
    (no intermediate dir, no implementation run, no utilization report)
    before scraping LUT/REG/BRAM/DSP usage and the achieved clock.
    """
    buildFolder = os.path.join(PROJECT_CONFIG['build_dir'], conf.build_folder())
    xoccFolder = '_x'
    if (not os.path.exists(os.path.join(buildFolder, xoccFolder))):
        conf.consumption = Consumption(conf, 'no_intermediate', None, None, None, None, None)
        return
    kernelFolder = os.path.join(buildFolder, xoccFolder, 'link', 'vivado')
    implFolder = os.path.join(kernelFolder, 'prj', 'prj.runs', 'impl_1')
    if (not os.path.exists(implFolder)):
        conf.consumption = Consumption(conf, 'no_build', None, None, None, None, None)
        return
    status = check_build_status(conf)
    reportPath = os.path.join(implFolder, 'kernel_util_routed.rpt')
    if (not os.path.isfile(reportPath)):
        conf.consumption = Consumption(conf, status, None, None, None, None, None)
        return
    report = open(reportPath).read()
    # Capture groups: (1) LUTs, (2) registers, (3) BRAM, (4) DSPs from the
    # routed utilization table.
    pattern = 'Used Resources\\s*\\|\\s*(\\d+)[^\\|]+\\|\\s*\\d+[^\\|]+\\|\\s*(\\d+)[^\\|]+\\|\\s*(\\d+)[^\\|]+\\|\\s*\\d+[^\\|]+\\|\\s*(\\d+)[^\\|]+\\|'
    matches = re.search(pattern, report)
    # NOTE(review): assumes the table is present whenever the report exists;
    # a report-format change would raise AttributeError on the next line.
    luts = matches.group(1)
    regs = matches.group(2)
    bram = matches.group(3)
    dsp = matches.group(4)
    try:
        # Vivado may lower the clock; prefer the value from its warning file.
        with open(os.path.join(kernelFolder, 'vivado_warning.txt'), 'r') as clockFile:
            warningText = clockFile.read()
            m = re.search('automatically changed to ([0-9]+) MHz', warningText)
            if m:
                clock = int(m.group(1))
            else:
                clock = conf.frequency
    except FileNotFoundError:
        clock = conf.frequency
    conf.consumption = Consumption(conf, status, luts, regs, dsp, bram, clock)
def l2_regularization_loss(variables, weight_decay):
    """Sum of per-variable L2 losses, scaled by `weight_decay`."""
    per_variable_losses = [tf.nn.l2_loss(v) for v in variables]
    return (weight_decay * tf.add_n(per_variable_losses))
class Evaluator():
    """Submitit-compatible callable that evaluates a trainer's checkpoints.

    `checkpoint` lets submitit requeue the job (e.g. on preemption) while
    carrying the evaluation progress forward.
    """

    def __call__(self, args: EvaluatorArgs, prev_ckpt_ind=(- 1), num_frames=0):
        logger.info('CUDA_VISIBLE_DEVICES: {}'.format(os.environ['CUDA_VISIBLE_DEVICES']))
        logger.info('Hostname: {}'.format(socket.gethostname()))
        config = get_config(args.exp_config, args.opts)
        # Seed python and numpy from the task config for reproducible eval.
        random.seed(config.TASK_CONFIG.SEED)
        np.random.seed(config.TASK_CONFIG.SEED)
        trainer_init = baseline_registry.get_trainer(config.TRAINER_NAME)
        assert (trainer_init is not None), f'{config.TRAINER_NAME} is not supported'
        self.trainer = trainer_init(config)
        # Restore bookkeeping so a requeued job resumes where it left off.
        self.trainer.prev_ckpt_ind = prev_ckpt_ind
        self.trainer.num_frames = num_frames
        self.trainer.eval()

    def checkpoint(self, args, prev_ckpt_ind=(- 1), num_frames=0):
        # Requeue with a fresh Evaluator but this trainer's current progress.
        return submitit.helpers.DelayedSubmission(Evaluator(), args, self.trainer.prev_ckpt_ind, self.trainer.num_frames)
def to_grayscale(image, keep_channels=True):
    """Convert an RGB image batch to grayscale, optionally re-tiled to 3 channels."""
    gray = tf.image.rgb_to_grayscale(image)
    if not keep_channels:
        return gray
    return tf.tile(gray, [1, 1, 1, 3])
def resnet34(**kwargs):
    """Pre-activation ResNet-34: BasicBlock with a [3, 4, 6, 3] stage layout."""
    return PreActivationResNet(PreActivationBasicBlock, [3, 4, 6, 3], **kwargs)
from dataclasses import dataclass  # needed by the restored @dataclass below


@dataclass
class Run():
    """A trajectory of states sampled at the given times.

    Indexing with a time returns the state whose timestamp is nearest.
    The @dataclass decorator is restored here: the presence of
    __post_init__ and bare field annotations shows the class was written
    as a dataclass, but the decorator was lost (leaving Run without an
    __init__ at all). The '\\neq' in the error message (a literal newline
    plus 'eq') is fixed to '!='.
    """
    states: list
    times: list

    def __post_init__(self):
        # Validate the two parallel lists right after dataclass __init__.
        if (len(self.states) != len(self.times)):
            msg = 'Input states and times must be the same length! {} != {}'
            raise ValueError(msg.format(len(self.states), len(self.times)))

    def __getitem__(self, time):
        """Return the state whose timestamp is closest to `time`."""
        time_index = np.argmin(np.abs((np.array(self.times) - time)))
        return self.states[time_index]

    def initial_state(self):
        """State at the first timestamp."""
        return self.states[0]

    def initial_time(self):
        """First timestamp."""
        return self.times[0]

    def final_state(self):
        """State at the last timestamp."""
        return self.states[(- 1)]

    def final_time(self):
        """Last timestamp."""
        return self.times[(- 1)]
def make_sent_dataset():
    """Build the vocabulary and GloVe embedding pickles for the sentence dataset."""
    src_path = './para-train.txt'
    trg_path = './tgt-train.txt'
    glove_path = './glove.840B.300d.txt'
    embedding_pkl = './embedding.pkl'
    word2idx_pkl = './word2idx.pkl'
    vocab = make_vocab(src_path, trg_path, word2idx_pkl, config.vocab_size)
    make_embedding(glove_path, embedding_pkl, vocab)
def test_node2vec_apply():
    """Node2Vec lookups must return the stored embeddings for given node ids.

    With every weight forced to one, each 4-d embedding lookup is a vector
    of ones -- checked via a direct model and via the paired (in, out)
    tensors from in_out_tensors().
    """
    node2vec = Node2Vec(emb_size=4, node_num=4, multiplicity=2)
    x = np.array([[1]])
    expected = np.array([[1, 1, 1, 1]])
    inp = keras.Input(shape=(1,))
    out = node2vec(inp, 'target')
    model1 = keras.Model(inputs=inp, outputs=out)
    # Overwrite all weights with ones so the expected lookup value is known.
    model_weights1 = [np.ones_like(w) for w in model1.get_weights()]
    model1.set_weights(model_weights1)
    actual = model1.predict(x)
    assert (expected == pytest.approx(actual))
    # Same check through the (input, output) tensor pair for two node ids.
    x1 = np.array([[0]])
    x2 = np.array([[2]])
    y1 = np.array([[1, 1, 1, 1]])
    y2 = np.array([[1, 1, 1, 1]])
    (xinp, xout) = node2vec.in_out_tensors()
    model2 = keras.Model(inputs=xinp, outputs=xout)
    model_weights2 = [np.ones_like(w) for w in model2.get_weights()]
    model2.set_weights(model_weights2)
    actual = model2.predict([x1, x2])
    assert (pytest.approx(y1) == actual[0])
    assert (pytest.approx(y2) == actual[1])
def default_collate(batch):
    """Collate a list of samples into a batch (mirrors torch's default_collate).

    Tensors are stacked along a new dim 0 (into shared memory inside
    DataLoader workers); numpy arrays are converted and recursed; numbers
    become tensors; strings pass through; mappings, namedtuples and
    sequences are collated element-wise recursively.

    Raises:
        TypeError: unsupported element types (incl. string/object ndarrays).
        RuntimeError: ragged sequences of unequal length.
    """
    elem = batch[0]
    elem_type = type(elem)
    if isinstance(elem, torch.Tensor):
        out = None
        if (torch.utils.data.get_worker_info() is not None):
            # Inside a worker process: stack directly into a shared-memory
            # storage to avoid an extra copy when returning to the main process.
            numel = sum((x.numel() for x in batch))
            storage = elem.storage()._new_shared(numel)
            out = elem.new(storage)
        return torch.stack(batch, 0, out=out)
    elif ((elem_type.__module__ == 'numpy') and (elem_type.__name__ != 'str_') and (elem_type.__name__ != 'string_')):
        if ((elem_type.__name__ == 'ndarray') or (elem_type.__name__ == 'memmap')):
            # Arrays of strings/objects cannot become tensors.
            if (np_str_obj_array_pattern.search(elem.dtype.str) is not None):
                raise TypeError(default_collate_err_msg_format.format(elem.dtype))
            return default_collate([torch.as_tensor(b) for b in batch])
        elif (elem.shape == ()):
            # Zero-dim numpy scalars collate into a 1-D tensor.
            return torch.as_tensor(batch)
    elif isinstance(elem, float):
        return torch.tensor(batch, dtype=torch.float64)
    elif isinstance(elem, int):
        return torch.tensor(batch)
    elif isinstance(elem, string_classes):
        return batch
    elif isinstance(elem, collections.abc.Mapping):
        return {key: default_collate([d[key] for d in batch]) for key in elem}
    elif (isinstance(elem, tuple) and hasattr(elem, '_fields')):
        # namedtuple: collate field-wise and rebuild the same type.
        return elem_type(*(default_collate(samples) for samples in zip(*batch)))
    elif isinstance(elem, collections.abc.Sequence):
        it = iter(batch)
        elem_size = len(next(it))
        if (not all(((len(elem) == elem_size) for elem in it))):
            raise RuntimeError('each element in list of batch should be of equal size')
        transposed = zip(*batch)
        return [default_collate(samples) for samples in transposed]
    raise TypeError(default_collate_err_msg_format.format(elem_type))
def decorate_args_and_kwargs_to_deivce(func, device):
    """Wrap `func` so every torch.Tensor argument is moved to `device` first.

    Non-tensor arguments pass through untouched. Note: the public name keeps
    the historical 'deivce' spelling so existing callers do not break.
    """
    import functools

    def to_device_if_tensor(obj):
        return (obj.to(device) if isinstance(obj, torch.Tensor) else obj)

    @functools.wraps(func)  # preserve the wrapped function's name/docstring
    def wrapper(*args, **kwargs):
        args = [to_device_if_tensor(x) for x in args]
        kwargs = {k: to_device_if_tensor(v) for (k, v) in kwargs.items()}
        return func(*args, **kwargs)
    return wrapper
class HallLittlewood(UniqueRepresentation):
    """Family of Hall-Littlewood symmetric-function bases (P, Q, Qp) with parameter t."""

    def __repr__(self):
        """Family name including the base ring (and any specialized t)."""
        return (self._name + (' over %s' % self._sym.base_ring()))

    def __init__(self, Sym, t='t'):
        """Attach to the symmetric-function ring `Sym`, coercing `t` into its base ring."""
        self._sym = Sym
        self.t = Sym.base_ring()(t)
        self._name_suffix = ''
        if (str(t) != 't'):
            # Record a specialized parameter in the display name.
            self._name_suffix += (' with t=%s' % t)
        self._name = ('Hall-Littlewood polynomials' + self._name_suffix)

    def symmetric_function_ring(self):
        """The ambient symmetric-function ring."""
        return self._sym

    def base_ring(self):
        """Base ring of the ambient symmetric-function ring."""
        return self._sym.base_ring()

    def P(self):
        """The Hall-Littlewood P basis of this family."""
        return HallLittlewood_p(self)

    def Q(self):
        """The Hall-Littlewood Q basis of this family."""
        return HallLittlewood_q(self)

    def Qp(self):
        """The Hall-Littlewood Qp basis of this family."""
        return HallLittlewood_qp(self)
class SawyerReachPushPickPlaceEnv(SawyerXYZEnv):
    """Sawyer env running one of three tasks: 'reach', 'push' or 'pick_place'.

    The active task_type selects the default goal and the reward/success
    computation. Stripped decorators are restored here: @property on
    model_name (read as an attribute in __init__) and _target_site_config,
    and @_assert_task_is_set on step (the bare name on its own line was
    clearly a decorator that lost its '@').
    """

    def __init__(self, task_type, full_state_reward=False):
        liftThresh = 0.04
        goal_low = ((- 0.1), 0.8, 0.05)
        goal_high = (0.1, 0.9, 0.3)
        hand_low = ((- 0.5), 0.4, 0.05)
        hand_high = (0.5, 1, 0.5)
        obj_low = ((- 0.1), 0.6, 0.02)
        obj_high = (0.1, 0.7, 0.02)
        self.full_state_reward = full_state_reward
        super().__init__(self.model_name, hand_low=hand_low, hand_high=hand_high)
        self.task_type = task_type
        # Default goal position depends on the selected task.
        if (self.task_type == 'pick_place'):
            self.goal = np.array([0.1, 0.8, 0.2])
        elif (self.task_type == 'reach'):
            self.goal = np.array([(- 0.1), 0.8, 0.2])
        elif (self.task_type == 'push'):
            self.goal = np.array([0.1, 0.8, 0.02])
        self.init_config = {'obj_init_angle': 0.3, 'obj_init_pos': np.array([0, 0.6, 0.02]), 'hand_init_pos': np.array([0, 0.6, 0.2])}
        self.obj_init_angle = self.init_config['obj_init_angle']
        self.obj_init_pos = self.init_config['obj_init_pos']
        self.hand_init_pos = self.init_config['hand_init_pos']
        self.liftThresh = liftThresh
        self._random_reset_space = Box(np.hstack((obj_low, goal_low)), np.hstack((obj_high, goal_high)))
        self.goal_space = Box(np.array(goal_low), np.array(goal_high))
        self.goal_idx = 0

    def _set_task_inner(self, *, task_type, **kwargs):
        super()._set_task_inner(**kwargs)
        self.task_type = task_type
        if (self.task_type == 'pick_place'):
            self.goal = np.array([0.1, 0.8, 0.2])
        elif (self.task_type == 'reach'):
            self.goal = np.array([(- 0.1), 0.8, 0.2])
        elif (self.task_type == 'push'):
            self.goal = np.array([0.1, 0.8, 0.02])
        else:
            raise NotImplementedError

    @property
    def model_name(self):
        # @property restored: __init__ passes self.model_name (a path string)
        # to super().__init__; without it a bound method would be passed.
        return full_v1_path_for('sawyer_xyz/sawyer_reach_push_pick_and_place.xml')

    @_assert_task_is_set
    def step(self, action):
        """Advance one step; returns (obs, reward, done=False, metrics info)."""
        ob = super().step(action)
        (reward, success, hand_distance, goal_distance) = self.compute_reward(ob, self.goal_idx)
        self.curr_path_length += 1
        info = {'metric_reward': reward, 'metric_success': success, 'metric_hand_distance': hand_distance, 'metric_goal_distance': goal_distance}
        return (ob, reward, False, info)

    @property
    def _target_site_config(self):
        # @property restored to match attribute-style access conventions.
        # The unused `far_away` constant from the original was removed.
        return [(('goal_' + self.task_type), self._target_pos)]

    def _get_pos_objects(self):
        return self.data.get_geom_xpos('objGeom')

    def adjust_initObjPos(self, orig_init_pos):
        # Compensate for the xy offset between the obj body frame and its geom.
        diff = (self.get_body_com('obj')[:2] - self.data.get_geom_xpos('objGeom')[:2])
        adjustedPos = (orig_init_pos[:2] + diff)
        return [adjustedPos[0], adjustedPos[1], self.data.get_geom_xpos('objGeom')[(- 1)]]

    def reset_model(self):
        self._reset_hand()
        self._target_pos = self.goal.copy()
        self.obj_init_pos = self.adjust_initObjPos(self.init_config['obj_init_pos'])
        self.obj_init_angle = self.init_config['obj_init_angle']
        self.objHeight = self.data.get_geom_xpos('objGeom')[2]
        self.heightTarget = (self.objHeight + self.liftThresh)
        if self.random_init:
            goal_pos = self._get_state_rand_vec()
            self._target_pos = goal_pos[3:]
            # Resample until object and goal are at least 0.15 apart in xy.
            while (np.linalg.norm((goal_pos[:2] - self._target_pos[:2])) < 0.15):
                goal_pos = self._get_state_rand_vec()
                self._target_pos = goal_pos[3:]
            if (self.task_type == 'push'):
                # Push is planar: the goal stays at the object's height.
                self._target_pos = np.concatenate((goal_pos[(- 3):(- 1)], [self.obj_init_pos[(- 1)]]))
                self.obj_init_pos = np.concatenate((goal_pos[:2], [self.obj_init_pos[(- 1)]]))
            else:
                self._target_pos = goal_pos[(- 3):]
                self.obj_init_pos = goal_pos[:3]
        self._set_obj_xyz(self.obj_init_pos)
        return self._get_obs()

    def _reset_hand(self):
        super()._reset_hand(10)
        (rightFinger, leftFinger) = (self._get_site_pos('rightEndEffector'), self._get_site_pos('leftEndEffector'))
        self.init_fingerCOM = ((rightFinger + leftFinger) / 2)
        self.pickCompleted = False

    def add_pertask_success(self, obs, goal_idx=None):
        # NOTE(review): relies on self.goals, which this class never sets --
        # presumably provided by a wrapper or subclass; confirm before use.
        goal_idxs = ([goal_idx] if (goal_idx is not None) else range(len(self.goals)))
        for goal_idx in goal_idxs:
            (reward, success, hand_distance, goal_distance) = self.compute_reward(obs['state'], goal_idx)
            obs[('metric_reward/goal_' + str(goal_idx))] = reward
            obs[('metric_success/goal_' + str(goal_idx))] = success
            obs[('metric_hand_distance/goal_' + str(goal_idx))] = hand_distance
            obs[('metric_goal_distance/goal_' + str(goal_idx))] = goal_distance
        return obs

    def compute_reward(self, obs, goal_idx):
        """Distance-based reward plus success and distance metrics for one goal."""
        if (goal_idx is None):
            goal_idx = self.goal_idx
        goal = self.goals[goal_idx]
        if self.full_state_reward:
            hand_distance = np.linalg.norm((obs[:3] - self.hand_init_pos))
        else:
            hand_distance = np.linalg.norm((obs[:3] - goal[:3]))
        obj_distance = np.linalg.norm((obs[3:6] - goal[:3]))
        if (self.task_type == 'reach'):
            reward = (- hand_distance)
            success = float((hand_distance < 0.05))
            goal_distance = hand_distance
        else:
            reward = ((- hand_distance) - obj_distance)
            success = float((obj_distance < 0.07))
            goal_distance = obj_distance
        # Recomputed with <= thresholds; this overrides the value set above
        # (kept as in the original code).
        success = (float((goal_distance <= 0.05)) if (self.task_type == 'reach') else float((goal_distance <= 0.07)))
        return [reward, success, hand_distance, goal_distance]
def none_parameters_factory(parameters: Sequence[(JSONMapping | None)], n_outputs: int) -> List[(JSONMapping | None)]:
    """Ignore `parameters` and produce a list of `n_outputs` None placeholders."""
    return [None for _ in range(n_outputs)]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.