code stringlengths 101 5.91M |
|---|
def aa_to_quat(axis_angle: Union[(torch.Tensor, numpy.ndarray)]) -> Union[(torch.Tensor, numpy.ndarray)]:
    """Convert axis-angle rotations to quaternions.

    Args:
        axis_angle: rotations in axis-angle form, last dimension must be 3.

    Returns:
        Quaternions produced by the axis_angle_to_quaternion transform,
        same container type as the input.

    Raises:
        ValueError: if the last dimension of ``axis_angle`` is not 3.
    """
    if axis_angle.shape[-1] != 3:
        # Fix: the original message had a stray 'f' before the interpolated
        # shape ("angles f(...)"), a typo from the f-string prefix.
        raise ValueError(f'Invalid input axis angles {axis_angle.shape}.')
    t = Compose([axis_angle_to_quaternion])
    return t(axis_angle)
def withClass(classname, namespace=''):
    """Build a withAttribute matcher for a (possibly namespaced) class attribute."""
    if namespace:
        attr_name = '%s:class' % namespace
    else:
        attr_name = 'class'
    return withAttribute(**{attr_name: classname})
def register_Ns3DesMetrics_methods(root_module, cls):
    """Register pybindgen bindings for the ns-3 DesMetrics class on ``cls``."""
    # Initialize(argc, argv, outDir=""): configure the metrics output directory
    # from command-line arguments.
    cls.add_method('Initialize', 'void', [param('int', 'argc'), param('char * *', 'argv'), param('std::string', 'outDir', default_value='""')])
    # Trace / TraceWithContext record a scheduling event (now, delay),
    # optionally tagged with a context id.
    cls.add_method('Trace', 'void', [param('ns3::Time const &', 'now'), param('ns3::Time const &', 'delay')])
    cls.add_method('TraceWithContext', 'void', [param('uint32_t', 'context'), param('ns3::Time const &', 'now'), param('ns3::Time const &', 'delay')])
    cls.add_constructor([])
    return
def symbolic_override(symbolic_fn):
    """Create a decorator that installs ``symbolic_fn`` as the ONNX symbolic,
    applied unconditionally (the predicate always answers True)."""
    def _always(_):
        return True
    return functools.partial(_symbolic_override_wrapper_maker, symbolic_fn, _always)
def register_Ns3ArfWifiManager_methods(root_module, cls):
    """Register pybindgen bindings for ns-3 ArfWifiManager on ``cls``."""
    # Copy constructor and default constructor.
    cls.add_constructor([param('ns3::ArfWifiManager const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # Capability toggles (HE/HT/VHT) are overridable virtuals.
    cls.add_method('SetHeSupported', 'void', [param('bool', 'enable')], is_virtual=True)
    cls.add_method('SetHtSupported', 'void', [param('bool', 'enable')], is_virtual=True)
    cls.add_method('SetVhtSupported', 'void', [param('bool', 'enable')], is_virtual=True)
    # Private virtual hooks of WifiRemoteStationManager implemented by ARF.
    cls.add_method('DoCreateStation', 'ns3::WifiRemoteStation *', [], is_const=True, visibility='private', is_virtual=True)
    cls.add_method('DoGetDataTxVector', 'ns3::WifiTxVector', [param('ns3::WifiRemoteStation *', 'station')], visibility='private', is_virtual=True)
    cls.add_method('DoGetRtsTxVector', 'ns3::WifiTxVector', [param('ns3::WifiRemoteStation *', 'station')], visibility='private', is_virtual=True)
    cls.add_method('DoReportDataFailed', 'void', [param('ns3::WifiRemoteStation *', 'station')], visibility='private', is_virtual=True)
    cls.add_method('DoReportDataOk', 'void', [param('ns3::WifiRemoteStation *', 'station'), param('double', 'ackSnr'), param('ns3::WifiMode', 'ackMode'), param('double', 'dataSnr')], visibility='private', is_virtual=True)
    cls.add_method('DoReportFinalDataFailed', 'void', [param('ns3::WifiRemoteStation *', 'station')], visibility='private', is_virtual=True)
    cls.add_method('DoReportFinalRtsFailed', 'void', [param('ns3::WifiRemoteStation *', 'station')], visibility='private', is_virtual=True)
    cls.add_method('DoReportRtsFailed', 'void', [param('ns3::WifiRemoteStation *', 'station')], visibility='private', is_virtual=True)
    cls.add_method('DoReportRtsOk', 'void', [param('ns3::WifiRemoteStation *', 'station'), param('double', 'ctsSnr'), param('ns3::WifiMode', 'ctsMode'), param('double', 'rtsSnr')], visibility='private', is_virtual=True)
    cls.add_method('DoReportRxOk', 'void', [param('ns3::WifiRemoteStation *', 'station'), param('double', 'rxSnr'), param('ns3::WifiMode', 'txMode')], visibility='private', is_virtual=True)
    cls.add_method('IsLowLatency', 'bool', [], is_const=True, visibility='private', is_virtual=True)
    return
def process_str_value(v: str) -> str:
    """Strip one leading and one trailing quote character (independently),
    then collapse doubled quote characters into single ones."""
    if v and v[0] in QUOTE_CHARS:
        v = v[1:]
    if v and v[-1] in QUOTE_CHARS:
        v = v[:-1]
    for quote in QUOTE_CHARS:
        v = v.replace(quote * 2, quote)
    return v
def get_circle_coordinates(r: float, degree: float):
    """Return the (x, y) point on a circle of radius ``r`` at ``degree`` degrees.

    The angle is measured from the positive y-axis (x uses sin, y uses cos),
    i.e. compass-style: 0 degrees -> (0, r), 90 degrees -> (r, 0).

    Raises:
        ValueError: if ``degree`` is outside [0, 360].
    """
    if degree < 0 or degree > 360:
        # Fix: raise with an informative message instead of a bare ValueError.
        raise ValueError(f'degree must be within [0, 360], got {degree}')
    radian = degree / 360 * 2 * np.pi
    x = r * np.sin(radian)
    y = r * np.cos(radian)
    return (x, y)
def getContent(request):
    """Render the content page.

    Unauthenticated GET requests are redirected to the login page; every
    other case renders content.html (original behavior preserved, the
    ACCESS_DENIED flag dance collapsed to a single guard clause).
    """
    if request.method == 'GET' and not request.user.is_authenticated:
        return redirect('/login')
    return render(request, 'content.html')
_model
def metaformer_ppff_s12_224(pretrained=False, **kwargs):
    """MetaFormer S12 at 224px: pooling mixers in the first two stages,
    spatial fully-connected mixers in the last two."""
    model = MetaFormer(
        [2, 2, 6, 2],
        embed_dims=[64, 128, 320, 512],
        token_mixers=[
            Pooling,
            Pooling,
            partial(SpatialFc, spatial_shape=[14, 14]),
            partial(SpatialFc, spatial_shape=[7, 7]),
        ],
        mlp_ratios=[4, 4, 4, 4],
        norm_layer=GroupNorm,
        downsamples=[True, True, True, True],
        **kwargs,
    )
    model.default_cfg = _cfg()
    if pretrained:
        state = torch.hub.load_state_dict_from_url(
            url=model_urls['metaformer_ppff_s12_224'],
            map_location='cpu', check_hash=True)
        model.load_state_dict(state)
    return model
def run_test(test, args=(), test_atol=1e-05, n=100, iters=None, callback=None, minimizer_kwargs=None, options=None, sampling_method='sobol'):
    """Run shgo on a benchmark problem and assert the result matches the
    problem's expected_x / expected_fun / expected_xl / expected_funl
    (each check is skipped when the expectation is None)."""
    res = shgo(
        test.f, test.bounds, args=args, constraints=test.cons,
        n=n, iters=iters, callback=callback,
        minimizer_kwargs=minimizer_kwargs, options=options,
        sampling_method=sampling_method,
    )
    logging.info(res)
    checks = (
        (test.expected_x, res.x, {'rtol': test_atol, 'atol': test_atol}),
        (test.expected_fun, res.fun, {'atol': test_atol}),
        (test.expected_xl, res.xl, {'atol': test_atol}),
        (test.expected_funl, res.funl, {'atol': test_atol}),
    )
    for expected, actual, tolerances in checks:
        if expected is not None:
            numpy.testing.assert_allclose(actual, expected, **tolerances)
def prepare_config(exp_config: Union[(List[str], str)], run_type: str, ckpt_path='', opts=None, suffix=None) -> tuple:
    """Build the experiment config, derive output directories from the variant
    name, seed all RNGs, and resolve the checkpoint path.

    Annotation fix: the return type was declared ``None`` but the function
    returns ``(config, ckpt_path)``.  ``run_type`` is accepted but unused here.
    """
    config = get_config(exp_config, opts)
    if isinstance(exp_config, str):
        variant_config = exp_config
    else:
        # With multiple config files, the last one names the variant.
        variant_config = exp_config[(- 1)]
    # Variant name = config file basename without extension.
    variant_name = osp.split(variant_config)[1].split('.')[0]
    config.defrost()
    config.VARIANT = variant_name
    if (suffix is not None):
        # Optional extra subdirectory level under each output root.
        config.TENSORBOARD_DIR = osp.join(config.TENSORBOARD_DIR, suffix)
        config.CHECKPOINT_DIR = osp.join(config.CHECKPOINT_DIR, suffix)
        config.LOG_DIR = osp.join(config.LOG_DIR, suffix)
    config.TENSORBOARD_DIR = osp.join(config.TENSORBOARD_DIR, config.VARIANT)
    config.CHECKPOINT_DIR = osp.join(config.CHECKPOINT_DIR, config.VARIANT)
    config.LOG_DIR = osp.join(config.LOG_DIR, config.VARIANT)
    config.freeze()
    # NOTE(review): ckpt_path defaults to '' (not None), so this branch runs
    # for the default too and yields CHECKPOINT_DIR itself — confirm intended.
    if (ckpt_path is not None):
        if (not osp.exists(ckpt_path)):
            # Non-existent path: treat it as relative to the checkpoint dir.
            ckpt_path = osp.join(config.CHECKPOINT_DIR, ckpt_path)
    # Seed every RNG and force deterministic cuDNN for reproducibility.
    np.random.seed(config.SEED)
    random.seed(config.SEED)
    torch.random.manual_seed(config.SEED)
    torch.backends.cudnn.deterministic = True
    return (config, ckpt_path)
def hans_convert_examples_to_features(examples, tokenizer, max_length=512, task=None, label_list=None, output_mode=None, pad_on_left=False, pad_token=0, pad_token_segment_id=0, mask_padding_with_zero=True):
    """Tokenize, pad and label HANS examples.

    Returns a list of InputFeatures, or a tf.data.Dataset when the input was
    itself a TF dataset.  Each feature also carries the example's ``pairID``.
    """
    is_tf_dataset = False
    if (is_tf_available() and isinstance(examples, tf.data.Dataset)):
        is_tf_dataset = True
    if (task is not None):
        # Derive label list / output mode from the GLUE task registry when not given.
        processor = glue_processors[task]()
        if (label_list is None):
            label_list = processor.get_labels()
            logger.info(('Using label list %s for task %s' % (label_list, task)))
        if (output_mode is None):
            output_mode = glue_output_modes[task]
            logger.info(('Using output mode %s for task %s' % (output_mode, task)))
    label_map = {label: i for (i, label) in enumerate(label_list)}
    features = []
    for (ex_index, example) in enumerate(examples):
        if ((ex_index % 10000) == 0):
            logger.info(('Writing example %d' % ex_index))
        if is_tf_dataset:
            # TF datasets yield tensor dicts; convert back to an InputExample.
            example = processor.get_example_from_tensor_dict(example)
            example = processor.tfds_map(example)
        inputs = tokenizer.encode_plus(example.text_a, example.text_b, add_special_tokens=True, max_length=max_length)
        (input_ids, token_type_ids) = (inputs['input_ids'], inputs['token_type_ids'])
        # Real tokens get attention 1 (or 0 when mask_padding_with_zero is False).
        attention_mask = ([(1 if mask_padding_with_zero else 0)] * len(input_ids))
        padding_length = (max_length - len(input_ids))
        if pad_on_left:
            input_ids = (([pad_token] * padding_length) + input_ids)
            attention_mask = (([(0 if mask_padding_with_zero else 1)] * padding_length) + attention_mask)
            token_type_ids = (([pad_token_segment_id] * padding_length) + token_type_ids)
        else:
            input_ids = (input_ids + ([pad_token] * padding_length))
            attention_mask = (attention_mask + ([(0 if mask_padding_with_zero else 1)] * padding_length))
            token_type_ids = (token_type_ids + ([pad_token_segment_id] * padding_length))
        assert (len(input_ids) == max_length), 'Error with input length {} vs {}'.format(len(input_ids), max_length)
        assert (len(attention_mask) == max_length), 'Error with input length {} vs {}'.format(len(attention_mask), max_length)
        assert (len(token_type_ids) == max_length), 'Error with input length {} vs {}'.format(len(token_type_ids), max_length)
        if (output_mode == 'classification'):
            # Labels missing from the map default to class 0.
            label = (label_map[example.label] if (example.label in label_map) else 0)
        elif (output_mode == 'regression'):
            label = float(example.label)
        else:
            raise KeyError(output_mode)
        pairID = str(example.pairID)
        if (ex_index < 10):
            # Log the first few converted examples for manual inspection.
            logger.info('*** Example ***')
            logger.info(('text_a: %s' % example.text_a))
            logger.info(('text_b: %s' % example.text_b))
            logger.info(('guid: %s' % example.guid))
            logger.info(('input_ids: %s' % ' '.join([str(x) for x in input_ids])))
            logger.info(('attention_mask: %s' % ' '.join([str(x) for x in attention_mask])))
            logger.info(('token_type_ids: %s' % ' '.join([str(x) for x in token_type_ids])))
            logger.info(('label: %s (id = %d)' % (example.label, label)))
        features.append(InputFeatures(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, label=label, pairID=pairID))
    if (is_tf_available() and is_tf_dataset):
        def gen():
            for ex in features:
                (yield ({'input_ids': ex.input_ids, 'attention_mask': ex.attention_mask, 'token_type_ids': ex.token_type_ids}, ex.label))
        # Re-wrap the features as a tf.data.Dataset mirroring the input form.
        return tf.data.Dataset.from_generator(gen, ({'input_ids': tf.int32, 'attention_mask': tf.int32, 'token_type_ids': tf.int32}, tf.int64), ({'input_ids': tf.TensorShape([None]), 'attention_mask': tf.TensorShape([None]), 'token_type_ids': tf.TensorShape([None])}, tf.TensorShape([])))
    return features
def dilated_basic_1d(filters, suffix, stage=0, block=0, kernel_size=3, numerical_name=False, stride=None, dilations=(1, 1)):
    """Build a dilated basic 1-D ResNet block (two causal convs plus an
    identity/projection shortcut) and return a function applying it to a tensor.

    NOTE(review): ``parameters`` passed to Conv1D is a module-level global not
    visible here — presumably shared conv kwargs; confirm at the call site.
    """
    if (stride is None):
        # The first block of each later stage downsamples; all others keep stride 1.
        if ((block != 0) or (stage == 0)):
            stride = 1
        else:
            stride = 2
    if ((block > 0) and numerical_name):
        # Numerical naming ('b1', 'b2', ...) for networks with many blocks.
        block_char = 'b{}'.format(block)
    else:
        block_char = chr((ord('a') + block))
    stage_char = str((stage + 2))
    def f(x):
        # Main path: conv -> BN -> ReLU -> conv -> BN (both convs causal).
        y = Conv1D(filters, kernel_size, padding='causal', strides=stride, dilation_rate=dilations[0], use_bias=False, name='res{}{}_branch2a_{}'.format(stage_char, block_char, suffix), **parameters)(x)
        y = BatchNormalization(epsilon=1e-05, name='bn{}{}_branch2a_{}'.format(stage_char, block_char, suffix))(y)
        y = Activation('relu', name='res{}{}_branch2a_relu_{}'.format(stage_char, block_char, suffix))(y)
        y = Conv1D(filters, kernel_size, padding='causal', use_bias=False, dilation_rate=dilations[1], name='res{}{}_branch2b_{}'.format(stage_char, block_char, suffix), **parameters)(y)
        y = BatchNormalization(epsilon=1e-05, name='bn{}{}_branch2b_{}'.format(stage_char, block_char, suffix))(y)
        if (block == 0):
            # Projection shortcut aligns channels/stride at stage entry.
            shortcut = Conv1D(filters, 1, strides=stride, use_bias=False, name='res{}{}_branch1_{}'.format(stage_char, block_char, suffix), **parameters)(x)
            shortcut = BatchNormalization(epsilon=1e-05, name='bn{}{}_branch1_{}'.format(stage_char, block_char, suffix))(shortcut)
        else:
            shortcut = x
        y = Add(name='res{}{}_{}'.format(stage_char, block_char, suffix))([y, shortcut])
        y = Activation('relu', name='res{}{}_relu_{}'.format(stage_char, block_char, suffix))(y)
        return y
    return f
def new_empty(g, self, sizes, dtype, layout, device, pin_memory=False):
    """ONNX symbolic for Tensor.new_empty: when no dtype is given, infer it
    from the source tensor (if its type is fully known), then defer to empty."""
    if dtype is None and self.isCompleteTensor():
        scalar_name = self.type().scalarType()
        dtype = sym_help.scalar_type_to_onnx.index(sym_help.cast_pytorch_to_onnx[scalar_name])
    return empty(g, sizes, dtype, layout, device, pin_memory)
def launch_experiment(variant, get_config=None, get_offline_algorithm=None, exp_postfix='', use_gpu=True, log_to_tensorboard=False, data_args=None):
    """Collect the provided factory callables into an experiment_config dict
    and hand everything off to run_experiment."""
    experiment_config = {}
    for key, factory in (('get_config', get_config),
                         ('get_offline_algorithm', get_offline_algorithm)):
        if factory is not None:
            experiment_config[key] = factory
    run_experiment(variant=variant, experiment_config=experiment_config,
                   exp_postfix=exp_postfix, use_gpu=use_gpu,
                   log_to_tensorboard=log_to_tensorboard, data_args=data_args)
def nano_sleep(time_ns):
    """Busy-wait for ``time_ns`` nanoseconds.

    Spins on time.time_ns() until the deadline passes — burns a CPU core, but
    gives finer granularity than time.sleep for very short waits.
    """
    deadline = time.time_ns() + time_ns
    while time.time_ns() < deadline:
        pass
def count_string_tokens(string: str, model_name: str) -> int:
    """Count BPE tokens in ``string`` for ``model_name``.

    Falls back to the cl100k_base encoding when the model is unknown to
    tiktoken.

    Returns:
        The number of tokens produced by the selected encoding.
    """
    try:
        encoding = tiktoken.encoding_for_model(model_name)
    except KeyError:
        # Fix: Logger.warn is deprecated in the logging module; use warning().
        logger.warning('Warning: model not found. Using cl100k_base encoding.')
        encoding = tiktoken.get_encoding('cl100k_base')
    return len(encoding.encode(string))
def test_gather():
    """rf.gather with a scalar index over the time axis should drop that axis,
    leaving (batch, in) as the output shape."""
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim = Dim(7, name='in')
    extern_data = TensorDict({'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32')})
    class _Net(rf.Module):
        def __call__(self, x: Tensor) -> Tensor:
            # Take the first time step from every sequence in the batch.
            return rf.gather(x, indices=0, axis=time_dim)
    def _forward_step(*, model: _Net, extern_data: TensorDict):
        out = model(extern_data['data'])
        # Gathering a single index over time leaves (batch, in).
        out.mark_as_default_output(shape=(batch_dim, in_dim))
    run_model(extern_data, (lambda *, epoch, step: _Net()), _forward_step)
class BaseDetNeck(nn.Module, metaclass=ABCMeta):
    """Base class for detection necks.

    Looks up the (depth_mul, width_mul) multipliers in ``cfg`` keyed by the
    suffix of ``subtype`` (the text after the first '_') and rescales the
    channel and block-count configuration accordingly.
    """

    def __init__(self, subtype=None, cfg=None, in_channels=None, mid_channels=None, out_channels=None, num_blocks=None, aux_out_channels=None, depthwise=False, conv_cfg=None, norm_cfg=None, act_cfg=None):
        super(BaseDetNeck, self).__init__()
        # Fix: dicts must not be default argument values (they are created once
        # and shared by every instance); build the defaults per call instead.
        if norm_cfg is None:
            norm_cfg = dict(type='BN', requires_grad=True)
        if act_cfg is None:
            act_cfg = dict(type='ReLU')
        self.subtype = subtype
        self.cfg = cfg
        self.in_channels = in_channels
        self.mid_channels = mid_channels
        self.out_channels = out_channels
        self.num_blocks = num_blocks
        self.aux_out_channels = aux_out_channels
        self.depthwise = depthwise
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        (depth_mul, width_mul) = self.cfg[self.subtype.split('_')[1]]

        def _scale(values, mul):
            # Rescale every entry, never dropping below 1.
            return [max(round(v * mul), 1) for v in values]

        self.in_channels = _scale(self.in_channels, width_mul)
        if self.mid_channels is not None:
            self.mid_channels = _scale(self.mid_channels, width_mul)
        if self.out_channels is not None:
            self.out_channels = _scale(self.out_channels, width_mul)
        if self.num_blocks is not None:
            # Block counts scale with the depth multiplier.
            self.num_blocks = _scale(self.num_blocks, depth_mul)

    def forward(self, x):
        # Subclasses implement the actual feature fusion.
        pass
def isnan(x):
    """Elementwise NaN test via IEEE-754 bit patterns.

    A float is NaN when, with the sign bit cleared, its bits exceed the
    bit pattern of +inf (exponent all ones, mantissa non-zero).

    Fix: the hex mask constants were missing from this block (the comparisons
    read ``(y & ) > )``); restored the standard IEEE-754 masks.
    """
    ftype = impl.get_runtime().default_fp
    fx = ops.cast(x, ftype)
    if static(ftype == f64):
        # f64: compare the sign-cleared high 32 bits against the top word of
        # +inf (0x7FF00000); a non-zero low word counts as mantissa bits set.
        y = ops.bit_cast(fx, u64)
        return ((ops.cast(y >> 32, u32) & 0x7FFFFFFF) + (ops.cast(y, u32) != 0)) > 0x7FF00000
    # f32: clear the sign bit and compare against +inf's bit pattern.
    y = ops.bit_cast(fx, u32)
    return (y & 0x7FFFFFFF) > 0x7F800000
def _impl(array, value_set, skip_nones, highlevel, behavior, attrs):
    """Backend for ak.str.index_in: map each string in ``array`` to its index
    in ``value_set`` (None where absent), via Arrow's index_in kernel."""
    from awkward._connect.pyarrow import import_pyarrow_compute
    from awkward.operations.str import _apply_through_arrow
    pc = import_pyarrow_compute('ak.str.index_in')
    with HighLevelContext(behavior=behavior, attrs=attrs) as ctx:
        # Both operands must live on the same backend before calling Arrow.
        (layout, value_set_layout) = ensure_same_backend(ctx.unwrap(array, allow_record=False), ctx.unwrap(value_set, allow_record=False))
    if (not _is_maybe_optional_list_of_string(value_set_layout)):
        raise TypeError('`value_set` must be 1D array of (possibly missing) strings')
    def apply(layout, **kwargs):
        # Only string-list layouts are handled; returning None lets
        # recursively_apply continue descending into other node types.
        if _is_maybe_optional_list_of_string(layout):
            return _apply_through_arrow(pc.index_in, layout, value_set_layout, skip_nulls=skip_nones, expect_option_type=True, generate_bitmasks=True)
    out = ak._do.recursively_apply(layout, apply)
    return ctx.wrap(out, highlevel=highlevel)
class BloomInt8(CausalInt8Model):
    """Int8-quantized BLOOM causal language model wrapper."""
    # Registry key identifying this model configuration.
    config_name: str = 'bloom_int8'
    def __init__(self, weights_path: Optional[str]=None):
        # Delegate to the shared int8 base, pairing this model with its engine.
        super().__init__(BloomInt8Engine.config_name, weights_path)
_module()
class VCRDataset(MInstrDataset):
    """VCR (Visual Commonsense Reasoning) dataset.

    ``version`` selects how question (q), answer (a), rationale (r) and
    choice lists (c) are combined into the conversation, e.g. 'qc-a' shows
    the question plus answer options and expects the chosen letter.
    """
    def __init__(self, *args, version, **kwargs):
        super().__init__(*args, **kwargs, placeholders=(IMAGE_PLACEHOLDER, QUESTION_PLACEHOLDER))
        self.version = version
        assert (version in ['q-a', 'q-ra', 'qc-a', 'qc-ra', 'qc-rac', 'qa-r', 'q-a-q-r', 'qac-r', 'qc-a-qc-r'])
    def __getitem__(self, index, force_answer_label=None, force_rationale_label=None):
        item = self.get_raw_item(index)
        image = self.get_image(item['img_fn'])
        # Detection boxes come with a trailing confidence; keep only coords.
        boxes_with_prob = item['boxes']
        boxes = [box[:4] for box in boxes_with_prob]
        question = item['question']
        answer_choices = item['answer_choices']
        rationale_choices = item['rationale_choices']
        # Callers may force specific gold labels (e.g. for multi-pass eval).
        if (force_answer_label is not None):
            answer_label = force_answer_label
        else:
            answer_label = item['answer_label']
        if (force_rationale_label is not None):
            rationale_label = force_rationale_label
        else:
            rationale_label = item['rationale_label']
        question_pack = prepare_sentence(question)
        answer_pack_choices = [prepare_sentence(_) for _ in answer_choices]
        rationale_pack_choices = [prepare_sentence(_) for _ in rationale_choices]
        (answer_choices_pack, answer_choice) = prepare_choice(answer_pack_choices, answer_label)
        (rationale_choices_pack, rationale_choice) = prepare_choice(rationale_pack_choices, rationale_label)
        answer_gold_pack = answer_pack_choices[answer_label]
        rationale_gold_pack = rationale_pack_choices[rationale_label]
        # Assemble the human/gpt turn sequence for the requested version.
        version = self.version
        if (version == 'q-a'):
            final_packs = [merge([question_pack], prefixs=['QUESTION:']), answer_gold_pack]
        elif (version == 'q-ra'):
            final_packs = [merge([question_pack], prefixs=['QUESTION:']), merge([rationale_gold_pack, answer_gold_pack], prefixs=['', ''])]
        elif (version == 'qc-a'):
            final_packs = [merge([question_pack, answer_choices_pack], prefixs=['QUESTION:', '\nOPTIONS:'], postfixs=['', 'You should decide on the best choice and output the corresponding letter.']), answer_choice]
        elif (version == 'qc-ra'):
            final_packs = [merge([question_pack, answer_choices_pack], prefixs=['QUESTION:', '\nOPTIONS:'], postfixs=['', 'You should decide on the best choice and output the corresponding letter.']), merge([rationale_gold_pack, answer_choice], prefixs=['', ''])]
        elif (version == 'qc-rac'):
            final_packs = [merge([question_pack, answer_choices_pack], prefixs=['QUESTION:', '\nOPTIONS:'], postfixs=['', 'You should decide on the best choice and output the corresponding letter.']), merge([rationale_gold_pack, answer_gold_pack, answer_choice], prefixs=['', '', ''])]
        elif (version == 'qa-r'):
            final_packs = [merge([question_pack, answer_gold_pack], prefixs=['QUESTION:', '\nANSWER:'], postfixs=['', 'You should explain the reason for the above answer.']), rationale_gold_pack]
        elif (version == 'qac-r'):
            final_packs = [merge([question_pack, answer_gold_pack, rationale_choices_pack], prefixs=['QUESTION:', '\nANSWER:', '\nRATIONALE OPTIONS:'], postfixs=['', '', 'You should decide on the best choice that explains the above answer and output the corresponding letter.']), rationale_choice]
        elif (version == 'q-a-q-r'):
            final_packs = [merge([question_pack], prefixs=['QUESTION:']), answer_gold_pack, ('You should explain the reason for the above answer.', ()), rationale_gold_pack]
        elif (version == 'qc-a-qc-r'):
            final_packs = [merge([question_pack, answer_choices_pack], prefixs=['QUESTION:', '\nOPTIONS:'], postfixs=['', 'You should decide on the best choice and output the corresponding letter.']), answer_choice, merge([rationale_choices_pack], prefixs=['RATIONALE OPTIONS:'], postfixs=['You should decide on the best choice that explains the above answer and output the corresponding letter.']), rationale_choice]
        else:
            assert False
        conversations = []
        # Alternate human/gpt turns; each pack is (text, boxes_seq).
        roles = ['human', 'gpt']
        for (idx, pack) in enumerate(final_packs):
            conversations.append({'from': roles[(idx % 2)], 'value': pack[0], 'boxes_seq': pack[1]})
        # Inject the first turn into the dataset's prompt template.
        conversations[0]['value'] = self.get_template().replace(QUESTION_PLACEHOLDER, conversations[0]['value'])
        ret = {'image': image, 'target': {'boxes': boxes}, 'conversations': conversations}
        return ret
def disambiguate_grad_if_op_output(grad_op, idx, new_grad_output):
    """Rename the ``idx``-th output blob of an If gradient op, propagating the
    rename into both branch subnets so their output names stay consistent.

    The then/else rename loops were duplicated verbatim; factored into one
    helper.  As in the original, only else_net is guarded for absence.
    """
    def _rename_outputs(net, old_name, new_name):
        # Rewrite every op output in `net` that matches the old blob name.
        for op in net.op:
            for i, out in enumerate(op.output):
                if out == old_name:
                    op.output[i] = new_name

    old_grad_out_match = grad_op.output[idx]
    _rename_outputs(_get_net_argument(grad_op, 'then_net'), old_grad_out_match, new_grad_output)
    else_net = _get_net_argument(grad_op, 'else_net')
    if else_net:
        _rename_outputs(else_net, old_grad_out_match, new_grad_output)
    grad_op.output[idx] = new_grad_output
_test()
def test_ddr_reduce_red_1x40_8b_decouple_array_interfaces():
    """1x40 8-bit DDR reduction test with decoupled array interfaces enabled."""
    # Flip the Xilinx compiler option only for the duration of this test.
    with set_temporary('compiler', 'xilinx', 'decouple_array_interfaces', value=True):
        return exec_test(1, 40, 8, 'ddr', 'red_1x40_8b_decoupled')
def train(args, train_loader, model, criterion, optimizer, epoch):
    """Run one training epoch.

    Returns:
        (average_epoch_loss_train, overall_acc, per_class_acc, per_class_iu, mIOU)
    """
    model.train()
    iouEvalTrain = iouEval(args.classes)
    epoch_loss = []
    total_batches = len(train_loader)
    for i, (input, target) in enumerate(train_loader):
        start_time = time.time()
        if args.onGPU:
            input = input.cuda()
            target = target.cuda()
        input_var = torch.autograd.Variable(input)
        target_var = torch.autograd.Variable(target)
        output = model(input_var)
        loss = criterion(output, target_var)
        # Fix: zero_grad() was called both before and after computing the loss;
        # clearing gradients once, just before backward, is sufficient.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # loss.data[0] is the legacy (pre-0.4) PyTorch scalar accessor kept for
        # compatibility with the PyTorch version this codebase targets.
        epoch_loss.append(loss.data[0])
        time_taken = time.time() - start_time
        # Accumulate IoU with the argmax predictions of this batch.
        iouEvalTrain.addBatch(output.max(1)[1].data, target_var.data)
        print('[%d/%d] loss: %.3f time:%.2f' % (i, total_batches, loss.data[0], time_taken))
    average_epoch_loss_train = sum(epoch_loss) / len(epoch_loss)
    (overall_acc, per_class_acc, per_class_iu, mIOU) = iouEvalTrain.getMetric()
    return (average_epoch_loss_train, overall_acc, per_class_acc, per_class_iu, mIOU)
class FormattedTimesMixin(object):
    """Mixin providing formatted time strings and per-call average times.

    The ``*_str`` class attributes render the corresponding time fields via
    attr_formatter.  cpu_time/cuda_time are restored as properties: the
    decorators were stripped by the extraction tooling, and
    ``attr_formatter('cpu_time')`` expects attribute-style access, which a
    plain method would break by yielding a bound method instead of a number.
    """
    cpu_time_str = attr_formatter('cpu_time')
    cuda_time_str = attr_formatter('cuda_time')
    cpu_time_total_str = attr_formatter('cpu_time_total')
    cuda_time_total_str = attr_formatter('cuda_time_total')
    self_cpu_time_total_str = attr_formatter('self_cpu_time_total')
    self_cuda_time_total_str = attr_formatter('self_cuda_time_total')

    @property
    def cpu_time(self):
        # Average CPU time per call; guard against division by zero.
        return 0.0 if self.count == 0 else 1.0 * self.cpu_time_total / self.count

    @property
    def cuda_time(self):
        # Average CUDA time per call; guard against division by zero.
        return 0.0 if self.count == 0 else 1.0 * self.cuda_time_total / self.count
def make_regex(obj):
    """Return a compiled regex for ``obj``: strings are compiled, precompiled
    patterns pass through, anything else raises ValueError."""
    if not can_be_regex(obj):
        raise ValueError('Expected a string or a regex, got: {}'.format(type(obj)))
    if isinstance(obj, string_types):
        return re.compile(obj)
    return obj
def register_Ns3Ipv6AddressGenerator_methods(root_module, cls):
    """Register pybindgen bindings for the ns-3 Ipv6AddressGenerator on ``cls``.

    All generator operations are static (the generator is a process-wide
    singleton in ns-3).
    """
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv6AddressGenerator const &', 'arg0')])
    # AddAllocated returns whether the address was newly recorded.
    cls.add_method('AddAllocated', 'bool', [param('ns3::Ipv6Address const', 'addr')], is_static=True)
    cls.add_method('GetAddress', 'ns3::Ipv6Address', [param('ns3::Ipv6Prefix const', 'prefix')], is_static=True)
    cls.add_method('GetNetwork', 'ns3::Ipv6Address', [param('ns3::Ipv6Prefix const', 'prefix')], is_static=True)
    # Init seeds the generator; interfaceId defaults to the C++ literal "::1".
    cls.add_method('Init', 'void', [param('ns3::Ipv6Address const', 'net'), param('ns3::Ipv6Prefix const', 'prefix'), param('ns3::Ipv6Address const', 'interfaceId', default_value='"::1"')], is_static=True)
    cls.add_method('InitAddress', 'void', [param('ns3::Ipv6Address const', 'interfaceId'), param('ns3::Ipv6Prefix const', 'prefix')], is_static=True)
    cls.add_method('NextAddress', 'ns3::Ipv6Address', [param('ns3::Ipv6Prefix const', 'prefix')], is_static=True)
    cls.add_method('NextNetwork', 'ns3::Ipv6Address', [param('ns3::Ipv6Prefix const', 'prefix')], is_static=True)
    cls.add_method('Reset', 'void', [], is_static=True)
    cls.add_method('TestMode', 'void', [], is_static=True)
    return
('colorbar', orientation='vertical', format=None, spacing='uniform')
('label', fontsize=9, colors='blue', inline=None, inline_spacing=3, fmt='%1.2f')
(plot_points=100, fill=True, contours=None, linewidths=None, linestyles=None, labels=False, frame=True, axes=False, colorbar=False, legend_label=None, aspect_ratio=1, region=None)
def contour_plot(f, xrange, yrange, **options):
    """Create a Sage contour plot of ``f`` over ``xrange`` x ``yrange``.

    Evaluates ``f`` on a grid, with special handling for the pathological
    single-contour case (data constant, or entirely on one side of the
    contour) and for an optional ``region`` mask.
    """
    from sage.plot.all import Graphics
    from sage.plot.misc import setup_for_eval_on_grid
    region = options.pop('region')
    # Evaluate the region function on the same grid as f when given.
    ev = ([f] if (region is None) else [f, region])
    (F, ranges) = setup_for_eval_on_grid(ev, [xrange, yrange], options['plot_points'])
    h = F[0]
    (xrange, yrange) = (r[:2] for r in ranges)
    xy_data_array = [[h(x, y) for x in xsrange(*ranges[0], include_endpoint=True)] for y in xsrange(*ranges[1], include_endpoint=True)]
    g = Graphics()
    # Log scales make a fixed aspect ratio meaningless.
    scale = options.get('scale', None)
    if isinstance(scale, (list, tuple)):
        scale = scale[0]
    if (scale in ('semilogy', 'semilogx')):
        options['aspect_ratio'] = 'automatic'
    g._set_extra_kwds(Graphics._extract_kwds_for_show(options, ignore=['xmin', 'xmax']))
    # Special case: exactly one requested contour level with fill disabled.
    if (isinstance(options['contours'], (list, tuple)) and (len(options['contours']) == 1) and (options.get('fill') is False)):
        import numpy as np
        dx = ranges[0][2]
        dy = ranges[1][2]
        z0 = options['contours'][0]
        tol = (max(dx, dy) / 4.0)
        xy_data_array = np.ma.asarray(xy_data_array, dtype=float)
        if np.all((np.abs((xy_data_array - z0)) <= tol)):
            # Function is (numerically) constant at the contour level: render
            # it as a filled band instead of an empty line plot.
            xy_data_array.fill(z0)
            if ('cmap' in options):
                if isinstance(options['cmap'], (list, tuple)):
                    oldcmap = options['cmap'][0]
                else:
                    oldcmap = options['cmap']
            else:
                oldcmap = 'gray'
            options['cmap'] = ['white', oldcmap]
            options['contours'] = ((z0 - 1), z0)
            options['fill'] = True
        else:
            # Normalize so values sit below the contour, tracking the sign flip in c.
            c = 1
            if np.all((xy_data_array <= z0)):
                xy_data_array *= (- 1)
                c = (- 1)
            if (np.all((xy_data_array >= z0)) and np.any(((xy_data_array - z0) < tol))):
                from warnings import warn
                warn('pathological contour plot of a function whose values all lie on one side of the sole contour; we are adding more plot points and perturbing your function values.')
                # Refine the grid 4x in each direction and nudge values by tol
                # so the contour becomes drawable.
                if (not isinstance(options['plot_points'], (list, tuple))):
                    options['plot_points'] = (options['plot_points'], options['plot_points'])
                options['plot_points'] = ((options['plot_points'][0] * 4), (options['plot_points'][1] * 4))
                (F, ranges) = setup_for_eval_on_grid(ev, [xrange, yrange], options['plot_points'])
                h = F[0]
                (xrange, yrange) = (r[:2] for r in ranges)
                xy_data_array = [[(h(x, y) - (c * tol)) for x in xsrange(*ranges[0], include_endpoint=True)] for y in xsrange(*ranges[1], include_endpoint=True)]
    if (region is not None):
        # Mask out grid points where the region function is non-positive.
        import numpy
        xy_data_array = numpy.ma.asarray(xy_data_array, dtype=float)
        m = F[1]
        mask = numpy.asarray([[(m(x, y) <= 0) for x in xsrange(*ranges[0], include_endpoint=True)] for y in xsrange(*ranges[1], include_endpoint=True)], dtype=bool)
        xy_data_array[mask] = numpy.ma.masked
    g.add_primitive(ContourPlot(xy_data_array, xrange, yrange, options))
    return g
class KGDataset():
    """Knowledge-graph dataset.

    Loads entity/relation vocabularies (lines of ``id<delimiter>name``) and
    the train/valid/test triple files, mapping names to integer ids.
    """

    def __init__(self, entity_path, relation_path, train_path, valid_path=None, test_path=None, format=(0, 1, 2), delimiter='\t', skip_first_line=False):
        # Fix: `format` previously defaulted to the mutable list [0, 1, 2];
        # a tuple avoids the shared-mutable-default pitfall with identical
        # behavior (it is only ever indexed).
        self.delimiter = delimiter
        (self.entity2id, self.n_entities) = self.read_entity(entity_path)
        (self.relation2id, self.n_relations) = self.read_relation(relation_path)
        self.train = self.read_triple(train_path, 'train', skip_first_line, format)
        if (valid_path is not None):
            self.valid = self.read_triple(valid_path, 'valid', skip_first_line, format)
        else:
            self.valid = None
        if (test_path is not None):
            self.test = self.read_triple(test_path, 'test', skip_first_line, format)
        else:
            self.test = None

    def read_entity(self, entity_path):
        """Read the entity vocabulary; returns (name -> id dict, count)."""
        entity2id = {}
        with open(entity_path) as f:
            for line in f:
                (eid, entity) = line.strip().split(self.delimiter)
                entity2id[entity] = int(eid)
        return (entity2id, len(entity2id))

    def read_relation(self, relation_path):
        """Read the relation vocabulary; returns (name -> id dict, count)."""
        relation2id = {}
        with open(relation_path) as f:
            for line in f:
                (rid, relation) = line.strip().split(self.delimiter)
                relation2id[relation] = int(rid)
        return (relation2id, len(relation2id))

    def read_triple(self, path, mode, skip_first_line=False, format=(0, 1, 2)):
        """Read triples from ``path``.

        ``format`` gives the column positions of (head, relation, tail).
        Returns (heads, rels, tails) as int64 numpy arrays, or None when
        ``path`` is None.
        """
        if path is None:
            return None
        print('Reading {} triples....'.format(mode))
        heads = []
        tails = []
        rels = []
        with open(path) as f:
            if skip_first_line:
                _ = f.readline()  # drop the header row
            for line in f:
                triple = line.strip().split(self.delimiter)
                (h, r, t) = (triple[format[0]], triple[format[1]], triple[format[2]])
                heads.append(self.entity2id[h])
                rels.append(self.relation2id[r])
                tails.append(self.entity2id[t])
        heads = np.array(heads, dtype=np.int64)
        tails = np.array(tails, dtype=np.int64)
        rels = np.array(rels, dtype=np.int64)
        print('Finished. Read {} {} triples.'.format(len(heads), mode))
        return (heads, rels, tails)
def main():
    """Entry point: parse CLI args, expand --config as a glob of experiment
    setups, apply dotted-path overrides from unknown CLI args to each config,
    and run every experiment in turn.
    """
    with _utils.tqdm_stdout() as orig_stdout:
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', '--config', type=str, required=True)
        parser.add_argument('-H', '--visdom-host', type=str, required=False)
        parser.add_argument('-P', '--visdom-port', type=int, required=False)
        parser.add_argument('-E', '--visdom-env-path', type=str, required=False)
        parser.add_argument('-b', '--batch-train', type=int, required=False)
        parser.add_argument('-B', '--batch-test', type=int, required=False)
        parser.add_argument('-w', '--workers-train', type=int, required=False)
        parser.add_argument('-W', '--workers-test', type=int, required=False)
        parser.add_argument('-e', '--epochs', type=int, required=False)
        parser.add_argument('-L', '--log-interval', type=int, required=False)
        parser.add_argument('-M', '--saved-models-path', type=str, required=False)
        parser.add_argument('-R', '--random-seed', type=int, required=False)
        parser.add_argument('-s', '--suffix', type=str, required=False)
        parser.add_argument('-S', '--skip-train-val', action='store_true', default=False)
        (args, unknown_args) = parser.parse_known_args()
        if (args.batch_test is None):
            # Default the evaluation batch size to the training batch size.
            args.batch_test = args.batch_train
        if (args.random_seed is not None):
            # Fold the seed into the run suffix and seed every RNG for
            # reproducibility (deterministic cuDNN, no autotuning).
            args.suffix = '{}r-{}'.format(('{}_'.format(args.suffix) if (args.suffix is not None) else ''), args.random_seed)
            np.random.seed(args.random_seed)
            torch.random.manual_seed(args.random_seed)
            if torch.cuda.is_available():
                torch.cuda.manual_seed(args.random_seed)
            torch.backends.cudnn.deterministic = True
            torch.backends.cudnn.benchmark = False
        # --config may be a glob pattern matching several experiment setups.
        configs_found = list(sorted(glob.glob(os.path.expanduser(args.config))))
        prog_bar_exps = tqdm.tqdm(configs_found, desc='Experiments', unit='setup', file=orig_stdout, dynamic_ncols=True)
        for config_path in prog_bar_exps:
            config = json.load(open(config_path))
            if unknown_args:
                # Unrecognized '--dotted.path value' args override matching
                # entries of the loaded config.
                tqdm.tqdm.write('\nParsing additional arguments...')
                args_not_found = list()
                for arg in unknown_args:
                    if arg.startswith('--'):
                        keys = arg.strip('-').split('.')
                        # Walk the dotted path to see whether it exists.
                        section = config
                        found = True
                        for key in keys:
                            if (key in section):
                                section = section[key]
                            else:
                                found = False
                                break
                        if found:
                            override_parser = argparse.ArgumentParser()
                            section_nargs = None
                            # Infer the argparse type from the current value.
                            section_type = (type(section) if (section is not None) else str)
                            if (section_type is bool):
                                if (section_type is bool):  # NOTE(review): redundant duplicate check, kept as-is
                                    def infer_bool(x: str) -> bool:
                                        # Anything but these strings parses as True.
                                        return (x.lower() not in ('0', 'false', 'no'))
                                    section_type = infer_bool
                            if (isinstance(section, Iterable) and (section_type is not str)):
                                # List-valued settings accept one or more values.
                                section_nargs = '+'
                                section_type = {type(value) for value in section}
                                if (len(section_type) == 1):
                                    section_type = section_type.pop()
                                else:
                                    # Mixed element types: fall back to strings.
                                    section_type = str
                            override_parser.add_argument(arg, nargs=section_nargs, type=section_type)
                            (overridden_args, _) = override_parser.parse_known_args(unknown_args)
                            overridden_args = vars(overridden_args)
                            overridden_key = arg.strip('-')
                            overriding_value = overridden_args[overridden_key]
                            # Walk down to the parent section and replace the leaf.
                            section = config
                            old_value = None
                            for (i, key) in enumerate(keys, 1):
                                if (i == len(keys)):
                                    old_value = section[key]
                                    section[key] = overriding_value
                                else:
                                    section = section[key]
                            tqdm.tqdm.write(colored(f'Overriding "{overridden_key}": {old_value} -> {overriding_value}', 'magenta'))
                        else:
                            args_not_found.append(arg)
                if args_not_found:
                    tqdm.tqdm.write(colored('\nThere are unrecognized arguments to override: {}'.format(', '.join(args_not_found)), 'red'))
            config = defaultdict(None, config)
            experiment_name = config['Setup']['name']
            # arg_selector picks: CLI value, else config value, else module default.
            visdom_host = _utils.arg_selector(args.visdom_host, config['Visdom']['host'], VISDOM_HOST)
            visdom_port = int(_utils.arg_selector(args.visdom_port, config['Visdom']['port'], VISDOM_PORT))
            visdom_env_path = _utils.arg_selector(args.visdom_env_path, config['Visdom']['env_path'], VISDOM_ENV_PATH)
            batch_train = int(_utils.arg_selector(args.batch_train, config['Setup']['batch_train'], BATCH_TRAIN))
            batch_test = int(_utils.arg_selector(args.batch_test, config['Setup']['batch_test'], BATCH_TEST))
            workers_train = _utils.arg_selector(args.workers_train, config['Setup']['workers_train'], WORKERS_TRAIN)
            workers_test = _utils.arg_selector(args.workers_test, config['Setup']['workers_test'], WORKERS_TEST)
            epochs = _utils.arg_selector(args.epochs, config['Setup']['epochs'], EPOCHS)
            log_interval = _utils.arg_selector(args.log_interval, config['Setup']['log_interval'], LOG_INTERVAL)
            saved_models_path = _utils.arg_selector(args.saved_models_path, config['Setup']['saved_models_path'], SAVED_MODELS_PATH)
            model_class = config['Model']['class']
            model_args = config['Model']['args']
            optimizer_class = config['Optimizer']['class']
            optimizer_args = config['Optimizer']['args']
            if ('Scheduler' in config):
                scheduler_class = config['Scheduler']['class']
                scheduler_args = config['Scheduler']['args']
            else:
                scheduler_class = None
                scheduler_args = None
            dataset_class = config['Dataset']['class']
            dataset_args = config['Dataset']['args']
            transforms = config['Transforms']
            performance_metrics = config['Metrics']
            tqdm.tqdm.write(f'''
Starting experiment "{experiment_name}"
''')
            run(experiment_name=experiment_name, visdom_host=visdom_host, visdom_port=visdom_port, visdom_env_path=visdom_env_path, model_class=model_class, model_args=model_args, optimizer_class=optimizer_class, optimizer_args=optimizer_args, dataset_class=dataset_class, dataset_args=dataset_args, batch_train=batch_train, batch_test=batch_test, workers_train=workers_train, workers_test=workers_test, transforms=transforms, epochs=epochs, log_interval=log_interval, saved_models_path=saved_models_path, performance_metrics=performance_metrics, scheduler_class=scheduler_class, scheduler_args=scheduler_args, model_suffix=config['Setup']['suffix'], setup_suffix=args.suffix, orig_stdout=orig_stdout, skip_train_val=args.skip_train_val)
        prog_bar_exps.close()
        tqdm.tqdm.write('\n')
@pytest.mark.parametrize('observation_shape', [(100,)])
@pytest.mark.parametrize('action_size', [2])
@pytest.mark.parametrize('n_episodes', [100])
@pytest.mark.parametrize('episode_length', [10])
def test_initial_state_value_estimation_scorer(observation_shape: Sequence[int], action_size: int, n_episodes: int, episode_length: int) -> None:
    """The evaluator score must equal the mean predicted value of each episode's first observation.

    BUG FIX: the four decorator lines had lost their ``@pytest.mark`` prefix
    (``.parametrize(...)`` at module level is a SyntaxError); restored.
    """
    # Shared linear map used both to synthesize actions and by the dummy algo.
    A = np.random.random((*observation_shape, action_size))
    episodes = []
    for _ in range(n_episodes):
        observations = np.random.random((episode_length, *observation_shape))
        actions = np.matmul(observations, A).astype('f4')
        rewards = np.random.random((episode_length, 1)).astype('f4')
        episode = Episode(observations.astype('f4'), actions, rewards, False)
        episodes.append(episode)
    algo = DummyAlgo(A, 0.0)
    # Reference computation: value of the greedy action at each initial state.
    total_values = []
    for episode in episodes:
        observation = episode.observations[0].reshape(1, -1)
        policy_actions = algo.predict(observation)
        values = algo.predict_value(observation, policy_actions)
        total_values.append(values)
    score = InitialStateValueEstimationEvaluator()(algo, _create_replay_buffer(episodes))
    assert np.allclose(score, np.mean(total_values))
def left():
    """Tap the 'left' key while holding the 'forward' key.

    NOTE(review): Z/Q/D look like AZERTY-layout key constants
    (Z=forward, Q=left, D=right) for the PressKey/ReleaseKey helpers —
    confirm against where those constants are defined.
    """
    PressKey(Z)
    PressKey(Q)
    # Release the opposite-direction key so left/right are never held together.
    ReleaseKey(D)
    # t_time is a module-level duration (seconds) controlling how long the tap lasts.
    time.sleep(t_time)
    ReleaseKey(Q)
class CopyCheckTester(unittest.TestCase):
    """Tests for the `check_copies` utility (copy-consistency of `# Copied from` blocks).

    Works inside a throwaway copy of the transformers tree so that
    overwrite tests never touch the real sources.
    """

    def setUp(self):
        # Redirect check_copies at a temp tree containing only modeling_bert.py.
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, 'models/bert/'))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(os.path.join(git_repo_path, 'src/transformers/models/bert/modeling_bert.py'), os.path.join(self.transformer_dir, 'models/bert/modeling_bert.py'))

    def tearDown(self):
        # Restore the real path and clean up the temp tree.
        check_copies.TRANSFORMER_PATH = 'src/transformers'
        shutil.rmtree(self.transformer_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        """Write `class_code` under `comment` and run the copy check.

        When `overwrite_result` is None the code is expected to already be
        consistent; otherwise the checker is run with overwrite=True and the
        rewritten file must match `overwrite_result`.
        """
        mode = black.FileMode([black.TargetVersion.PY35], line_length=119)
        code = comment + f'\nclass {class_name}(nn.Module):\n' + class_code
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, 'new_code.py')
        with open(fname, 'w', newline='\n') as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            # BUG FIX: the original used `self.assertTrue(f.read(), expected)`,
            # which treats `expected` as the failure *message* and passes for any
            # non-empty file. Compare properly, against the black-formatted text
            # (the checker writes formatted code back).
            expected = comment + f'\nclass {class_name}(nn.Module):\n' + overwrite_result
            expected = black.format_str(expected, mode=mode)
            check_copies.is_copy_consistent(fname, overwrite=True)
            with open(fname, 'r') as f:
                self.assertEqual(f.read(), expected)

    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers('models.bert.modeling_bert.BertLMPredictionHead')
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency (with and without trailing newline).
        self.check_copy_consistency('# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead', 'BertLMPredictionHead', (REFERENCE_CODE + '\n'))
        self.check_copy_consistency('# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead', 'BertLMPredictionHead', REFERENCE_CODE)
        # Copy consistency with a name replacement.
        self.check_copy_consistency('# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel', 'TestModelLMPredictionHead', re.sub('Bert', 'TestModel', REFERENCE_CODE))
        long_class_name = 'TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
        self.check_copy_consistency(f'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}', f'{long_class_name}LMPredictionHead', re.sub('Bert', long_class_name, REFERENCE_CODE))
        # Inconsistent copy: overwrite mode must restore the replaced version.
        self.check_copy_consistency('# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel', 'TestModelLMPredictionHead', REFERENCE_CODE, overwrite_result=re.sub('Bert', 'TestModel', REFERENCE_CODE))

    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES['README_zh-hans.md']
        md_list = '1. **[ALBERT]( (from Google Research and the Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for Self-supervised Learning of Language Representations]( by Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1. **[DistilBERT]( (from HuggingFace), released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter]( by Victor Sanh, Lysandre Debut and Thomas Wolf. The same method has been applied to compress GPT2 into [DistilGPT2]( RoBERTa into [DistilRoBERTa]( Multilingual BERT into [DistilmBERT]( and a German version of DistilBERT.\n1. **[ELECTRA]( (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders as discriminators rather than generators]( by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning.'
        localized_md_list = '1. **[ALBERT]( ( Google Research and the Toyota Technological Institute at Chicago) [ALBERT: A Lite BERT for Self-supervised Learning of Language Representations]( Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut \n'
        converted_md_list_sample = '1. **[ALBERT]( ( Google Research and the Toyota Technological Institute at Chicago) [ALBERT: A Lite BERT for Self-supervised Learning of Language Representations]( Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut \n1. **[DistilBERT]( ( HuggingFace) [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter]( Victor Sanh, Lysandre Debut and Thomas Wolf The same method has been applied to compress GPT2 into [DistilGPT2]( RoBERTa into [DistilRoBERTa]( Multilingual BERT into [DistilmBERT]( and a German version of DistilBERT.\n1. **[ELECTRA]( ( Google Research/Stanford University) [ELECTRA: Pre-training text encoders as discriminators rather than generators]( Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning \n'
        # Localized list is missing models: conversion should add them.
        (num_models_equal, converted_md_list) = check_copies.convert_to_localized_md(md_list, localized_md_list, localized_readme['format_model_list'])
        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)
        # Converting the already-converted list is a fixed point.
        (num_models_equal, converted_md_list) = check_copies.convert_to_localized_md(md_list, converted_md_list, localized_readme['format_model_list'])
        self.assertTrue(num_models_equal)
        link_changed_md_list = '1. **[ALBERT]( (from Google Research and the Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for Self-supervised Learning of Language Representations]( by Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'
        link_unchanged_md_list = '1. **[ALBERT]( ( Google Research and the Toyota Technological Institute at Chicago) [ALBERT: A Lite BERT for Self-supervised Learning of Language Representations]( Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut \n'
        converted_md_list_sample = '1. **[ALBERT]( ( Google Research and the Toyota Technological Institute at Chicago) [ALBERT: A Lite BERT for Self-supervised Learning of Language Representations]( Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut \n'
        (num_models_equal, converted_md_list) = check_copies.convert_to_localized_md(link_changed_md_list, link_unchanged_md_list, localized_readme['format_model_list'])
        self.assertEqual(converted_md_list, converted_md_list_sample)
def test_nest_cf_simple_if_elif():
    """Nesting SDFG control flow must preserve the semantics of an if/elif/else chain.

    BUG FIX: the inner function is converted with `.to_sdfg()`, which requires
    the `@dace.program` decorator; the decorator had been lost in this copy.
    """
    @dace.program
    def simple_if_elif(i: dace.int64):
        if i < 2:
            return 0
        elif i < 4:
            return 1
        elif i < 6:
            return 2
        elif i < 8:
            return 3
        else:
            return 4

    sdfg = simple_if_elif.to_sdfg()
    nest_sdfg_control_flow(sdfg)
    # One probe per branch of the chain.
    assert sdfg(0)[0] == 0
    assert sdfg(2)[0] == 1
    assert sdfg(4)[0] == 2
    assert sdfg(7)[0] == 3
    assert sdfg(15)[0] == 4
@pytest.mark.parametrize('synthesizer', SYNTHESIZERS)
def test_sampling_reset_sampling(synthesizer):
    """`reset_sampling()` must make sampling reproducible: two 10-row draws match exactly.

    BUG FIX: the decorator line had lost its ``@pytest.mark`` prefix
    (``.parametrize(...)`` at module level is a SyntaxError); restored.
    """
    metadata = SingleTableMetadata.load_from_dict({'METADATA_SPEC_VERSION': 'SINGLE_TABLE_V1', 'columns': {'column1': {'sdtype': 'numerical'}, 'column2': {'sdtype': 'address'}, 'column3': {'sdtype': 'email'}, 'column4': {'sdtype': 'ssn', 'pii': True}}})
    data = pd.DataFrame({'column1': list(range(100)), 'column2': [str(i) for i in range(100)], 'column3': [str(i) for i in range(100)], 'column4': [str(i) for i in range(100)]})
    # Neural synthesizers are rebuilt on CPU so the test does not require CUDA.
    if isinstance(synthesizer, (CTGANSynthesizer, TVAESynthesizer)):
        synthesizer = synthesizer.__class__(metadata, cuda=False)
    else:
        synthesizer = synthesizer.__class__(metadata)
    synthesizer.fit(data)
    sampled1 = synthesizer.sample(10)
    synthesizer.reset_sampling()
    sampled2 = synthesizer.sample(10)
    pd.testing.assert_frame_equal(sampled1, sampled2)
def _inception_v3(*args, **kwargs):
    """Wrap `torchvision.models.inception_v3`, skipping weight init on torchvision >= 0.6.

    From 0.6 onwards the constructor re-initializes weights unless
    `init_weights=False`, which is wasteful when pretrained weights are loaded.
    """
    try:
        version = tuple(int(part) for part in torchvision.__version__.split('.')[:2])
    except ValueError:
        # Version string is not purely numeric (e.g. a dev build): assume old.
        version = (0,)
    if version >= (0, 6):
        kwargs['init_weights'] = False
    return torchvision.models.inception_v3(*args, **kwargs)
class Request(RequestHooksMixin):
    """A user-created HTTP request, not yet prepared for sending.

    Stores method/url/headers/body/auth/cookies exactly as supplied;
    `prepare()` converts it into a `PreparedRequest` ready for transmission.
    """

    def __init__(self, method=None, url=None, headers=None, files=None, data=None, params=None, auth=None, cookies=None, hooks=None, json=None):
        # Normalize missing collection arguments to empty containers.
        if data is None:
            data = []
        if files is None:
            files = []
        if headers is None:
            headers = {}
        if params is None:
            params = {}
        if hooks is None:
            hooks = {}
        # Start from the default hook table, then register user-supplied hooks.
        self.hooks = default_hooks()
        for event, hook in list(hooks.items()):
            self.register_hook(event=event, hook=hook)
        self.method = method
        self.url = url
        self.headers = headers
        self.files = files
        self.data = data
        self.json = json
        self.params = params
        self.auth = auth
        self.cookies = cookies

    def __repr__(self):
        return f'<Request [{self.method}]>'

    def prepare(self):
        """Build and return a `PreparedRequest` from this request's fields."""
        prepared = PreparedRequest()
        prepared.prepare(method=self.method, url=self.url, headers=self.headers, files=self.files, data=self.data, json=self.json, params=self.params, auth=self.auth, cookies=self.cookies, hooks=self.hooks)
        return prepared
class SpeakerVerifi_train(Dataset):
    """Training dataset for speaker verification.

    Scans one or more wav roots (`file_path`, keyed by `key_list`), measures
    each file's VAD-trimmed length once and caches it under `.wav_lengths/`,
    keeps only utterances whose length exceeds `vad_config['min_sec']`, and
    yields (waveform, utterance_id, speaker_label) tuples.
    """
    def __init__(self, vad_config, key_list, file_path, meta_data, max_timestep=None, n_jobs=12):
        # NOTE(review): `meta_data` is accepted but never used in this class.
        self.roots = file_path
        self.root_key = key_list
        self.max_timestep = max_timestep
        self.vad_c = vad_config
        self.dataset = []
        self.all_speakers = []
        for index in range(len(self.root_key)):
            # Per-root cache of (path tags, trimmed lengths) next to this source file.
            cache_path = ((Path(os.path.dirname(__file__)) / '.wav_lengths') / f'{self.root_key[index]}_length.pt')
            cache_path.parent.mkdir(exist_ok=True)
            root = Path(self.roots[index])
            if (not cache_path.is_file()):
                # First run for this root: measure every wav's trimmed length in parallel.
                def trimmed_length(path):
                    (wav_sample, _) = apply_effects_file(path, EFFECTS)
                    wav_sample = wav_sample.squeeze(0)
                    length = wav_sample.shape[0]
                    return length
                wav_paths = find_files(root)
                wav_lengths = Parallel(n_jobs=n_jobs)((delayed(trimmed_length)(path) for path in tqdm.tqdm(wav_paths, desc='Preprocessing')))
                # Keep only the last three path components so the cache is root-relative.
                wav_tags = [Path(path).parts[(- 3):] for path in wav_paths]
                torch.save([wav_tags, wav_lengths], str(cache_path))
            else:
                # Cache hit: restore tags/lengths and rebuild absolute paths.
                (wav_tags, wav_lengths) = torch.load(str(cache_path))
                wav_paths = [root.joinpath(*tag) for tag in wav_tags]
            # Every directory directly under the root is treated as one speaker.
            speaker_dirs = [f.stem for f in root.iterdir() if f.is_dir()]
            self.all_speakers.extend(speaker_dirs)
            for (path, length) in zip(wav_paths, wav_lengths):
                # NOTE(review): `length` is in samples while the key is named
                # 'min_sec' — confirm the unit convention with the config.
                if (length > self.vad_c['min_sec']):
                    self.dataset.append(path)
        # Sort so speaker label indices are deterministic across runs.
        self.all_speakers.sort()
        self.speaker_num = len(self.all_speakers)
    def __len__(self):
        return len(self.dataset)
    def __getitem__(self, idx):
        # Load + trim the wav, then optionally take a random crop of max_timestep samples.
        path = self.dataset[idx]
        (wav, _) = apply_effects_file(str(path), EFFECTS)
        wav = wav.squeeze(0)
        length = wav.shape[0]
        if (self.max_timestep != None):
            if (length > self.max_timestep):
                start = random.randint(0, int((length - self.max_timestep)))
                wav = wav[start:(start + self.max_timestep)]
        # tags = last three path parts; tags[0] is presumably the speaker
        # directory name — TODO confirm against the corpus layout.
        tags = Path(path).parts[(- 3):]
        utterance_id = '-'.join(tags).replace('.wav', '')
        label = self.all_speakers.index(tags[0])
        return (wav.numpy(), utterance_id, label)
    def collate_fn(self, samples):
        # Transpose a batch of (wav, id, label) triples into three parallel tuples.
        return zip(*samples)
def def_API(name, result, params):
    """Register API function `name` and emit its binding/log/replay glue code.

    Generates (a) the Python binding and .NET registration, (b) a logging
    shim written to the module-global `log_h`/`log_c` streams that records
    every argument of a call, and (c) a replay-dispatcher case written to
    `exe_c` that re-executes the call from a recorded trace.  `result` and
    each entry of `params` are type/parameter descriptors interpreted by the
    helper predicates (is_obj, param_kind, param_type, ...).  Assigns this
    API the current `next_id` and advances it.
    """
    global API2Id, next_id
    global log_h, log_c
    mk_py_binding(name, result, params)
    reg_dotnet(name, result, params)
    API2Id[next_id] = name
    mk_log_header(log_h, name, params)
    log_h.write(';\n')
    mk_log_header(log_c, name, params)
    log_c.write(' {\n R();\n')
    mk_exec_header(exe_c, name)
    exe_c.write(' {\n')
    i = 0
    exe_c.write(' ')
    if is_obj(result):
        exe_c.write(('%s result = ' % type2str(result)))
    exe_c.write(('%s(\n ' % name))
    # One pass over the parameters: for each, emit the logging statement
    # (into log_c) and the matching replay-side argument expression (into exe_c).
    for p in params:
        kind = param_kind(p)
        ty = param_type(p)
        if (i > 0):
            exe_c.write(',\n ')
        if (kind == IN):
            # Scalar input: log its value, replay reads it back from the trace.
            if is_obj(ty):
                log_c.write((' P(a%s);\n' % i))
                exe_c.write(('reinterpret_cast<%s>(in.get_obj(%s))' % (param2str(p), i)))
            elif (ty == STRING):
                log_c.write((' S(a%s);\n' % i))
                exe_c.write(('in.get_str(%s)' % i))
            elif (ty == SYMBOL):
                log_c.write((' Sy(a%s);\n' % i))
                exe_c.write(('in.get_symbol(%s)' % i))
            elif (ty == UINT):
                log_c.write((' U(a%s);\n' % i))
                exe_c.write(('in.get_uint(%s)' % i))
            elif (ty == UINT64):
                log_c.write((' U(a%s);\n' % i))
                exe_c.write(('in.get_uint64(%s)' % i))
            elif (ty == INT):
                log_c.write((' I(a%s);\n' % i))
                exe_c.write(('in.get_int(%s)' % i))
            elif (ty == INT64):
                log_c.write((' I(a%s);\n' % i))
                exe_c.write(('in.get_int64(%s)' % i))
            elif (ty == DOUBLE):
                log_c.write((' D(a%s);\n' % i))
                exe_c.write(('in.get_double(%s)' % i))
            elif (ty == FLOAT):
                log_c.write((' D(a%s);\n' % i))
                exe_c.write(('in.get_float(%s)' % i))
            elif (ty == BOOL):
                log_c.write((' I(a%s);\n' % i))
                exe_c.write(('in.get_bool(%s)' % i))
            elif (ty == VOID_PTR):
                log_c.write(' P(0);\n')
                exe_c.write(('in.get_obj_addr(%s)' % i))
            elif ((ty == PRINT_MODE) or (ty == ERROR_CODE)):
                log_c.write((' U(static_cast<unsigned>(a%s));\n' % i))
                exe_c.write(('static_cast<%s>(in.get_uint(%s))' % (type2str(ty), i)))
            else:
                error(('unsupported parameter for %s, %s' % (name, p)))
        elif (kind == INOUT):
            error(('unsupported parameter for %s, %s' % (name, p)))
        elif (kind == OUT):
            # Output parameter: log a placeholder, replay passes an address.
            if is_obj(ty):
                log_c.write(' P(0);\n')
                exe_c.write(('reinterpret_cast<%s>(in.get_obj_addr(%s))' % (param2str(p), i)))
            elif (ty == STRING):
                log_c.write(' S("");\n')
                exe_c.write(('in.get_str_addr(%s)' % i))
            elif (ty == UINT):
                log_c.write(' U(0);\n')
                exe_c.write(('in.get_uint_addr(%s)' % i))
            elif (ty == UINT64):
                log_c.write(' U(0);\n')
                exe_c.write(('in.get_uint64_addr(%s)' % i))
            elif (ty == INT):
                log_c.write(' I(0);\n')
                exe_c.write(('in.get_int_addr(%s)' % i))
            elif (ty == INT64):
                log_c.write(' I(0);\n')
                exe_c.write(('in.get_int64_addr(%s)' % i))
            elif (ty == VOID_PTR):
                log_c.write(' P(0);\n')
                exe_c.write(('in.get_obj_addr(%s)' % i))
            else:
                error(('unsupported parameter for %s, %s' % (name, p)))
        elif ((kind == IN_ARRAY) or (kind == INOUT_ARRAY)):
            # Input array: log each element, then the array marker; the
            # capacity lives in another parameter (index sz).
            sz = param_array_capacity_pos(p)
            log_c.write((' for (unsigned i = 0; i < a%s; i++) { ' % sz))
            if is_obj(ty):
                log_c.write(('P(a%s[i]);' % i))
                log_c.write(' }\n')
                log_c.write((' Ap(a%s);\n' % sz))
                exe_c.write(('reinterpret_cast<%s*>(in.get_obj_array(%s))' % (type2str(ty), i)))
            elif (ty == SYMBOL):
                log_c.write(('Sy(a%s[i]);' % i))
                log_c.write(' }\n')
                log_c.write((' Asy(a%s);\n' % sz))
                exe_c.write(('in.get_symbol_array(%s)' % i))
            elif (ty == UINT):
                log_c.write(('U(a%s[i]);' % i))
                log_c.write(' }\n')
                log_c.write((' Au(a%s);\n' % sz))
                exe_c.write(('in.get_uint_array(%s)' % i))
            elif (ty == INT):
                log_c.write(('I(a%s[i]);' % i))
                log_c.write(' }\n')
                log_c.write((' Ai(a%s);\n' % sz))
                exe_c.write(('in.get_int_array(%s)' % i))
            elif (ty == BOOL):
                log_c.write(('U(a%s[i]);' % i))
                log_c.write(' }\n')
                log_c.write((' Au(a%s);\n' % sz))
                exe_c.write(('in.get_bool_array(%s)' % i))
            else:
                error(('unsupported parameter for %s, %s, %s' % (ty, name, p)))
        elif (kind == OUT_ARRAY):
            # Output array: log placeholders sized by the capacity parameter,
            # dereferencing it when the capacity itself is an output.
            sz = param_array_capacity_pos(p)
            sz_p = params[sz]
            sz_p_k = param_kind(sz_p)
            tstr = type2str(ty)
            if ((sz_p_k == OUT) or (sz_p_k == INOUT)):
                sz_e = ('(*a%s)' % sz)
            else:
                sz_e = ('a%s' % sz)
            log_c.write((' for (unsigned i = 0; i < %s; i++) { ' % sz_e))
            if is_obj(ty):
                log_c.write('P(0);')
                log_c.write(' }\n')
                log_c.write((' Ap(%s);\n' % sz_e))
                exe_c.write(('reinterpret_cast<%s*>(in.get_obj_array(%s))' % (tstr, i)))
            elif (ty == UINT):
                log_c.write('U(0);')
                log_c.write(' }\n')
                log_c.write((' Au(%s);\n' % sz_e))
                exe_c.write(('in.get_uint_array(%s)' % i))
            else:
                error(('unsupported parameter for %s, %s' % (name, p)))
        elif (kind == OUT_MANAGED_ARRAY):
            # Managed output array: like OUT_ARRAY but passed as pointer-to-array.
            sz = param_array_size_pos(p)
            sz_p = params[sz]
            sz_p_k = param_kind(sz_p)
            tstr = type2str(ty)
            if ((sz_p_k == OUT) or (sz_p_k == INOUT)):
                sz_e = ('(*a%s)' % sz)
            else:
                sz_e = ('a%s' % sz)
            log_c.write((' for (unsigned i = 0; i < %s; i++) { ' % sz_e))
            log_c.write('P(0);')
            log_c.write(' }\n')
            log_c.write((' Ap(%s);\n' % sz_e))
            exe_c.write(('reinterpret_cast<%s**>(in.get_obj_array(%s))' % (tstr, i)))
        else:
            error(('unsupported parameter for %s, %s' % (name, p)))
        i = (i + 1)
    # Close the generated call: record the command id, store the result, and
    # hook the error handler for context-creating APIs.
    log_c.write((' C(%s);\n' % next_id))
    exe_c.write(');\n')
    if is_obj(result):
        exe_c.write(' in.store_result(result);\n')
        if ((name == 'Z3_mk_context') or (name == 'Z3_mk_context_rc')):
            exe_c.write(' Z3_set_error_handler(result, Z3_replayer_error_handler);')
    log_c.write('}\n')
    exe_c.write('}\n')
    mk_log_macro(log_h, name, params)
    if log_result(result, params):
        mk_log_result_macro(log_h, name, result, params)
    next_id = (next_id + 1)
def test_olsq_swap_transition():
    """The transition-based OLSQ solver with the swap objective should report exactly one swap."""
    solver = OLSQ_cirq('swap', 'transition')
    solver.setdevicegraph(device_graph)
    solver.setprogram(circuit)
    result = solver.solve()
    assert result[2] == 1
class JukeboxPreTrainedModel(metaclass=DummyObject):
    """Auto-generated placeholder used when torch is not installed.

    `DummyObject` plus `requires_backends` make any instantiation raise an
    informative error pointing at the missing backend.
    """
    # Backends that must be installed for the real class to be importable.
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class WEBVIDDataModule(BaseDataModule):
    """Data module wiring the WEBVID dataset into the BaseDataModule machinery."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
    def dataset_cls(self):
        # NOTE(review): in this pattern these accessors are often @property
        # methods; here they are plain methods and must be *called*. Confirm
        # against how BaseDataModule consumes them.
        return WEBVIDDataset
    def dataset_cls_no_false(self):
        # Same dataset class is used for the "no false" variant.
        return WEBVIDDataset
    def dataset_name(self):
        return 'webvid'
@pytest.mark.parametrize('synthesizer', SYNTHESIZERS)
def test_sampling(synthesizer):
    """Two consecutive draws from a fitted synthesizer must differ.

    BUG FIX: the decorator line had lost its ``@pytest.mark`` prefix
    (``.parametrize(...)`` at module level is a SyntaxError); restored.
    """
    sample_1 = synthesizer.sample(10)
    sample_2 = synthesizer.sample(10)
    # assert_frame_equal raising AssertionError proves the samples are not identical.
    with pytest.raises(AssertionError):
        pd.testing.assert_frame_equal(sample_1, sample_2)
def which(thefile):
    """Return the first executable named `thefile` on the PATH, or None.

    On Windows, also tries every PATHEXT extension appended to the name.
    Directories are never returned even if they match.
    """
    for directory in os.environ.get('PATH', os.defpath).split(os.pathsep):
        candidate = os.path.join(directory, thefile)
        candidates = [candidate]
        if sys.platform == 'win32':
            extensions = os.environ.get('PATHEXT', '').split(os.pathsep)
            candidates.extend(candidate + ext for ext in extensions)
        for name in candidates:
            is_executable = os.access(name, os.F_OK | os.X_OK)
            if is_executable and not os.path.isdir(name):
                return name
    return None
def merge(src, tgt, hypos, log_probs, path):
    """Write aligned (source, target, hypotheses) records to `path`.

    Each record is: source line, target line, blank line, then one
    tab-indented `\\t<log_prob>\\t<hypothesis>` line per hypothesis,
    followed by a trailing blank line.
    """
    with open(path, 'w') as out:
        for source, target, hyp_list, lp_list in zip(src, tgt, hypos, log_probs):
            out.write(source + '\n')
            out.write(target + '\n')
            out.write('\n')
            for hyp, lp in zip(hyp_list, lp_list):
                out.write('\t%f\t%s\n' % (lp, hyp.strip()))
            out.write('\n')
class LstmFlatteningResult(nn.LSTM):
    """LSTM whose forward returns a flat (output, hidden, cell) triple.

    Flattening the nested `(output, (h_n, c_n))` result makes the module
    easier to trace/export.
    """
    def forward(self, input, *fargs, **fkwargs):
        out, (h_n, c_n) = nn.LSTM.forward(self, input, *fargs, **fkwargs)
        return out, h_n, c_n
def debug_logger(log_dir):
    """Create the 'train' logger: INFO to the console, DEBUG to `log_dir`/debug.txt.

    `log_dir` must be a path object exposing `joinpath`. The debug file is
    truncated (mode='w') on each call.
    """
    logger = getLogger('train')
    logger.setLevel(DEBUG)
    formatter = Formatter('%(asctime)s %(name)s %(lineno)d [%(levelname)s][%(funcName)s] %(message)s')
    console = StreamHandler()
    console.setLevel(INFO)
    console.setFormatter(formatter)
    file_handler = FileHandler(filename=log_dir.joinpath('debug.txt'), mode='w')
    file_handler.setLevel(DEBUG)
    file_handler.setFormatter(formatter)
    # Console first, then file — same order as handlers fire.
    for handler in (console, file_handler):
        logger.addHandler(handler)
    return logger
def progress_bar(iterator, log_format: Optional[str]=None, log_interval: int=100, log_file: Optional[str]=None, epoch: Optional[int]=None, prefix: Optional[str]=None, tensorboard_logdir: Optional[str]=None, default_log_format: str='tqdm', wandb_project: Optional[str]=None, wandb_run_name: Optional[str]=None, azureml_logging: Optional[bool]=False):
    """Wrap `iterator` in a progress bar, optionally layering on logging backends.

    Picks the bar implementation from `log_format` (falling back to
    `default_log_format`, and to 'simple' when stderr is not a TTY for the
    tqdm format), then stacks tensorboard / wandb / AzureML wrappers on top
    when requested.
    """
    if log_format is None:
        log_format = default_log_format
    if log_file is not None:
        logger.addHandler(logging.FileHandler(filename=log_file))
    # tqdm output is unreadable when redirected; degrade to the simple bar.
    if log_format == 'tqdm' and not sys.stderr.isatty():
        log_format = 'simple'
    builders = {
        'json': lambda: JsonProgressBar(iterator, epoch, prefix, log_interval),
        'none': lambda: NoopProgressBar(iterator, epoch, prefix),
        'simple': lambda: SimpleProgressBar(iterator, epoch, prefix, log_interval),
        'tqdm': lambda: TqdmProgressBar(iterator, epoch, prefix),
    }
    if log_format not in builders:
        raise ValueError('Unknown log format: {}'.format(log_format))
    bar = builders[log_format]()
    if tensorboard_logdir:
        try:
            # FB-internal path: only taken when palaas is importable.
            import palaas
            from .fb_tbmf_wrapper import FbTbmfWrapper
            bar = FbTbmfWrapper(bar, log_interval)
        except ImportError:
            bar = TensorboardProgressBarWrapper(bar, tensorboard_logdir)
    if wandb_project:
        bar = WandBProgressBarWrapper(bar, wandb_project, run_name=wandb_run_name)
    if azureml_logging:
        bar = AzureMLProgressBarWrapper(bar)
    return bar
def return_html(story_file):
    """Render one story's coreference clusters as an HTML string.

    Tokens and cluster spans come from the module-global `story_to_info`.
    A mention start inserts an opening tag (singleton clusters use their own
    tag) whose padding shrinks with nesting depth; a mention end appends the
    cluster id as a subscript plus the closing tag.  Relies on module
    globals: HTML_START, cluster_start_tag, singleton_start_tag, end_tag,
    largest_padding, padding_reduction.
    """
    story_name = path.basename(story_file)
    # NOTE(review): story_name is computed but never used below.
    html_string = (HTML_START + '<div style="line-height: 3">')
    (all_tokens, cluster_id_to_spans) = story_to_info[story_file]
    # Index mentions by start and end token positions.
    ment_start_dict = defaultdict(list)
    ment_end_dict = defaultdict(list)
    for (cluster_idx, ment_list) in cluster_id_to_spans.items():
        for (ment_start, ment_end) in ment_list:
            ment_start_dict[ment_start].append((ment_end, cluster_idx))
            ment_end_dict[ment_end].append((ment_start, cluster_idx))
    # Sort descending by the paired boundary so outer (longer) spans are handled first.
    for ment_start in ment_start_dict:
        ment_start_dict[ment_start] = sorted(ment_start_dict[ment_start], key=(lambda x: x[0]), reverse=True)
    for ment_end in ment_end_dict:
        ment_end_dict[ment_end] = sorted(ment_end_dict[ment_end], key=(lambda x: x[0]), reverse=True)
    active_clusters = 0
    for (token_idx, token) in enumerate(all_tokens):
        token_added = False
        if (token == '\n'):
            html_string += '<br/>\n'
            continue
        if (token_idx in ment_start_dict):
            for (_, cluster_idx) in ment_start_dict[token_idx]:
                prefix = cluster_start_tag
                if (len(cluster_id_to_spans[cluster_idx]) == 1):
                    prefix = singleton_start_tag
                # Deeper nesting gets less padding so inner spans stand out visually.
                html_string += prefix.format((largest_padding - (active_clusters * padding_reduction)))
                active_clusters += 1
            html_string += (token + ' ')
            token_added = True
        if (not token_added):
            html_string += (token + ' ')
        if (token_idx in ment_end_dict):
            for (_, cluster_idx) in ment_end_dict[token_idx]:
                html_string += (((('<sub>' + str(cluster_idx)) + '</sub>') + end_tag) + ' ')
                active_clusters -= 1
        assert (active_clusters >= 0)
    html_string += '</div></body></html>'
    return html_string
class DDIMSampler(object):
    """DDIM sampler driving a conditioned diffusion model.

    Holds the precomputed alpha/sigma schedules as attributes (via
    `register_buffer`) and runs the deterministic DDIM reverse process with
    optional classifier-free guidance (`uc_dict` + `guidance_scale`),
    inpainting masks, and per-step gated-attention alpha scheduling through
    `alpha_generator_func`/`set_alpha_scale`.

    BUG FIX: the three bare `_grad()` lines in this copy were garbled
    `@torch.no_grad()` decorators (a NameError as written); restored.
    """

    def __init__(self, diffusion, model, schedule='linear', alpha_generator_func=None, set_alpha_scale=None):
        super().__init__()
        self.diffusion = diffusion
        self.model = model
        # All schedule tensors are kept on the same device as the betas.
        self.device = diffusion.betas.device
        self.ddpm_num_timesteps = diffusion.num_timesteps
        self.schedule = schedule
        self.alpha_generator_func = alpha_generator_func
        self.set_alpha_scale = set_alpha_scale

    def register_buffer(self, name, attr):
        """Attach `attr` as an attribute, moving tensors to the sampler device."""
        if isinstance(attr, torch.Tensor):  # was `type(attr) == torch.Tensor`
            attr = attr.to(self.device)
        setattr(self, name, attr)

    def make_schedule(self, ddim_num_steps, ddim_discretize='uniform', ddim_eta=0.0):
        """Precompute the DDIM timestep subset and alpha/sigma tables for `ddim_num_steps`."""
        self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps, num_ddpm_timesteps=self.ddpm_num_timesteps, verbose=False)
        alphas_cumprod = self.diffusion.alphas_cumprod
        assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'
        to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.device)
        self.register_buffer('betas', to_torch(self.diffusion.betas))
        self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
        self.register_buffer('alphas_cumprod_prev', to_torch(self.diffusion.alphas_cumprod_prev))
        # Derived quantities used by q_sample / posterior computations.
        self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))
        self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1.0 - alphas_cumprod.cpu())))
        self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1.0 - alphas_cumprod.cpu())))
        self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1.0 / alphas_cumprod.cpu())))
        self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1.0 / alphas_cumprod.cpu() - 1)))
        ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(), ddim_timesteps=self.ddim_timesteps, eta=ddim_eta, verbose=False)
        self.register_buffer('ddim_sigmas', ddim_sigmas)
        self.register_buffer('ddim_alphas', ddim_alphas)
        self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)
        self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1.0 - ddim_alphas))
        sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt((1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (1 - self.alphas_cumprod / self.alphas_cumprod_prev))
        self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)

    @torch.no_grad()
    def sample(self, S, shape, input, uc_dict=None, guidance_scale=1, mask=None, x0=None):
        """Run `S` DDIM steps and return the final latent of `shape`."""
        self.make_schedule(ddim_num_steps=S)
        return self.ddim_sampling(shape, input, uc_dict, guidance_scale, mask=mask, x0=x0)

    @torch.no_grad()
    def ddim_sampling(self, shape, input, uc_dict=None, guidance_scale=1, mask=None, x0=None):
        """Iterate the reverse DDIM process; `mask`/`x0` enable inpainting."""
        b = shape[0]
        img = input['x']
        # BUG FIX: was `img == None`; use identity — `==` on a Tensor does not
        # test for None.
        if img is None:
            img = torch.randn(shape, device=self.device)
            input['x'] = img
        time_range = np.flip(self.ddim_timesteps)
        total_steps = self.ddim_timesteps.shape[0]
        iterator = time_range
        if self.alpha_generator_func is not None:
            alphas = self.alpha_generator_func(len(iterator))
        for i, step in enumerate(iterator):
            if self.alpha_generator_func is not None:
                # Rescale the model's gated-attention alphas for this step.
                self.set_alpha_scale(self.model, alphas[i])
            index = total_steps - i - 1
            input['timesteps'] = torch.full((b,), step, device=self.device, dtype=torch.long)
            if mask is not None:
                assert x0 is not None
                # Re-noise the known region to the current step and keep it fixed.
                img_orig = self.diffusion.q_sample(x0, input['timesteps'])
                img = img_orig * mask + (1.0 - mask) * img
                input['x'] = img
            img, pred_x0 = self.p_sample_ddim(input, index=index, uc_dict=uc_dict, guidance_scale=guidance_scale)
            input['x'] = img
        return img

    @torch.no_grad()
    def p_sample_ddim(self, input, index, uc_dict=None, guidance_scale=1):
        """One DDIM update with optional classifier-free guidance; returns (x_prev, pred_x0)."""
        e_t = self.model(input)
        if uc_dict is not None and guidance_scale != 1:
            # Second, unconditional pass; blend per the guidance scale.
            unconditional_input = dict(x=input['x'], timesteps=input['timesteps'], context=uc_dict['context'], inpainting_extra_input=input['inpainting_extra_input'], condition=uc_dict['condition'])
            e_t_uncond = self.model(unconditional_input)
            e_t = e_t_uncond + guidance_scale * (e_t - e_t_uncond)
        b = input['x'].shape[0]
        a_t = torch.full((b, 1, 1, 1), self.ddim_alphas[index], device=self.device)
        a_prev = torch.full((b, 1, 1, 1), self.ddim_alphas_prev[index], device=self.device)
        sigma_t = torch.full((b, 1, 1, 1), self.ddim_sigmas[index], device=self.device)
        sqrt_one_minus_at = torch.full((b, 1, 1, 1), self.ddim_sqrt_one_minus_alphas[index], device=self.device)
        # DDIM update (Song et al.): predicted x0, direction term, optional noise.
        pred_x0 = (input['x'] - sqrt_one_minus_at * e_t) / a_t.sqrt()
        dir_xt = (1.0 - a_prev - sigma_t ** 2).sqrt() * e_t
        noise = sigma_t * torch.randn_like(input['x'])
        x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
        return x_prev, pred_x0
class MLPReadout(nn.Module):
    """Single-linear readout head followed by the supplied output activation.

    NOTE: `self.act` (ReLU) is stored for parity with the original but is
    not applied in `forward`; only `out_act` is.
    """
    def __init__(self, in_dim, out_dim, act):
        super(MLPReadout, self).__init__()
        self.layer1 = nn.Linear(in_dim, out_dim)
        self.act = nn.ReLU()
        self.out_act = act

    def forward(self, x):
        hidden = self.layer1(x)
        return self.out_act(hidden)
def parse_args():
    """Build and parse the CLI for binding-site prediction (defaults shown in --help)."""
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    add = parser.add_argument
    # Required inputs/outputs.
    add('--dataset_file', '-d', required=True, help='dataset file with protein names')
    add('--protein_path', '-pp', required=True, help='directory of protein files')
    add('--model_path', '-mp', required=True, help='directory of models')
    add('--output', '-o', required=True, help='name of the output directory')
    # Model selection and numeric knobs.
    add('--model', '-m', choices=['orig', 'lds'], default='orig', help='select model')
    add('--f', type=int, default=10, help='parameter for the simplification of points mesh')
    add('--T', type=float, default=0.9, help='ligandability threshold')
    add('--batch', type=int, default=32, help='batch size')
    add('--voxel_size', type=float, default=1.0, help='size of voxel in angstrom')
    # Behaviour toggles.
    add('--protonate', action='store_true', help='whether to protonate or not the input protein')
    add('--expand', action='store_true', help='whether to expand on residue level the extracted binding sites')
    add('--discard_points', action='store_true', help='whether to output or not the computed surface points')
    add('--seed', type=int, default=None, help='random seed for KMeans clustering')
    return parser.parse_args()
def register_Ns3CallbackImpl__Void_Ns3Ptr__lt__const_ns3SpectrumValue__gt___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
    """Register bindings for ns3::CallbackImpl<void, Ptr<const SpectrumValue>, empty...>.

    Auto-generated PyBindGen helper: adds the default and copy constructors,
    the typeid accessors, and operator() (exposed to Python as __call__).
    """
    cls.add_constructor([])
    cls.add_constructor([param('ns3::CallbackImpl< void, ns3::Ptr< ns3::SpectrumValue const >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')])
    cls.add_method('DoGetTypeid', 'std::string', [], is_static=True)
    cls.add_method('GetTypeid', 'std::string', [], is_const=True, is_virtual=True)
    # operator() is pure virtual in C++ and mapped to Python's __call__.
    cls.add_method('operator()', 'void', [param('ns3::Ptr< ns3::SpectrumValue const >', 'arg0')], is_pure_virtual=True, is_virtual=True, custom_name=u'__call__')
    return
def test_meta_post_init(synthetic_slate_bandit_feedback: BanditFeedback) -> None:
    """__post_init__ must de-duplicate estimator names and validate feedback keys."""
    # Two estimators sharing the name 'sips': the later one wins.
    ope_ = SlateOffPolicyEvaluation(bandit_feedback=synthetic_slate_bandit_feedback, ope_estimators=[sips, sips2])
    assert ope_.ope_estimators_ == {'sips': sips2}, '__post_init__ returns a wrong value'
    # Distinct names are both kept.
    ope_ = SlateOffPolicyEvaluation(bandit_feedback=synthetic_slate_bandit_feedback, ope_estimators=[sips, sips3])
    assert ope_.ope_estimators_ == {'sips': sips, 'sips3': sips3}, '__post_init__ returns a wrong value'
    # Removing any non-empty subset of the required keys must raise.
    necessary_keys = ['slate_id', 'context', 'action', 'reward', 'position']
    for subset_size in range(1, len(necessary_keys) + 1):
        for deleted_keys in itertools.combinations(necessary_keys, subset_size):
            invalid_bandit_feedback_dict = {key: '_' for key in necessary_keys if key not in deleted_keys}
            with pytest.raises(RuntimeError, match='Missing key*'):
                _ = SlateOffPolicyEvaluation(bandit_feedback=invalid_bandit_feedback_dict, ope_estimators=[sips])
class DropPath(nn.Module):
    """Stochastic-depth wrapper: delegates to the functional `drop_path`.

    `drop_prob` is the probability of dropping the whole path; the effect is
    gated on `self.training` by the functional implementation.
    """
    def __init__(self, drop_prob=None):
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, x):
        return drop_path(x, self.drop_prob, self.training)
def get_absolute_path(p):
    """Expand a leading '~' in `p` and return its absolute, normalized form."""
    expanded = os.path.expanduser(p) if p.startswith('~') else p
    return os.path.abspath(expanded)
@unittest.skipIf(not have_sympy, 'SymPy not installed')
def test_unevaluated_expr():
    """Round-trip an UnevaluatedExpr between this library and SymPy.

    BUG FIX (best guess): the first line was a garbled skip decorator —
    `((not have_sympy), 'SymPy not installed')` is a no-op expression and the
    arguments match a skip-if-condition/reason pair; restored as
    `@unittest.skipIf`. Confirm the original decorator against upstream.
    """
    x = Symbol('x')
    e1 = sympy.UnevaluatedExpr(sympy.Symbol('x'))
    e2 = UnevaluatedExpr(x)
    assert sympify(e1) == e2
    assert e2._sympy_() == e1
def block_reduction_b(inputs, scope=None, reuse=None):
    """Inception-v4 style Reduction-B block (TF1/slim graph construction).

    Builds three branches — 1x1->3x3/2 conv, 1x1->1x7->7x1->3x3/2 conv, and a
    stride-2 max-pool — and concatenates them on the channel axis. Variable
    scope names are part of checkpoint compatibility and must not change.
    """
    # Default all inner ops to stride 1 / SAME padding; the stride-2 VALID
    # ops below override this to perform the spatial reduction.
    with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d], stride=1, padding='SAME'):
        with tf.variable_scope(scope, 'BlockReductionB', [inputs], reuse=reuse):
            with tf.variable_scope('Branch_0'):
                branch_0 = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1')
                branch_0 = slim.conv2d(branch_0, 192, [3, 3], stride=2, padding='VALID', scope='Conv2d_1a_3x3')
            with tf.variable_scope('Branch_1'):
                branch_1 = slim.conv2d(inputs, 256, [1, 1], scope='Conv2d_0a_1x1')
                branch_1 = slim.conv2d(branch_1, 256, [1, 7], scope='Conv2d_0b_1x7')
                branch_1 = slim.conv2d(branch_1, 320, [7, 1], scope='Conv2d_0c_7x1')
                branch_1 = slim.conv2d(branch_1, 320, [3, 3], stride=2, padding='VALID', scope='Conv2d_1a_3x3')
            with tf.variable_scope('Branch_2'):
                branch_2 = slim.max_pool2d(inputs, [3, 3], stride=2, padding='VALID', scope='MaxPool_1a_3x3')
            return tf.concat(axis=3, values=[branch_0, branch_1, branch_2])
def plot_topk_histogram(tag, array, k=10, class_names=None, figsize=None):
    """Bar-plot the top-k entries of a 1-D torch tensor and return the Figure.

    Bars are labelled with `class_names` (indices when None) and annotated
    with their value; the figure is titled with `tag`.
    """
    topk_values, topk_indices = torch.topk(array, k)
    fig = plt.Figure(figsize=figsize, facecolor='w', edgecolor='k')
    ax = fig.add_subplot(1, 1, 1)
    if class_names is None:
        labels = [str(i) for i in topk_indices]
    else:
        labels = [class_names[i] for i in topk_indices]
    tick_marks = np.arange(k)
    ax.bar(tick_marks, topk_values, 0.75, color='orange', tick_label=labels, edgecolor='w', linewidth=1)
    ax.set_xlabel('Candidates')
    ax.set_xticks(tick_marks)
    ax.set_xticklabels(labels, rotation=-45, ha='center')
    ax.xaxis.set_label_position('bottom')
    ax.xaxis.tick_bottom()
    y_ticks = np.linspace(0, 1, num=10)
    ax.set_ylabel('Frequency')
    ax.set_yticks(y_ticks)
    ax.set_yticklabels([format(v, '.1f') for v in y_ticks], ha='center')
    # Print each bar's value just above it.
    for pos, value in enumerate(topk_values.numpy()):
        ax.text(pos - 0.1, value + 0.03, format(value, '.2f'), color='orange', fontweight='bold')
    ax.set_title(tag)
    fig.set_tight_layout(True)
    return fig
class MethodsDict(CaseInsensitiveDict):
    """Case-insensitive lookup of handlers by method name.

    A miss re-raises KeyError with a message listing the available
    (upper-cased) method names, chained to the original error.
    """
    def __getitem__(self, item: Any) -> Any:
        try:
            return super().__getitem__(item)
        except KeyError as missing:
            available_methods = ', '.join(map(str.upper, self))
            raise KeyError(
                f'Method `{item}` not found. Available methods: {available_methods}'
            ) from missing
def _place_post_grad_agg_ops_hybrid(ps_device, var_op_to_agg_grad, var_op_to_apply_grad_op):
    """Assign devices to the ops between gradient aggregation and gradient apply.

    Ops on the paths from the aggregated gradients to the apply-gradient ops are
    pinned to the PS task of the variable(s) they serve; ops whose placement
    references span multiple tasks are marked SHARED and pinned to `ps_device`
    (task 0). NOTE(review): mutates op devices of the default graph in place.
    """
    def _find_agg_grad_descendant_ops(agg_grad_ops, apply_grad_ops):
        """Collect every op reachable downstream from agg_grad_ops, not
        expanding past apply_grad_ops."""
        agg_grad_descendant_ops = set()
        queue = []
        queue.extend(agg_grad_ops)
        while (len(queue) > 0):
            curr_op = queue.pop()
            if (curr_op in agg_grad_descendant_ops):
                continue
            agg_grad_descendant_ops.add(curr_op)
            if (curr_op in apply_grad_ops):
                continue
            curr_op_consumers = get_consumers(curr_op)
            queue.extend(curr_op_consumers)
        return agg_grad_descendant_ops
    # Sentinel task id meaning "referenced from more than one task".
    SHARED = (- 1)
    def _assign(op_to_task, agg_grad_ops, apply_grad_ops, apply_grad_ancestor_ops, ancestors_diff_descendants, is_parent_to_child):
        """BFS from one frontier toward the other, assigning each op the task
        of its placement references (inputs when walking parent->child,
        consumers otherwise); conflicting references yield SHARED."""
        queue = []
        stop = set()
        if is_parent_to_child:
            queue.extend(agg_grad_ops)
            stop.update(apply_grad_ops)
        else:
            queue.extend(apply_grad_ops)
            stop.update(agg_grad_ops)
        visited = set()
        while (len(queue) > 0):
            curr_op = queue.pop(0)
            if (curr_op in visited):
                continue
            visited.add(curr_op)
            # Already-assigned ops just propagate the walk (unless a stop op).
            if ((curr_op in op_to_task) and (curr_op not in stop)):
                if is_parent_to_child:
                    queue.extend([consumer for consumer in get_consumers(curr_op) if (consumer in apply_grad_ancestor_ops)])
                else:
                    queue.extend([input.op for input in curr_op.inputs])
                continue
            # Placement references: inputs (restricted to descendants of the
            # agg ops) when walking forward, relevant consumers when backward.
            if is_parent_to_child:
                placement_reference_ops = set([input.op for input in curr_op.inputs])
                placement_reference_ops = placement_reference_ops.difference(ancestors_diff_descendants)
            else:
                placement_reference_ops = set(get_consumers(curr_op))
                placement_reference_ops = placement_reference_ops.intersection(apply_grad_ancestor_ops)
            # Defer this op until every reference has been assigned a task.
            is_ready = True
            for ref_op in placement_reference_ops:
                if (ref_op not in op_to_task):
                    is_ready = False
                    break
            if is_ready:
                placement_reference_tasks = [op_to_task[ref_op] for ref_op in placement_reference_ops]
            else:
                queue.append(curr_op)
                continue
            unique_tasks = set(placement_reference_tasks)
            curr_op_task = None
            if (len(unique_tasks) == 0):
                raise RuntimeError(('Should have placement reference for operation %s' % curr_op.name))
            elif (len(unique_tasks) == 1):
                # All references agree: inherit their task.
                curr_op_task = unique_tasks.pop()
                op_to_task[curr_op] = curr_op_task
            else:
                # Mixed references: ignore SHARED if a single real task remains,
                # otherwise the op itself becomes SHARED.
                if (SHARED in unique_tasks):
                    unique_tasks.remove(SHARED)
                if (len(unique_tasks) == 1):
                    curr_op_task = unique_tasks.pop()
                    op_to_task[curr_op] = curr_op_task
                else:
                    assert (len(unique_tasks) > 1)
                    curr_op_task = SHARED
                    op_to_task[curr_op] = SHARED
                    parallax_log.debug(unique_tasks)
            if (curr_op_task != SHARED):
                parallax_log.debug(('post_grad_agg_op %s is assigned to %s task %d' % (curr_op.name, curr_op_task[0], curr_op_task[1])))
            if (curr_op_task == SHARED):
                # SHARED ops are pinned to task 0 of the PS device.
                curr_op_task = 0
                ps_device.task = curr_op_task
                if ((tf.DeviceSpec.from_string(curr_op.device).job != ps_device.job) or (tf.DeviceSpec.from_string(curr_op.device).task != ps_device.task)):
                    parallax_log.debug(('shared_op : %s - %s -> %s' % (curr_op.name, curr_op.device, ps_device.to_string())))
                    curr_op._set_device(ps_device)
                assert (curr_op.device == ps_device.to_string())
            else:
                d = tf.DeviceSpec(job=curr_op_task[0], task=curr_op_task[1])
                if ((tf.DeviceSpec.from_string(curr_op.device).job != d.job) or (tf.DeviceSpec.from_string(curr_op.device).task != d.task)):
                    parallax_log.debug(('local_op : %s - %s -> %s' % (curr_op.name, curr_op.device, d.to_string())))
                    curr_op._set_device(d)
                assert (curr_op.device == d.to_string())
            # Continue the walk past non-stop ops.
            if (curr_op not in stop):
                if is_parent_to_child:
                    queue.extend([consumer for consumer in get_consumers(curr_op) if (consumer in apply_grad_ancestor_ops)])
                else:
                    queue.extend([input.op for input in curr_op.inputs])
    # Seed op_to_task with the known placements: aggregated-gradient ops and
    # apply-gradient ops live on the device of their variable.
    op_to_task = {}
    agg_grad_ops = []
    for (var_op, agg_grad) in var_op_to_agg_grad.items():
        var_device = tf.DeviceSpec.from_string(var_op.device)
        if (agg_grad[0] != None):
            agg_grad_ops.append(agg_grad[0].op)
            op_to_task[agg_grad[0].op] = (var_device.job, var_device.task)
        agg_grad_ops.append(agg_grad[1].op)
        op_to_task[agg_grad[1].op] = (var_device.job, var_device.task)
    apply_grad_ops = []
    for (var_op, apply_grad_op) in var_op_to_apply_grad_op.items():
        var_device = tf.DeviceSpec.from_string(var_op.device)
        apply_grad_ops.append(apply_grad_op)
        apply_grad_op._set_device(var_device)
        op_to_task[apply_grad_op] = (var_device.job, var_device.task)
    # Restrict both walks to the region between aggregation and apply.
    apply_grad_ancestor_ops = get_ancestors(apply_grad_ops, agg_grad_ops)
    agg_grad_descendant_ops = _find_agg_grad_descendant_ops(agg_grad_ops, apply_grad_ops)
    ancestors_diff_descendants = apply_grad_ancestor_ops.difference(agg_grad_descendant_ops)
    parallax_log.debug(('apply_grad_ancestor_ops: %d' % len(apply_grad_ancestor_ops)))
    parallax_log.debug(('agg_grad_descendant_ops: %d' % len(agg_grad_descendant_ops)))
    parallax_log.debug(('ancestors diff descendants: %d' % len(ancestors_diff_descendants)))
    parallax_log.debug(('descendants diff ancestors: %d' % len(agg_grad_descendant_ops.difference(apply_grad_ancestor_ops))))
    parallax_log.debug('boundary_between_servers called')
    # Snapshot devices before the two assignment passes, to log what moved.
    before = {}
    for op in tf.get_default_graph().get_operations():
        before[op.name] = tf.DeviceSpec.from_string(op.device)
    _assign(op_to_task, agg_grad_ops, apply_grad_ops, apply_grad_ancestor_ops, ancestors_diff_descendants, is_parent_to_child=True)
    _assign(op_to_task, agg_grad_ops, apply_grad_ops, apply_grad_ancestor_ops, ancestors_diff_descendants, is_parent_to_child=False)
    for op in tf.get_default_graph().get_operations():
        if ((before[op.name].job != tf.DeviceSpec.from_string(op.device).job) or (before[op.name].task != tf.DeviceSpec.from_string(op.device).task)):
            parallax_log.debug(('boundary between servers: %s, %s -> %s' % (op.name, before[op.name].to_string(), op.device)))
# BUG FIX: the exclusion list was built by calling `_utils.test(...)` and
# discarding the result — the leading `@` was missing, so the test ran
# undecorated on every backend. Restore it as a decorator.
@_utils.test(exclude=[ti.metal, ti.opengl, ti.gles, ti.cuda, ti.vulkan, ti.amdgpu])
def test_node_manager():
    """Run the internal SNode-allocator self-test twice to cover the reuse path."""
    def run_once():
        impl.call_internal('test_node_allocator')
    run_once()
    run_once()
def get_ckpt_epochs() -> List[int]:
    """Return the epoch numbers of all saved checkpoints, in ascending order.

    Epochs are parsed from the leading integer of each checkpoint filename
    (everything before the first '.').
    """
    epochs = [
        int(osp.basename(ckpt).split('.')[0])
        for ckpt in glob.glob(get_ckpt_path('*'))
    ]
    epochs.sort()
    return epochs
def register_Ns3GenericMacHeader_methods(root_module, cls):
    """Register the constructors and methods of ns3::GenericMacHeader on the
    PyBindGen wrapper `cls` (generated-style binding registration)."""
    cls.add_constructor([param('ns3::GenericMacHeader const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_virtual=True)
    cls.add_method('GetCi', 'uint8_t', [], is_const=True)
    cls.add_method('GetCid', 'ns3::Cid', [], is_const=True)
    cls.add_method('GetEc', 'uint8_t', [], is_const=True)
    cls.add_method('GetEks', 'uint8_t', [], is_const=True)
    cls.add_method('GetHcs', 'uint8_t', [], is_const=True)
    cls.add_method('GetHt', 'uint8_t', [], is_const=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    cls.add_method('GetLen', 'uint16_t', [], is_const=True)
    cls.add_method('GetName', 'std::string', [], is_const=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetType', 'uint8_t', [], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True, is_virtual=True)
    cls.add_method('SetCi', 'void', [param('uint8_t', 'ci')])
    cls.add_method('SetCid', 'void', [param('ns3::Cid', 'cid')])
    cls.add_method('SetEc', 'void', [param('uint8_t', 'ec')])
    cls.add_method('SetEks', 'void', [param('uint8_t', 'eks')])
    cls.add_method('SetHcs', 'void', [param('uint8_t', 'hcs')])
    cls.add_method('SetHt', 'void', [param('uint8_t', 'ht')])
    cls.add_method('SetLen', 'void', [param('uint16_t', 'len')])
    cls.add_method('SetType', 'void', [param('uint8_t', 'type')])
    cls.add_method('check_hcs', 'bool', [], is_const=True)
    return
def test_invalid():
    """validity_error must flag a RecordArray with duplicate field names."""
    duplicated = ak.contents.RecordArray(
        [ak.contents.NumpyArray([1, 2, 3]), ak.contents.NumpyArray([1, 2, 3])],
        ['x', 'x'],
    )
    error = ak.validity_error(duplicated)
    assert re.match(".*duplicate field 'x'.*", error) is not None
class QuantumManagerDensityFock(QuantumManager):
    """Quantum state manager using the density-matrix formalism in Fock space.

    Each key maps to a DensityState; keys that have interacted share one joint
    DensityState. `self.dim` is the per-mode Hilbert-space dimension
    (presumably truncation + 1, set by the parent class — TODO confirm).

    BUG FIX: the matrix products in _prepare_state, apply_operator and
    add_loss were missing the `@` (matmul) operators and did not parse.
    """

    def __init__(self, truncation: int = 1):
        """Create a manager truncating each mode at photon number `truncation`."""
        super().__init__(DENSITY_MATRIX_FORMALISM, truncation=truncation)

    def new(self, state=None) -> int:
        """Allocate and return a fresh key.

        Args:
            state: optional initial state forwarded to DensityState; defaults
                to the vacuum (ground) state.
        """
        key = self._least_available
        self._least_available += 1
        if state is None:
            # Vacuum amplitude vector [1, 0, ..., 0].
            gnd = [1] + ([0] * self.truncation)
            self.states[key] = DensityState(gnd, [key], truncation=self.truncation)
        else:
            self.states[key] = DensityState(state, [key], truncation=self.truncation)
        return key

    def run_circuit(self, circuit: Circuit, keys: List[int], meas_samp=None) -> Dict[int, int]:
        """Gate-model circuits are unsupported in this formalism; always raises."""
        raise Exception('run_circuit method of class QuantumManagerDensityFock called')

    def _generate_swap_operator(self, num_systems: int, i: int, j: int):
        """Build the permutation unitary swapping subsystems i and j.

        Basis indices are treated as base-`dim` digit strings, one digit per
        mode. NOTE(review): the slicing assumes i < j — confirm callers.
        """
        size = self.dim ** num_systems
        swap_unitary = zeros((size, size))
        for old_index in range(size):
            old_str = base_repr(old_index, self.dim).zfill(num_systems)
            new_str = ''.join((old_str[:i], old_str[j], old_str[(i + 1):j], old_str[i], old_str[(j + 1):]))
            new_index = int(new_str, base=self.dim)
            swap_unitary[(new_index, old_index)] = 1
        return swap_unitary

    def _prepare_state(self, keys: List[int]):
        """Return (state, all_keys) for the joint system containing `keys`.

        The joint density matrix is the Kronecker product of all distinct
        stored states touching `keys`, reordered (via swap unitaries) so that
        `keys` occupy consecutive subsystem positions.
        """
        old_states = []
        all_keys = []
        for key in keys:
            qstate = self.states[key]
            # Entangled keys share one DensityState; include it only once.
            if qstate.keys[0] not in all_keys:
                old_states.append(qstate.state)
                all_keys += qstate.keys
        new_state = [1]
        for state in old_states:
            new_state = kron(new_state, state)
        if len(keys) > 1:
            # Choose a contiguous window for `keys`, clamped to the tail.
            start_idx = all_keys.index(keys[0])
            if (start_idx + len(keys)) > len(all_keys):
                start_idx = len(all_keys) - len(keys)
            for i, key in enumerate(keys):
                i = i + start_idx
                j = all_keys.index(key)
                if j != i:
                    swap_unitary = self._generate_swap_operator(len(all_keys), i, j)
                    # BUG FIX: restored `@` operators.
                    new_state = (swap_unitary @ new_state) @ swap_unitary.T
                    all_keys[i], all_keys[j] = all_keys[j], all_keys[i]
        return new_state, all_keys

    def _prepare_operator(self, all_keys: List[int], keys: List[int], operator) -> array:
        """Pad `operator` with identities so it acts on the full joint space.

        Assumes `keys` are contiguous within `all_keys` (see _prepare_state).
        """
        left_dim = self.dim ** all_keys.index(keys[0])
        right_dim = self.dim ** ((len(all_keys) - all_keys.index(keys[-1])) - 1)
        prepared_operator = operator
        # dim ** k >= 1, so both branches always run; kron with identity(1)
        # is a no-op, kept for clarity/symmetry.
        if left_dim > 0:
            prepared_operator = kron(identity(left_dim), prepared_operator)
        if right_dim > 0:
            prepared_operator = kron(prepared_operator, identity(right_dim))
        return prepared_operator

    def apply_operator(self, operator: array, keys: List[int]):
        """Conjugate the joint state of `keys` by `operator` and store it."""
        prepared_state, all_keys = self._prepare_state(keys)
        prepared_operator = self._prepare_operator(all_keys, keys, operator)
        # BUG FIX: restored `@` operators.
        new_state = (prepared_operator @ prepared_state) @ prepared_operator.conj().T
        self.set(all_keys, new_state)

    def set(self, keys: List[int], state: List[List[complex]]) -> None:
        """Store `state` as the single shared DensityState of all `keys`."""
        super().set(keys, state)
        new_state = DensityState(state, keys, truncation=self.truncation)
        for key in keys:
            self.states[key] = new_state

    def set_to_zero(self, key: int):
        """Reset `key` to the vacuum state."""
        gnd = [1] + ([0] * self.truncation)
        self.set([key], gnd)

    def build_ladder(self):
        """Return (create, destroy) ladder operators for one truncated mode."""
        truncation = self.truncation
        # Creation operator entries: <n+1| a† |n> = sqrt(n+1).
        data = array([sqrt(i + 1) for i in range(truncation)])
        row = array([(i + 1) for i in range(truncation)])
        col = array([i for i in range(truncation)])
        create = csr_matrix((data, (row, col)), shape=((truncation + 1), (truncation + 1))).toarray()
        destroy = create.conj().T
        return create, destroy

    def measure(self, keys: List[int], povms: List[array], meas_samp: float) -> int:
        """Measure `keys` with POVM elements `povms`, using sample `meas_samp`."""
        new_state, all_keys = self._prepare_state(keys)
        return self._measure(new_state, keys, all_keys, povms, meas_samp)

    def _measure(self, state: List[List[complex]], keys: List[int], all_keys: List[int], povms: List[array], meas_samp: float) -> int:
        """Pick a POVM outcome by inverse sampling and update stored states.

        Returns the index of the sampled POVM element. Measured keys are
        invalidated; any unmeasured partners keep the partial-traced residue.
        """
        # Tuples make the state/POVMs hashable for the memoized helpers.
        state_tuple = tuple(map(tuple, state))
        povm_tuple = tuple([tuple(map(tuple, povm)) for povm in povms])
        new_state = None
        result = 0
        if len(keys) == 1:
            if len(all_keys) == 1:
                states, probs = measure_state_with_cache_fock_density(state_tuple, povm_tuple)
            else:
                key = keys[0]
                num_states = len(all_keys)
                state_index = all_keys.index(key)
                states, probs = measure_entangled_state_with_cache_fock_density(state_tuple, state_index, num_states, povm_tuple, self.truncation)
        else:
            indices = tuple([all_keys.index(key) for key in keys])
            states, probs = measure_multiple_with_cache_fock_density(state_tuple, indices, len(all_keys), povm_tuple, self.truncation)
        # Inverse-transform sampling over the outcome probabilities.
        prob_sum = cumsum(probs)
        for i, (output_state, p) in enumerate(zip(states, prob_sum)):
            if meas_samp < p:
                new_state = output_state
                result = i
                break
        # NOTE: mapping the outcome digits back onto per-key single-mode
        # states is left for future work; measured keys are invalidated.
        for key in keys:
            self.states[key] = None
        if len(keys) < len(all_keys):
            # Trace out the measured modes; keep the residual joint state.
            indices = tuple([all_keys.index(key) for key in keys])
            new_state_tuple = tuple(map(tuple, new_state))
            remaining_state = density_partial_trace(new_state_tuple, indices, len(all_keys), self.truncation)
            remaining_keys = [key for key in all_keys if key not in keys]
            self.set(remaining_keys, remaining_state)
        return result

    def _build_loss_kraus_operators(self, loss_rate: float, all_keys: List[int], key: int) -> List[array]:
        """Kraus operators for losing photons on `key` with probability `loss_rate`."""
        assert 0 <= loss_rate <= 1
        kraus_ops = []
        for k in range(self.dim):
            total_kraus_op = zeros((self.dim ** len(all_keys), self.dim ** len(all_keys)))
            for n in range(k, self.dim):
                # Amplitude for losing k of n photons (binomial loss channel).
                coeff = sqrt(binom(n, k)) * sqrt(((1 - loss_rate) ** (n - k)) * (loss_rate ** k))
                single_op = zeros((self.dim, self.dim))
                single_op[(n - k), n] = 1
                total_op = self._prepare_operator(all_keys, [key], single_op)
                total_kraus_op += coeff * total_op
            kraus_ops.append(total_kraus_op)
        return kraus_ops

    def add_loss(self, key, loss_rate):
        """Apply the photon-loss channel to `key` (and its entangled partners)."""
        prepared_state, all_keys = self._prepare_state([key])
        kraus_ops = self._build_loss_kraus_operators(loss_rate, all_keys, key)
        output_state = zeros(prepared_state.shape, dtype=complex)
        for kraus_op in kraus_ops:
            # BUG FIX: restored `@` operators.
            output_state += (kraus_op @ prepared_state) @ kraus_op.conj().T
        self.set(all_keys, output_state)
# BUG FIX: the decorator line was truncated to a bare `.parametrize(...)`
# attribute access, which is not valid syntax; restore the full
# `@pytest.mark.parametrize` decorator.
@pytest.mark.parametrize('mask_distance,expected', [(1, ((- 2.), (- 2.))), (2, ((- 0.), (- 0.))), (5, ((- 0.), (- 0.))), (10, ((- 0.), (- 0.))), (28, ((- 0.), (- 0.))), (50, ((- 0.), (- 0.)))])
def test_likelihood_batch_with_individual_masking_distance(msa_sampler, msa_batch_example, mask_distance, expected):
    """Masked log-likelihoods at several mask distances: each sequence's total
    must match the expected value and equal the mean of its per-position terms."""
    result = list(msa_sampler.log_likelihood_batch(msa_batch_example, target_index=4, with_masking=True, mask_distance=mask_distance))
    assert result[0][0] == pytest.approx(expected[0])
    assert mean(result[0][1]) == pytest.approx(result[0][0])
    assert result[1][0] == pytest.approx(expected[1])
    assert mean(result[1][1]) == pytest.approx(result[1][0])
def _impl(array):
    """Recursively convert awkward structures (and common array-likes) to
    plain Python objects; scalars fall through unchanged.

    The checks run in a fixed order: awkward high-level types first, then
    low-level layouts, builders, generic `tolist`/`to_list` ducks, mappings,
    and finally non-string iterables.
    """
    if isinstance(array, (ak.highlevel.Array, ak.highlevel.Record, ak.highlevel.ArrayBuilder)):
        return array.to_list()
    if isinstance(array, (ak.contents.Content, ak.record.Record)):
        return array.to_list(None)
    if isinstance(array, _ext.ArrayBuilder):
        # Snapshot the builder into a layout, then convert that.
        form_json, length, container = array.to_buffers()
        form = ak.forms.from_json(form_json)
        layout = ak.operations.from_buffers(form, length, container, byteorder=ak._util.native_byteorder)
        return layout.to_list(None)
    if hasattr(array, 'tolist'):
        return array.tolist()
    if hasattr(array, 'to_list'):
        return array.to_list()
    if isinstance(array, Mapping):
        return {key: _impl(value) for key, value in array.items()}
    if is_non_string_like_iterable(array):
        return [_impl(item) for item in array]
    return array
class CheckOnlineDocs(Step):
    """Release-checklist step: prompt the operator to review the online docs."""
    def action(self, context):
        # Show the instruction, then open the hosted documentation page.
        self.instruct('Check online docs')
        open_website(URLs.DOCS_ONLINE)
class Queue():
def __init__(self, size_max: int) -> None:
assert (size_max > 0)
self.max = size_max
self.head = 0
self.tail = 0
self.size = 0
self.data = array.array('i', range(size_max))
def empty(self) -> bool:
return (self.size != 0)
def full(self) -> bool:
return (self.size == self.max)
def enqueue(self, x: int) -> bool:
if (self.size == self.max):
return False
self.data[self.tail] = x
self.size += 1
self.tail += 1
if (self.tail == self.max):
self.tail = 0
return True
def dequeue(self) -> (int | None):
if (self.size == 0):
return None
x = self.data[self.head]
self.size -= 1
self.head += 1
if (self.head == self.max):
self.head = 0
return x |
def profileToProfile(im, inputProfile, outputProfile, renderingIntent=INTENT_PERCEPTUAL, outputMode=None, inPlace=False, flags=0):
    """Apply an ICC color transform from inputProfile to outputProfile on `im`.

    Args:
        im: image to convert (its `.mode` supplies the default output mode).
        inputProfile / outputProfile: profile objects or values accepted by
            the ImageCmsProfile constructor.
        renderingIntent: integer intent in [0, 3].
        outputMode: target mode; defaults to the input image's mode.
        inPlace: when true, convert `im` in place and return None.
        flags: integer flags in [0, _MAX_FLAG].

    Returns:
        The converted image, or None when inPlace is true.

    Raises:
        PyCMSError: on invalid arguments or any CMS failure.
    """
    if (outputMode is None):
        outputMode = im.mode
    if ((not isinstance(renderingIntent, int)) or (not (0 <= renderingIntent <= 3))):
        raise PyCMSError('renderingIntent must be an integer between 0 and 3')
    if ((not isinstance(flags, int)) or (not (0 <= flags <= _MAX_FLAG))):
        # BUG FIX: the message used `'...%s' + _MAX_FLAG` (str + int), which
        # raised TypeError instead of the intended PyCMSError message.
        raise PyCMSError('flags must be an integer between 0 and %s' % _MAX_FLAG)
    try:
        # Coerce raw profile arguments into ImageCmsProfile wrappers.
        if (not isinstance(inputProfile, ImageCmsProfile)):
            inputProfile = ImageCmsProfile(inputProfile)
        if (not isinstance(outputProfile, ImageCmsProfile)):
            outputProfile = ImageCmsProfile(outputProfile)
        transform = ImageCmsTransform(inputProfile, outputProfile, im.mode, outputMode, renderingIntent, flags=flags)
        if inPlace:
            transform.apply_in_place(im)
            imOut = None
        else:
            imOut = transform.apply(im)
    except (OSError, TypeError, ValueError) as v:
        # Re-raise all low-level failures as the module's exception type,
        # preserving the original cause.
        raise PyCMSError(v) from v
    return imOut
class TestTranslation(unittest.TestCase):
    """End-to-end smoke tests for translation models: each test builds dummy
    data, preprocesses it, trains a tiny model, and runs generation."""
    def setUp(self):
        # Silence all logging during the heavy train/generate runs.
        logging.disable(logging.CRITICAL)
    def tearDown(self):
        # Restore normal logging after each test.
        logging.disable(logging.NOTSET)
    # Baseline fully-convolutional model, binarized dataset.
    def test_fconv(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_fconv') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'fconv_iwslt_de_en')
                generate_main(data_dir)
    # Same pipeline with the raw (un-binarized) dataset implementation.
    def test_raw(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_fconv_raw') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir, ['--dataset-impl', 'raw'])
                train_translation_model(data_dir, 'fconv_iwslt_de_en', ['--dataset-impl', 'raw'])
                generate_main(data_dir, ['--dataset-impl', 'raw'])
    # Gradient accumulation across 3 batches.
    def test_update_freq(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_update_freq') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'fconv_iwslt_de_en', ['--update-freq', '3'])
                generate_main(data_dir)
    # Over-long examples must fail unless --skip-invalid-size-inputs-valid-test.
    def test_max_positions(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_max_positions') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                with self.assertRaises(Exception) as context:
                    train_translation_model(data_dir, 'fconv_iwslt_de_en', ['--max-target-positions', '5'])
                self.assertTrue(('skip this example with --skip-invalid-size-inputs-valid-test' in str(context.exception)))
                train_translation_model(data_dir, 'fconv_iwslt_de_en', ['--max-target-positions', '5', '--skip-invalid-size-inputs-valid-test'])
                with self.assertRaises(Exception) as context:
                    generate_main(data_dir)
                generate_main(data_dir, ['--skip-invalid-size-inputs-valid-test'])
    # Exercise the sampling/diverse-beam generation options.
    def test_generation(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_sampling') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'fconv_iwslt_de_en')
                generate_main(data_dir, ['--sampling', '--temperature', '2', '--beam', '2', '--nbest', '2'])
                generate_main(data_dir, ['--sampling', '--sampling-topk', '3', '--beam', '2', '--nbest', '2'])
                generate_main(data_dir, ['--sampling', '--sampling-topp', '0.2', '--beam', '2', '--nbest', '2'])
                generate_main(data_dir, ['--diversity-rate', '0.5', '--beam', '6'])
                # --match-source-len is incompatible with diverse beam groups.
                with self.assertRaises(ValueError):
                    generate_main(data_dir, ['--diverse-beam-groups', '4', '--match-source-len'])
                generate_main(data_dir, ['--prefix-size', '2'])
                generate_main(data_dir, ['--retain-dropout'])
    # Validation-time BLEU computation during training.
    def test_eval_bleu(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_eval_bleu') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'fconv_iwslt_de_en', ['--eval-bleu', '--eval-bleu-print-samples', '--eval-bleu-remove-bpe', '--eval-bleu-detok', 'space', '--eval-bleu-args', '{"beam": 4, "min_len": 10}'])
    # Small unidirectional LSTM model.
    def test_lstm(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_lstm') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'lstm_wiseman_iwslt_de_en', ['--encoder-layers', '2', '--decoder-layers', '2', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', '--decoder-out-embed-dim', '8'])
                generate_main(data_dir)
    # LSTM with a bidirectional encoder.
    def test_lstm_bidirectional(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_lstm_bidirectional') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'lstm', ['--encoder-layers', '2', '--encoder-bidirectional', '--encoder-hidden-size', '16', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', '--decoder-out-embed-dim', '8', '--decoder-layers', '2'])
                generate_main(data_dir)
    # Tiny transformer, with validation enabled.
    def test_transformer(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_transformer') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'transformer_iwslt_de_en', ['--encoder-layers', '2', '--decoder-layers', '2', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8'], run_validation=True)
                generate_main(data_dir)
    # Sweep all encoder/decoder language-token flag combinations.
    def test_multilingual_transformer(self):
        encoder_langtok_flags = [[], ['--encoder-langtok', 'src'], ['--encoder-langtok', 'tgt']]
        decoder_langtok_flags = [[], ['--decoder-langtok']]
        with contextlib.redirect_stdout(StringIO()):
            for i in range(len(encoder_langtok_flags)):
                for j in range(len(decoder_langtok_flags)):
                    enc_ltok_flag = encoder_langtok_flags[i]
                    dec_ltok_flag = decoder_langtok_flags[j]
                    with tempfile.TemporaryDirectory(f'test_multilingual_transformer_{i}_{j}') as data_dir:
                        create_dummy_data(data_dir)
                        preprocess_translation_data(data_dir)
                        train_translation_model(data_dir, arch='multilingual_transformer', task='multilingual_translation', extra_flags=((['--encoder-layers', '2', '--decoder-layers', '2', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8'] + enc_ltok_flag) + dec_ltok_flag), lang_flags=['--lang-pairs', 'in-out,out-in'], run_validation=True, extra_valid_flags=(enc_ltok_flag + dec_ltok_flag))
                        generate_main(data_dir, extra_flags=((['--task', 'multilingual_translation', '--lang-pairs', 'in-out,out-in', '--source-lang', 'in', '--target-lang', 'out'] + enc_ltok_flag) + dec_ltok_flag))
((sys.platform.lower() == 'darwin'), 'skip latent depth test on MacOS')
def test_multilingual_translation_latent_depth(self):
encoder_latent_layer = [[], ['--encoder-latent-layer']]
decoder_latent_layer = [[], ['--decoder-latent-layer']]
with contextlib.redirect_stdout(StringIO()):
for i in range(len(encoder_latent_layer)):
for j in range(len(decoder_latent_layer)):
if ((i == 0) and (j == 0)):
continue
enc_ll_flag = encoder_latent_layer[i]
dec_ll_flag = decoder_latent_layer[j]
with tempfile.TemporaryDirectory(f'test_multilingual_translation_latent_depth_{i}_{j}') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir, extra_flags=['--joined-dictionary'])
train_translation_model(data_dir, arch='latent_multilingual_transformer', task='multilingual_translation_latent_depth', extra_flags=((['--user-dir', 'examples/latent_depth/latent_depth_src', '--encoder-layers', '2', '--decoder-layers', '2', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', '--share-encoders', '--share-decoders', '--sparsity-weight', '0.1'] + enc_ll_flag) + dec_ll_flag), lang_flags=['--lang-pairs', 'in-out,out-in'], run_validation=True, extra_valid_flags=((['--user-dir', 'examples/latent_depth/latent_depth_src'] + enc_ll_flag) + dec_ll_flag))
generate_main(data_dir, extra_flags=((['--user-dir', 'examples/latent_depth/latent_depth_src', '--task', 'multilingual_translation_latent_depth', '--lang-pairs', 'in-out,out-in', '--source-lang', 'in', '--target-lang', 'out'] + enc_ll_flag) + dec_ll_flag))
def test_translation_multi_simple_epoch(self):
encoder_langtok_flags = [[], ['--encoder-langtok', 'src'], ['--encoder-langtok', 'tgt']]
decoder_langtok_flags = [[], ['--decoder-langtok']]
with contextlib.redirect_stdout(StringIO()):
for i in range(len(encoder_langtok_flags)):
for j in range(len(decoder_langtok_flags)):
enc_ltok_flag = encoder_langtok_flags[i]
dec_ltok_flag = decoder_langtok_flags[j]
with tempfile.TemporaryDirectory(f'test_translation_multi_simple_epoch_{i}_{j}') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir, extra_flags=['--joined-dictionary'])
train_translation_model(data_dir, arch='transformer', task='translation_multi_simple_epoch', extra_flags=((['--encoder-layers', '2', '--decoder-layers', '2', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', '--sampling-method', 'temperature', '--sampling-temperature', '1.5', '--virtual-epoch-size', '1000'] + enc_ltok_flag) + dec_ltok_flag), lang_flags=['--lang-pairs', 'in-out,out-in'], run_validation=True, extra_valid_flags=(enc_ltok_flag + dec_ltok_flag))
generate_main(data_dir, extra_flags=((['--task', 'translation_multi_simple_epoch', '--lang-pairs', 'in-out,out-in', '--source-lang', 'in', '--target-lang', 'out'] + enc_ltok_flag) + dec_ltok_flag))
def test_translation_multi_simple_epoch_no_vepoch(self):
with contextlib.redirect_stdout(StringIO()):
enc_ltok_flag = ['--encoder-langtok', 'src']
dec_ltok_flag = ['--decoder-langtok']
with tempfile.TemporaryDirectory('test_translation_multi_simple_epoch_dict') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir, extra_flags=[])
train_translation_model(data_dir, arch='transformer', task='translation_multi_simple_epoch', extra_flags=((['--encoder-layers', '2', '--decoder-layers', '2', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', '--sampling-method', 'temperature', '--sampling-temperature', '1.5'] + enc_ltok_flag) + dec_ltok_flag), lang_flags=['--lang-pairs', 'in-out'], run_validation=True, extra_valid_flags=(enc_ltok_flag + dec_ltok_flag))
generate_main(data_dir, extra_flags=((['--task', 'translation_multi_simple_epoch', '--lang-pairs', 'in-out', '--source-lang', 'in', '--target-lang', 'out'] + enc_ltok_flag) + dec_ltok_flag))
def test_translation_multi_simple_epoch_dicts(self):
with contextlib.redirect_stdout(StringIO()):
enc_ltok_flag = ['--encoder-langtok', 'src']
dec_ltok_flag = ['--decoder-langtok']
with tempfile.TemporaryDirectory('test_translation_multi_simple_epoch_dict') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir, extra_flags=[])
train_translation_model(data_dir, arch='transformer', task='translation_multi_simple_epoch', extra_flags=((['--encoder-layers', '2', '--decoder-layers', '2', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', '--sampling-method', 'temperature', '--sampling-temperature', '1.5', '--virtual-epoch-size', '1000'] + enc_ltok_flag) + dec_ltok_flag), lang_flags=['--lang-pairs', 'in-out'], run_validation=True, extra_valid_flags=(enc_ltok_flag + dec_ltok_flag))
generate_main(data_dir, extra_flags=((['--task', 'translation_multi_simple_epoch', '--lang-pairs', 'in-out', '--source-lang', 'in', '--target-lang', 'out'] + enc_ltok_flag) + dec_ltok_flag))
def test_translation_multi_simple_epoch_src_tgt_dict_spec(self):
with contextlib.redirect_stdout(StringIO()):
enc_ltok_flag = ['--encoder-langtok', 'src']
dec_ltok_flag = ['--decoder-langtok']
with tempfile.TemporaryDirectory('test_translation_multi_simple_epoch_dict') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir, extra_flags=[])
train_translation_model(data_dir, arch='transformer', task='translation_multi_simple_epoch', extra_flags=((['--source-dict', f'{data_dir}/dict.in.txt', '--target-dict', f'{data_dir}/dict.out.txt', '--encoder-layers', '2', '--decoder-layers', '2', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', '--sampling-method', 'temperature', '--sampling-temperature', '1.5', '--virtual-epoch-size', '1000'] + enc_ltok_flag) + dec_ltok_flag), lang_flags=['--lang-pairs', 'in-out'], run_validation=True, extra_valid_flags=(enc_ltok_flag + dec_ltok_flag))
generate_main(data_dir, extra_flags=((['--task', 'translation_multi_simple_epoch', '--lang-pairs', 'in-out', '--source-lang', 'in', '--target-lang', 'out'] + enc_ltok_flag) + dec_ltok_flag))
def test_transformer_cross_self_attention(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_transformer_cross_self_attention') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(data_dir, 'transformer_iwslt_de_en', ['--encoder-layers', '2', '--decoder-layers', '2', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', '--decoder-embed-dim', '8', '--no-cross-attention', '--cross-self-attention'], run_validation=True)
generate_main(data_dir, extra_flags=[])
def test_transformer_pointer_generator(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_transformer_pointer_generator') as data_dir:
create_dummy_data(data_dir)
preprocess_summarization_data(data_dir)
train_translation_model(data_dir, 'transformer_pointer_generator', extra_flags=['--user-dir', 'examples/pointer_generator/pointer_generator_src', '--encoder-layers', '2', '--decoder-layers', '2', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', '--alignment-layer', '-1', '--alignment-heads', '1', '--source-position-markers', '0'], run_validation=True, extra_valid_flags=['--user-dir', 'examples/pointer_generator/pointer_generator_src'])
generate_main(data_dir, extra_flags=['--user-dir', 'examples/pointer_generator/pointer_generator_src'])
def test_lightconv(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_lightconv') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(data_dir, 'lightconv_iwslt_de_en', ['--encoder-conv-type', 'lightweight', '--decoder-conv-type', 'lightweight', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8'])
generate_main(data_dir)
def test_dynamicconv(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_dynamicconv') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(data_dir, 'lightconv_iwslt_de_en', ['--encoder-conv-type', 'dynamic', '--decoder-conv-type', 'dynamic', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8'])
generate_main(data_dir)
def test_cmlm_transformer(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_cmlm_transformer') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir, ['--joined-dictionary'])
train_translation_model(data_dir, 'cmlm_transformer', ['--apply-bert-init', '--criterion', 'nat_loss', '--noise', 'full_mask', '--pred-length-offset', '--length-loss-factor', '0.1'], task='translation_lev')
generate_main(data_dir, ['--task', 'translation_lev', '--iter-decode-max-iter', '9', '--iter-decode-eos-penalty', '0', '--print-step'])
def test_nonautoregressive_transformer(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_nonautoregressive_transformer') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir, ['--joined-dictionary'])
train_translation_model(data_dir, 'nonautoregressive_transformer', ['--apply-bert-init', '--src-embedding-copy', '--criterion', 'nat_loss', '--noise', 'full_mask', '--pred-length-offset', '--length-loss-factor', '0.1'], task='translation_lev')
generate_main(data_dir, ['--task', 'translation_lev', '--iter-decode-max-iter', '0', '--iter-decode-eos-penalty', '0', '--print-step'])
def test_iterative_nonautoregressive_transformer(self):
    """Smoke-test the iterative NAT variant (stochastic approximation + denoising-autoencoder ratio)."""
    with contextlib.redirect_stdout(StringIO()):
        with tempfile.TemporaryDirectory('test_iterative_nonautoregressive_transformer') as data_dir:
            create_dummy_data(data_dir)
            preprocess_translation_data(data_dir, ['--joined-dictionary'])
            train_translation_model(data_dir, 'iterative_nonautoregressive_transformer', ['--apply-bert-init', '--src-embedding-copy', '--criterion', 'nat_loss', '--noise', 'full_mask', '--stochastic-approx', '--dae-ratio', '0.5', '--train-step', '3'], task='translation_lev')
            generate_main(data_dir, ['--task', 'translation_lev', '--iter-decode-max-iter', '9', '--iter-decode-eos-penalty', '0', '--print-step'])
def test_insertion_transformer(self):
    """Smoke-test the insertion transformer with random-mask noise under translation_lev."""
    with contextlib.redirect_stdout(StringIO()):
        with tempfile.TemporaryDirectory('test_insertion_transformer') as data_dir:
            create_dummy_data(data_dir)
            preprocess_translation_data(data_dir, ['--joined-dictionary'])
            train_translation_model(data_dir, 'insertion_transformer', ['--apply-bert-init', '--criterion', 'nat_loss', '--noise', 'random_mask'], task='translation_lev')
            generate_main(data_dir, ['--task', 'translation_lev', '--iter-decode-max-iter', '9', '--iter-decode-eos-penalty', '0', '--print-step'])
def test_mixture_of_experts(self):
    """Smoke-test the translation_moe user task (hard-MoE with mean-pool gating, 3 experts)."""
    with contextlib.redirect_stdout(StringIO()):
        with tempfile.TemporaryDirectory('test_moe') as data_dir:
            create_dummy_data(data_dir)
            preprocess_translation_data(data_dir)
            train_translation_model(data_dir, 'transformer_iwslt_de_en', ['--task', 'translation_moe', '--user-dir', 'examples/translation_moe/translation_moe_src', '--method', 'hMoElp', '--mean-pool-gating-network', '--num-experts', '3', '--encoder-layers', '2', '--decoder-layers', '2', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8'])
            # Generation must select a concrete expert (--gen-expert 0).
            generate_main(data_dir, ['--task', 'translation_moe', '--user-dir', 'examples/translation_moe/translation_moe_src', '--method', 'hMoElp', '--mean-pool-gating-network', '--num-experts', '3', '--gen-expert', '0'])
def test_alignment(self):
    """Smoke-test transformer_align training with supervised alignments and its alignment criterion."""
    with contextlib.redirect_stdout(StringIO()):
        with tempfile.TemporaryDirectory('test_alignment') as data_dir:
            # alignment=True makes the dummy data include word-alignment files.
            create_dummy_data(data_dir, alignment=True)
            preprocess_translation_data(data_dir, ['--align-suffix', 'align'])
            train_translation_model(data_dir, 'transformer_align', ['--encoder-layers', '2', '--decoder-layers', '2', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', '--load-alignments', '--alignment-layer', '1', '--criterion', 'label_smoothed_cross_entropy_with_alignment'], run_validation=True)
            generate_main(data_dir)
def test_alignment_full_context(self):
    """Same as test_alignment but with --full-context-alignment enabled."""
    with contextlib.redirect_stdout(StringIO()):
        with tempfile.TemporaryDirectory('test_alignment') as data_dir:
            create_dummy_data(data_dir, alignment=True)
            preprocess_translation_data(data_dir, ['--align-suffix', 'align'])
            train_translation_model(data_dir, 'transformer_align', ['--encoder-layers', '2', '--decoder-layers', '2', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', '--load-alignments', '--alignment-layer', '1', '--criterion', 'label_smoothed_cross_entropy_with_alignment', '--full-context-alignment'], run_validation=True)
            generate_main(data_dir)
def test_transformer_layerdrop(self):
    """Smoke-test LayerDrop training and generation, including pruned-layer model overrides."""
    with contextlib.redirect_stdout(StringIO()):
        with tempfile.TemporaryDirectory('test_transformer_layerdrop') as data_dir:
            create_dummy_data(data_dir)
            preprocess_translation_data(data_dir)
            train_translation_model(data_dir, 'transformer_iwslt_de_en', ['--encoder-layers', '3', '--decoder-layers', '3', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', '--encoder-layerdrop', '0.01', '--decoder-layerdrop', '0.01'])
            generate_main(data_dir)
            # Second pass: generate with a subset of layers kept, exercising layer pruning.
            generate_main(data_dir, ['--model-overrides', "{'encoder_layers_to_keep':'0,2','decoder_layers_to_keep':'1'}"])
def make_batch_roberta_bert(sessions):
    """Collate dialogue sessions into model-ready batches.

    Returns a tuple of (padded RoBERTa input tokens for the flattened dialogue,
    label tensor, per-session padded BERT tokens of the current speaker's
    earlier utterances). Relies on module-level `bert_tokenizer`,
    `roberta_tokenizer`, `encode_right_truncated` and `padding`.
    """
    batch_input = []
    batch_labels = []
    batch_speaker_tokens = []
    for session in sessions:
        (context_speaker, context, emotion, sentiment) = session[0]
        label_list = session[1]
        current_speaker = context_speaker[-1]
        own_prev_utts = []
        pieces = []
        for turn, (speaker, utt) in enumerate(zip(context_speaker, context)):
            # Prefix every utterance with its speaker token, e.g. '<s1> '.
            pieces.append('<s' + str(speaker + 1) + '> ')
            pieces.append(utt + ' ')
            # Earlier turns spoken by the current speaker feed the BERT branch.
            if turn < len(context_speaker) - 1 and speaker == current_speaker:
                own_prev_utts.append(encode_right_truncated(utt, bert_tokenizer, max_length=511))
        flattened = ''.join(pieces).strip()
        batch_input.append(encode_right_truncated(flattened, roberta_tokenizer, max_length=511))
        # More than three candidate labels means the emotion task; otherwise sentiment.
        target = emotion if len(label_list) > 3 else sentiment
        batch_labels.append(label_list.index(target))
        batch_speaker_tokens.append(padding(own_prev_utts, bert_tokenizer))
    batch_input_tokens = padding(batch_input, roberta_tokenizer)
    return (batch_input_tokens, torch.tensor(batch_labels), batch_speaker_tokens)
def _log_obj(name, obj, prefix):
    """Recursively log a possibly nested config object, one '<indent>name: value' line per leaf."""
    if name in ['wandb', 'dset', 'model']:
        # These entries are config wrappers; unwrap their raw '_content' dict.
        # If unwrapping fails, skip the entry entirely rather than log the wrapper.
        try:
            obj = vars(obj)['_content']
        except Exception:
            return
    if not isinstance(obj, dict):
        logger.info(f'{prefix}{name}: {obj}')
        return
    logger.info(f'{prefix}{name}:')
    for key, value in obj.items():
        _log_obj(key, value, prefix + '  ')
def halo3d(x, a, sigma, array_size):
    """Fill a 3-D array with a Gaussian halo of amplitude `a` centred at voxel `x`.

    For every voxel (i, j, k), `dx` is the SQUARED Euclidean distance to `x`,
    which is passed to `gaussian(dx, sigma)` (defined elsewhere in this module)
    to obtain the halo weight.  Returns the filled array of shape `array_size`.
    """
    ar = np.zeros(array_size, dtype=float)
    for i in range(array_size[0]):
        for j in range(array_size[1]):
            for k in range(array_size[2]):
                # Direct sum of squares replaces the original opaque
                # reduce(lambda ...) fold over [0, di, dj, dk].
                dx = float((i - x[0]) ** 2 + (j - x[1]) ** 2 + (k - x[2]) ** 2)
                ar[i, j, k] = a * gaussian(dx, sigma)
    return ar
def d_logistic_loss(real_pred, fake_pred):
    """Discriminator logistic (non-saturating GAN) loss.

    softplus(-real_pred) pushes real scores up, softplus(fake_pred) pushes
    fake scores down; both terms are batch-averaged and summed.
    """
    loss_real = F.softplus(-real_pred).mean()
    loss_fake = F.softplus(fake_pred).mean()
    return loss_real + loss_fake
def sunrgbd_data_prep(root_path, info_prefix, out_dir, workers):
    """Generate SUN RGB-D info files by delegating to the indoor dataset converter."""
    indoor.create_indoor_info_file(root_path, info_prefix, out_dir, workers=workers)
def type_check(param, value):
    """Return True iff `value`'s runtime type matches the category that `param`
    is registered under in DEFAULTS[MAIN]; False otherwise.

    For lists, every element must additionally match the element type of the
    registered default list.
    """
    # bool must be tested before int: bool is a subclass of int in Python.
    if isinstance(value, bool):
        return param in DEFAULTS[MAIN]['boolean']
    if isinstance(value, list):
        if param not in DEFAULTS[MAIN]['list']:
            return False
        # Element type is inferred from the first element of the default list.
        elem_type = type(DEFAULTS[MAIN]['list'][param][0])
        return all(isinstance(item, elem_type) for item in value)
    if isinstance(value, int):
        return param in DEFAULTS[MAIN]['integer']
    if isinstance(value, float):
        return param in DEFAULTS[MAIN]['float']
    if isinstance(value, str):
        return param in DEFAULTS[MAIN]['string']
    return False
def main():
    """Attach constituency parses to a sentiment dataset (or single file) and write it back.

    The input is either a concrete file path or a dataset name whose shard
    files live under SENTIMENT_DATA_DIR; missing shards are rebuilt first.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('dataset', type=str, help='Dataset (or a single file) to process')
    parser.add_argument('--output', type=str, help='Write the processed data here instead of clobbering')
    parser.add_argument('--constituency_package', type=str, default=None, help='Constituency model to use for parsing')
    parser.add_argument('--constituency_model', type=str, default=None, help='Specific model file to use for parsing')
    parser.add_argument('--retag_package', type=str, default=None, help='Which tagger to use for retagging')
    parser.add_argument('--split_mwt', action='store_true', help='Split MWT from the original sentences if the language has MWT')
    parser.add_argument('--lang', type=str, default=None, help='Which language the dataset/file is in. If not specified, will try to use the dataset name')
    args = parser.parse_args()
    # Case 1: the positional argument is an existing file on disk.
    if os.path.exists(args.dataset):
        expected_files = [args.dataset]
        if args.output:
            output_files = [args.output]
        else:
            output_files = expected_files
        if (not args.lang):
            # Filenames are expected to start with the language code, e.g. 'en_...'.
            (_, filename) = os.path.split(args.dataset)
            args.lang = filename.split('_')[0]
            print(('Guessing lang=%s based on the filename %s' % (args.lang, filename)))
    # Case 2: a dataset name resolved against SENTIMENT_DATA_DIR, one file per shard.
    else:
        paths = default_paths.get_default_paths()
        expected_files = [os.path.join(paths['SENTIMENT_DATA_DIR'], ('%s.%s.json' % (args.dataset, shard))) for shard in SHARDS]
        if args.output:
            output_files = [os.path.join(paths['SENTIMENT_DATA_DIR'], ('%s.%s.json' % (args.output, shard))) for shard in SHARDS]
        else:
            output_files = expected_files
        # Rebuild the whole dataset if any shard is missing.
        for filename in expected_files:
            if (not os.path.exists(filename)):
                print(('Cannot find expected dataset file %s - rebuilding dataset' % filename))
                prepare_sentiment_dataset.main(args.dataset)
                break
        if (not args.lang):
            (args.lang, _) = args.dataset.split('_', 1)
            print(('Guessing lang=%s based on the dataset name' % args.lang))
    # Build the parsing pipeline; input is already tokenized, so tokenize_pretokenized=True.
    pipeline_args = {'lang': args.lang, 'processors': 'tokenize,pos,constituency', 'tokenize_pretokenized': True, 'pos_tqdm': True, 'constituency_tqdm': True}
    package = {}
    if (args.constituency_package is not None):
        package['constituency'] = args.constituency_package
    if (args.retag_package is not None):
        package['pos'] = args.retag_package
    if package:
        pipeline_args['package'] = package
    if (args.constituency_model is not None):
        pipeline_args['constituency_model_path'] = args.constituency_model
    pipe = stanza.Pipeline(**pipeline_args)
    if args.split_mwt:
        # A secondary tokenize-only pipeline reveals whether the language has an MWT processor.
        mwt_pipe = stanza.Pipeline(lang=args.lang, processors='tokenize')
        if ('mwt' in mwt_pipe.processors):
            print('This language has MWT. Will resplit any MWTs found in the dataset')
        else:
            print(('--split_mwt was requested, but %s does not support MWT!' % args.lang))
            args.split_mwt = False
    for (filename, output_filename) in zip(expected_files, output_files):
        dataset = read_dataset(filename, WVType.OTHER, 1)
        text = [x.text for x in dataset]
        if args.split_mwt:
            print(('Resplitting MWT in %d sentences from %s' % (len(dataset), filename)))
            doc = resplit_mwt(text, mwt_pipe)
            print(('Parsing %d sentences from %s' % (len(dataset), filename)))
            doc = pipe(doc)
        else:
            print(('Parsing %d sentences from %s' % (len(dataset), filename)))
            doc = pipe(text)
        # Parses are attached 1:1 to the dataset items before writing out.
        assert (len(dataset) == len(doc.sentences))
        for (datum, sentence) in zip(dataset, doc.sentences):
            datum.constituency = sentence.constituency
        process_utils.write_list(output_filename, dataset)
class RSCrop(object):
    """Random-scale-then-random-crop transform applied jointly to an (image, mask) pair.

    The short side is first resized to a random length in [0.5*size, 2*size],
    the pair is zero-padded on the right/bottom if needed, and finally a random
    square crop of side `size` is taken at the same location in both images.
    """

    def __init__(self, size):
        # Side length of the final square crop.
        self.size = size

    def __call__(self, img, mask):
        assert (img.size == mask.size)
        crop_size = self.size
        short_size = random.randint(int(self.size * 0.5), int(self.size * 2.0))
        w, h = img.size
        # Resize so the SHORTER side equals short_size, preserving aspect ratio.
        if h > w:
            new_w = short_size
            new_h = int(1.0 * h * new_w / w)
        else:
            new_h = short_size
            new_w = int(1.0 * w * new_h / h)
        img = img.resize((new_w, new_h), Image.BILINEAR)
        # Nearest-neighbour keeps label ids in the mask intact.
        mask = mask.resize((new_w, new_h), Image.NEAREST)
        if short_size < crop_size:
            # Pad right/bottom with zeros so the crop window always fits.
            pad_h = max(crop_size - new_h, 0)
            pad_w = max(crop_size - new_w, 0)
            img = ImageOps.expand(img, border=(0, 0, pad_w, pad_h), fill=0)
            mask = ImageOps.expand(mask, border=(0, 0, pad_w, pad_h), fill=0)
        w, h = img.size
        x1 = random.randint(0, w - crop_size)
        y1 = random.randint(0, h - crop_size)
        box = (x1, y1, x1 + crop_size, y1 + crop_size)
        return (img.crop(box), mask.crop(box))
class KMaxPool1d(nn.Module):
    """Keep the k largest activations along the last (time) dimension."""

    def __init__(self, k):
        super().__init__()
        # Number of maxima retained per channel.
        self.k = k

    def forward(self, inputs):
        # Pool over dim=2, i.e. (batch, channels, time) -> (batch, channels, k).
        return kmax_pooling(inputs, 2, self.k)

    def __repr__(self):
        return '{}(k={})'.format(self.__class__.__name__, self.k)
def train_model():
    """Train the autoregressive spectrum model end-to-end from CLI args.

    Loads the dataset split, builds train/val/test loaders, fits the model with
    PyTorch Lightning (early stopping + best checkpoint), then reloads the best
    checkpoint and evaluates on the test loader.
    """
    args = get_args()
    kwargs = args.__dict__
    save_dir = kwargs['save_dir']
    common.setup_logger(save_dir, log_name='autoregr_train.log', debug=kwargs['debug'])
    pl.utilities.seed.seed_everything(kwargs.get('seed'))
    # Persist the full argument set alongside the run for reproducibility.
    yaml_args = yaml.dump(kwargs)
    logging.info(f'''
{yaml_args}''')
    with open((Path(save_dir) / 'args.yaml'), 'w') as fp:
        fp.write(yaml_args)
    dataset_name = kwargs['dataset_name']
    data_dir = common.get_data_dir(dataset_name)
    labels = (data_dir / kwargs['dataset_labels'])
    split_file = ((data_dir / 'splits') / kwargs['split_name'])
    df = pd.read_csv(labels, sep='\t')
    # Plain debug mode: shrink the dataset and run single-process data loading.
    if (kwargs['debug'] and (not kwargs['debug_overfit'])):
        df = df[:100]
        kwargs['num_workers'] = 0
    spec_names = df['spec'].values
    if kwargs['debug_overfit']:
        # Overfit mode: train on a tiny hand-picked subset with no warmup.
        (train_inds, val_inds, test_inds) = common.get_splits(spec_names, split_file, val_frac=0)
        keep_list = ['nist_1135173', 'nist_1561727', 'nist_3162017', 'nist_1908759', 'nist_1156216', 'nist_1489699', 'nist_3150042', 'nist_1167122', 'nist_1431271', 'nist_3275065']
        # NOTE(review): this immediately overrides the list above to a single spectrum.
        keep_list = ['nist_1489699']
        interest_inds = np.argwhere([(i in keep_list) for i in spec_names]).flatten()
        train_inds = np.array(interest_inds, dtype=np.int64)
        val_inds = np.array([1])
        test_inds = np.array([1])
        kwargs['warmup'] = 0
    else:
        (train_inds, val_inds, test_inds) = common.get_splits(spec_names, split_file)
    train_df = df.iloc[train_inds]
    val_df = df.iloc[val_inds]
    test_df = df.iloc[test_inds]
    num_workers = kwargs.get('num_workers', 0)
    subform_stem = kwargs.get('formula_folder', 0)
    # Map spectrum stem -> subformula JSON file.
    subformula_folder = ((data_dir / 'subformulae') / subform_stem)
    subform_map = {i.stem: Path(i) for i in subformula_folder.glob('*.json')}
    graph_featurizer = nn_utils.MolDGLGraph(pe_embed_k=kwargs['pe_embed_k'])
    atom_feats = graph_featurizer.atom_feats
    bond_feats = graph_featurizer.bond_feats
    train_dataset = autoregr_data.AutoregrDataset(df=train_df, data_dir=data_dir, file_map=subform_map, graph_featurizer=graph_featurizer, use_ray=False, root_embedder=kwargs['root_embedder'], num_workers=num_workers)
    val_dataset = autoregr_data.AutoregrDataset(df=val_df, data_dir=data_dir, graph_featurizer=graph_featurizer, file_map=subform_map, use_ray=False, root_embedder=kwargs['root_embedder'], num_workers=num_workers)
    test_dataset = autoregr_data.AutoregrDataset(df=test_df, data_dir=data_dir, graph_featurizer=graph_featurizer, root_embedder=kwargs['root_embedder'], file_map=subform_map, use_ray=False, num_workers=num_workers)
    collate_fn = train_dataset.get_collate_fn()
    train_loader = DataLoader(train_dataset, num_workers=kwargs['num_workers'], collate_fn=collate_fn, shuffle=True, batch_size=kwargs['batch_size'])
    val_loader = DataLoader(val_dataset, num_workers=kwargs['num_workers'], collate_fn=collate_fn, shuffle=False, batch_size=kwargs['batch_size'])
    test_loader = DataLoader(test_dataset, num_workers=kwargs['num_workers'], collate_fn=collate_fn, shuffle=False, batch_size=kwargs['batch_size'])
    # Smoke-check that one batch can be collated before building the model.
    test_batch = next(iter(train_loader))
    model = autoregr_model.AutoregrNet(hidden_size=kwargs['hidden_size'], gnn_layers=kwargs['gnn_layers'], set_layers=kwargs['set_layers'], use_reverse=kwargs['use_reverse'], formula_dim=common.NORM_VEC.shape[0], mpnn_type=kwargs['mpnn_type'], dropout=kwargs['dropout'], learning_rate=kwargs['learning_rate'], weight_decay=kwargs['weight_decay'], atom_feats=atom_feats, bond_feats=bond_feats, pe_embed_k=kwargs['pe_embed_k'], pool_op=kwargs['pool_op'], num_atom_feats=graph_featurizer.num_atom_feats, num_bond_feats=graph_featurizer.num_bond_feats, lr_decay_rate=kwargs['lr_decay_rate'], warmup=kwargs.get('warmup', 1000), embedder=kwargs.get('embedder'), root_embedder=kwargs['root_embedder'], embed_adduct=kwargs['embed_adduct'])
    monitor = 'val_loss'
    if kwargs['debug']:
        kwargs['max_epochs'] = 2
    if kwargs['debug_overfit']:
        # In overfit mode there is no meaningful validation set; monitor train loss.
        kwargs['min_epochs'] = 2000
        kwargs['max_epochs'] = None
        kwargs['no_monitor'] = True
        monitor = 'train_loss'
    tb_logger = pl_loggers.TensorBoardLogger(save_dir, name='')
    console_logger = common.ConsoleLogger()
    tb_path = tb_logger.log_dir
    checkpoint_callback = ModelCheckpoint(monitor=monitor, dirpath=tb_path, filename='best', save_weights_only=False)
    earlystop_callback = EarlyStopping(monitor=monitor, patience=15)
    callbacks = [earlystop_callback, checkpoint_callback]
    trainer = pl.Trainer(logger=[tb_logger, console_logger], accelerator=('gpu' if kwargs['gpu'] else 'cpu'), gpus=(1 if kwargs['gpu'] else 0), callbacks=callbacks, gradient_clip_val=5, min_epochs=kwargs['min_epochs'], max_epochs=kwargs['max_epochs'], gradient_clip_algorithm='value')
    if kwargs['debug_overfit']:
        trainer.fit(model, train_loader)
    else:
        trainer.fit(model, train_loader, val_loader)
    # Reload the best checkpoint (by monitored metric) before final testing.
    checkpoint_callback = trainer.checkpoint_callback
    best_checkpoint = checkpoint_callback.best_model_path
    best_checkpoint_score = checkpoint_callback.best_model_score.item()
    model = autoregr_model.AutoregrNet.load_from_checkpoint(best_checkpoint)
    logging.info(f'Loaded model with from {best_checkpoint} with val loss of {best_checkpoint_score}')
    model.eval()
    trainer.test(dataloaders=test_loader)
def find_d_likelihood(ln, lk, n, k, ww):
    """Return the d value minimising `_compute_binomial_logl` over (D_MIN, D_MAX].

    Uses bounded scalar minimisation and returns the argmin (.x); the float16
    eps keeps the lower bound strictly above D_MIN.
    NOTE(review): `SMin` is presumably an alias for scipy.optimize.minimize_scalar — confirm.
    """
    return SMin(_compute_binomial_logl, args=(lk, k, ln, n, ww), bounds=((D_MIN + np.finfo(np.float16).eps), D_MAX), method='bounded').x
def get_next_nonempty_states(sdfg: SDFG, state: SDFGState) -> Set[SDFGState]:
    """Return the nearest non-empty states reachable from `state`.

    Empty successor states are traversed through; the result contains only the
    first non-empty state found along each outgoing path.
    """
    reached: Set[SDFGState] = set()
    for successor in sdfg.successors(state):
        # The conditional DFS keeps expanding only while the current node is
        # empty, so `reached` holds the empty chains plus their first
        # non-empty endpoints.
        reached.update(dfs_conditional(sdfg, sources=[successor], condition=lambda parent, _: parent.is_empty()))
    return {candidate for candidate in reached if not candidate.is_empty()}
class CustomDataset(BaseDataset):
    """Chest X-ray dataset backed by a CSV of image paths and task labels.

    Supports study-level access (all images of a study stacked together) or
    image-level access.  The CSV to load is chosen from `data_args` according
    to which split `csv_name` corresponds to (train / valid / test / uncertain).
    """

    def __init__(self, csv_name, is_training, study_level, transform_args, toy, return_info_dict, logger=None, data_args=None, stability_training=False):
        super().__init__(csv_name, is_training, transform_args)
        self.study_level = study_level          # group images by study if True
        self.toy = toy                          # subsample 10 studies for quick runs
        self.return_info_dict = return_info_dict
        self.logger = logger
        self.data_args = data_args
        self.stability_training = stability_training
        special = ('special' in data_args.dataset)
        self.is_train_dataset = csv_name_check(self.csv_name, 'train.csv', special)
        self.is_test_dataset = csv_name_check(self.csv_name, 'test.csv', special)
        self.is_val_dataset = csv_name_check(self.csv_name, 'valid.csv', special)
        self.is_uncertain_dataset = ('uncertain' in self.csv_name)
        # Resolve which CSV file on disk backs this split.
        if self.is_train_dataset:
            self.csv_path = self.data_args.csv
        elif self.is_uncertain_dataset:
            self.csv_path = self.data_args.uncertain_map_path
        elif self.is_val_dataset:
            self.csv_path = self.data_args.csv_dev
        elif self.is_test_dataset:
            if self.data_args.together:
                self.csv_path = self.data_args.test_csv
            else:
                self.csv_path = self.data_args.paths_csv
        if self.is_val_dataset:
            print('valid', self.csv_path)
        df = self.load_df()
        self.studies = df[COL_STUDY].drop_duplicates()
        if (self.toy and csv_name_check(self.csv_name, 'train.csv', special)):
            # Toy mode: keep a random sample of 10 studies.
            self.studies = self.studies.sample(n=10)
        df = df[df[COL_STUDY].isin(self.studies)]
        df = df.reset_index(drop=True)
        if self.study_level:
            self.set_study_as_index(df)
        self.labels = self.get_labels(df)
        self.img_paths = self.get_paths(df)

    def _task_names(self):
        """Return the label column names: a custom task set if configured, else the dataset default."""
        if self.data_args.custom_tasks:
            return NamedTasks[self.data_args.custom_tasks]
        return DATASET2TASKS[self.data_args.dataset]

    def load_df(self):
        """Read the split CSV, derive the study column, and zero-fill missing task labels."""
        df = pd.read_csv(self.csv_path)
        # A study is identified by the parent directory of each image path.
        df[COL_STUDY] = df[COL_PATH].apply((lambda p: Path(p).parent))
        if (self.is_test_dataset and (not self.data_args.together)):
            if self.data_args.custom:
                # Join ground-truth labels onto the path-only test CSV.
                gt_df = pd.read_csv(self.data_args.gt_csv)
                df = pd.merge(df, gt_df, on=COL_STUDY, how='outer')
                df = df.dropna(subset=['Path'])
            df = df.rename(columns={'Lung Opacity': 'Airspace Opacity'}).sort_values(COL_STUDY)
        fill_tasks = self._task_names()
        df[fill_tasks] = df[fill_tasks].fillna(value=0)
        return df

    def set_study_as_index(self, df):
        """Index the frame by study so per-study lookups via .loc work."""
        df.index = df[COL_STUDY]

    def get_paths(self, df):
        """Return the image path column."""
        return df[COL_PATH]

    def get_labels(self, df):
        """Return the label frame: one row per study (deduplicated) or per image."""
        if self.study_level:
            return df.drop_duplicates(subset=COL_STUDY)[self._task_names()]
        return df[self._task_names()]

    def _load_image(self, path):
        """Load one image via util.rotate_img, optionally building a 3-channel enhanced composite.

        When `data_args.channels` is set, the grayscale image is merged with a
        brightened/sharpened copy and a darkened/softened copy as the RGB channels.
        """
        img = util.rotate_img(path)
        if self.data_args.channels:
            from PIL import ImageEnhance
            img = img.convert('L')
            up_channel = ImageEnhance.Brightness(img).enhance(1.1)
            up_channel = ImageEnhance.Sharpness(up_channel).enhance(1.2)
            up_channel = ImageEnhance.Contrast(up_channel).enhance(1.1).convert('L')
            down_channel = ImageEnhance.Brightness(img).enhance(0.9)
            down_channel = ImageEnhance.Sharpness(down_channel).enhance(1.0)
            down_channel = ImageEnhance.Contrast(down_channel).enhance(0.9).convert('L')
            img = Image.merge('RGB', (img, up_channel, down_channel))
        return img

    def get_study(self, index):
        """Return (stacked transformed images, label tensor[, info dict]) for one study."""
        study_path = self.studies.iloc[index]
        label = torch.FloatTensor(self.labels.loc[study_path].values)
        img_paths = pd.Series(self.img_paths.loc[study_path]).tolist()
        if (not ('special' in self.data_args.dataset)):
            imgs = [self._load_image(path) for path in img_paths]
        else:
            imgs = [Image.open(path).convert('RGB') for path in img_paths]
        imgs = torch.stack([self.transform(img) for img in imgs])
        if self.return_info_dict:
            return (imgs, label, {'paths': study_path})
        return (imgs, label)

    def get_image(self, index):
        """Return (transformed image, label tensor[, info dict]) for one image."""
        label = torch.FloatTensor(self.labels.iloc[index].values)
        img_path = self.img_paths.iloc[index]
        if (not ('special' in self.data_args.dataset)):
            img = self._load_image(img_path)
        else:
            img = Image.open(img_path).convert('RGB')
        img = self.transform(img)
        if self.return_info_dict:
            return (img, label, {'paths': str(img_path)})
        return (img, label)

    def __getitem__(self, index):
        if self.study_level:
            return self.get_study(index)
        else:
            return self.get_image(index)
# NOTE(review): in the extracted source this call appeared WITHOUT the '@',
# i.e. as a bare statement whose result was discarded — it matches the shape of
# sklearn's `@validate_params` decorator, so the decorator form is restored
# here; confirm the intended decorator name.
@_params({'X': ['array-like', 'sparse matrix'], 'y': ['array-like']}, prefer_skip_nested_validation=True)
def chi2(X, y):
    """Compute chi-squared statistics between each non-negative feature of X and the classes in y.

    Raises ValueError if X contains negative entries.  Returns whatever
    `_chisquare(observed, expected)` returns (statistics and p-values).
    """
    X = check_array(X, accept_sparse='csr', dtype=(np.float64, np.float32))
    if np.any(((X.data if issparse(X) else X) < 0)):
        raise ValueError('Input X must be non-negative.')
    # Binarize labels; for binary problems append the complement column so
    # both classes get an explicit indicator column.
    Y = LabelBinarizer(sparse_output=True).fit_transform(y)
    if (Y.shape[1] == 1):
        Y = Y.toarray()
        Y = np.append((1 - Y), Y, axis=1)
    # Class-by-feature observed counts.
    observed = safe_sparse_dot(Y.T, X)
    if issparse(observed):
        observed = observed.toarray()
    # Expected counts under independence: class priors times total feature counts.
    feature_count = X.sum(axis=0).reshape(1, (- 1))
    class_prob = Y.mean(axis=0).reshape(1, (- 1))
    expected = np.dot(class_prob.T, feature_count)
    return _chisquare(observed, expected)
def visits_per_time_unit(traj, time_unit='1h'):
    """Count trajectory records per time bin of width `time_unit` (a pandas offset alias).

    Returns a DataFrame indexed by time bin with a single 'n_visits' column.
    """
    datetimes = traj[constants.DATETIME]
    counts = pd.DataFrame(datetimes).set_index(datetimes).groupby(pd.Grouper(freq=time_unit)).count()
    return counts.rename(columns={constants.DATETIME: 'n_visits'})
def length_to_string(len, bin_bound):
    """Map a length to its bucket token '<len_i>' given half-open buckets [lo, hi).

    Raises ValueError if no bucket contains the length.
    (Parameter name `len` shadows the builtin; kept for API compatibility.)
    """
    for idx, (lower, upper) in enumerate(bin_bound):
        if lower <= len < upper:
            return '<len_{}>'.format(idx)
    raise ValueError("didn't find a bucket for length {}".format(len))
def load_data(input_dir, bert_name, batch_size):
    """Load WebQuestionsSP KBQA data, caching the processed result as a pickle.

    Returns (ent2id, rel2id, triples, train_data, test_data); `triples` is a
    LongTensor of (subject, predicate, object) ids including reverse relations.
    """
    cache_fn = os.path.join(input_dir, 'processed.pt')
    if os.path.exists(cache_fn):
        # Fast path: reuse the previously pickled (ent2id, rel2id, triples, loaders).
        print('Read from cache file: {} (NOTE: delete it if you modified data loading process)'.format(cache_fn))
        with open(cache_fn, 'rb') as fp:
            (ent2id, rel2id, triples, train_data, test_data) = pickle.load(fp)
        print('Train number: {}, test number: {}'.format(len(train_data.dataset), len(test_data.dataset)))
    else:
        print('Read data...')
        # Entity ids are assigned by insertion order.
        ent2id = {}
        for line in open(os.path.join(input_dir, 'fbwq_full/entities.dict')):
            l = line.strip().split('\t')
            ent2id[l[0].strip()] = len(ent2id)
        # Relation ids come from the second column of relations.dict.
        rel2id = {}
        for line in open(os.path.join(input_dir, 'fbwq_full/relations.dict')):
            l = line.strip().split('\t')
            rel2id[l[0].strip()] = int(l[1])
        triples = []
        for line in open(os.path.join(input_dir, 'fbwq_full/train.txt')):
            l = line.strip().split('\t')
            s = ent2id[l[0].strip()]
            p = rel2id[l[1].strip()]
            o = ent2id[l[2].strip()]
            triples.append((s, p, o))
            # Every triple is also added with its '_reverse' relation.
            p_rev = rel2id[(l[1].strip() + '_reverse')]
            triples.append((o, p_rev, s))
        triples = torch.LongTensor(triples)
        train_data = DataLoader(input_dir, os.path.join(input_dir, 'QA_data/WebQuestionsSP/qa_train_webqsp.txt'), bert_name, ent2id, rel2id, batch_size, training=True)
        test_data = DataLoader(input_dir, os.path.join(input_dir, 'QA_data/WebQuestionsSP/qa_test_webqsp.txt'), bert_name, ent2id, rel2id, batch_size)
        with open(cache_fn, 'wb') as fp:
            pickle.dump((ent2id, rel2id, triples, train_data, test_data), fp)
    return (ent2id, rel2id, triples, train_data, test_data)
# NOTE(review): the original line here was the garbled fragment `.datainstrument`
# (a syntax error) — presumably a stripped decorator such as
# `@pytest.mark.datainstrument`; confirm against the upstream test file.
def test_dinstr_strided():
    """Data instrumentation round-trip on an SDFG whose 'tmp' array is strided/padded."""
    # @dace.program restored: the original fragment lacked it, but `.to_sdfg()`
    # below requires `dinstr` to be a DaCe program, not a plain function.
    @dace.program
    def dinstr(A: dace.float64[20, 20]):
        tmp = (A + 1)
        return (tmp + 5)
    sdfg = dinstr.to_sdfg(simplify=True)
    # Force a padded layout (32x32 storage for 20x20 data) to exercise strided saves.
    sdfg.arrays['tmp'].total_size = (32 * 32)
    sdfg.arrays['tmp'].strides = (32, 1)
    _instrument(sdfg, dace.DataInstrumentationType.Save, ignore='return')
    A = np.random.rand(20, 20)
    result = sdfg(A)
    assert np.allclose(result, (A + 6))
    dreport: InstrumentedDataReport = sdfg.get_instrumented_data()
    assert np.allclose(dreport['A'], A)
    assert np.allclose(dreport['tmp'], (A + 1))
    # Mutate the saved intermediate and restore it: the final result must
    # reflect the doubled 'tmp', i.e. 2*(A+1) + 5 = 2*A + 7.
    tmp = dreport['tmp']
    tmp *= 2
    dreport.update_report()
    _instrument(sdfg, dace.DataInstrumentationType.Restore, ignore='return')
    result = sdfg.call_with_instrumented_data(dreport, A=A)
    assert np.allclose(result, ((2 * A) + 7))
class Caffe2CompatibleConverter(object):
    """Rewrites a torch module's class in place to make it Caffe2-compatible.

    If `replaceCls` is a GenericMixin subclass, a new class is synthesized with
    the mixin ahead of the module's original class in the MRO; otherwise the
    module's class is replaced outright.
    """

    def __init__(self, replaceCls):
        self.replaceCls = replaceCls

    def create_from(self, module):
        assert isinstance(module, torch.nn.Module)
        if issubclass(self.replaceCls, GenericMixin):
            mixed_name = '{}MixedWith{}'.format(self.replaceCls.__name__, module.__class__.__name__)
            # Dynamically derive the mixed class and swap it onto the instance.
            module.__class__ = type(mixed_name, (self.replaceCls, module.__class__), {})
        else:
            module.__class__ = self.replaceCls
        if isinstance(module, Caffe2Compatible):
            # Default to non-tensor (pythonic) mode after conversion.
            module.tensor_mode = False
        return module
def DF_calc(classes):
    """Return (k - 1)**2 for k = len(classes); the string 'None' if len() fails."""
    try:
        k = len(classes)
    except Exception:
        # Non-sized input (e.g. an int): preserve the original sentinel string.
        return 'None'
    return (k - 1) ** 2
def ask_questions_in_text(sample, bridge_entity, num_sent=2, replace_with_ENT=True):
    """Generate filtered QA pairs from the passage linked to a bridge entity.

    Returns [] when the passage is missing or either entity name is unavailable.
    """
    name_in_table = bridge_entity['name']
    passage = get_passage(sample, bridge_entity['url'], num_sent)
    if passage is None:
        return []
    (name_in_text, _) = get_first_NER(passage)
    if name_in_text is None or name_in_table is None:
        return []
    # Keep the in-text name only when it overlaps the table name; the explicit
    # `== True` deliberately requires a strict boolean from phrase_overlap.
    overlap_ok = (phrase_overlap(name_in_text, name_in_table) == True)
    if not overlap_ok:
        name_in_text = None
    table_entity_name = {'text': name_in_text, 'table': name_in_table}
    candidate_pairs = qg_nlp.qg_without_answer(passage)
    return filter_generated_questions(candidate_pairs, table_entity_name, replace_with_ENT)
def subsample_dataset(dataset, idxs):
    """Keep only the rows of `dataset` whose positions appear in `idxs`.

    Mutates `dataset.data` and `dataset.uq_idxs` in place via a boolean mask
    and returns the same dataset object.
    """
    keep = np.zeros(len(dataset), dtype=bool)
    keep[idxs] = True
    dataset.data = dataset.data[keep]
    dataset.uq_idxs = dataset.uq_idxs[keep]
    return dataset
def main():
    """Run the implicit mass-spring cloth demo with either the legacy GUI or GGUI.

    SPACE toggles pause; ESC quits.  Backend is chosen via --arch (CPU or CUDA).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-g', '--use-ggui', action='store_true', help='Display with GGUI')
    parser.add_argument('-a', '--arch', required=False, default='cpu', dest='arch', type=str, help='The arch (backend) to run this example on')
    (args, unknowns) = parser.parse_known_args()
    arch = args.arch
    if (arch in ['x64', 'cpu', 'arm64']):
        ti.init(arch=ti.cpu)
    elif (arch in ['cuda', 'gpu']):
        ti.init(arch=ti.cuda)
    else:
        raise ValueError('Only CPU and CUDA backends are supported for now.')
    # Fixed implicit-integration time step.
    h = 0.01
    pause = False
    cloth = Cloth(N=5)
    use_ggui = args.use_ggui
    if (not use_ggui):
        # Legacy GUI event/render loop.
        gui = ti.GUI('Implicit Mass Spring System', res=(500, 500))
        while gui.running:
            for e in gui.get_events():
                if (e.key == gui.ESCAPE):
                    gui.running = False
                elif (e.key == gui.SPACE):
                    pause = (not pause)
            if (not pause):
                cloth.update(h)
            cloth.display(gui)
            gui.show()
    else:
        # GGUI event/render loop.
        window = ti.ui.Window('Implicit Mass Spring System', res=(500, 500))
        while window.running:
            if window.get_event(ti.ui.PRESS):
                if (window.event.key == ti.ui.ESCAPE):
                    break
            if window.is_pressed(ti.ui.SPACE):
                pause = (not pause)
            if (not pause):
                cloth.update(h)
            canvas = window.get_canvas()
            cloth.displayGGUI(canvas)
            window.show()
def stream_audio(filename):
    """Open `filename` as an audio stream, trying WAV first, then ffmpeg.

    's3://' URIs are first downloaded to a temporary local file; the returned
    stream owns it (is_tmp=True), and it is deleted if both backends fail.
    Re-raises the ffmpeg backend's exception when no backend can open the file.
    """
    is_tmp = False
    tmpname = None
    if filename.startswith('s3://'):
        tmpname = simpleutils.download_tmp_from_s3(filename)
        is_tmp = True
        filename = tmpname
    try:
        return WaveStream(filename, is_tmp=is_tmp)
    # `except Exception` (not bare `except:`) so KeyboardInterrupt/SystemExit
    # are never swallowed by the fallback logic.
    except Exception:
        # Not a readable WAV file; fall back to ffmpeg decoding.
        pass
    try:
        return ffmpeg_stream_audio(filename, is_tmp=is_tmp)
    except Exception:
        if is_tmp:
            os.unlink(tmpname)
        raise
class IndexStore(ABC):
    """Abstract persistence layer for a search index.

    NOTE(review): in the extracted source the three methods below had EMPTY
    bodies (a syntax error) — presumably docstring-only interface stubs whose
    docstrings were stripped.  Docstring bodies are restored here; subclasses
    are expected to override them.
    """

    def __init__(self, cleanup: bool=True):
        self._index = None      # in-memory index handle, set by load_index implementations
        self.cleanup = cleanup  # whether temporary artifacts should be removed

    def save_to_store(self, save_index: Callable[([str], None)]):
        """Persist the index using the provided `save_index(path)` callback."""

    def load_index(self, init_index: Callable[([], None)], load_index: Callable[([Any, str], None)], configure_index: Callable[([Any], None)]) -> Any:
        """Initialize, load, and configure the index via the provided callbacks; return it."""

    def dump_index(self, target_path: str):
        """Write the index out to `target_path`."""
def _program_name(function) -> str:
result = ''
if ((function.__module__ is not None) and (function.__module__ != '__main__')):
result += (function.__module__.replace('.', '_') + '_')
return (result + function.__name__) |
# --- Dataset-viewer page residue (not Python code); commented out to keep the file parseable ---
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.