code (string, lengths 101 to 5.91M) |
|---|
def init(module, weight_init, bias_init, gain=1):
weight_init(module.weight.data, gain=gain)
bias_init(module.bias.data)
return module |
class DictionaryLearningBenchmark(Transformer, Estimator, Benchmark):
param_names = ['fit_algorithm', 'n_jobs']
params = (['lars', 'cd'], Benchmark.n_jobs_vals)
def setup_cache(self):
super().setup_cache()
def make_data(self, params):
return _olivetti_faces_dataset()
def make_estimator(self, params):
(fit_algorithm, n_jobs) = params
estimator = DictionaryLearning(n_components=15, fit_algorithm=fit_algorithm, alpha=0.1, transform_alpha=1, max_iter=20, tol=1e-16, random_state=0, n_jobs=n_jobs)
return estimator
def make_scorers(self):
make_dict_learning_scorers(self) |
class LabelField(Field[torch.Tensor]):
_already_warned_namespaces: Set[str] = set()
def __init__(self, label: Union[(str, int)], label_namespace: str='labels', skip_indexing: bool=False) -> None:
self.label = label
self._label_namespace = label_namespace
self._label_id = None
self._maybe_warn_for_namespace(label_namespace)
if skip_indexing:
if (not isinstance(label, int)):
raise ConfigurationError('In order to skip indexing, your labels must be integers. Found label = {}'.format(label))
else:
self._label_id = label
elif (not isinstance(label, str)):
raise ConfigurationError('LabelFields must be passed a string label if skip_indexing=False. Found label: {} with type: {}.'.format(label, type(label)))
def _maybe_warn_for_namespace(self, label_namespace: str) -> None:
if (not (self._label_namespace.endswith('labels') or self._label_namespace.endswith('tags'))):
if (label_namespace not in self._already_warned_namespaces):
logger.warning("Your label namespace was '%s'. We recommend you use a namespace ending with 'labels' or 'tags', so we don't add UNK and PAD tokens by default to your vocabulary. See documentation for `non_padded_namespaces` parameter in Vocabulary.", self._label_namespace)
self._already_warned_namespaces.add(label_namespace)
def count_vocab_items(self, counter: Dict[(str, Dict[(str, int)])]):
if (self._label_id is None):
counter[self._label_namespace][self.label] += 1
def index(self, vocab: Vocabulary):
if (self._label_id is None):
self._label_id = vocab.get_token_index(self.label, self._label_namespace)
def get_padding_lengths(self) -> Dict[(str, int)]:
return {}
def as_tensor(self, padding_lengths: Dict[(str, int)]) -> torch.Tensor:
tensor = torch.tensor(self._label_id, dtype=torch.long)
return tensor
def empty_field(self):
return LabelField((- 1), self._label_namespace, skip_indexing=True)
def __str__(self) -> str:
        return f"LabelField with label: {self.label} in namespace: '{self._label_namespace}'." |
def plot_sensitivity(ax, alg, exp, alphas, sp, tp, performance, stderr, exp_attrs):
global plot_alpha
lbl = f'{alg}_{tp}'
ax.set_xscale('log', basex=2)
if (alg == 'ETD'):
color = 'red'
elif (alg == 'ETDLB'):
color = 'grey'
plot_alpha -= 0.1
else:
color = 'black'
ax.plot(alphas, performance, label=lbl, linestyle='-', marker='o', linewidth=2, markersize=5, color=color, alpha=plot_alpha)
ax.errorbar(alphas, performance, yerr=stderr, linestyle='', elinewidth=2, markersize=5, color=color, alpha=plot_alpha)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.set_ylim(exp_attrs.y_lim)
ax.yaxis.set_ticks(exp_attrs.y_axis_ticks)
ax.tick_params(axis='y', which='major', labelsize=exp_attrs.size_of_labels)
ax.xaxis.set_ticks(exp_attrs.x_axis_ticks_log)
ax.set_yticklabels([])
ax.set_xticklabels([])
ax.spines['left'].set_linewidth(2)
ax.spines['bottom'].set_linewidth(2) |
def check_attr_ints_type(attr, node):
if (attr.type != AttributeProto.INTS):
raise ValueError(f'Only INTS is supported for {attr.name} in {node.op_type} op_type') |
def pt_acfg(**kwargs):
bn_cfg = (kwargs.pop('bn_cfg', None) or get_bn_args_pt())
return {'pad_type': 'LIKE', 'bn_cfg': bn_cfg, **kwargs} |
class TFBertForNextSentencePrediction(metaclass=DummyObject):
_backends = ['tf']
def __init__(self, *args, **kwargs):
requires_backends(self, ['tf']) |
def convert_examples_to_features(examples, tokenizer, max_seq_length, doc_stride, max_query_length, is_training, cls_token_at_end=False, cls_token='[CLS]', sep_token='[SEP]', pad_token=0, sequence_a_segment_id=0, sequence_b_segment_id=1, cls_token_segment_id=0, pad_token_segment_id=0, mask_padding_with_zero=True):
    unique_id = 1000000000  # starting feature id (1000000000 in the reference SQuAD conversion scripts)
features = []
for (example_index, example) in enumerate(examples):
query_tokens = tokenizer.tokenize(example.question_text)
if (len(query_tokens) > max_query_length):
query_tokens = query_tokens[0:max_query_length]
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(example.doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
tok_start_position = None
tok_end_position = None
if (is_training and example.is_impossible):
tok_start_position = (- 1)
tok_end_position = (- 1)
if (is_training and (not example.is_impossible)):
tok_start_position = orig_to_tok_index[example.start_position]
if (example.end_position < (len(example.doc_tokens) - 1)):
tok_end_position = (orig_to_tok_index[(example.end_position + 1)] - 1)
else:
tok_end_position = (len(all_doc_tokens) - 1)
(tok_start_position, tok_end_position) = _improve_answer_span(all_doc_tokens, tok_start_position, tok_end_position, tokenizer, example.orig_answer_text)
max_tokens_for_doc = ((max_seq_length - len(query_tokens)) - 3)
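        # The -3 accounts for [CLS] and the two [SEP] tokens. Long documents are split below
        # into overlapping windows of at most max_tokens_for_doc tokens, stepping by doc_stride.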
_DocSpan = collections.namedtuple('DocSpan', ['start', 'length'])
doc_spans = []
start_offset = 0
while (start_offset < len(all_doc_tokens)):
length = (len(all_doc_tokens) - start_offset)
if (length > max_tokens_for_doc):
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if ((start_offset + length) == len(all_doc_tokens)):
break
start_offset += min(length, doc_stride)
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
token_to_orig_map = {}
token_is_max_context = {}
segment_ids = []
p_mask = []
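            # p_mask: 1 for tokens that cannot be part of an answer (query tokens, SEP, padding),
            # 0 for the CLS token and the document tokens.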
if (not cls_token_at_end):
tokens.append(cls_token)
segment_ids.append(cls_token_segment_id)
p_mask.append(0)
cls_index = 0
for token in query_tokens:
tokens.append(token)
segment_ids.append(sequence_a_segment_id)
p_mask.append(1)
tokens.append(sep_token)
segment_ids.append(sequence_a_segment_id)
p_mask.append(1)
for i in range(doc_span.length):
split_token_index = (doc_span.start + i)
token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
is_max_context = _check_is_max_context(doc_spans, doc_span_index, split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(all_doc_tokens[split_token_index])
segment_ids.append(sequence_b_segment_id)
p_mask.append(0)
paragraph_len = doc_span.length
tokens.append(sep_token)
segment_ids.append(sequence_b_segment_id)
p_mask.append(1)
if cls_token_at_end:
tokens.append(cls_token)
segment_ids.append(cls_token_segment_id)
p_mask.append(0)
cls_index = (len(tokens) - 1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_mask = ([(1 if mask_padding_with_zero else 0)] * len(input_ids))
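            # Zero-pad up to max_seq_length; input_mask distinguishes real tokens from padding.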
while (len(input_ids) < max_seq_length):
input_ids.append(pad_token)
input_mask.append((0 if mask_padding_with_zero else 1))
segment_ids.append(pad_token_segment_id)
p_mask.append(1)
assert (len(input_ids) == max_seq_length)
assert (len(input_mask) == max_seq_length)
assert (len(segment_ids) == max_seq_length)
span_is_impossible = example.is_impossible
start_position = None
end_position = None
if (is_training and (not span_is_impossible)):
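                # Keep the labeled answer only if it falls entirely inside the current document
                # window; otherwise this span is treated as impossible and points at the CLS token.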
doc_start = doc_span.start
doc_end = ((doc_span.start + doc_span.length) - 1)
out_of_span = False
if (not ((tok_start_position >= doc_start) and (tok_end_position <= doc_end))):
out_of_span = True
if out_of_span:
start_position = 0
end_position = 0
span_is_impossible = True
else:
doc_offset = (len(query_tokens) + 2)
start_position = ((tok_start_position - doc_start) + doc_offset)
end_position = ((tok_end_position - doc_start) + doc_offset)
if (is_training and span_is_impossible):
start_position = cls_index
end_position = cls_index
if (example_index < 20):
logger.info('*** Example ***')
logger.info(('unique_id: %s' % unique_id))
logger.info(('example_index: %s' % example_index))
logger.info(('doc_span_index: %s' % doc_span_index))
logger.info(('tokens: %s' % ' '.join(tokens)))
logger.info(('token_to_orig_map: %s' % ' '.join([('%d:%d' % (x, y)) for (x, y) in token_to_orig_map.items()])))
logger.info(('token_is_max_context: %s' % ' '.join([('%d:%s' % (x, y)) for (x, y) in token_is_max_context.items()])))
logger.info(('input_ids: %s' % ' '.join([str(x) for x in input_ids])))
logger.info(('input_mask: %s' % ' '.join([str(x) for x in input_mask])))
logger.info(('segment_ids: %s' % ' '.join([str(x) for x in segment_ids])))
if (is_training and span_is_impossible):
logger.info('impossible example')
if (is_training and (not span_is_impossible)):
answer_text = ' '.join(tokens[start_position:(end_position + 1)])
logger.info(('start_position: %d' % start_position))
logger.info(('end_position: %d' % end_position))
logger.info(('answer: %s' % answer_text))
features.append(InputFeatures(unique_id=unique_id, example_index=example_index, doc_span_index=doc_span_index, tokens=tokens, token_to_orig_map=token_to_orig_map, token_is_max_context=token_is_max_context, input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, cls_index=cls_index, p_mask=p_mask, paragraph_len=paragraph_len, start_position=start_position, end_position=end_position, is_impossible=span_is_impossible))
unique_id += 1
return features |
def resnet101(pretrained=False, progress=True, device='cpu', **kwargs):
return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress, device, **kwargs) |
class InterfaceMagic():
    @classmethod
    def all_iter(cls):
try:
import sage.interfaces.all
except ImportError:
return
for (name, obj) in sage.interfaces.all.__dict__.items():
if isinstance(obj, (sage.interfaces.interface.Interface, sage.misc.lazy_import.LazyImport)):
(yield cls(name, obj))
    @classmethod
    def register_all(cls, shell=None):
if (shell is None):
shell = get_ipython()
for interface in cls.all_iter():
shell.register_magic_function(interface.line_magic_factory(), magic_name=interface._name, magic_kind='line')
shell.register_magic_function(interface.cell_magic_factory(), magic_name=interface._name, magic_kind='cell')
    @classmethod
    def find(cls, name):
for magic in cls.all_iter():
if (magic._name == name):
return magic
def __init__(self, name, interface):
self._name = name
self._interface = interface
def line_magic_factory(self):
terminal = get_display_manager().is_in_terminal()
def line_magic(line):
if line:
return self._interface(line)
elif terminal:
self._interface.interact()
else:
raise SyntaxError('{0} command required'.format(self._name))
line_magic.__doc__ = LINE_DOCSTRING.format(name=self._name)
return line_magic
def cell_magic_factory(self):
def cell_magic(line, cell):
if line:
raise SyntaxError('Interface magics have no options, got "{0}"'.format(line))
output = self._interface.eval(cell)
print(output)
cell_magic.__doc__ = CELL_DOCSTRING.format(name=self._name)
return cell_magic |
class VGG16():
def conv2d(self, x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(self, x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
def conv_layer(self, x, kernel_dim, input_dim, output_dim, trainable, activated, name='layer_conv', activation_function=tf.nn.relu):
with tf.variable_scope(name):
weight = tf.get_variable(name='weights', shape=[kernel_dim, kernel_dim, input_dim, output_dim], trainable=trainable, initializer=tf.contrib.layers.xavier_initializer())
bias = tf.get_variable(name='biases', shape=[output_dim], trainable=trainable, initializer=tf.contrib.layers.xavier_initializer())
if activated:
out = activation_function((self.conv2d(x, weight) + bias))
else:
out = (self.conv2d(x, weight) + bias)
return out
def maxpool_layer(self, x, name):
with tf.name_scope(name):
maxpool = self.max_pool_2x2(x)
return maxpool
def VGG16_conv(self, x, keep_prob, trainable, name):
print('VGG16: trainable =', trainable)
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
layer1_output = self.conv_layer(x, 3, 3, 64, trainable, True, 'conv1_1')
layer2_output = self.conv_layer(layer1_output, 3, 64, 64, trainable, True, 'conv1_2')
layer3_output = self.maxpool_layer(layer2_output, 'layer3_maxpool2x2')
layer4_output = self.conv_layer(layer3_output, 3, 64, 128, trainable, True, 'conv2_1')
layer5_output = self.conv_layer(layer4_output, 3, 128, 128, trainable, True, 'conv2_2')
layer6_output = self.maxpool_layer(layer5_output, 'layer6_maxpool2x2')
layer7_output = self.conv_layer(layer6_output, 3, 128, 256, trainable, True, 'conv3_1')
layer8_output = self.conv_layer(layer7_output, 3, 256, 256, trainable, True, 'conv3_2')
layer9_output = self.conv_layer(layer8_output, 3, 256, 256, trainable, True, 'conv3_3')
layer10_output = self.maxpool_layer(layer9_output, 'layer10_maxpool2x2')
layer11_output = self.conv_layer(layer10_output, 3, 256, 512, trainable, True, 'conv4_1')
layer11_output = tf.nn.dropout(layer11_output, keep_prob, name='conv4_1_dropout')
layer12_output = self.conv_layer(layer11_output, 3, 512, 512, trainable, True, 'conv4_2')
layer12_output = tf.nn.dropout(layer12_output, keep_prob, name='conv4_2_dropout')
layer13_output = self.conv_layer(layer12_output, 3, 512, 512, trainable, True, 'conv4_3')
layer13_output = tf.nn.dropout(layer13_output, keep_prob, name='conv4_3_dropout')
layer14_output = self.maxpool_layer(layer13_output, 'layer14_maxpool2x2')
layer15_output = self.conv_layer(layer14_output, 3, 512, 512, trainable, True, 'conv5_1')
layer15_output = tf.nn.dropout(layer15_output, keep_prob, name='conv5_1_dropout')
layer16_output = self.conv_layer(layer15_output, 3, 512, 512, trainable, True, 'conv5_2')
layer16_output = tf.nn.dropout(layer16_output, keep_prob, name='conv5_2_dropout')
layer17_output = self.conv_layer(layer16_output, 3, 512, 512, trainable, True, 'conv5_3')
layer17_output = tf.nn.dropout(layer17_output, keep_prob, name='conv5_3_dropout')
return layer17_output |
class FeedForward(nn.Module):
def __init__(self, dim, mult=4, dropout=0.0):
super().__init__()
self.net = nn.Sequential(nn.Linear(dim, ((dim * mult) * 2)), GEGLU(), nn.Linear((dim * mult), dim), nn.Dropout(dropout))
def forward(self, x):
return self.net(x) |
class TransFuse_S_adapt(nn.Module):
def __init__(self, num_classes=1, drop_rate=0.2, normal_init=True, pretrained=False, pretrained_folder='/bigdata/siyiplace/data/skin_lesion', num_domains=4):
super(TransFuse_S_adapt, self).__init__()
self.resnet = resnet34()
if pretrained:
self.resnet.load_state_dict(torch.load((pretrained_folder + '/pretrained/resnet34-333f7ec4.pth')))
self.resnet.fc = nn.Identity()
self.resnet.layer4 = nn.Identity()
self.transformer = deit_adapt(pretrained=pretrained, pretrained_folder=pretrained_folder, num_domains=num_domains)
self.up1 = Up(in_ch1=384, out_ch=128)
self.up2 = Up(128, 64)
self.final_x = nn.Sequential(Conv(256, 64, 1, bn=True, relu=True), Conv(64, 64, 3, bn=True, relu=True), Conv(64, num_classes, 3, bn=False, relu=False))
self.final_1 = nn.Sequential(Conv(64, 64, 3, bn=True, relu=True), Conv(64, num_classes, 3, bn=False, relu=False))
self.final_2 = nn.Sequential(Conv(64, 64, 3, bn=True, relu=True), Conv(64, num_classes, 3, bn=False, relu=False))
self.up_c = BiFusion_block(ch_1=256, ch_2=384, r_2=4, ch_int=256, ch_out=256, drop_rate=(drop_rate / 2))
self.up_c_1_1 = BiFusion_block(ch_1=128, ch_2=128, r_2=2, ch_int=128, ch_out=128, drop_rate=(drop_rate / 2))
self.up_c_1_2 = Up(in_ch1=256, out_ch=128, in_ch2=128, attn=True)
self.up_c_2_1 = BiFusion_block(ch_1=64, ch_2=64, r_2=1, ch_int=64, ch_out=64, drop_rate=(drop_rate / 2))
self.up_c_2_2 = Up(128, 64, 64, attn=True)
self.drop = nn.Dropout2d(drop_rate)
if normal_init:
self.init_weights()
def forward(self, imgs, domain_label, labels=None):
x_b = self.transformer(imgs, domain_label)
x_b = torch.transpose(x_b, 1, 2)
x_b = x_b.view(x_b.shape[0], (- 1), 16, 16)
x_b = self.drop(x_b)
x_b_1 = self.up1(x_b)
x_b_1 = self.drop(x_b_1)
x_b_2 = self.up2(x_b_1)
x_b_2 = self.drop(x_b_2)
x_u = self.resnet.conv1(imgs)
x_u = self.resnet.bn1(x_u)
x_u = self.resnet.relu(x_u)
x_u = self.resnet.maxpool(x_u)
x_u_2 = self.resnet.layer1(x_u)
x_u_2 = self.drop(x_u_2)
x_u_1 = self.resnet.layer2(x_u_2)
x_u_1 = self.drop(x_u_1)
x_u = self.resnet.layer3(x_u_1)
x_u = self.drop(x_u)
x_c = self.up_c(x_u, x_b)
x_c_1_1 = self.up_c_1_1(x_u_1, x_b_1)
x_c_1 = self.up_c_1_2(x_c, x_c_1_1)
x_c_2_1 = self.up_c_2_1(x_u_2, x_b_2)
x_c_2 = self.up_c_2_2(x_c_1, x_c_2_1)
map_x = F.interpolate(self.final_x(x_c), scale_factor=16, mode='bilinear', align_corners=True)
map_1 = F.interpolate(self.final_1(x_b_2), scale_factor=4, mode='bilinear', align_corners=True)
map_2 = F.interpolate(self.final_2(x_c_2), scale_factor=4, mode='bilinear', align_corners=True)
return (map_x, map_1, map_2)
def init_weights(self):
self.up1.apply(init_weights)
self.up2.apply(init_weights)
self.final_x.apply(init_weights)
self.final_1.apply(init_weights)
self.final_2.apply(init_weights)
self.up_c.apply(init_weights)
self.up_c_1_1.apply(init_weights)
self.up_c_1_2.apply(init_weights)
self.up_c_2_1.apply(init_weights)
self.up_c_2_2.apply(init_weights) |
def get_optimizer(opt_dict, model_params):
opt_dict = opt_dict.copy()
optimizer = _get_optimizer_instance(opt_dict)
opt_dict.pop('name')
optimizer = optimizer(model_params, **opt_dict)
return (optimizer, None) |
def get_config_section(filenames, section):
parser = configparser.ConfigParser(interpolation=configparser.ExtendedInterpolation())
parser.optionxform = str
files = parser.read(filenames)
if (len(files) == 0):
raise ValueError('Config files not found: {}'.format(filenames))
dict_session = dict(parser[section])
dict_session = {k: ast.literal_eval(v) for (k, v) in dict_session.items()}
return dict_session |
def R_inc_mtrx_transform(x, y, u, v, p, q):
cosIby2 = T.sqrt(((1 - (p * p)) - (q * q)))
x1 = (((1 - ((2 * p) * p)) * x) + (((2 * p) * q) * y))
y1 = ((((2 * p) * q) * x) + ((1 - ((2 * q) * q)) * y))
z1 = (((((- 2) * p) * cosIby2) * x) + (((2 * q) * cosIby2) * y))
u1 = (((1 - ((2 * p) * p)) * u) + (((2 * p) * q) * v))
v1 = ((((2 * p) * q) * u) + ((1 - ((2 * q) * q)) * v))
w1 = (((((- 2) * p) * cosIby2) * u) + (((2 * q) * cosIby2) * v))
return (x1, y1, z1, u1, v1, w1) |
def get_children(node: Union[(FASTNode, List[FASTNode])], child_type: str) -> List[FASTNode]:
... |
class TestFloat_power(object):
def test_type_conversion(self):
arg_type = '?bhilBHILefdgFDG'
res_type = 'ddddddddddddgDDG'
for (dtin, dtout) in zip(arg_type, res_type):
msg = ('dtin: %s, dtout: %s' % (dtin, dtout))
arg = np.ones(1, dtype=dtin)
res = np.float_power(arg, arg)
assert_((res.dtype.name == np.dtype(dtout).name), msg) |
class CDivTable(Module):
def __init__(self):
super(CDivTable, self).__init__()
self.gradInput = []
def updateOutput(self, input):
self.output.resize_as_(input[0]).copy_(input[0])
self.output.div_(input[1])
return self.output
def updateGradInput(self, input, gradOutput):
while (len(self.gradInput) < 2):
self.gradInput.append(input[0].new())
gradOutput = gradOutput.contiguous().view_as(input[0])
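        # Gradients of out = a / b: d/da = grad / b and d/db = -grad * a / b^2,
        # computed in-place with the legacy tensor ops below.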
self.gradInput[0].resize_as_(input[0]).copy_(gradOutput).div_(input[1])
self.gradInput[1].resize_as_(input[1]).zero_().addcdiv_((- 1), self.gradInput[0], input[1]).mul_(input[0])
del self.gradInput[len(input):]
return self.gradInput |
class FPN(nn.Module):
def __init__(self, in_channels_list, out_channels):
super(FPN, self).__init__()
leaky = 0
if (out_channels <= 64):
leaky = 0.1
self.output1 = conv_bn1X1(in_channels_list[0], out_channels, stride=1, leaky=leaky)
self.output2 = conv_bn1X1(in_channels_list[1], out_channels, stride=1, leaky=leaky)
self.output3 = conv_bn1X1(in_channels_list[2], out_channels, stride=1, leaky=leaky)
self.merge1 = conv_bn(out_channels, out_channels, leaky=leaky)
self.merge2 = conv_bn(out_channels, out_channels, leaky=leaky)
def forward(self, input):
input = list(input.values())
output1 = self.output1(input[0])
output2 = self.output2(input[1])
output3 = self.output3(input[2])
up3 = F.interpolate(output3, size=[output2.size(2), output2.size(3)], mode='nearest')
output2 = (output2 + up3)
output2 = self.merge2(output2)
up2 = F.interpolate(output2, size=[output1.size(2), output1.size(3)], mode='nearest')
output1 = (output1 + up2)
output1 = self.merge1(output1)
out = [output1, output2, output3]
return out |
def results2markdown(result_dict):
table_data = []
is_multiple_results = False
for (cfg_name, value) in result_dict.items():
name = cfg_name.replace('configs/', '')
fps = value['fps']
ms_times_pre_image = value['ms_times_pre_image']
if isinstance(fps, list):
is_multiple_results = True
mean_fps = value['mean_fps']
mean_times_pre_image = value['mean_times_pre_image']
fps_str = ','.join([str(s) for s in fps])
ms_times_pre_image_str = ','.join([str(s) for s in ms_times_pre_image])
table_data.append([name, fps_str, mean_fps, ms_times_pre_image_str, mean_times_pre_image])
else:
table_data.append([name, fps, ms_times_pre_image])
if is_multiple_results:
table_data.insert(0, ['model', 'fps', 'mean_fps', 'times_pre_image(ms)', 'mean_times_pre_image(ms)'])
else:
table_data.insert(0, ['model', 'fps', 'times_pre_image(ms)'])
table = GithubFlavoredMarkdownTable(table_data)
print(table.table, flush=True) |
def confidence(bootstraps, output_path, confidence_level=0.95):
cb = ConfidenceGenerator(confidence_level=confidence_level)
df = cb.generate_cis(bootstraps)
df.to_csv(output_path, index=False) |
class DetrModel(metaclass=DummyObject):
_backends = ['timm', 'vision']
def __init__(self, *args, **kwargs):
requires_backends(self, ['timm', 'vision']) |
def test_unary_requires_root(unary_model):
test_parse_transitions.test_unary_requires_root(unary_model) |
class L1_Charbonnier_loss(torch.nn.Module):
def __init__(self):
super(L1_Charbonnier_loss, self).__init__()
self.eps = 1e-06
def forward(self, X, Y):
diff = torch.add(X, (- Y))
error = torch.sqrt(((diff * diff) + self.eps))
loss = torch.mean(error)
return loss |
class GoogleHomeListDeviceActions(VirtualFunctionTool):
name = 'GoogleHomeListDeviceActions'
summary = 'Retrieves a list of possible actions that can be performed on a specified smart home device.'
parameters: List[ArgParameter] = [{'name': 'device_id', 'type': 'string', 'description': 'The unique identifier of the smart home device.', 'required': True}]
returns: List[ArgReturn] = [{'name': 'actions', 'type': 'array', 'description': "A list of possible actions for the device, each is an object containing 'action_name' (e.g., 'turn on', 'turn off', 'adjust temperature', etc), 'action_description', and 'action_parameters' (a detailed description of the parameters required for the action, with their types, descriptions, and constraints)"}]
exceptions: List[ArgException] = [{'name': 'InvalidRequestException', 'description': "The 'device_id' parameter is invalid or not supported."}] |
def equal(x, y, dtype=None):
if (dtype is None):
dtype = 'float32'
if isinstance(x, torch.Tensor):
x = x.numpy()
if isinstance(y, torch.Tensor):
y = y.numpy()
out = np.equal(x, y).astype(dtype)
return torch.tensor(out) |
class MultiClicker():
def __init__(self, fig):
self.cid = None
self.points = []
def onclick(event):
try:
print(('button=%d, x=%d, y=%d, xdata=%f, ydata=%f' % (event.button, event.x, event.y, event.xdata, event.ydata)))
if (event.button == 3):
self.stop()
else:
self.points.append((event.xdata, event.ydata))
            except Exception:
                print('printing failed')
                raise
self.canvas = fig.canvas
self.cid = self.canvas.mpl_connect('button_press_event', onclick)
def stop(self):
self.canvas.mpl_disconnect(self.cid) |
def bimap(first, second):
return ({f: s for (f, s) in zip(first, second)}, {s: f for (f, s) in zip(first, second)}) |
def export_to_embedding_projector(lf):
lf.load_checkpoint(get_checkpoint_path(args))
lf.export_to_embedding_projector() |
def test_model(predictor: Predictor, hypotheses: Mapping[(str, str)], test_data: datasets.Dataset, result_file: Path, n_test_examples: Optional[int]):
labels = test_data.features['label']
if (n_test_examples is not None):
test_data = sample(test_data, seed=42, n_examples_per_label=n_test_examples)
examples = [{'text': text, 'label': None} for text in test_data['text']]
predictions = predictor.predict(hypotheses, examples)
result_file.parent.mkdir(parents=True, exist_ok=True)
pred_file = result_file.with_name((result_file.stem + '_predictions')).with_suffix('.jsonl')
with pred_file.open('tw') as writer:
for (text, pred, ref) in zip(test_data['text'], predictions, test_data['label']):
pred_label = labels.int2str(pred)
ref_label = labels.int2str(ref)
writer.write(json.dumps({'text': text, 'reference': ref_label, 'prediction': pred_label}))
writer.write('\n')
acc = accuracy_score(test_data['label'], predictions)
mf1 = macro_f1_score(test_data['label'], predictions)
with result_file.open('tw') as writer:
json.dump({'acc': (acc * 100.0), 'mf1': (mf1 * 100.0)}, writer, indent=' ') |
def print_range(x):
return (round(float(x.min()), 2), round(float(x.mean()), 2), round(float(x.max()), 2)) |
@add_start_docstrings('CamemBERT Model with a token classification head on top (a linear layer on top of\n    the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. ', CAMEMBERT_START_DOCSTRING)
class TFCamembertForTokenClassification(TFRobertaForTokenClassification):
config_class = CamembertConfig |
def yaml_to_cpp(reg_def_cpp, reg_def_yaml):
reg_yaml = ordered_yaml_load(reg_def_yaml)
gen_reg_def_cpp(reg_def_cpp, reg_yaml, reg_def_yaml) |
def test_part_of_speech():
nlp = stanfordnlp.Pipeline(**{'processors': 'tokenize,pos', 'models_dir': TEST_MODELS_DIR, 'lang': 'en'})
doc = nlp(EN_DOC)
assert (EN_DOC_GOLD == '\n\n'.join([sent.tokens_string() for sent in doc.sentences])) |
def simple_KD_train(xloader, teacher, network, criterion, scheduler, optimizer, optim_config, extra_info, print_freq, logger):
(loss, acc1, acc5) = procedure(xloader, teacher, network, criterion, scheduler, optimizer, 'train', optim_config, extra_info, print_freq, logger)
return (loss, acc1, acc5) |
@register_model
def efficientnet_lite2(pretrained=False, **kwargs):
model = _gen_efficientnet_lite('efficientnet_lite2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs)
return model |
def run_experiment(method_call=None, batch_tasks=None, exp_prefix='experiment', exp_name=None, log_dir=None, script='garage.experiment.experiment_wrapper', python_command='python', dry=False, env=None, variant=None, force_cpu=False, pre_commands=None, **kwargs):
if ((method_call is None) and (batch_tasks is None)):
raise Exception('Must provide at least either method_call or batch_tasks')
for task in (batch_tasks or [method_call]):
if (not hasattr(task, '__call__')):
raise ValueError('batch_tasks should be callable')
if (variant is None):
variant = dict()
if (batch_tasks is None):
batch_tasks = [dict(kwargs, pre_commands=pre_commands, method_call=method_call, exp_name=exp_name, log_dir=log_dir, env=env, variant=variant)]
global exp_count
if force_cpu:
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
for task in batch_tasks:
call = task.pop('method_call')
data = base64.b64encode(cloudpickle.dumps(call)).decode('utf-8')
task['args_data'] = data
exp_count += 1
if (task.get('exp_name', None) is None):
task['exp_name'] = '{}_{}_{:04n}'.format(exp_prefix, timestamp, exp_count)
if (task.get('log_dir', None) is None):
task['log_dir'] = '{log_dir}/local/{exp_prefix}/{exp_name}'.format(log_dir=osp.join(os.getcwd(), 'data'), exp_prefix=exp_prefix.replace('_', '-'), exp_name=task['exp_name'])
if (task.get('variant', None) is not None):
variant = task.pop('variant')
if ('exp_name' not in variant):
variant['exp_name'] = task['exp_name']
task['variant_data'] = base64.b64encode(pickle.dumps(variant)).decode('utf-8')
elif ('variant' in task):
del task['variant']
task['env'] = (task.get('env', dict()) or dict())
task['env']['GARAGE_FORCE_CPU'] = str(force_cpu)
for task in batch_tasks:
env = task.pop('env', None)
command = to_local_command(task, python_command=python_command, script=script)
print(command)
if dry:
return
try:
if (env is None):
env = dict()
subprocess.run(command, shell=True, env=dict(os.environ, **env), check=True)
except Exception as e:
print(e)
raise |
@CheckpointLoader.register_scheme(prefixes='pavi://')
def load_from_pavi(filename, map_location=None):
assert filename.startswith('pavi://'), f'Expected filename startswith `pavi://`, but get {filename}'
model_path = filename[7:]
try:
from pavi import modelcloud
except ImportError:
raise ImportError('Please install pavi to load checkpoint from modelcloud.')
model = modelcloud.get(model_path)
with TemporaryDirectory() as tmp_dir:
downloaded_file = osp.join(tmp_dir, model.name)
model.download(downloaded_file)
checkpoint = torch.load(downloaded_file, map_location=map_location)
return checkpoint |
def load_reference(path_to_reference):
with open(path_to_reference, 'r') as f:
qids_to_relevant_passageids = load_reference_from_stream(f)
return qids_to_relevant_passageids |
class EndEffectorPoseViaIK(ArmActionMode):
def __init__(self, absolute_mode: bool=True, frame: str='world', collision_checking: bool=False):
self._absolute_mode = absolute_mode
self._frame = frame
self._collision_checking = collision_checking
if (frame not in ['world', 'end effector']):
            raise ValueError("Expected frame to be one of: 'world', 'end effector'")
def action(self, scene: Scene, action: np.ndarray):
assert_action_shape(action, (7,))
assert_unit_quaternion(action[3:])
if ((not self._absolute_mode) and (self._frame != 'end effector')):
action = calculate_delta_pose(scene.robot, action)
relative_to = (None if (self._frame == 'world') else scene.robot.arm.get_tip())
try:
joint_positions = scene.robot.arm.solve_ik_via_jacobian(action[:3], quaternion=action[3:], relative_to=relative_to)
scene.robot.arm.set_joint_target_positions(joint_positions)
except IKError as e:
raise InvalidActionError('Could not perform IK via Jacobian; most likely due to current end-effector pose being too far from the given target pose. Try limiting/bounding your action space.') from e
done = False
prev_values = None
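        # Step the simulation until the joints reach the IK solution or stop making progress.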
while (not done):
scene.step()
cur_positions = scene.robot.arm.get_joint_positions()
reached = np.allclose(cur_positions, joint_positions, atol=0.01)
not_moving = False
if (prev_values is not None):
not_moving = np.allclose(cur_positions, prev_values, atol=0.001)
prev_values = cur_positions
done = (reached or not_moving)
def action_shape(self, scene: Scene) -> tuple:
return (7,) |
class TFCTRLPreTrainedModel():
def __init__(self, *args, **kwargs):
requires_tf(self)
def from_pretrained(self, *args, **kwargs):
requires_tf(self) |
class SetPartition(AbstractSetPartition, metaclass=InheritComparisonClasscallMetaclass):
def __classcall_private__(cls, parts, check=True):
P = SetPartitions()
return P.element_class(P, parts, check=check)
def __init__(self, parent, s, check=True):
self._latex_options = {}
ClonableArray.__init__(self, parent, sorted(map(frozenset, s), key=min), check=check)
def check(self):
if (self not in self.parent()):
raise ValueError(f'{self} is not an element of {self.parent()}')
def set_latex_options(self, **kwargs):
valid_args = ['tikz_scale', 'plot', 'color', 'fill', 'show_labels', 'radius', 'angle']
for key in kwargs:
if (key not in valid_args):
raise ValueError(f'unknown keyword argument: {key}')
if (key == 'plot'):
if (not ((kwargs['plot'] == 'cyclic') or (kwargs['plot'] == 'linear') or (kwargs['plot'] is None))):
raise ValueError("plot must be None, 'cyclic', or 'linear'")
self._latex_options.update(kwargs)
def latex_options(self):
opts = self._latex_options.copy()
if ('tikz_scale' not in opts):
opts['tikz_scale'] = 1
if ('plot' not in opts):
opts['plot'] = None
if ('color' not in opts):
opts['color'] = 'black'
if ('fill' not in opts):
opts['fill'] = False
if ('show_labels' not in opts):
opts['show_labels'] = True
if ('radius' not in opts):
opts['radius'] = '1cm'
if ('angle' not in opts):
opts['angle'] = 0
return opts
def _latex_(self):
latex_options = self.latex_options()
if (latex_options['plot'] is None):
return repr(self).replace('{', '\\{').replace('}', '\\}')
from sage.misc.latex import latex
latex.add_package_to_preamble_if_available('tikz')
res = '\\begin{{tikzpicture}}[scale={}]\n'.format(latex_options['tikz_scale'])
cardinality = self.base_set_cardinality()
from sage.rings.integer_ring import ZZ
if all(((x in ZZ) for x in self.base_set())):
sort_key = ZZ
else:
sort_key = str
base_set = sorted(self.base_set(), key=sort_key)
color = latex_options['color']
if (latex_options['plot'] == 'cyclic'):
degrees = (360 // cardinality)
radius = latex_options['radius']
res += '\\draw (0,0) circle [radius={}];\n'.format(radius)
for (k, i) in enumerate(base_set):
location = (((cardinality - k) * degrees) - 270)
if latex_options['show_labels']:
res += '\\node[label={}:{}]'.format(location, i)
else:
res += '\\node'
res += ' ({}) at ({}:{}) {{}};\n'.format(k, location, radius)
for partition in sorted(self, key=str):
res += ('\\draw[-,thick,color=' + color)
if (latex_options['fill'] is not False):
if isinstance(latex_options['fill'], str):
res += (',fill=' + latex_options['fill'])
else:
res += ',fill={},fill opacity=0.1'.format(color)
res += '] '
res += ' -- '.join(('({}.center)'.format(base_set.index(j)) for j in sorted(partition, key=sort_key)))
res += ' -- cycle;\n'
for k in range(len(base_set)):
res += '\\fill[color=black] ({}) circle (1.5pt);\n'.format(k)
elif (latex_options['plot'] == 'linear'):
angle = latex_options['angle']
for (k, i) in enumerate(base_set):
if latex_options['show_labels']:
res += '\\node[below=.05cm] at ({},0) {{${}$}};\n'.format(k, i)
res += '\\node[draw,circle, inner sep=0pt, minimum width=4pt, fill=black] '
res += '({k}) at ({k},0) {{}};\n'.format(k=k)
for partition in sorted(self, key=str):
p = sorted(partition, key=sort_key)
if (len(p) <= 1):
continue
for k in range(1, len(p)):
res += '\\draw[color={}] ({})'.format(color, base_set.index(p[k]))
res += ' to [out={},in={}] '.format((90 + angle), (90 - angle))
res += '({});\n'.format(base_set.index(p[(k - 1)]))
else:
raise ValueError("plot must be None, 'cyclic', or 'linear'")
res += '\\end{tikzpicture}'
return res
cardinality = ClonableArray.__len__
size = AbstractSetPartition.base_set_cardinality
def pipe(self, other):
parts = list(self)
n = self.base_set_cardinality()
for newpart in other:
raised_newpart = Set(((i + n) for i in newpart))
parts.append(raised_newpart)
return SetPartition(parts)
    @combinatorial_map(name='shape')
def shape(self):
return Partition(sorted(map(len, self), reverse=True))
shape_partition = shape
to_partition = shape
    @combinatorial_map(name='to permutation')
def to_permutation(self):
return Permutation(tuple(map(tuple, self.standard_form())))
def to_restricted_growth_word(self, bijection='blocks'):
if (bijection == 'blocks'):
return self.to_restricted_growth_word_blocks()
if (bijection == 'intertwining'):
return self.to_restricted_growth_word_intertwining()
raise ValueError('the given bijection is not valid')
def to_restricted_growth_word_blocks(self):
w = ([0] * self.size())
for (i, B) in enumerate(self):
for j in B:
w[(j - 1)] = i
return w
def to_restricted_growth_word_intertwining(self):
A = sorted(self.arcs())
O = (min(B) for B in self)
C = [max(B) for B in self]
I = ([0] * self.size())
for i in O:
I[(i - 1)] = (sum((1 for (k, l) in A if (k < i < l))) + sum((1 for k in C if (k < i))))
for (i, j) in A:
I[(j - 1)] = (sum((1 for (k, l) in A if (i < k < j < l))) + sum((1 for k in C if (i < k < j))))
return I
def openers(self):
return sorted([min(B) for B in self])
def closers(self):
return sorted([max(B) for B in self])
def to_rook_placement(self, bijection='arcs'):
if (bijection == 'arcs'):
return self.arcs()
if (bijection == 'gamma'):
return self.to_rook_placement_gamma()
if (bijection == 'rho'):
return self.to_rook_placement_rho()
if (bijection == 'psi'):
return self.to_rook_placement_psi()
raise ValueError('the given map is not valid')
def to_rook_placement_gamma(self):
n = self.size()
if (n == 0):
return []
w = self.to_restricted_growth_word_blocks()
EC = sorted([w.index(i) for i in range((max(w) + 1))])
rooks = []
R = []
for c in range(n):
if (c not in EC):
r = 0
w_c = w[c]
while ((w_c > 0) or (r in R)):
if (r not in R):
w_c -= 1
r += 1
rooks.append(((n - c), (n - r)))
R.append(r)
return sorted(rooks)
def to_rook_placement_rho(self):
n = self.size()
if (n == 0):
return []
w = self.to_restricted_growth_word_blocks()
w_rev = w[::(- 1)]
R = sorted([((n - w_rev.index(i)) - 1) for i in range((max(w) + 1))])
rs = [sum((1 for j in R if ((j > i) and (w[j] < w[i])))) for i in range(n)]
EC = [(n - j) for j in R]
rooks = []
for i in range(1, n):
U = [j for j in range(((n + 1) - i), (n + 1)) if (j not in EC)]
if (rs[i] < len(U)):
j = U[rs[i]]
rooks.append((((n + 1) - j), (i + 1)))
EC.append(j)
return sorted(rooks)
def to_rook_placement_psi(self):
n = self.size()
degrees = []
P = [sorted(e) for e in self]
for j in range(n, 0, (- 1)):
B = next((B for B in P if (B[(- 1)] == j)))
if (len(B) == 1):
P.remove(B)
else:
del B[(- 1)]
P = sorted(P, key=(lambda B: ((- len(B)), min(B))))
b = P.index(B)
i = ((j - b) - 1)
degrees.append((j, i))
rooks = []
attacked_rows = []
for (j, d) in reversed(degrees):
i = 1
while (d > (i + sum((1 for r in attacked_rows if (r > i))))):
i += 1
attacked_rows.append(i)
rooks.append((i, j))
return sorted(rooks)
def apply_permutation(self, p):
return self.__class__(self.parent(), [Set(map(p, B)) for B in self])
def crossings_iterator(self):
arcs = sorted(self.arcs(), key=min)
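        # Two arcs (i1, j1) and (i2, j2) with i1 <= i2 cross exactly when i2 < j1 < j2.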
while arcs:
(i1, j1) = arcs.pop(0)
for (i2, j2) in arcs:
if (i2 < j1 < j2):
(yield ((i1, j1), (i2, j2)))
def crossings(self):
return list(self.crossings_iterator())
def number_of_crossings(self):
return Integer(len(list(self.crossings_iterator())))
def is_noncrossing(self) -> bool:
it = self.crossings_iterator()
try:
next(it)
except StopIteration:
return True
return False
def nestings_iterator(self):
arcs = sorted(self.arcs(), key=min)
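        # Arc (i2, j2) is nested inside (i1, j1) when i1 <= i2 < j2 < j1.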
while arcs:
(i1, j1) = arcs.pop(0)
for (i2, j2) in arcs:
if (i2 < j2 < j1):
(yield ((i1, j1), (i2, j2)))
def nestings(self):
return list(self.nestings_iterator())
def number_of_nestings(self):
c = Integer(0)
one = Integer(1)
for _ in self.nestings_iterator():
c += one
return c
def is_nonnesting(self) -> bool:
it = self.nestings_iterator()
try:
next(it)
except StopIteration:
return True
return False
def is_atomic(self) -> bool:
if (len(self) == 0):
return False
maximum_so_far = max(self[0])
for S in self[1:]:
if (maximum_so_far < min(S)):
return False
maximum_so_far = max(maximum_so_far, max(S))
return True
def standardization(self):
r = {e: i for (i, e) in enumerate(sorted(self.base_set()), 1)}
return SetPartitions(len(r))([[r[e] for e in b] for b in self])
def restriction(self, I):
ret = []
for part in self:
newpart = [i for i in part if (i in I)]
if (len(newpart) != 0):
ret.append(newpart)
return SetPartition(ret)
def ordered_set_partition_action(self, s):
cur = 1
ret = []
for part in s:
sub_parts = [list(self[(i - 1)]) for i in part]
mins = [min(i) for i in sub_parts]
over_max = (max(map(max, sub_parts)) + 1)
temp = [[] for _ in repeat(None, len(part))]
while (min(mins) != over_max):
m = min(mins)
i = mins.index(m)
temp[i].append(cur)
cur += 1
sub_parts[i].pop(sub_parts[i].index(m))
if (len(sub_parts[i]) != 0):
mins[i] = min(sub_parts[i])
else:
mins[i] = over_max
ret += temp
return SetPartition(ret)
def refinements(self):
L = [SetPartitions(part) for part in self]
return [SetPartition(sum(map(list, x), [])) for x in itertools.product(*L)]
def strict_coarsenings(self):
todo = [self]
visited = set([self])
ret = [self]
while todo:
A = todo.pop()
for (i, part) in enumerate(A):
for (j, other) in enumerate(A[(i + 1):]):
if (max(part) < min(other)):
next_pi = A[:i]
next_pi.append(part.union(other))
next_pi += (A[(i + 1):((i + 1) + j)] + A[((i + j) + 2):])
next_pi = SetPartition(next_pi)
if (next_pi not in visited):
todo.append(next_pi)
visited.add(next_pi)
ret.append(next_pi)
return ret
def arcs(self):
arcs = []
for p in self:
p = sorted(p)
for i in range((len(p) - 1)):
arcs.append((p[i], p[(i + 1)]))
return arcs
def plot(self, angle=None, color='black', base_set_dict=None):
from sage.plot.graphics import Graphics
from sage.plot.point import point
from sage.plot.text import text
from sage.plot.arc import arc
from sage.symbolic.constants import pi
from sage.functions.trig import tan, sin
from sage.functions.generalized import sgn
diag = Graphics()
sorted_vertices_list = list(self.base_set())
sorted_vertices_list.sort()
if (angle is None):
angle = (pi / 4)
if (base_set_dict is not None):
vertices_dict = base_set_dict
else:
vertices_dict = {val: pos for (pos, val) in enumerate(sorted_vertices_list)}
for elt in vertices_dict:
pos = vertices_dict[elt]
diag += point((pos, 0), size=30, color=color)
diag += text(elt, (pos, ((- sgn(angle)) * 0.1)), color=color)
for (k, j) in self.arcs():
(pos_k, pos_j) = (float(vertices_dict[k]), float(vertices_dict[j]))
center = (((pos_k + pos_j) / 2), ((- abs((pos_j - pos_k))) / (2 * tan(angle))))
r1 = abs(((pos_j - pos_k) / (2 * sin(angle))))
sector = ((sgn(angle) * ((pi / 2) - angle)), (sgn(angle) * ((pi / 2) + angle)))
diag += arc(center=center, r1=r1, sector=sector, color=color)
diag.axes(False)
return diag |
class BayesianRegressionModel(PyroSviTrainMixin, PyroSampleMixin, BaseModelClass):
def __init__(self, adata: AnnData, per_cell_weight=False):
clear_param_store()
super().__init__(adata)
self.module = BayesianRegressionModule(in_features=adata.shape[1], out_features=1, per_cell_weight=per_cell_weight)
self._model_summary_string = 'BayesianRegressionModel'
self.init_params_ = self._get_init_params(locals())
    @classmethod
    def setup_anndata(cls, adata: AnnData, **kwargs) -> (AnnData | None):
setup_method_args = cls._get_setup_method_args(**locals())
adata.obs['_indices'] = np.arange(adata.n_obs).astype('int64')
anndata_fields = [LayerField(REGISTRY_KEYS.X_KEY, None, is_count_data=True), CategoricalObsField(REGISTRY_KEYS.LABELS_KEY, None), NumericalObsField(REGISTRY_KEYS.INDICES_KEY, '_indices')]
adata_manager = AnnDataManager(fields=anndata_fields, setup_method_args=setup_method_args)
adata_manager.register_fields(adata, **kwargs)
cls.register_manager(adata_manager) |
@pytest.mark.mlir
def test_mlir_tasklet_float():
A = dace.ndarray((1,), dace.float32)
B = dace.ndarray((1,), dace.float32)
C = dace.ndarray((1,), dace.float32)
A[:] = 5.5
B[:] = 2.2
C[:] = 15.15
mlir_tasklet_float(A, B, C)
assert np.allclose(C[0], 7.7) |
def _write_single_frame(im, fp, palette):
im_out = _normalize_mode(im, True)
for (k, v) in im_out.info.items():
im.encoderinfo.setdefault(k, v)
im_out = _normalize_palette(im_out, palette, im.encoderinfo)
for s in _get_global_header(im_out, im.encoderinfo):
fp.write(s)
flags = 0
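    # Bit 0x40 of the GIF image descriptor flags marks the frame as interlaced.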
if get_interlace(im):
flags = (flags | 64)
_write_local_header(fp, im, (0, 0), flags)
im_out.encoderconfig = (8, get_interlace(im))
ImageFile._save(im_out, fp, [('gif', ((0, 0) + im.size), 0, RAWMODE[im_out.mode])])
fp.write(b'\x00') |
def train(train_loader, model, criterion, optimizer, epoch, args):
batch_time = AverageMeter('Time', ':6.3f')
data_time = AverageMeter('Data', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(len(train_loader), [batch_time, data_time, losses, top1, top5], prefix='Epoch: [{}]'.format(epoch))
model.eval()
end = time.time()
for (i, (images, target)) in enumerate(train_loader):
data_time.update((time.time() - end))
if (args.gpu is not None):
images = images.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
output = model(images)
loss = criterion(output, target)
(acc1, acc5) = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
optimizer.zero_grad()
loss.backward()
optimizer.step()
batch_time.update((time.time() - end))
end = time.time()
if ((i % args.print_freq) == 0):
progress.display(i) |
def _format(val: Any, output_format: str='standard', errors: str='coarse') -> Any:
val = str(val)
result: Any = []
if (val in NULL_VALUES):
return [np.nan]
if (not validate_pl_regon(val)):
if (errors == 'raise'):
raise ValueError(f'Unable to parse value {val}')
error_result = (val if (errors == 'ignore') else np.nan)
return [error_result]
if (output_format in {'compact', 'standard'}):
result = ([regon.compact(val)] + result)
return result |
@_display_as_base
class _UFuncInputCastingError(_UFuncCastingError):
def __init__(self, ufunc, casting, from_, to, i):
super().__init__(ufunc, casting, from_, to)
self.in_i = i
def __str__(self):
i_str = ('{} '.format(self.in_i) if (self.ufunc.nin != 1) else '')
return 'Cannot cast ufunc {!r} input {}from {!r} to {!r} with casting rule {!r}'.format(self.ufunc.__name__, i_str, self.from_, self.to, self.casting) |
class TFXLMRobertaForQuestionAnswering():
def __init__(self, *args, **kwargs):
requires_tf(self)
def from_pretrained(self, *args, **kwargs):
requires_tf(self) |
def apply_succ(prob_letters):
return [prob_letters[:(- 1)], (prob_letters[:(- 2)] + [prob_letters[(- 1)]])] |
class PairMarginMiner(BaseTupleMiner):
def __init__(self, pos_margin=0.2, neg_margin=0.8, **kwargs):
super().__init__(**kwargs)
self.pos_margin = pos_margin
self.neg_margin = neg_margin
self.add_to_recordable_attributes(list_of_names=['pos_margin', 'neg_margin'], is_stat=False)
self.add_to_recordable_attributes(list_of_names=['pos_pair_dist', 'neg_pair_dist'], is_stat=True)
def mine(self, embeddings, labels, ref_emb, ref_labels):
mat = self.distance(embeddings, ref_emb)
(a1, p, a2, n) = lmu.get_all_pairs_indices(labels, ref_labels)
pos_pair = mat[(a1, p)]
neg_pair = mat[(a2, n)]
self.set_stats(pos_pair, neg_pair)
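        # Keep only hard pairs: positives farther apart than pos_margin and negatives closer
        # than neg_margin (the comparisons flip when the distance is inverted, i.e. a similarity).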
pos_mask = ((pos_pair < self.pos_margin) if self.distance.is_inverted else (pos_pair > self.pos_margin))
neg_mask = ((neg_pair > self.neg_margin) if self.distance.is_inverted else (neg_pair < self.neg_margin))
return (a1[pos_mask], p[pos_mask], a2[neg_mask], n[neg_mask])
def set_stats(self, pos_pair, neg_pair):
if self.collect_stats:
with torch.no_grad():
self.pos_pair_dist = (torch.mean(pos_pair).item() if (len(pos_pair) > 0) else 0)
self.neg_pair_dist = (torch.mean(neg_pair).item() if (len(neg_pair) > 0) else 0) |
def get_transforms(config: ((str | A.Compose) | None)=None, image_size: ((int | tuple) | None)=None, to_tensor: bool=True) -> A.Compose:
warnings.warn(DeprecationWarning('The function anomalib.pre_processing.pre_process.get_transforms is deprecated and will be removed in a future release. Please use anomalib.data.utils.transform.get_transforms instead.'))
if (config is None is image_size):
raise ValueError('Both config and image_size cannot be `None`. Provide either config file to de-serialize transforms or image_size to get the default transformations')
transforms: A.Compose
if ((config is None) and (image_size is not None)):
logger.warning('Transform configs has not been provided. Images will be normalized using ImageNet statistics.')
(height, width) = get_image_height_and_width(image_size)
transforms = A.Compose([A.Resize(height=height, width=width, always_apply=True), A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)), ToTensorV2()])
if (config is not None):
if isinstance(config, str):
transforms = A.load(filepath=config, data_format='yaml')
elif isinstance(config, A.Compose):
transforms = config
else:
raise ValueError('config could be either ``str`` or ``A.Compose``')
if (not to_tensor):
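        # When tensors are not wanted, strip a trailing ToTensorV2 so the pipeline returns numpy arrays.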
if isinstance(transforms[(- 1)], ToTensorV2):
transforms = A.Compose(transforms[:(- 1)])
if ((not any((isinstance(transform, A.Resize) for transform in transforms))) and (image_size is not None)):
(height, width) = get_image_height_and_width(image_size)
transforms = A.Compose([A.Resize(height=height, width=width, always_apply=True), transforms])
return transforms |
def main(args):
utils.import_user_module(args)
os.makedirs(args.destdir, exist_ok=True)
logger.addHandler(logging.FileHandler(filename=os.path.join(args.destdir, 'preprocess.log')))
logger.info(args)
task = tasks.get_task(args.task)
def train_path(lang):
return '{}{}'.format(args.trainpref, (('.' + lang) if lang else ''))
def file_name(prefix, lang):
fname = prefix
if (lang is not None):
fname += '.{lang}'.format(lang=lang)
return fname
def dest_path(prefix, lang):
return os.path.join(args.destdir, file_name(prefix, lang))
def dict_path(lang):
return (dest_path('dict', lang) + '.txt')
def build_dictionary(filenames, src=False, tgt=False):
assert (src ^ tgt)
return task.build_dictionary(filenames, workers=args.workers, threshold=(args.thresholdsrc if src else args.thresholdtgt), nwords=(args.nwordssrc if src else args.nwordstgt), padding_factor=args.padding_factor)
target = (not args.only_source)
if ((not args.srcdict) and os.path.exists(dict_path(args.source_lang))):
raise FileExistsError(dict_path(args.source_lang))
if (target and (not args.tgtdict) and os.path.exists(dict_path(args.target_lang))):
raise FileExistsError(dict_path(args.target_lang))
if args.joined_dictionary:
assert ((not args.srcdict) or (not args.tgtdict)), 'cannot use both --srcdict and --tgtdict with --joined-dictionary'
if args.srcdict:
src_dict = task.load_dictionary(args.srcdict)
elif args.tgtdict:
src_dict = task.load_dictionary(args.tgtdict)
else:
assert args.trainpref, '--trainpref must be set if --srcdict is not specified'
src_dict = build_dictionary({train_path(lang) for lang in [args.source_lang, args.target_lang]}, src=True)
tgt_dict = src_dict
else:
if args.srcdict:
src_dict = task.load_dictionary(args.srcdict)
else:
assert args.trainpref, '--trainpref must be set if --srcdict is not specified'
src_dict = build_dictionary([train_path(args.source_lang)], src=True)
if target:
if args.tgtdict:
tgt_dict = task.load_dictionary(args.tgtdict)
else:
assert args.trainpref, '--trainpref must be set if --tgtdict is not specified'
tgt_dict = build_dictionary([train_path(args.target_lang)], tgt=True)
else:
tgt_dict = None
src_dict.save(dict_path(args.source_lang))
if (target and (tgt_dict is not None)):
tgt_dict.save(dict_path(args.target_lang))
def make_binary_dataset(vocab, input_prefix, output_prefix, lang, num_workers):
logger.info('[{}] Dictionary: {} types'.format(lang, len(vocab)))
n_seq_tok = [0, 0]
replaced = Counter()
def merge_result(worker_result):
replaced.update(worker_result['replaced'])
n_seq_tok[0] += worker_result['nseq']
n_seq_tok[1] += worker_result['ntok']
input_file = '{}{}'.format(input_prefix, (('.' + lang) if (lang is not None) else ''))
offsets = Binarizer.find_offsets(input_file, num_workers)
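        # Workers 1..N-1 binarize their shards asynchronously; the main process binarizes the
        # first shard itself and then merges the per-worker files into the final dataset.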
pool = None
if (num_workers > 1):
pool = Pool(processes=(num_workers - 1))
for worker_id in range(1, num_workers):
prefix = '{}{}'.format(output_prefix, worker_id)
pool.apply_async(binarize, (args, input_file, vocab, prefix, lang, offsets[worker_id], offsets[(worker_id + 1)]), callback=merge_result)
pool.close()
ds = indexed_dataset.make_builder(dataset_dest_file(args, output_prefix, lang, 'bin'), impl=args.dataset_impl, vocab_size=len(vocab))
merge_result(Binarizer.binarize(input_file, vocab, (lambda t: ds.add_item(t)), offset=0, end=offsets[1]))
if (num_workers > 1):
pool.join()
for worker_id in range(1, num_workers):
prefix = '{}{}'.format(output_prefix, worker_id)
temp_file_path = dataset_dest_prefix(args, prefix, lang)
ds.merge_file_(temp_file_path)
os.remove(indexed_dataset.data_file_path(temp_file_path))
os.remove(indexed_dataset.index_file_path(temp_file_path))
ds.finalize(dataset_dest_file(args, output_prefix, lang, 'idx'))
logger.info('[{}] {}: {} sents, {} tokens, {:.3}% replaced by {}'.format(lang, input_file, n_seq_tok[0], n_seq_tok[1], ((100 * sum(replaced.values())) / n_seq_tok[1]), vocab.unk_word))
def make_binary_alignment_dataset(input_prefix, output_prefix, num_workers):
nseq = [0]
def merge_result(worker_result):
nseq[0] += worker_result['nseq']
input_file = input_prefix
offsets = Binarizer.find_offsets(input_file, num_workers)
pool = None
if (num_workers > 1):
pool = Pool(processes=(num_workers - 1))
for worker_id in range(1, num_workers):
prefix = '{}{}'.format(output_prefix, worker_id)
pool.apply_async(binarize_alignments, (args, input_file, utils.parse_alignment, prefix, offsets[worker_id], offsets[(worker_id + 1)]), callback=merge_result)
pool.close()
ds = indexed_dataset.make_builder(dataset_dest_file(args, output_prefix, None, 'bin'), impl=args.dataset_impl)
merge_result(Binarizer.binarize_alignments(input_file, utils.parse_alignment, (lambda t: ds.add_item(t)), offset=0, end=offsets[1]))
if (num_workers > 1):
pool.join()
for worker_id in range(1, num_workers):
prefix = '{}{}'.format(output_prefix, worker_id)
temp_file_path = dataset_dest_prefix(args, prefix, None)
ds.merge_file_(temp_file_path)
os.remove(indexed_dataset.data_file_path(temp_file_path))
os.remove(indexed_dataset.index_file_path(temp_file_path))
ds.finalize(dataset_dest_file(args, output_prefix, None, 'idx'))
logger.info('[alignments] {}: parsed {} alignments'.format(input_file, nseq[0]))
def make_dataset(vocab, input_prefix, output_prefix, lang, num_workers=1):
if (args.dataset_impl == 'raw'):
output_text_file = dest_path((output_prefix + '.{}-{}'.format(args.source_lang, args.target_lang)), lang)
shutil.copyfile(file_name(input_prefix, lang), output_text_file)
else:
make_binary_dataset(vocab, input_prefix, output_prefix, lang, num_workers)
def make_all(lang, vocab):
if args.trainpref:
make_dataset(vocab, args.trainpref, 'train', lang, num_workers=args.workers)
if args.validpref:
for (k, validpref) in enumerate(args.validpref.split(',')):
outprefix = ('valid{}'.format(k) if (k > 0) else 'valid')
make_dataset(vocab, validpref, outprefix, lang, num_workers=args.workers)
if args.testpref:
for (k, testpref) in enumerate(args.testpref.split(',')):
outprefix = ('test{}'.format(k) if (k > 0) else 'test')
make_dataset(vocab, testpref, outprefix, lang, num_workers=args.workers)
def make_all_alignments():
if (args.trainpref and os.path.exists(((args.trainpref + '.') + args.align_suffix))):
make_binary_alignment_dataset(((args.trainpref + '.') + args.align_suffix), 'train.align', num_workers=args.workers)
if (args.validpref and os.path.exists(((args.validpref + '.') + args.align_suffix))):
make_binary_alignment_dataset(((args.validpref + '.') + args.align_suffix), 'valid.align', num_workers=args.workers)
if (args.testpref and os.path.exists(((args.testpref + '.') + args.align_suffix))):
make_binary_alignment_dataset(((args.testpref + '.') + args.align_suffix), 'test.align', num_workers=args.workers)
make_all(args.source_lang, src_dict)
if target:
make_all(args.target_lang, tgt_dict)
if args.align_suffix:
make_all_alignments()
logger.info('Wrote preprocessed data to {}'.format(args.destdir))
if args.alignfile:
assert args.trainpref, '--trainpref must be set if --alignfile is specified'
src_file_name = train_path(args.source_lang)
tgt_file_name = train_path(args.target_lang)
freq_map = {}
with open(args.alignfile, 'r', encoding='utf-8') as align_file:
with open(src_file_name, 'r', encoding='utf-8') as src_file:
with open(tgt_file_name, 'r', encoding='utf-8') as tgt_file:
for (a, s, t) in zip_longest(align_file, src_file, tgt_file):
si = src_dict.encode_line(s, add_if_not_exist=False)
ti = tgt_dict.encode_line(t, add_if_not_exist=False)
ai = list(map((lambda x: tuple(x.split('-'))), a.split()))
for (sai, tai) in ai:
srcidx = si[int(sai)]
tgtidx = ti[int(tai)]
if ((srcidx != src_dict.unk()) and (tgtidx != tgt_dict.unk())):
assert (srcidx != src_dict.pad())
assert (srcidx != src_dict.eos())
assert (tgtidx != tgt_dict.pad())
assert (tgtidx != tgt_dict.eos())
if (srcidx not in freq_map):
freq_map[srcidx] = {}
if (tgtidx not in freq_map[srcidx]):
freq_map[srcidx][tgtidx] = 1
else:
freq_map[srcidx][tgtidx] += 1
align_dict = {}
for srcidx in freq_map.keys():
align_dict[srcidx] = max(freq_map[srcidx], key=freq_map[srcidx].get)
with open(os.path.join(args.destdir, 'alignment.{}-{}.txt'.format(args.source_lang, args.target_lang)), 'w', encoding='utf-8') as f:
for (k, v) in align_dict.items():
print('{} {}'.format(src_dict[k], tgt_dict[v]), file=f) |
class Shell():
def __init__(self, name: str, exe: str):
self.name = name
self.exe = exe |
def truncated_normal_mean(r0, v0, zmin, zmax):
assert (zmin < zmax)
s0 = np.sqrt(v0)
ymin = ((zmin - r0) / s0)
ymax = ((zmax - r0) / s0)
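    # Standardize the truncation bounds; G1/G1_inf give the mean of the standard normal
    # truncated to [ymin, ymax], which is rescaled back to the original mean and scale below.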
if (zmax == (+ np.inf)):
g1 = G1_inf(ymin, (+ 1))
elif (zmin == (- np.inf)):
g1 = G1_inf(ymax, (- 1))
else:
g1 = G1(ymin, ymax)
r = (r0 + (s0 * g1))
return r |
class attach_to_forward_backward_class(Function):
    @staticmethod
    def forward(ctx, tensor, f, b, tag):
ctx.f = f
ctx.b = b
ctx.tag = tag
return f(tensor, tag)
    @staticmethod
    def backward(ctx, grad_output):
return (ctx.b(grad_output, ctx.tag), None, None, None) |
class EnumCase(AstNode):
def __init__(self, name, value_str):
super(EnumCase, self).__init__()
self.name = name
self.value_str = value_str
self.type_ref = TypeRef(name)
def __repr__(self):
return '{} = {},'.format(self.name, self.value_str)
def __eq__(self, other):
try:
other_tuple = (other.name, other.value_str)
except AttributeError:
return False
return ((self.name, self.value_str) == other_tuple)
def __ne__(self, other):
return (not (self == other))
def int_value(self):
return int(self.value_str, base=0) |
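A small usage sketch for EnumCase; it assumes AstNode and TypeRef from the surrounding module are importable, and the name/value below are made up.
case = EnumCase('MAX_RETRIES', '0x10')               # hypothetical enum case
print(repr(case))                                    # -> 'MAX_RETRIES = 0x10,'
print(case.int_value())                              # -> 16 (base=0 honours the 0x prefix)
print(case == EnumCase('MAX_RETRIES', '0x10'))       # -> True: equality compares (name, value_str) |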
class DET_evaluator(Evaluator):
def __init__(self):
self.type = 'DET'
def eval(self):
arguments = []
for (seq, res, gt) in zip(self.sequences, self.tsfiles, self.gtfiles):
arguments.append({'metricObject': DETMetrics(seq), 'args': {'gtDataDir': os.path.join(self.datadir, seq), 'sequence': str(seq), 'pred_file': res, 'gt_file': gt, 'benchmark_name': self.benchmark_name}})
if self.MULTIPROCESSING:
p = mp.Pool(self.NR_CORES)
print('Evaluating on {} cpu cores'.format(self.NR_CORES))
processes = [p.apply_async(run_metrics, kwds=inp) for inp in arguments]
self.results = [proc.get() for proc in processes]
p.close()
p.join()
else:
print('Evaluating sequentially')
self.results = [run_metrics(**inp) for inp in arguments]
self.failed = False
self.Overall_Results = DETMetrics('OVERALL') |
def get_score(submission_folder='../env'):
submission_path = os.path.join(submission_folder, 'submission.csv')
submission = pd.read_csv(submission_path, index_col=0)
test_dataset = datasets.CIFAR10(root='./data', train=False, download=True)
acc = 0
for (idx, (x, y)) in enumerate(test_dataset):
pred = submission.loc[idx].argmax()
acc += int((pred == y))
return (acc / len(test_dataset)) |
@overloads(_reducers.All)
class All(JAXReducer):
name: Final = 'all'
preferred_dtype: Final = np.bool_
needs_position: Final = False
@classmethod
def from_kernel_reducer(cls, reducer: Reducer) -> Self:
assert isinstance(reducer, _reducers.All)
return cls()
@classmethod
def _return_dtype(cls, given_dtype):
return np.bool_
def apply(self, array: ak.contents.NumpyArray, parents: ak.index.Index, starts: ak.index.Index, shifts: (ak.index.Index | None), outlength: ShapeItem) -> ak.contents.NumpyArray:
assert isinstance(array, ak.contents.NumpyArray)
result = jax.ops.segment_min(array.data, parents.data)
result = jax.numpy.asarray(result, dtype=bool)
return ak.contents.NumpyArray(result, backend=array.backend) |
def _maybe_apply(apply_fn, inputs, rng, apply_prob):
should_apply = (jax.random.uniform(rng, shape=()) <= apply_prob)
# Relies on the legacy five-argument jax.lax.cond signature
# (pred, true_operand, true_fun, false_operand, false_fun); newer JAX releases
# only accept cond(pred, true_fun, false_fun, *operands).
return jax.lax.cond(should_apply, inputs, apply_fn, inputs, (lambda x: x))
class TFCommonDecoderLayer(BaseModule):
def __init__(self, d_model=512, d_inner=1024, n_head=8, d_k=64, d_v=64, ifmask=True, dropout=0.1, qkv_bias=False, act_cfg=dict(type='mmcv.GELU')):
super().__init__()
self.attn = Mask_MultiHeadAttention(n_head, d_model, d_k, d_v, qkv_bias=qkv_bias, dropout=dropout)
self.norm1 = nn.LayerNorm(d_model)
self.mlp = PositionwiseFeedForward(d_model, d_inner, dropout=dropout, act_cfg=act_cfg)
self.norm2 = nn.LayerNorm(d_model)
def forward(self, q, k, v, mask=None, ifmask=True):
residual = q
x = (residual + self.attn(q, k, v, mask, ifmask))
x = self.norm1(x)
residual = x
x = (residual + self.mlp(x))
x = self.norm2(x)
return x |
def arnonA_long_mono_to_string(mono, latex=False, p=2):
if latex:
sq = '\\text{Sq}'
else:
sq = 'Sq'
if (len(mono) == 0):
return '1'
else:
string = ''
for (m, k) in mono:
for i in range(m, (k - 1), (- 1)):
string = ((((string + sq) + '^{') + str((2 ** i))) + '} ')
return string.strip(' ') |
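Usage sketch for arnonA_long_mono_to_string; each (m, k) pair in the monomial expands into the squares Sq^{2^m} ... Sq^{2^k} (the monomial below is illustrative).
print(arnonA_long_mono_to_string(()))              # -> '1'
print(arnonA_long_mono_to_string([(2, 0)]))        # -> 'Sq^{4} Sq^{2} Sq^{1}'
print(arnonA_long_mono_to_string([(2, 0)], latex=True))
# -> '\text{Sq}^{4} \text{Sq}^{2} \text{Sq}^{1}' |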
class MockAlgo():
sampler_cls = LocalSampler
def __init__(self, env, policy, max_path_length, n_exploration_traj, meta_eval):
self.env = env
self.policy = policy
self.max_path_length = max_path_length
self.n_exploration_traj = n_exploration_traj
self.meta_eval = meta_eval
def train(self, runner):
for step in runner.step_epochs():
if ((step % 5) == 0):
self.meta_eval.evaluate(self)
def get_exploration_policy(self):
return self.policy
def adapt_policy(self, exploration_policy, exploration_trajectories):
del exploration_policy
assert (len(exploration_trajectories.lengths) == self.n_exploration_traj) |
class AnswerAwareTokenizer():
def __init__(self, total_maxlen, bert_model='google/electra-base-discriminator'):
self.total_maxlen = total_maxlen
self.tok = ElectraTokenizerFast.from_pretrained(bert_model)
def process(self, questions, passages, all_answers=None, mask=None):
return TokenizationObject(self, questions, passages, all_answers, mask)
def tensorize(self, questions, passages):
query_lengths = self.tok(questions, padding='longest', return_tensors='pt').attention_mask.sum((- 1))
encoding = self.tok(questions, passages, padding='longest', truncation='longest_first', return_tensors='pt', max_length=self.total_maxlen, add_special_tokens=True)
return (encoding, query_lengths)
def get_all_candidates(self, encoding, index):
(offsets, endpositions) = self.all_word_positions(encoding, index)
candidates = [(offset, endpos) for (idx, offset) in enumerate(offsets) for endpos in endpositions[idx:(idx + 10)]]
return candidates
def all_word_positions(self, encoding, index):
words = encoding.word_ids(index)
offsets = [position for (position, (last_word_number, current_word_number)) in enumerate(zip(([(- 1)] + words), words)) if (last_word_number != current_word_number)]
endpositions = (offsets[1:] + [len(words)])
return (offsets, endpositions)
def characters_to_tokens(self, text, answers, encoding, index, offset, endpos):
for offset_ in range(offset, (len(text) + 1)):
tokens_offset = encoding.char_to_token(index, offset_)
if (tokens_offset is not None):
break
for endpos_ in range(endpos, (len(text) + 1)):
tokens_endpos = encoding.char_to_token(index, endpos_)
if (tokens_endpos is not None):
break
assert (tokens_offset is not None), (text, answers, offset)
tokens_endpos = (tokens_endpos if (tokens_endpos is not None) else len(encoding.tokens(index)))
return (tokens_offset, tokens_endpos)
def tokens_to_answer(self, encoding, index, text, tokens_offset, tokens_endpos):
char_offset = encoding.word_to_chars(index, encoding.token_to_word(index, tokens_offset)).start
try:
char_next_offset = encoding.word_to_chars(index, encoding.token_to_word(index, tokens_endpos)).start
char_endpos = char_next_offset
except Exception:
char_endpos = encoding.word_to_chars(index, encoding.token_to_word(index, (tokens_endpos - 1))).end
assert (char_offset is not None)
assert (char_endpos is not None)
return text[char_offset:char_endpos].strip() |
def test_prediction_codes(tmp_path: pathlib.Path):
time_horizon = TimeHorizon(datetime.timedelta(days=0), datetime.timedelta(days=10))
labeler = CodeLabeler(['2'], time_horizon, prediction_codes=['4', '5'])
events_with_labels: EventsWithLabels = [(event((2015, 1, 3), 2, None), 'skip'), (event((2015, 1, 3), 4, None), 'skip'), (event((2015, 1, 3), 1, None), 'skip'), (event((2015, 1, 3), 3, None), 'skip'), (event((2015, 10, 5), 1, None), 'skip'), (event((2018, 1, 3), 2, None), 'skip'), (event((2018, 3, 1), 4, None), False), (event((2018, 3, 3), 1, None), 'skip'), (event((2018, 5, 2), 5, None), True), (event((2018, 5, 3), 2, None), 'skip'), (event((2018, 5, 3, 11), 1, None), 'skip'), (event((2018, 5, 4), 4, None), False), (event((2018, 5, 4), 1, None), 'skip'), (event((2018, 11, 1), 5, None), False), (event((2018, 12, 4), 1, None), 'skip'), (event((2018, 12, 30), 4, None), 'out of range')]
run_test_for_labeler(labeler, events_with_labels, help_text='prediction_codes_one_outcomes')
labeler = CodeLabeler(['2', '6', '7'], time_horizon, prediction_codes=['4', '5'])
events_with_labels = [(event((2010, 1, 1), 2, None), 'skip'), (event((2010, 1, 3), 4, None), True), (event((2010, 1, 8), 6, None), 'skip'), (event((2010, 2, 1), 5, None), True), (event((2010, 2, 9), 7, None), 'skip'), (event((2010, 2, 11), 4, None), False), (event((2015, 1, 3), 2, None), 'skip'), (event((2015, 1, 3), 4, None), 'skip'), (event((2015, 1, 3), 1, None), 'skip'), (event((2015, 1, 3), 3, None), 'skip'), (event((2015, 10, 5), 1, None), 'skip'), (event((2018, 1, 3), 2, None), 'skip'), (event((2018, 3, 1), 4, None), True), (event((2018, 3, 2), 7, None), 'skip'), (event((2018, 3, 3), 1, None), 'skip'), (event((2018, 5, 2), 5, None), True), (event((2018, 5, 3), 2, None), 'skip'), (event((2018, 5, 3, 11), 1, None), 'skip'), (event((2018, 5, 4), 4, None), False), (event((2018, 5, 4), 1, None), 'skip'), (event((2018, 11, 1), 5, None), False), (event((2018, 12, 4), 1, None), 'skip'), (event((2018, 12, 30), 4, None), 'out of range')]
run_test_for_labeler(labeler, events_with_labels, help_text='prediction_codes_multiple_outcomes')
labeler = CodeLabeler([2, 6, 7], time_horizon, prediction_codes=[])
events_with_labels = [(event((2010, 1, 1), 2, None), 'skip'), (event((2010, 1, 3), 4, None), 'skip'), (event((2010, 1, 8), 6, None), 'skip'), (event((2010, 2, 1), 5, None), 'skip'), (event((2010, 2, 9), 7, None), 'skip'), (event((2010, 2, 11), 4, None), 'skip'), (event((2015, 1, 3), 2, None), 'skip'), (event((2015, 1, 3), 4, None), 'skip'), (event((2015, 1, 3), 1, None), 'skip'), (event((2015, 1, 3), 3, None), 'skip'), (event((2015, 10, 5), 1, None), 'skip'), (event((2018, 1, 3), 2, None), 'skip'), (event((2018, 3, 1), 4, None), 'skip'), (event((2018, 3, 2), 7, None), 'skip'), (event((2018, 3, 3), 1, None), 'skip'), (event((2018, 5, 2), 5, None), 'skip'), (event((2018, 5, 3), 2, None), 'skip'), (event((2018, 5, 3, 11), 1, None), 'skip'), (event((2018, 5, 4), 4, None), 'skip'), (event((2018, 5, 4), 1, None), 'skip'), (event((2018, 11, 1), 5, None), 'skip'), (event((2018, 12, 4), 1, None), 'skip'), (event((2018, 12, 30), 4, None), 'skip')]
run_test_for_labeler(labeler, events_with_labels, help_text='prediction_codes_zero_predictions') |
class RCHWNonSimplyLacedElement(RCNonSimplyLacedElement):
def check(self):
for partition in self:
for (i, vac_num) in enumerate(partition.vacancy_numbers):
if (vac_num < partition.rigging[i]):
raise ValueError('rigging can be at most the vacancy number')
def f(self, a):
if (not self.phi(a)):
return None
return RCNonSimplyLacedElement.f(self, a)
def weight(self):
P = self.parent().weight_lattice_realization()
alpha = list(P.simple_roots())
return (self.parent()._wt - sum(((sum(x) * alpha[i]) for (i, x) in enumerate(self)))) |
class JHU(NWPU):
def __init__(self, root, list_path, num_samples=None, num_classes=1, multi_scale=True, flip=True, ignore_label=(- 1), base_size=2048, crop_size=(512, 1024), min_unit=(32, 32), center_crop_test=False, downsample_rate=1, scale_factor=(0.5, (1 / 0.5)), mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):
super(JHU, self).__init__(root, list_path, num_samples, num_classes, multi_scale, flip, ignore_label, base_size, crop_size, min_unit, center_crop_test, downsample_rate, scale_factor, mean, std)
def read_files(self):
files = []
for item in self.img_list:
image_id = item[0]
files.append({'img': (('images/' + image_id) + '.jpg'), 'label': (('jsons/' + image_id) + '.json'), 'name': image_id})
return files |
def reduction_B(input):
channel_axis = (- 1)
r1 = conv_block(input, 192, 1, 1)
r1 = conv_block(r1, 192, 3, 3, subsample=(2, 2), border_mode='valid')
r2 = conv_block(input, 256, 1, 1)
r2 = conv_block(r2, 256, 1, 7)
r2 = conv_block(r2, 320, 7, 1)
r2 = conv_block(r2, 320, 3, 3, subsample=(2, 2), border_mode='valid')
r3 = MaxPooling2D((3, 3), strides=(2, 2), border_mode='valid')(input)
m = concatenate([r1, r2, r3], axis=channel_axis)
return m |
def _spvec2pow(specvec):
fftl2 = (len(specvec) - 1)
fftl = (fftl2 * 2)
power = (specvec[0] + specvec[fftl2])
for k in range(1, fftl2):
power += (2.0 * specvec[k])
power /= fftl
return power |
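A tiny worked example for _spvec2pow, assuming specvec is a one-sided power spectrum (DC through Nyquist) for an FFT of length 2*(len(specvec)-1).
specvec = [1.0, 0.5, 0.25]                 # fftl2 = 2, so fftl = 4
# power = (DC + Nyquist + 2 * interior bins) / fftl = (1.0 + 0.25 + 2*0.5) / 4 = 0.5625
assert abs(_spvec2pow(specvec) - 0.5625) < 1e-12 |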
def walsh_matrix(m0):
m = int(m0)
if (m == 1):
return matrix(GF(2), 1, 2, [0, 1])
if (m > 1):
row2 = [x.list() for x in walsh_matrix((m - 1)).augment(walsh_matrix((m - 1))).rows()]
return matrix(GF(2), m, (2 ** m), ([(([0] * (2 ** (m - 1))) + ([1] * (2 ** (m - 1))))] + row2))
raise ValueError(('%s must be an integer > 0.' % m0)) |
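A hedged example of walsh_matrix; it only runs inside Sage (matrix and GF come from Sage), and the columns of walsh_matrix(m) enumerate all binary vectors of length m.
M = walsh_matrix(2)
# Expected 2 x 4 matrix over GF(2), column j being the binary expansion of j:
# [0 0 1 1]
# [0 1 0 1]
print(M) |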
def evaluate_from_args(args: argparse.Namespace) -> Dict[(str, Any)]:
logging.getLogger('allennlp.common.params').disabled = True
logging.getLogger('allennlp.nn.initializers').disabled = True
logging.getLogger('allennlp.modules.token_embedders.embedding').setLevel(logging.INFO)
archive = load_archive(args.archive_file, args.cuda_device, args.overrides)
config = archive.config
prepare_environment(config)
model = archive.model
model.eval()
dataset_reader = DatasetReader.from_params(config.pop('dataset_reader'))
evaluation_data_path = args.evaluation_data_file
logger.info('Reading evaluation data from %s', evaluation_data_path)
dataset = dataset_reader.read(evaluation_data_path)
dataset.index_instances(model.vocab)
iterator = BasicIterator(batch_size=32)
serialization_directory = args.archive_file[:(- 13)]  # strip the trailing '/model.tar.gz'
metrics = evaluate(model, dataset, iterator, args.cuda_device, serialization_directory)
metrics_file_path = os.path.join(serialization_directory, 'metrics.txt')
metrics_file = open(metrics_file_path, 'w+')
logger.info('Finished evaluating.')
logger.info('Metrics:')
for (key, metric) in metrics.items():
if ('overall' in key):
logger.info('%s: %s', key, metric)
if (('gold_spans' in key) or ('predicted_spans' in key)):
continue
metrics_file.write('{}: {}\n'.format(key, metric))
logger.info('Detailed evaluation metrics in %s', metrics_file_path)
return metrics |
class TestEnforceClusterIdUniqueness(unittest.TestCase):
def test_list_of_list(self):
cluster_ids = [['a', 'b', 'c'], ['b', 'c', 'd', 'e']]
new_cluster_ids = utils.enforce_cluster_id_uniqueness(cluster_ids)
self.assertEqual(2, len(new_cluster_ids))
self.assertEqual(3, len(new_cluster_ids[0]))
self.assertEqual(4, len(new_cluster_ids[1]))
merged = [x for new_cluster_id in new_cluster_ids for x in new_cluster_id]
self.assertEqual(7, len(merged))
self.assertEqual(7, len(set(merged))) |
def get_debug_args(budget=30, detector_type=AAD_IFOREST):
return ['--resultsdir=./temp', '--randseed=42', '--reruns=1', ('--detector_type=%d' % detector_type), ('--forest_score_type=%d' % (IFOR_SCORE_TYPE_NEG_PATH_LEN if (detector_type == AAD_IFOREST) else (HST_LOG_SCORE_TYPE if (detector_type == AAD_HSTREES) else (RSF_SCORE_TYPE if (detector_type == AAD_RSFOREST) else 0)))), ('--init=%d' % INIT_UNIF), '--withprior', '--unifprior', ('--constrainttype=%d' % AAD_CONSTRAINT_TAU_INSTANCE), ('--querytype=%d' % QUERY_DETERMINISIC), '--num_query_batch=1', ('--budget=%d' % budget), '--tau=0.03', '--forest_n_trees=100', '--forest_n_samples=256', ('--forest_max_depth=%d' % (100 if (detector_type == AAD_IFOREST) else 7)), '--forest_add_leaf_nodes_only', ('--ensemble_score=%d' % ENSEMBLE_SCORE_LINEAR), '--resultsdir=./temp', '--log_file=./temp/demo_aad.log', '--debug'] |
def get_node_corrs_objects_ids(node_corrs, objects_ids, batch_offset):
node_corrs_objects_ids = []
for node_corr in node_corrs:
node_corrs_objects_ids.append((objects_ids[(node_corr[0] + batch_offset)], objects_ids[(node_corr[1] + batch_offset)]))
return node_corrs_objects_ids |
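Self-contained usage sketch for get_node_corrs_objects_ids with made-up correspondences, ids, and offset.
node_corrs = [(0, 1), (1, 3)]                                  # hypothetical node pairs
objects_ids = ['bg', 'bg', 'chair', 'table', 'lamp', 'sofa']   # hypothetical per-node labels
print(get_node_corrs_objects_ids(node_corrs, objects_ids, batch_offset=2))
# -> [('chair', 'table'), ('table', 'sofa')] |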
def fully_connected(inputs, num_outputs, scope, use_xavier=True, stddev=0.001, weight_decay=None, activation_fn=tf.nn.relu, bn=False, bn_decay=None, is_training=None):
with tf.variable_scope(scope) as sc:
num_input_units = inputs.get_shape()[(- 1)].value
weights = _variable_with_weight_decay('weights', shape=[num_input_units, num_outputs], use_xavier=use_xavier, stddev=stddev, wd=weight_decay)
outputs = tf.matmul(inputs, weights)
biases = _variable_on_cpu('biases', [num_outputs], tf.constant_initializer(0.0))
outputs = tf.nn.bias_add(outputs, biases)
if bn:
outputs = tf.contrib.layers.instance_norm(outputs)
print('bn in fc')
if (activation_fn is not None):
outputs = activation_fn(outputs)
return outputs |
def test_trace():
import tracemalloc
tracemalloc.start(10)
time1 = tracemalloc.take_snapshot()
from pycorrector import Corrector
m = Corrector()
c = m.correct('')
print(c)
time2 = tracemalloc.take_snapshot()
stats = time2.compare_to(time1, 'lineno')
print(('*' * 32))
for stat in stats[:3]:
print(stat)
stats = time2.compare_to(time1, 'traceback')
print(('*' * 32))
for stat in stats[:3]:
print(stat.traceback.format()) |
def conv_2d_layer(name, in_tensor, in_ch, out_ch, k_h, k_w, s_h, s_w, stddev=0.01, initial_w=None, padding='SAME'):
with tf.variable_scope(name):
W = tf.get_variable('W', [k_h, k_w, in_ch, out_ch], initializer=tf.contrib.layers.xavier_initializer(True))
conv = tf.nn.conv2d(in_tensor, W, strides=[1, s_h, s_w, 1], padding=padding)
b = tf.get_variable('b', [out_ch], initializer=tf.constant_initializer(0.01))
return tf.reshape(tf.nn.bias_add(conv, b), tf.shape(conv)) |
@pytest.mark.parametrize('knn_methods', knn_methods)
def test_desknn_proba(knn_methods):
(pool_classifiers, X_dsel, y_dsel, X_test, y_test) = setup_classifiers()
desknn = DESKNN(pool_classifiers, knn_classifier=knn_methods, voting='soft')
desknn.fit(X_dsel, y_dsel)
probas = desknn.predict_proba(X_test)
expected = np.load('deslib/tests/expected_values/desknn_proba_integration.npy')
assert np.allclose(probas, expected) |
def build_and_print_matrices(v, strat):
treated = BooleSet()
v = list(v)
rows = 0
polys_in_mat = []
if (not v):
return
while v:
rows = (rows + 1)
p = v[0]
v = v[1:]
for m in list(p.terms()):
m = Monomial(m)
if (m not in BooleSet(treated)):
i = strat.select(m)
if (i >= 0):
p2 = strat[i]
p2 = (p2 * (m // p2.lead()))
v.append(p2)
polys_in_mat.append(p)
treated = treated.union(p.set())
m2i = {v: k for (k, v) in enumerate(list(Polynomial(BooleSet(treated)).terms()))}
polys_in_mat.sort(key=Polynomial.lead, reverse=True)
polys_in_mat = [[m2i[t] for t in p.terms()] for p in polys_in_mat]
global mat_counter
mat_counter = (mat_counter + 1)
from PIL import Image
rows = len(polys_in_mat)
cols = len(m2i)
im = Image.new('1', (cols, rows), 'white')
for i in range(len(polys_in_mat)):
p = polys_in_mat[i]
for j in p:
assert (i < rows)
assert (j < cols)
im.putpixel((j, i), 0)
file_name = ((strat.matrix_prefix + str(mat_counter)) + '.png')
if os.path.exists(file_name):
os.remove(file_name)
im.save(file_name)
del im
print('MATRIX_SIZE:', rows, 'x', cols) |
def multi_gpu_test_net_on_dataset(args, num_images):
binary_dir = os.getcwd()
binary = os.path.join(binary_dir, (args.test_net_file + '.py'))
assert os.path.exists(binary), "Binary '{}' not found".format(binary)
outputs = subprocess_utils.process_in_parallel('detection', num_images, binary, cfg, cfg.CKPT)
all_boxes = []
all_segms = []
all_parss = []
all_pscores = []
for ins_data in outputs:
all_boxes += ins_data['all_boxes']
all_segms += ins_data['all_segms']
all_parss += ins_data['all_parss']
all_pscores += ins_data['all_pscores']
det_file = os.path.join(cfg.CKPT, 'test', 'detections.pkl')
save_object(dict(all_boxes=all_boxes, all_segms=all_segms, all_parss=all_parss, all_pscores=all_pscores), det_file)
logging_rank('Wrote detections to: {}'.format(os.path.abspath(det_file)), local_rank=0)
return (all_boxes, all_segms, all_parss, all_pscores) |
def test_case11():
url = (brokerIp + '/ngsi-ld/v1/entities/')
headers = {'Content-Type': 'application/json'}
r = requests.post(url, data=json.dumps(ld_data.subdata14b), headers=headers)
print(r.content)
print(r.status_code)
assert (r.status_code == 400) |
def _cubic_smooth_coeff(signal, lamb):
(rho, omega) = _coeff_smooth(lamb)
cs = ((1 - ((2 * rho) * cos(omega))) + (rho * rho))
K = len(signal)
yp = zeros((K,), signal.dtype.char)
k = arange(K)
yp[0] = ((_hc(0, cs, rho, omega) * signal[0]) + add.reduce((_hc((k + 1), cs, rho, omega) * signal)))
yp[1] = (((_hc(0, cs, rho, omega) * signal[0]) + (_hc(1, cs, rho, omega) * signal[1])) + add.reduce((_hc((k + 2), cs, rho, omega) * signal)))
for n in range(2, K):
yp[n] = (((cs * signal[n]) + (((2 * rho) * cos(omega)) * yp[(n - 1)])) - ((rho * rho) * yp[(n - 2)]))
y = zeros((K,), signal.dtype.char)
y[(K - 1)] = add.reduce(((_hs(k, cs, rho, omega) + _hs((k + 1), cs, rho, omega)) * signal[::(- 1)]))
y[(K - 2)] = add.reduce(((_hs((k - 1), cs, rho, omega) + _hs((k + 2), cs, rho, omega)) * signal[::(- 1)]))
for n in range((K - 3), (- 1), (- 1)):
y[n] = (((cs * yp[n]) + (((2 * rho) * cos(omega)) * y[(n + 1)])) - ((rho * rho) * y[(n + 2)]))
return y |
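A hedged sanity check for _cubic_smooth_coeff: if this is the smoothing (lamb > 0) branch of scipy.signal.cspline1d, as the helper names suggest, the public SciPy function should reproduce its output.
import numpy as np
from scipy.signal import cspline1d

sig = np.sin(np.linspace(0, 2 * np.pi, 64))
coeffs_ref = cspline1d(sig, lamb=5.0)
# _cubic_smooth_coeff(sig, 5.0) should agree with coeffs_ref up to floating-point error,
# assuming _coeff_smooth / _hc / _hs match SciPy's internal helpers. |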
def Fossum_calc(TP, FP, FN, TN):
try:
n = (((TP + FP) + FN) + TN)
part1 = ((TP - 0.5) ** 2)
part2 = ((TP + FP) * (TP + FN))
return ((n * part1) / part2)
except Exception:
return 'None' |
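A worked example of Fossum_calc on a hypothetical confusion matrix; note that degenerate inputs fall back to the string 'None'.
# TP=12, FP=5, FN=3, TN=80, so n = 100 and
# Fossum = n * (TP - 0.5)^2 / ((TP + FP) * (TP + FN)) = 100 * 132.25 / 255 ≈ 51.86
print(round(Fossum_calc(12, 5, 3, 80), 2))   # -> 51.86
print(Fossum_calc(0, 0, 0, 10))              # -> 'None' (division by zero is swallowed) |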
@functional_datapipe('map')
class MapperIterDataPipe(IterDataPipe[T_co]):
datapipe: IterDataPipe
fn: Callable
def __init__(self, datapipe: IterDataPipe, fn: Callable, input_col=None, output_col=None, *, fn_args: Optional[Tuple]=None, fn_kwargs: Optional[Dict]=None, nesting_level: int=0) -> None:
super().__init__()
self.datapipe = datapipe
if (hasattr(fn, '__name__') and (fn.__name__ == '<lambda>') and (not DILL_AVAILABLE)):
warnings.warn('Lambda function is not supported for pickle, please use regular python function or functools.partial instead.')
self.fn = fn
self.input_col = input_col
if ((input_col is None) and (output_col is not None)):
raise ValueError('`output_col` must be None when `input_col` is None.')
if isinstance(output_col, (list, tuple)):
if (len(output_col) > 1):
raise ValueError('`output_col` must be a single-element list or tuple')
output_col = output_col[0]
self.output_col = output_col
self.args = (() if (fn_args is None) else fn_args)
self.kwargs = ({} if (fn_kwargs is None) else fn_kwargs)
if (nesting_level < (- 1)):
raise ValueError('nesting_level must be -1 or >= 0')
self.nesting_level = nesting_level
def _apply_fn(self, data):
if ((self.input_col is None) and (self.output_col is None)):
return self.fn(data, *self.args, **self.kwargs)
if (self.input_col is None):
res = self.fn(data, *self.args, **self.kwargs)
elif isinstance(self.input_col, (list, tuple)):
args = tuple((data[col] for col in self.input_col))
res = self.fn(*args, *self.args, **self.kwargs)
else:
res = self.fn(data[self.input_col], *self.args, **self.kwargs)
if isinstance(data, tuple):
t_flag = True
data = list(data)
else:
t_flag = False
if (self.output_col is None):
if isinstance(self.input_col, (list, tuple)):
data[self.input_col[0]] = res
for idx in sorted(self.input_col[1:], reverse=True):
del data[idx]
else:
data[self.input_col] = res
elif (self.output_col == (- 1)):
data.append(res)
else:
data[self.output_col] = res
return (tuple(data) if t_flag else data)
def _apply(self, data, nesting_level):
if (nesting_level == 0):
return self._apply_fn(data)
elif (nesting_level > 0):
if isinstance(data, DataChunk):
return type(data)([self._apply(i, (nesting_level - 1)) for i in data.raw_iterator()])
elif isinstance(data, list):
return [self._apply(i, (nesting_level - 1)) for i in data]
else:
raise IndexError(f'nesting_level {self.nesting_level} out of range (exceeds data pipe depth)')
elif isinstance(data, DataChunk):
return type(data)([self._apply(i, nesting_level) for i in data.raw_iterator()])
elif isinstance(data, list):
return [self._apply(i, nesting_level) for i in data]
else:
return self._apply_fn(data)
def __iter__(self) -> Iterator[T_co]:
for data in self.datapipe:
(yield self._apply(data, self.nesting_level))
def __len__(self) -> int:
if isinstance(self.datapipe, Sized):
return len(self.datapipe)
raise TypeError("{} instance doesn't have valid length".format(type(self).__name__))
def __getstate__(self):
if DILL_AVAILABLE:
dill_function = dill.dumps(self.fn)
else:
dill_function = self.fn
state = (self.datapipe, dill_function, self.input_col, self.output_col, self.args, self.kwargs, self.nesting_level)
return state
def __setstate__(self, state):
(self.datapipe, dill_function, self.input_col, self.output_col, self.args, self.kwargs, self.nesting_level) = state
if DILL_AVAILABLE:
self.fn = dill.loads(dill_function)
else:
self.fn = dill_function |
class MapPermutationTuner(cutout_tuner.CutoutTuner):
def __init__(self, sdfg: SDFG, measurement: dtypes.InstrumentationType=dtypes.InstrumentationType.Timer) -> None:
super().__init__(task='MapPermutation', sdfg=sdfg)
self.instrument = measurement
def cutouts(self) -> Generator[(Tuple[(dace.SDFGState, str)], None, None)]:
for (node, state) in self._sdfg.all_nodes_recursive():
if isinstance(node, dace.nodes.MapEntry):
if (xfh.get_parent_map(state, node) is not None):
continue
node_id = state.node_id(node)
state_id = self._sdfg.node_id(state)
subgraph_nodes = state.scope_subgraph(node).nodes()
cutout = SDFGCutout.singlestate_cutout(state, *subgraph_nodes, make_copy=False)
(yield (cutout, f'{state_id}.{node_id}.{node.label}'))
def space(self, map_entry: dace.nodes.MapEntry, **kwargs) -> Generator[(Tuple[str], None, None)]:
return itertools.permutations(map_entry.map.params)
def config_from_key(self, key: str, **kwargs) -> List[str]:
return key.split('.')
def apply(self, config: List[str], label: str, **kwargs) -> None:
(state_id, node_id, node_label) = label.split('.')
map_entry = self._sdfg.node(int(state_id)).node(int(node_id))
map_entry.range.ranges = [r for list_param in config for (map_param, r) in zip(map_entry.map.params, map_entry.range.ranges) if (list_param == map_param)]
map_entry.map.params = config
def pre_evaluate(self, cutout: dace.SDFG, measurements: int, **kwargs) -> Dict:
cutout.start_state.instrument = self.instrument
map_entry = None
for node in cutout.start_state.nodes():
if (isinstance(node, dace.nodes.MapEntry) and (xfh.get_parent_map(cutout.start_state, node) is None)):
map_entry = node
break
assert (map_entry is not None)
new_kwargs = {'space_kwargs': {'map_entry': map_entry}, 'cutout': cutout.to_json(), 'map_entry_id': cutout.start_state.node_id(map_entry), 'measurements': measurements, 'key': (lambda point: '.'.join(point))}
return new_kwargs
def evaluate(self, config, cutout, map_entry_id: int, measurements: int, **kwargs) -> float:
cutout_ = dace.SDFG.from_json(cutout)
map_ = cutout_.start_state.node(map_entry_id)
map_.range.ranges = [r for list_param in config for (map_param, r) in zip(map_.map.params, map_.range.ranges) if (list_param == map_param)]
map_.map.params = config
return self.measure(cutout_, measurements) |
class SimpleConvAbstractModel(ProteinModel):
config_class = SimpleConvConfig
base_model_prefix = 'simple_conv' |
class EWCParamsComputer(ASR):
def on_fit_start(self):
(self.params, self.fisher) = ({}, {})
self.num_samples = 0
def fit_batch(self, batch):
outputs = self.compute_forward(batch, sb.Stage.TRAIN)
loss = self.compute_objectives(outputs, batch, sb.Stage.TRAIN)
with self.no_sync(False):
loss.backward()
with torch.no_grad():
for (name, param) in self.modules.wavlm.named_parameters():
if ((not param.requires_grad) or (param.grad is None)):
continue
if (name not in self.params):
self.params[name] = param.clone().cpu()
if (name not in self.fisher):
self.fisher[name] = (param.grad.clone() ** 2).cpu()
else:
self.fisher[name] += (param.grad.clone() ** 2).cpu()
self.modules.wavlm.zero_grad()
self.num_samples += 1
return loss.detach().cpu()
def on_stage_end(self, stage, stage_loss, epoch=None):
for name in self.fisher:
self.fisher[name] /= self.num_samples |
def validate_es_nif(df: Union[(str, pd.Series, dd.Series, pd.DataFrame, dd.DataFrame)], column: str='') -> Union[(bool, pd.Series, pd.DataFrame)]:
if isinstance(df, (pd.Series, dd.Series)):
return df.apply(nif.is_valid)
elif isinstance(df, (pd.DataFrame, dd.DataFrame)):
if (column != ''):
return df[column].apply(nif.is_valid)
else:
return df.applymap(nif.is_valid)
return nif.is_valid(df) |
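A hedged usage sketch for validate_es_nif, assuming nif is stdnum.es.nif from the python-stdnum package (whose is_valid checks the DNI/NIF control letter).
import pandas as pd
from stdnum.es import nif   # assumption: this is the `nif` module used above

s = pd.Series(['12345678Z', 'not-a-nif'])    # hypothetical values
print(validate_es_nif(s))                    # element-wise booleans: [True, False]
print(validate_es_nif('12345678Z'))          # single string -> True |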
@make_properties
class MapDimShuffle(transformation.SingleStateTransformation):
map_entry = transformation.PatternNode(nodes.MapEntry)
parameters = ListProperty(element_type=str, default=None, desc='Desired order of map parameters')
@classmethod
def expressions(cls):
return [sdutil.node_path_graph(cls.map_entry)]
def can_be_applied(self, graph, expr_index, sdfg, permissive=False):
return True
def apply(self, graph: SDFGState, sdfg: SDFG):
map_entry = self.map_entry
if (self.parameters is None):
return
if (set(self.parameters) != set(map_entry.map.params)):
return
map_entry.range.ranges = [r for list_param in self.parameters for (map_param, r) in zip(map_entry.map.params, map_entry.range.ranges) if (list_param == map_param)]
map_entry.map.params = self.parameters |
class DeformConvFunction(Function):
@staticmethod
def forward(ctx, input, offset, weight, stride=1, padding=0, dilation=1, groups=1, deformable_groups=1, im2col_step=64):
if ((input is not None) and (input.dim() != 4)):
raise ValueError('Expected 4D tensor as input, got {}D tensor instead.'.format(input.dim()))
ctx.stride = _pair(stride)
ctx.padding = _pair(padding)
ctx.dilation = _pair(dilation)
ctx.groups = groups
ctx.deformable_groups = deformable_groups
ctx.im2col_step = im2col_step
ctx.save_for_backward(input, offset, weight)
output = input.new_empty(DeformConvFunction._output_size(input, weight, ctx.padding, ctx.dilation, ctx.stride))
ctx.bufs_ = [input.new_empty(0), input.new_empty(0)]
if (not input.is_cuda):
raise NotImplementedError
else:
cur_im2col_step = min(ctx.im2col_step, input.shape[0])
assert ((input.shape[0] % cur_im2col_step) == 0), 'im2col step must divide batchsize'
deform_conv_cuda.deform_conv_forward_cuda(input, weight, offset, output, ctx.bufs_[0], ctx.bufs_[1], weight.size(3), weight.size(2), ctx.stride[1], ctx.stride[0], ctx.padding[1], ctx.padding[0], ctx.dilation[1], ctx.dilation[0], ctx.groups, ctx.deformable_groups, cur_im2col_step)
return output
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
(input, offset, weight) = ctx.saved_tensors
grad_input = grad_offset = grad_weight = None
if (not grad_output.is_cuda):
raise NotImplementedError
else:
cur_im2col_step = min(ctx.im2col_step, input.shape[0])
assert ((input.shape[0] % cur_im2col_step) == 0), 'im2col step must divide batchsize'
if (ctx.needs_input_grad[0] or ctx.needs_input_grad[1]):
grad_input = torch.zeros_like(input)
grad_offset = torch.zeros_like(offset)
deform_conv_cuda.deform_conv_backward_input_cuda(input, offset, grad_output, grad_input, grad_offset, weight, ctx.bufs_[0], weight.size(3), weight.size(2), ctx.stride[1], ctx.stride[0], ctx.padding[1], ctx.padding[0], ctx.dilation[1], ctx.dilation[0], ctx.groups, ctx.deformable_groups, cur_im2col_step)
if ctx.needs_input_grad[2]:
grad_weight = torch.zeros_like(weight)
deform_conv_cuda.deform_conv_backward_parameters_cuda(input, offset, grad_output, grad_weight, ctx.bufs_[0], ctx.bufs_[1], weight.size(3), weight.size(2), ctx.stride[1], ctx.stride[0], ctx.padding[1], ctx.padding[0], ctx.dilation[1], ctx.dilation[0], ctx.groups, ctx.deformable_groups, 1, cur_im2col_step)
return (grad_input, grad_offset, grad_weight, None, None, None, None, None)
@staticmethod
def _output_size(input, weight, padding, dilation, stride):
channels = weight.size(0)
output_size = (input.size(0), channels)
for d in range((input.dim() - 2)):
in_size = input.size((d + 2))
pad = padding[d]
kernel = ((dilation[d] * (weight.size((d + 2)) - 1)) + 1)
stride_ = stride[d]
output_size += (((((in_size + (2 * pad)) - kernel) // stride_) + 1),)
if (not all(map((lambda s: (s > 0)), output_size))):
raise ValueError('convolution input is too small (output would be {})'.format('x'.join(map(str, output_size))))
return output_size |
class SawyerSoccerV1Policy(Policy):
@staticmethod
@assert_fully_parsed
def _parse_obs(obs):
return {'hand_pos': obs[:3], 'ball_pos': obs[3:6], 'goal_pos': obs[9:], 'unused_info': obs[6:9]}
def get_action(self, obs):
o_d = self._parse_obs(obs)
action = Action({'delta_pos': np.arange(3), 'grab_effort': 3})
action['delta_pos'] = move(o_d['hand_pos'], to_xyz=self._desired_pos(o_d), p=25.0)
action['grab_effort'] = 1.0
return action.array
@staticmethod
def _desired_pos(o_d):
pos_curr = o_d['hand_pos']
pos_ball = (o_d['ball_pos'] + np.array([0.0, 0.0, 0.03]))
pos_goal = o_d['goal_pos']
curr_to_ball = (pos_ball - pos_curr)
curr_to_ball /= np.linalg.norm(curr_to_ball)
ball_to_goal = (pos_goal - pos_ball)
ball_to_goal /= np.linalg.norm(ball_to_goal)
scaling = 0.1
if (np.dot(curr_to_ball[:2], ball_to_goal[:2]) < 0.7):
scaling *= (- 1)
return (pos_ball + (scaling * ball_to_goal)) |
def get_train_test_splits(df: pd.DataFrame, metadata: pd.DataFrame, n: int) -> (pd.DataFrame, pd.DataFrame, np.ndarray):
train_df = df[metadata.trainval]
test_df = df[(~ metadata.trainval)]
test_labels = metadata[(~ metadata.trainval)].anomaly.values
return (train_df.tail(n), test_df.head(n), test_labels[:n]) |
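A self-contained sketch of get_train_test_splits on toy data; metadata needs boolean trainval and anomaly columns aligned with df's index.
import pandas as pd

df = pd.DataFrame({'value': [1.0, 2.0, 3.0, 4.0]})
metadata = pd.DataFrame({'trainval': [True, True, False, False],
                         'anomaly': [False, False, True, False]})
(train_df, test_df, test_labels) = get_train_test_splits(df, metadata, n=2)
print(train_df['value'].tolist())   # -> [1.0, 2.0]
print(test_df['value'].tolist())    # -> [3.0, 4.0]
print(test_labels)                  # -> [ True False] |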
class foursquare_nyc(DatasetBuilder):
def prepare(self, f_names):
fs = [f for f in f_names if ('TSMC2014_NYC.txt' in f)]
raw_data = pandas.read_csv(fs[0], sep=self.dataset_info['sep'], encoding=self.dataset_info['encoding'], header=None)
tdf_dataset = skmob.TrajDataFrame(raw_data, latitude=4, longitude=5, user_id=0, datetime=7)
return tdf_dataset |
def get_reward_processor(config):
if (config.type == 'time_independent'):
return get_raw_reward
elif (config.type == 'time_discounted'):
return get_original_reward
elif (config.type == 'click_checkboxes_hard'):
return get_click_checkboxes_hard
else:
raise ValueError('{} not a valid reward processor type'.format(config.type)) |
class Document():
def __init__(self, publication_date, sentences):
self.publication_date = publication_date
self.sentences = tuple(sentences)
@staticmethod
def from_xml(publication_date, text, nlp):
logger = logging.getLogger(__name__)
publication_date = datetime.datetime.strptime(publication_date, '%Y-%m-%d').date()
tokens = []
time_values = []
time_spans = []
root = ElementTree.fromstring(text)
tokens.extend(root.text.split())
time_values.extend(([None] * len(tokens)))
time_spans.extend(([None] * len(tokens)))
for time_tag in root:
if (time_tag.text is None):
continue
splitted_text = time_tag.text.split()
tokens.extend(splitted_text)
time_span = 'd'
if (time_tag.attrib['type'] == 'DATE'):
try:
value = [datetime.datetime.strptime(time_tag.attrib['value'], '%Y-%m-%d').date()]
except ValueError:
try:
value = [datetime.datetime.strptime(time_tag.attrib['value'], '%Y-%m').date()]
time_span = 'm'
except ValueError:
try:
value = [datetime.datetime.strptime(time_tag.attrib['value'], '%Y').date()]
time_span = 'y'
except ValueError:
logger.warning(('Could not parse date ' + time_tag.attrib['value']))
value = [None]
elif (time_tag.attrib['type'] == 'TIME'):
try:
value = [datetime.datetime.strptime(time_tag.attrib['value'], '%Y-%m-%dT%H:%M').date()]
except ValueError:
try:
value = [datetime.datetime.strptime(time_tag.attrib['value'], '%Y-%m-%dTMO').date()]
except ValueError:
try:
value = [datetime.datetime.strptime(time_tag.attrib['value'], '%Y-%m-%dTEV').date()]
except ValueError:
try:
value = [datetime.datetime.strptime(time_tag.attrib['value'], '%Y-%m-%dTNI').date()]
except ValueError:
try:
value = [datetime.datetime.strptime(time_tag.attrib['value'], '%Y-%m-%dTAF').date()]
except ValueError:
logger.warning(('Could not parse date ' + time_tag.attrib['value']))
value = [None]
else:
value = [None]
time_values.extend((value * len(splitted_text)))
time_spans.extend((time_span * len(splitted_text)))
splitted_tail = time_tag.tail.split()
tokens.extend(splitted_tail)
time_values.extend(([None] * len(splitted_tail)))
time_spans.extend(([None] * len(splitted_tail)))
tokens = Document._process_tokens(tokens)
doc = spacy.tokens.Doc(nlp.vocab, words=tokens)
nlp.tagger(doc)
nlp.entity(doc)
nlp.parser(doc)
token_objects = []
for token in doc:
token_objects.append(Token(token.orth_, token.lemma_.lower(), token.tag_, token.ent_type_, token.vector, time_values[token.i], time_spans[token.i]))
sentence_objects = []
for sent in doc.sents:
sent_tokens = token_objects[sent.start:sent.end]
sentence_objects.append(Sentence(sent_tokens, publication_date))
return Document(publication_date, sentence_objects)
@staticmethod
def _process_tokens(tokens):
processed_tokens = []
for tok in tokens:
if (tok == '-LRB-'):
processed_tokens.append('(')
elif (tok == '-RRB-'):
processed_tokens.append(')')
elif (tok == '``'):
processed_tokens.append('"')
elif (tok == "''"):
processed_tokens.append('"')
elif (tok == '`'):
processed_tokens.append("'")
else:
processed_tokens.append(tok)
return processed_tokens
def __str__(self):
return '\n'.join([str(s) for s in self.sentences]).strip()
def __iter__(self):
return iter(self.sentences)
def __eq__(self, other):
if isinstance(other, self.__class__):
return ((self.publication_date == other.publication_date) and (self.sentences == other.sentences))
else:
return False
def __ne__(self, other):
return (not (self == other))
def __hash__(self):
return hash((self.publication_date, self.sentences)) |
def run_benchmark():
global parameters
timing_entries = []
with tf.Graph().as_default():
image_size = 224
if (FLAGS.data_format == 'NCHW'):
image_shape = [FLAGS.batch_size, 3, (image_size + 3), (image_size + 3)]
else:
image_shape = [FLAGS.batch_size, (image_size + 3), (image_size + 3), 3]
images = tf.Variable(tf.random_normal(image_shape, dtype=tf.float32, stddev=0.1))
labels = tf.Variable(tf.ones([FLAGS.batch_size], dtype=tf.int32))
last_layer = inference(images)
init = tf.global_variables_initializer()
sess = tf.Session('')
sess.run(init)
run_forward = True
run_forward_backward = True
if (FLAGS.forward_only and FLAGS.forward_backward_only):
raise ValueError('Cannot specify --forward_only and --forward_backward_only at the same time.')
if FLAGS.forward_only:
run_forward_backward = False
elif FLAGS.forward_backward_only:
run_forward = False
if run_forward:
timing_entries.append(time_tensorflow_run(sess, last_layer, 'Forward'))
if run_forward_backward:
objective = loss(last_layer, labels)
grad = tf.gradients(objective, parameters)
timing_entries.append(time_tensorflow_run(sess, grad, 'Forward-backward'))
if FLAGS.csv_file:
store_data_in_csv(timing_entries) |
@pytest.mark.parametrize('seed', [311])
def test_graph_clear_buffer(seed):
np.random.seed(313)
rng = np.random.RandomState(seed)
x = nn.Variable([2, 3, 4, 4])
t = nn.Variable([2, 1])
x.d = rng.randn(*x.shape)
t.d = rng.randint(0, 5, size=t.shape)
nn.set_default_context(nn.Context())
nn.clear_parameters()
x1 = (x + 1)
x2 = (x1 - 1)
with nn.parameter_scope('conv1'):
z = PF.convolution(x2, 3, (2, 2))
z2 = F.relu(z, inplace=True)
with nn.parameter_scope('fc2'):
z3 = PF.affine(z2, 5)
z4 = PF.affine(z2, 5)
l1 = F.softmax_cross_entropy(z3, t, 1)
L1 = F.mean(l1)
l2 = F.softmax_cross_entropy(z4, t, 1)
L2 = F.mean(l2)
import tempfile
import os
tmpd = tempfile.mkdtemp()
nn.save_parameters(os.path.join(tmpd, 'parameter.h5'))
first = False
for cnng in [False, True]:
for cb in [False, True]:
_ = nn.load_parameters(os.path.join(tmpd, 'parameter.h5'))
for v in nn.get_parameters().values():
v.grad.zero()
nn.forward_all([L1, L2], clear_no_need_grad=cnng)
L1.backward(clear_buffer=False)
L2.backward(clear_buffer=cb)
if (not first):
first = True
g = list(nn.get_parameters().values())[0].g.copy()
else:
g2 = list(nn.get_parameters().values())[0].g.copy()
import platform
if (platform.machine() == 'ppc64le'):
pytest.skip('This test fails on ppc64le')
assert np.all((g == g2)) |
class PythonScriptTaskExecution(TaskExecution):
def __init__(self, model_script_path: str, tmp_dir: Union[(str, None)]=None):
TaskExecution.__init__(self, tmp_dir)
if (not os.path.isfile(model_script_path)):
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), model_script_path)
self.model_script_path = model_script_path
self.python_script_format = 'python {} --trainds {} --outputpath {} {}'
def _exec_subprocess(self, script: str) -> Any:
p = subprocess.Popen(script, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
if (p.stdout is not None):
for stdout_line in iter(p.stdout.readline, ''):
(yield stdout_line)
p.stdout.close()
return_code = p.wait()
if return_code:
raise subprocess.CalledProcessError(return_code, script)
def _execute(self, train_ds: str, val_ds: str, output_path: str, mlargs: Dict[(str, Any)]={}) -> None:
if (val_ds != ''):
mlargs.update({'valds': val_ds})
script = self.python_script_format.format(self.model_script_path, train_ds, output_path, ' '.join(self._transform_arguments(mlargs)))
print(script)
process = subprocess.Popen(script, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = process.communicate()
print('****** IQF subprocess --stdout-- ')
print(stdout.decode())
print('****** IQF subprocess --stderr-- ')
print(stderr.decode())
def _transform_arguments(self, mlargs: Dict[(str, Any)]={}) -> List[str]:
return ['--{} {}'.format(k, v) for (k, v) in mlargs.items()] |