code stringlengths 101 5.91M |
|---|
def register_Ns3CallbackImpl__Void_Const_ns3Ipv6Header___amp___Ns3Ptr__lt__const_ns3Packet__gt___Unsigned_int_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
    """Register Python bindings for the ns-3 CallbackImpl<void, Ipv6Header const&, Ptr<const Packet>, unsigned int> specialization.

    Generated PyBindGen registration code: adds the default and copy
    constructors plus the type-id accessors and the call operator (exposed
    to Python as ``__call__``).  Do not edit by hand.
    """
    cls.add_constructor([])
    # Copy constructor taking the same CallbackImpl specialization.
    cls.add_constructor([param('ns3::CallbackImpl< void, ns3::Ipv6Header const &, ns3::Ptr< ns3::Packet const >, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')])
    cls.add_method('DoGetTypeid', 'std::string', [], is_static=True)
    cls.add_method('GetTypeid', 'std::string', [], is_const=True, is_virtual=True)
    # operator() is pure virtual in C++; mapped to Python's __call__.
    cls.add_method('operator()', 'void', [param('ns3::Ipv6Header const &', 'arg0'), param('ns3::Ptr< ns3::Packet const >', 'arg1'), param('unsigned int', 'arg2')], is_pure_virtual=True, is_virtual=True, custom_name=u'__call__')
    return
def inception_v2_base(inputs, final_endpoint='Mixed_5c', min_depth=16, depth_multiplier=1.0, use_separable_conv=True, data_format='NHWC', scope=None):
    """Build the Inception V2 backbone up to (and including) `final_endpoint`.

    The network is constructed endpoint by endpoint; as soon as the endpoint
    named `final_endpoint` has been produced, the function returns early.

    Args:
        inputs: input image tensor, laid out per `data_format`.
        final_endpoint: name of the last endpoint to construct.
        min_depth: lower bound on channel counts after applying the multiplier.
        depth_multiplier: multiplier applied to every conv's channel count;
            must be strictly positive.
        use_separable_conv: if True, the stem uses a separable conv
            (NHWC layout only).
        data_format: 'NHWC' or 'NCHW'.
        scope: optional variable_scope name (defaults to 'InceptionV2').

    Returns:
        A `(net, end_points)` tuple, where `end_points` maps each constructed
        endpoint name to its activation tensor.

    Raises:
        ValueError: for a non-positive `depth_multiplier`, an unknown
            `data_format`, separable conv requested with NCHW, or an unknown
            `final_endpoint`.
    """
    end_points = {}
    if (depth_multiplier <= 0):
        raise ValueError('depth_multiplier is not greater than zero.')
    # Scale a nominal channel count, never dropping below min_depth.
    depth = (lambda d: max(int((d * depth_multiplier)), min_depth))
    if ((data_format != 'NHWC') and (data_format != 'NCHW')):
        raise ValueError('data_format must be either NHWC or NCHW.')
    if ((data_format == 'NCHW') and use_separable_conv):
        raise ValueError('separable convolution only supports NHWC layout. NCHW data format can only be used when use_separable_conv is False.')
    # Channel axis differs between the two layouts.
    concat_dim = (3 if (data_format == 'NHWC') else 1)
    with tf.variable_scope(scope, 'InceptionV2', [inputs]):
        # Default stride 1 / SAME padding for every conv and pool below.
        with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d], stride=1, padding='SAME', data_format=data_format):
            # ---- Stem ----
            end_point = 'Conv2d_1a_7x7'
            if use_separable_conv:
                # depthwise_multiplier chosen so depthwise output stays
                # close to depth(64), capped at 8.
                depthwise_multiplier = min(int((depth(64) / 3)), 8)
                net = slim.separable_conv2d(inputs, depth(64), [7, 7], depth_multiplier=depthwise_multiplier, stride=2, padding='SAME', weights_initializer=trunc_normal(1.0), scope=end_point)
            else:
                net = slim.conv2d(inputs, depth(64), [7, 7], stride=2, weights_initializer=trunc_normal(1.0), scope=end_point)
            end_points[end_point] = net
            if (end_point == final_endpoint):
                return (net, end_points)
            end_point = 'MaxPool_2a_3x3'
            net = slim.max_pool2d(net, [3, 3], scope=end_point, stride=2)
            end_points[end_point] = net
            if (end_point == final_endpoint):
                return (net, end_points)
            end_point = 'Conv2d_2b_1x1'
            net = slim.conv2d(net, depth(64), [1, 1], scope=end_point, weights_initializer=trunc_normal(0.1))
            end_points[end_point] = net
            if (end_point == final_endpoint):
                return (net, end_points)
            end_point = 'Conv2d_2c_3x3'
            net = slim.conv2d(net, depth(192), [3, 3], scope=end_point)
            end_points[end_point] = net
            if (end_point == final_endpoint):
                return (net, end_points)
            end_point = 'MaxPool_3a_3x3'
            net = slim.max_pool2d(net, [3, 3], scope=end_point, stride=2)
            end_points[end_point] = net
            if (end_point == final_endpoint):
                return (net, end_points)
            # ---- Inception mixed blocks ----
            end_point = 'Mixed_3b'
            with tf.variable_scope(end_point):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, depth(64), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1, depth(64), [3, 3], scope='Conv2d_0b_3x3')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net, depth(64), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2, depth(96), [3, 3], scope='Conv2d_0b_3x3')
                    branch_2 = slim.conv2d(branch_2, depth(96), [3, 3], scope='Conv2d_0c_3x3')
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3, depth(32), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1')
                net = tf.concat(axis=concat_dim, values=[branch_0, branch_1, branch_2, branch_3])
            end_points[end_point] = net
            if (end_point == final_endpoint):
                return (net, end_points)
            end_point = 'Mixed_3c'
            with tf.variable_scope(end_point):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, depth(64), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net, depth(64), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2, depth(96), [3, 3], scope='Conv2d_0b_3x3')
                    branch_2 = slim.conv2d(branch_2, depth(96), [3, 3], scope='Conv2d_0c_3x3')
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3, depth(64), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1')
                net = tf.concat(axis=concat_dim, values=[branch_0, branch_1, branch_2, branch_3])
            end_points[end_point] = net
            if (end_point == final_endpoint):
                return (net, end_points)
            # Mixed_4a is a stride-2 reduction block (no pooling branch conv).
            end_point = 'Mixed_4a'
            with tf.variable_scope(end_point):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, depth(128), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1')
                    branch_0 = slim.conv2d(branch_0, depth(160), [3, 3], stride=2, scope='Conv2d_1a_3x3')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, depth(64), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3')
                    branch_1 = slim.conv2d(branch_1, depth(96), [3, 3], stride=2, scope='Conv2d_1a_3x3')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.max_pool2d(net, [3, 3], stride=2, scope='MaxPool_1a_3x3')
                net = tf.concat(axis=concat_dim, values=[branch_0, branch_1, branch_2])
            end_points[end_point] = net
            if (end_point == final_endpoint):
                return (net, end_points)
            end_point = 'Mixed_4b'
            with tf.variable_scope(end_point):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, depth(224), [1, 1], scope='Conv2d_0a_1x1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, depth(64), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net, depth(96), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2, depth(128), [3, 3], scope='Conv2d_0b_3x3')
                    branch_2 = slim.conv2d(branch_2, depth(128), [3, 3], scope='Conv2d_0c_3x3')
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3, depth(128), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1')
                net = tf.concat(axis=concat_dim, values=[branch_0, branch_1, branch_2, branch_3])
            end_points[end_point] = net
            if (end_point == final_endpoint):
                return (net, end_points)
            end_point = 'Mixed_4c'
            with tf.variable_scope(end_point):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, depth(96), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1, depth(128), [3, 3], scope='Conv2d_0b_3x3')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net, depth(96), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2, depth(128), [3, 3], scope='Conv2d_0b_3x3')
                    branch_2 = slim.conv2d(branch_2, depth(128), [3, 3], scope='Conv2d_0c_3x3')
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3, depth(128), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1')
                net = tf.concat(axis=concat_dim, values=[branch_0, branch_1, branch_2, branch_3])
            end_points[end_point] = net
            if (end_point == final_endpoint):
                return (net, end_points)
            end_point = 'Mixed_4d'
            with tf.variable_scope(end_point):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, depth(128), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1, depth(160), [3, 3], scope='Conv2d_0b_3x3')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net, depth(128), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2, depth(160), [3, 3], scope='Conv2d_0b_3x3')
                    branch_2 = slim.conv2d(branch_2, depth(160), [3, 3], scope='Conv2d_0c_3x3')
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3, depth(96), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1')
                net = tf.concat(axis=concat_dim, values=[branch_0, branch_1, branch_2, branch_3])
            end_points[end_point] = net
            if (end_point == final_endpoint):
                return (net, end_points)
            end_point = 'Mixed_4e'
            with tf.variable_scope(end_point):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, depth(96), [1, 1], scope='Conv2d_0a_1x1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, depth(128), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1, depth(192), [3, 3], scope='Conv2d_0b_3x3')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net, depth(160), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2, depth(192), [3, 3], scope='Conv2d_0b_3x3')
                    branch_2 = slim.conv2d(branch_2, depth(192), [3, 3], scope='Conv2d_0c_3x3')
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3, depth(96), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1')
                net = tf.concat(axis=concat_dim, values=[branch_0, branch_1, branch_2, branch_3])
            end_points[end_point] = net
            if (end_point == final_endpoint):
                return (net, end_points)
            # Mixed_5a is the second stride-2 reduction block.
            end_point = 'Mixed_5a'
            with tf.variable_scope(end_point):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, depth(128), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1')
                    branch_0 = slim.conv2d(branch_0, depth(192), [3, 3], stride=2, scope='Conv2d_1a_3x3')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, depth(192), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1, depth(256), [3, 3], scope='Conv2d_0b_3x3')
                    branch_1 = slim.conv2d(branch_1, depth(256), [3, 3], stride=2, scope='Conv2d_1a_3x3')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.max_pool2d(net, [3, 3], stride=2, scope='MaxPool_1a_3x3')
                net = tf.concat(axis=concat_dim, values=[branch_0, branch_1, branch_2])
            end_points[end_point] = net
            if (end_point == final_endpoint):
                return (net, end_points)
            end_point = 'Mixed_5b'
            with tf.variable_scope(end_point):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, depth(352), [1, 1], scope='Conv2d_0a_1x1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, depth(192), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1, depth(320), [3, 3], scope='Conv2d_0b_3x3')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net, depth(160), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2, depth(224), [3, 3], scope='Conv2d_0b_3x3')
                    branch_2 = slim.conv2d(branch_2, depth(224), [3, 3], scope='Conv2d_0c_3x3')
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3, depth(128), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1')
                net = tf.concat(axis=concat_dim, values=[branch_0, branch_1, branch_2, branch_3])
            end_points[end_point] = net
            if (end_point == final_endpoint):
                return (net, end_points)
            # Mixed_5c uses max pooling on the pooling branch (unlike Mixed_5b).
            end_point = 'Mixed_5c'
            with tf.variable_scope(end_point):
                with tf.variable_scope('Branch_0'):
                    branch_0 = slim.conv2d(net, depth(352), [1, 1], scope='Conv2d_0a_1x1')
                with tf.variable_scope('Branch_1'):
                    branch_1 = slim.conv2d(net, depth(192), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1')
                    branch_1 = slim.conv2d(branch_1, depth(320), [3, 3], scope='Conv2d_0b_3x3')
                with tf.variable_scope('Branch_2'):
                    branch_2 = slim.conv2d(net, depth(192), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1')
                    branch_2 = slim.conv2d(branch_2, depth(224), [3, 3], scope='Conv2d_0b_3x3')
                    branch_2 = slim.conv2d(branch_2, depth(224), [3, 3], scope='Conv2d_0c_3x3')
                with tf.variable_scope('Branch_3'):
                    branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
                    branch_3 = slim.conv2d(branch_3, depth(128), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1')
                net = tf.concat(axis=concat_dim, values=[branch_0, branch_1, branch_2, branch_3])
            end_points[end_point] = net
            if (end_point == final_endpoint):
                return (net, end_points)
    raise ValueError(('Unknown final endpoint %s' % final_endpoint))
@_dispatch
def idst(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False, workers=None):
    """uarray multimethod stub for the inverse discrete sine transform.

    Only declares that dispatch happens on ``x`` (as an ndarray); the real
    implementation is supplied by the registered backend.

    Note: the bare ``_dispatch`` expression in the original was a decorator
    whose ``@`` had been stripped; restored here so the function is actually
    registered as a multimethod.
    """
    return (Dispatchable(x, np.ndarray),)
class Sentence(object):
    """A tokenized sentence plus a parser for bracketed constituency strings."""

    def __init__(self, sent):
        self.tokens = {}       # token index -> Token
        self.num_tokens = 0
        self.sent = sent       # raw sentence text
        self.tree = None       # parse tree root, set externally

    def set_tokens(self, tokens):
        """Store (token, POS) pairs by position; empty token text becomes '='."""
        for (i, (t, p)) in enumerate(tokens):
            if t == '':
                t = '='
            self.tokens[i] = Token(t, p, i)
        self.num_tokens = len(self.tokens)

    def get_divisions(self, parse_txt_partial):
        """Split '(LABEL child ...)' into (LABEL, [child strings]).

        For a non-terminal such as '(NP (DT the) (NN dog))' the children are
        the balanced '(...)' groups; for a terminal such as '(NN dog)' (no
        inner parenthesis) the single word is returned as the only child.
        """
        parse_txt_partial = parse_txt_partial[1:-1]
        try:
            idx_first_lb = parse_txt_partial.index('(')
            name_const = parse_txt_partial[:idx_first_lb].strip()
            parse_txt_partial = parse_txt_partial[idx_first_lb:]
            # Walk the string counting parenthesis depth; each return to
            # depth 0 closes one child constituent.
            count = 0
            partition_indices = []
            for idx in range(len(parse_txt_partial)):
                if parse_txt_partial[idx] == '(':
                    count += 1
                elif parse_txt_partial[idx] == ')':
                    count -= 1
                    if count == 0:
                        partition_indices.append(idx + 1)
            partitions = []
            part_idx_prev = 0
            for (i, part_idx) in enumerate(partition_indices):
                partitions.append(parse_txt_partial[part_idx_prev:part_idx])
                part_idx_prev = part_idx
        except ValueError:
            # str.index found no '(' -> terminal node: 'LABEL word'.
            # (Was a bare `except:`, which silently hid unrelated errors.)
            temp = parse_txt_partial.split(' ')
            name_const = temp[0]
            partitions = [temp[1]]
        return (name_const, partitions)

    def parse_the_parse(self, parse_txt, node):
        """Recursively build a Node tree from a bracketed parse string.

        Pass node=None at the top level to create the root.
        """
        if parse_txt.startswith('('):
            (phrase_name, divisions) = self.get_divisions(parse_txt)
            if node == None:
                node = Node()
                node.root = True
            node.label = phrase_name
            if phrase_name in NON_TERMINALS:
                for phrase in divisions:
                    if phrase.strip() == '':
                        continue
                    node_temp = Node()
                    node_temp.parent = node
                    node.children.append(self.parse_the_parse(phrase, node_temp))
            else:
                node.terminal = True
                node.phrase = divisions[0]
        return node
class MPNetForQuestionAnswering():
    """Import-time placeholder for MPNetForQuestionAnswering.

    Every entry point simply calls ``requires_pytorch``, which reports that
    PyTorch must be installed for the real class to be usable.
    """

    def __init__(self, *args, **kwargs):
        requires_pytorch(self)

    def from_pretrained(self, *args, **kwargs):
        requires_pytorch(self)
def align(input_file, en_output_file, tg_output_file):
    """Split a tab-separated parallel file into two aligned output files.

    Each input line is ``english<TAB>target``.  The English side is written
    character-by-character (space-separated) to ``en_output_file``; the
    target side is written unchanged to ``tg_output_file``.
    """
    for raw_line in input_file:
        columns = raw_line.rstrip().split('\t')
        spaced_chars = ' '.join(str(ch) for ch in columns[0])
        en_output_file.write(spaced_chars + '\n')
        tg_output_file.write(columns[1] + '\n')
class AdaActiveLearningNodeRegressor(ActiveLearningNodePerceptron, AdaNode):
    """Learning node for Hoeffding Adaptive Tree regression with ADWIN-based
    drift monitoring of its normalized prediction error."""

    def __init__(self, initial_stats=None, parent_node=None, random_state=None):
        super().__init__(initial_stats, parent_node, random_state)
        self._adwin = ADWIN()           # drift detector over normalized errors
        self._error_change = False      # set True when ADWIN flags a change
        self._n = 0

    # The four accessors below are read as attributes inside learn_one
    # (e.g. `old_error = self.error_estimation`, then compared numerically),
    # so they must be properties; the @property decorators had evidently
    # been stripped from this copy and are restored here.
    @property
    def n_leaves(self):
        # A learning node is itself a single leaf.
        return 1

    @property
    def error_estimation(self):
        return self._adwin.estimation

    @property
    def error_width(self):
        return self._adwin.width

    @property
    def error_is_null(self):
        return self._adwin is None

    def kill_tree_children(self, hat):
        # Leaves have no subtree to prune.
        pass

    def learn_one(self, X, y, weight, tree, parent, parent_branch):
        """Update drift statistics and the underlying perceptron, then try to split."""
        y_pred = self.predict_one(X, tree=tree)
        normalized_error = get_normalized_error(y, y_pred, self)
        if tree.bootstrap_sampling:
            # Online bagging: Poisson(1)-weighted resampling of the instance.
            k = self._random_state.poisson(1.0)
            if k > 0:
                weight = weight * k
        if self._adwin is None:
            self._adwin = ADWIN()
        old_error = self.error_estimation
        self._adwin.add_element(normalized_error)
        self._error_change = self._adwin.detected_change()
        if self._error_change and (old_error > self.error_estimation):
            # Error decreased: not a degradation, so ignore the signal.
            self._error_change = False
        super().learn_one(X, y, weight=weight, tree=tree)
        weight_seen = self.total_weight
        if (weight_seen - self.last_split_attempt_at) >= tree.grace_period:
            tree._attempt_to_split(self, parent, parent_branch)
            self.last_split_attempt_at = weight_seen

    def predict_one(self, X, *, tree=None):
        """Predict via target mean or the parent perceptron, per tree config."""
        prediction_option = tree.leaf_prediction
        if prediction_option == tree._TARGET_MEAN:
            # _stats[0] is the weight seen, _stats[1] the weighted target sum.
            return ((self._stats[1] / self._stats[0]) if ((len(self._stats) > 0) and (self._stats[0] > 0)) else 0.0)
        else:
            return super().predict_one(X, tree=tree)

    def filter_instance_to_leaves(self, X, y, weight, parent, parent_branch, update_splitter_counts, found_nodes=None):
        """Collect this leaf as the destination for the instance."""
        if found_nodes is None:
            found_nodes = []
        found_nodes.append(FoundNode(self, parent, parent_branch))
def evaluate(gold_ud, system_ud):
    """Score a system CoNLL-U parse against the gold standard.

    Aligns gold and system words (resolving multi-word token regions with an
    LCS over lowercased forms) and returns a dict mapping metric name
    ('Tokens', 'Sentences', 'Words', 'UPOS', ..., 'BLEX') to a Score.

    Raises:
        UDError: if the concatenated characters of the two files differ.
    """
    class Score():
        # Precision/recall/F1 container for one metric.
        def __init__(self, gold_total, system_total, correct, aligned_total=None):
            self.correct = correct
            self.gold_total = gold_total
            self.system_total = system_total
            self.aligned_total = aligned_total
            self.precision = ((correct / system_total) if system_total else 0.0)
            self.recall = ((correct / gold_total) if gold_total else 0.0)
            self.f1 = (((2 * correct) / (system_total + gold_total)) if (system_total + gold_total) else 0.0)
            # None/0 aligned_total propagates through unchanged.
            self.aligned_accuracy = ((correct / aligned_total) if aligned_total else aligned_total)
    class AlignmentWord():
        # One matched gold/system word pair.
        def __init__(self, gold_word, system_word):
            self.gold_word = gold_word
            self.system_word = system_word
    class Alignment():
        # Word-level alignment between the gold and system sentences.
        def __init__(self, gold_words, system_words):
            self.gold_words = gold_words
            self.system_words = system_words
            self.matched_words = []
            self.matched_words_map = {}
        def append_aligned_words(self, gold_word, system_word):
            self.matched_words.append(AlignmentWord(gold_word, system_word))
            self.matched_words_map[system_word] = gold_word
    def lower(text):
        # Python 2 byte strings must be decoded before lowercasing.
        if ((sys.version_info < (3, 0)) and isinstance(text, str)):
            return text.decode('utf-8').lower()
        return text.lower()
    def spans_score(gold_spans, system_spans):
        # Two-pointer sweep over start-sorted spans; a span is correct when
        # both start and end match.
        (correct, gi, si) = (0, 0, 0)
        while ((gi < len(gold_spans)) and (si < len(system_spans))):
            if (system_spans[si].start < gold_spans[gi].start):
                si += 1
            elif (gold_spans[gi].start < system_spans[si].start):
                gi += 1
            else:
                correct += (gold_spans[gi].end == system_spans[si].end)
                si += 1
                gi += 1
        return Score(len(gold_spans), len(system_spans), correct)
    def alignment_score(alignment, key_fn=None, filter_fn=None):
        # With no key_fn, score alignment coverage itself; with a key_fn,
        # count aligned pairs whose keys agree (optionally filtered).
        if (filter_fn is not None):
            gold = sum((1 for gold in alignment.gold_words if filter_fn(gold)))
            system = sum((1 for system in alignment.system_words if filter_fn(system)))
            aligned = sum((1 for word in alignment.matched_words if filter_fn(word.gold_word)))
        else:
            gold = len(alignment.gold_words)
            system = len(alignment.system_words)
            aligned = len(alignment.matched_words)
        if (key_fn is None):
            return Score(gold, system, aligned)
        def gold_aligned_gold(word):
            return word
        def gold_aligned_system(word):
            # Map a system word to its aligned gold word; unmatched words
            # get a sentinel so they never compare equal.
            return (alignment.matched_words_map.get(word, 'NotAligned') if (word is not None) else None)
        correct = 0
        for words in alignment.matched_words:
            if ((filter_fn is None) or filter_fn(words.gold_word)):
                if (key_fn(words.gold_word, gold_aligned_gold) == key_fn(words.system_word, gold_aligned_system)):
                    correct += 1
        return Score(gold, system, correct, aligned)
    def beyond_end(words, i, multiword_span_end):
        # True once index i is past the current multi-word region.
        if (i >= len(words)):
            return True
        if words[i].is_multiword:
            return (words[i].span.start >= multiword_span_end)
        return (words[i].span.end > multiword_span_end)
    def extend_end(word, multiword_span_end):
        # Grow the region end if this multi-word token reaches further.
        if (word.is_multiword and (word.span.end > multiword_span_end)):
            return word.span.end
        return multiword_span_end
    def find_multiword_span(gold_words, system_words, gi, si):
        # Determine the maximal [gs,gi) x [ss,si) region covering the
        # overlapping multi-word tokens starting at gi/si.
        if gold_words[gi].is_multiword:
            multiword_span_end = gold_words[gi].span.end
            if ((not system_words[si].is_multiword) and (system_words[si].span.start < gold_words[gi].span.start)):
                si += 1
        else:
            multiword_span_end = system_words[si].span.end
            if ((not gold_words[gi].is_multiword) and (gold_words[gi].span.start < system_words[si].span.start)):
                gi += 1
        (gs, ss) = (gi, si)
        while ((not beyond_end(gold_words, gi, multiword_span_end)) or (not beyond_end(system_words, si, multiword_span_end))):
            if ((gi < len(gold_words)) and ((si >= len(system_words)) or (gold_words[gi].span.start <= system_words[si].span.start))):
                multiword_span_end = extend_end(gold_words[gi], multiword_span_end)
                gi += 1
            else:
                multiword_span_end = extend_end(system_words[si], multiword_span_end)
                si += 1
        return (gs, ss, gi, si)
    def compute_lcs(gold_words, system_words, gi, si, gs, ss):
        # Bottom-up LCS table over lowercased FORMs within the region.
        lcs = [([0] * (si - ss)) for i in range((gi - gs))]
        for g in reversed(range((gi - gs))):
            for s in reversed(range((si - ss))):
                if (lower(gold_words[(gs + g)].columns[FORM]) == lower(system_words[(ss + s)].columns[FORM])):
                    lcs[g][s] = (1 + (lcs[(g + 1)][(s + 1)] if (((g + 1) < (gi - gs)) and ((s + 1) < (si - ss))) else 0))
                lcs[g][s] = max(lcs[g][s], (lcs[(g + 1)][s] if ((g + 1) < (gi - gs)) else 0))
                lcs[g][s] = max(lcs[g][s], (lcs[g][(s + 1)] if ((s + 1) < (si - ss)) else 0))
        return lcs
    def align_words(gold_words, system_words):
        # Pair up words: exact span matches align directly; multi-word
        # regions are aligned via the LCS backtrace.
        alignment = Alignment(gold_words, system_words)
        (gi, si) = (0, 0)
        while ((gi < len(gold_words)) and (si < len(system_words))):
            if (gold_words[gi].is_multiword or system_words[si].is_multiword):
                (gs, ss, gi, si) = find_multiword_span(gold_words, system_words, gi, si)
                if ((si > ss) and (gi > gs)):
                    lcs = compute_lcs(gold_words, system_words, gi, si, gs, ss)
                    (s, g) = (0, 0)
                    while ((g < (gi - gs)) and (s < (si - ss))):
                        if (lower(gold_words[(gs + g)].columns[FORM]) == lower(system_words[(ss + s)].columns[FORM])):
                            alignment.append_aligned_words(gold_words[(gs + g)], system_words[(ss + s)])
                            g += 1
                            s += 1
                        elif (lcs[g][s] == (lcs[(g + 1)][s] if ((g + 1) < (gi - gs)) else 0)):
                            g += 1
                        else:
                            s += 1
            elif ((gold_words[gi].span.start, gold_words[gi].span.end) == (system_words[si].span.start, system_words[si].span.end)):
                alignment.append_aligned_words(gold_words[gi], system_words[si])
                gi += 1
                si += 1
            elif (gold_words[gi].span.start <= system_words[si].span.start):
                gi += 1
            else:
                si += 1
        return alignment
    # The two files must contain identical character sequences, otherwise
    # no meaningful alignment exists.
    if (gold_ud.characters != system_ud.characters):
        index = 0
        while ((index < len(gold_ud.characters)) and (index < len(system_ud.characters)) and (gold_ud.characters[index] == system_ud.characters[index])):
            index += 1
        raise UDError(('The concatenation of tokens in gold file and in system file differ!\n' + "First 20 differing characters in gold file: '{}' and system file: '{}'".format(''.join(gold_ud.characters[index:(index + 20)]), ''.join(system_ud.characters[index:(index + 20)]))))
    alignment = align_words(gold_ud.words, system_ud.words)
    return {'Tokens': spans_score(gold_ud.tokens, system_ud.tokens), 'Sentences': spans_score(gold_ud.sentences, system_ud.sentences), 'Words': alignment_score(alignment), 'UPOS': alignment_score(alignment, (lambda w, _: w.columns[UPOS])), 'XPOS': alignment_score(alignment, (lambda w, _: w.columns[XPOS])), 'UFeats': alignment_score(alignment, (lambda w, _: w.columns[FEATS])), 'AllTags': alignment_score(alignment, (lambda w, _: (w.columns[UPOS], w.columns[XPOS], w.columns[FEATS]))), 'Lemmas': alignment_score(alignment, (lambda w, ga: (w.columns[LEMMA] if (ga(w).columns[LEMMA] != '_') else '_'))), 'UAS': alignment_score(alignment, (lambda w, ga: ga(w.parent))), 'LAS': alignment_score(alignment, (lambda w, ga: (ga(w.parent), w.columns[DEPREL]))), 'CLAS': alignment_score(alignment, (lambda w, ga: (ga(w.parent), w.columns[DEPREL])), filter_fn=(lambda w: w.is_content_deprel)), 'MLAS': alignment_score(alignment, (lambda w, ga: (ga(w.parent), w.columns[DEPREL], w.columns[UPOS], w.columns[FEATS], [(ga(c), c.columns[DEPREL], c.columns[UPOS], c.columns[FEATS]) for c in w.functional_children])), filter_fn=(lambda w: w.is_content_deprel)), 'BLEX': alignment_score(alignment, (lambda w, ga: (ga(w.parent), w.columns[DEPREL], (w.columns[LEMMA] if (ga(w).columns[LEMMA] != '_') else '_'))), filter_fn=(lambda w: w.is_content_deprel))}
def _individual_mobility_network_individual(traj, self_loops=False):
    """Build one individual's mobility network from a trajectory DataFrame.

    Sorts the trajectory by datetime, counts transitions between consecutive
    (lat, lng) locations, and returns a DataFrame with columns
    lat/lng_origin, lat/lng_dest and 'n_trips'.

    Args:
        traj: trajectory DataFrame with datetime, latitude and longitude
            columns (names taken from `constants`).
        self_loops: when True, consecutive identical locations are also
            counted as (self-loop) trips.
    """
    loc2loc2weight = defaultdict(lambda: defaultdict(lambda: 0))
    traj = traj.sort_values(by=constants.DATETIME)
    lats_lngs = traj[[constants.LATITUDE, constants.LONGITUDE]].values
    # Count each consecutive-location transition; identical consecutive
    # locations count only when self_loops is enabled.  (The original
    # duplicated the increment across an if/elif/else-pass ladder.)
    for i, (lat, lng) in enumerate(lats_lngs[1:], start=1):
        prev = tuple(lats_lngs[i - 1])
        current = (lat, lng)
        if prev != current or self_loops:
            loc2loc2weight[prev][current] += 1
    rows = []
    for loc1, loc2weight in loc2loc2weight.items():
        for loc2, weight in loc2weight.items():
            rows.append([loc1[0], loc1[1], loc2[0], loc2[1], weight])
    return pd.DataFrame(rows, columns=[(constants.LATITUDE + '_origin'), (constants.LONGITUDE + '_origin'), (constants.LATITUDE + '_dest'), (constants.LONGITUDE + '_dest'), 'n_trips'])
def save_img_uint8(img, pth):
    """Save an image array as an 8-bit PNG at path `pth`.

    NaNs are mapped to 0, values are clipped to [0, 1], then scaled to
    [0, 255] and cast to uint8 before handing off to PIL.
    """
    with open_file(pth, 'wb') as f:
        # np.uint8 (was jnp.uint8): the array fed to PIL is a NumPy array,
        # so the NumPy dtype is the consistent choice — same numeric result,
        # without coupling this I/O helper to JAX.
        Image.fromarray((np.clip(np.nan_to_num(img), 0.0, 1.0) * 255.0).astype(np.uint8)).save(f, 'PNG')
def double_moments(x, y):
    """Return batched second moments of x and y augmented with a bias term.

    Appends a ones column to both inputs, then forms the batched outer
    product, so the result contains all pairwise products x_i*y_j plus the
    first moments themselves.

    Args:
        x: tensor of shape (batch_size, x_dim).
        y: tensor of shape (batch_size, y_dim).

    Returns:
        Tensor of shape (batch_size, (x_dim + 1) * (y_dim + 1)).
    """
    (batch_size, x_dim) = x.size()
    # Bug fix: y_dim was read from x.size(), which crashed (or silently
    # mis-shaped the expand) whenever x_dim != y_dim.
    (_, y_dim) = y.size()
    x = torch.cat((x, Variable(torch.ones(batch_size, 1))), dim=1)
    y = torch.cat((y, Variable(torch.ones(batch_size, 1))), dim=1)
    x_dim += 1
    y_dim += 1
    # Broadcast x along columns and y along rows to build the outer product.
    x = x.unsqueeze(2)
    y = y.unsqueeze(1)
    outer_prod = (x.expand(batch_size, x_dim, y_dim) * y.expand(batch_size, x_dim, y_dim))
    return outer_prod.view(batch_size, (- 1))
def test_synthetic_slate_obtain_batch_bandit_feedback_using_uniform_random_behavior_policy_largescale():
    """Large-scale check of the uniform-random behavior policy.

    Generates 10k rounds of slate bandit feedback, validates the feedback
    structure, and asserts that every item-position propensity equals
    1 / n_unique_action (the uniform-random value).
    """
    n_unique_action = 100
    len_list = 10
    dim_context = 2
    reward_type = 'binary'
    random_state = 12345
    n_rounds = 10000
    dataset = SyntheticSlateBanditDataset(n_unique_action=n_unique_action, len_list=len_list, dim_context=dim_context, reward_type=reward_type, random_state=random_state)
    bandit_feedback = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)
    check_slate_bandit_feedback(bandit_feedback=bandit_feedback)
    # Uniform policy => every pscore is exactly 1/n_unique_action.
    pscore_item_position = (1 / n_unique_action)
    assert np.allclose(np.unique(bandit_feedback['pscore_item_position']), pscore_item_position), f"pscore_item_position must be [{pscore_item_position}], but {np.unique(bandit_feedback['pscore_item_position'])}"
class GCSDataset(GCDataset):
    """Goal-conditioned dataset variant that also samples hierarchical
    (low-level waypoint and high-level) goals for each batch."""

    # Number of steps ahead used for low-level waypoint targets.
    way_steps: int = None
    # Probability of replacing the high-level goal with a random state.
    high_p_randomgoal: float = 0.0

    # NOTE(review): defined without `self` and without @staticmethod —
    # presumably a @staticmethod decorator was lost in this copy; calling it
    # on an instance would raise TypeError. Confirm against the original.
    def get_default_config():
        return ml_collections.ConfigDict({'p_randomgoal': 0.3, 'p_trajgoal': 0.5, 'p_currgoal': 0.2, 'geom_sample': 0, 'reward_scale': 1.0, 'reward_shift': 0.0, 'terminal': False})

    def sample(self, batch_size: int, indx=None):
        """Sample a batch and attach goals, low_goals, high_goals, high_targets.

        Rewards/masks are derived from whether the sampled goal index equals
        the state index (a "success").
        """
        if (indx is None):
            indx = np.random.randint((self.dataset.size - 1), size=batch_size)
        batch = self.dataset.sample(batch_size, indx)
        goal_indx = self.sample_goals(indx)
        # Success iff the goal is the current state itself.
        success = (indx == goal_indx)
        batch['rewards'] = ((success.astype(float) * self.reward_scale) + self.reward_shift)
        if self.terminal:
            batch['masks'] = (1.0 - success.astype(float))
        else:
            batch['masks'] = np.ones(batch_size)
        batch['goals'] = jax.tree_map((lambda arr: arr[goal_indx]), self.dataset['observations'])
        # Low-level goal: way_steps ahead, clipped at the trajectory end.
        final_state_indx = self.terminal_locs[np.searchsorted(self.terminal_locs, indx)]
        way_indx = np.minimum((indx + self.way_steps), final_state_indx)
        batch['low_goals'] = jax.tree_map((lambda arr: arr[way_indx]), self.dataset['observations'])
        # High-level goal: random interpolation between the next state and
        # the trajectory's final state; target is way_steps toward it.
        distance = np.random.rand(batch_size)
        high_traj_goal_indx = np.round(((np.minimum((indx + 1), final_state_indx) * distance) + (final_state_indx * (1 - distance)))).astype(int)
        high_traj_target_indx = np.minimum((indx + self.way_steps), high_traj_goal_indx)
        # With probability high_p_randomgoal, replace with a random state.
        high_random_goal_indx = np.random.randint(self.dataset.size, size=batch_size)
        high_random_target_indx = np.minimum((indx + self.way_steps), final_state_indx)
        pick_random = (np.random.rand(batch_size) < self.high_p_randomgoal)
        high_goal_idx = np.where(pick_random, high_random_goal_indx, high_traj_goal_indx)
        high_target_idx = np.where(pick_random, high_random_target_indx, high_traj_target_indx)
        batch['high_goals'] = jax.tree_map((lambda arr: arr[high_goal_idx]), self.dataset['observations'])
        batch['high_targets'] = jax.tree_map((lambda arr: arr[high_target_idx]), self.dataset['observations'])
        # Keep observation pytrees frozen when goals are frozen.
        if isinstance(batch['goals'], FrozenDict):
            batch['observations'] = freeze(batch['observations'])
            batch['next_observations'] = freeze(batch['next_observations'])
        return batch
def _flip(arr, axes=None):
if (axes is None):
reverse = ([slice(None, None, (- 1))] * arr.ndim)
else:
reverse = ([slice(None, None, None)] * arr.ndim)
for axis in axes:
reverse[axis] = slice(None, None, (- 1))
return arr[tuple(reverse)] |
def make_context_embedder(opt, embeddings, embed_type='utterance'):
    """Build the context embedder selected by ``opt.context_embedder_type``.

    'mean' yields a MeanEncoder; anything else yields a StdRNNEncoder,
    bidirectional only for 'brnn'.
    """
    if opt.context_embedder_type == 'mean':
        return MeanEncoder(opt.enc_layers, embeddings, embed_type)
    # `== 'brnn'` already produces a bool; the original's
    # `True if ... else False` ternary was redundant.
    bidirectional = opt.context_embedder_type == 'brnn'
    return StdRNNEncoder(opt.rnn_type, bidirectional, opt.enc_layers, opt.rnn_size, opt.dropout, embeddings, embed_type, False)
def convert_k8s_suffix(k8s_value: str) -> float:
    """Convert a Kubernetes quantity string to a float.

    Handles plain numbers, binary suffixes (Ki, Mi, ... Ei) and decimal
    suffixes (n, u, m, k, M, G, T, P, E).  Binary suffixes are matched
    first so '1Mi' is never misread as mega.
    """
    try:
        # Fast path: the value is already a plain number.
        return float(k8s_value)
    except ValueError:
        pass
    scale_table = (
        ('Ki', 2, 10), ('Mi', 2, 20), ('Gi', 2, 30), ('Ti', 2, 40),
        ('Pi', 2, 50), ('Ei', 2, 60),
        ('n', 10, -9), ('u', 10, -6), ('m', 10, -3),
        ('k', 10, 3), ('M', 10, 6), ('G', 10, 9),
        ('T', 10, 12), ('P', 10, 15), ('E', 10, 18),
    )
    for symbol, base, exponent in scale_table:
        if k8s_value.endswith(symbol):
            magnitude = float(k8s_value[:-len(symbol)])
            return magnitude * (base ** exponent)
    # No known suffix: let float() raise the informative ValueError.
    return float(k8s_value)
class CityScapesVideo(data.Dataset):
    """Dataset over the Cityscapes video frames returned by make_dataset_video.

    Each item is an (image, name) pair, where `name` is the image file's
    base name without extension.
    """

    def __init__(self, transform=None):
        self.imgs = make_dataset_video()
        if (len(self.imgs) == 0):
            raise RuntimeError('Found 0 images, please check the data set')
        self.transform = transform

    def __getitem__(self, index):
        path = self.imgs[index]
        image = Image.open(path).convert('RGB')
        name = os.path.splitext(os.path.basename(path))[0]
        if self.transform is not None:
            image = self.transform(image)
        return (image, name)

    def __len__(self):
        return len(self.imgs)
_resource
def load_bianque_v1_model():
    """Load the BianQue-v1 T5 model and move it to the target device.

    Uses the module-level `bianque_v1_model_name_or_path` and `device`.
    """
    # Consistency fix: the local was previously named `bianque_v2_model`
    # even though this function loads the v1 checkpoint.
    model = T5ForConditionalGeneration.from_pretrained(bianque_v1_model_name_or_path)
    model.to(device)
    print('bianque_v1 model Load done!')
    return model
class KerasActivationExtractor(ActivationExtractor):
    """Captures the inputs of selected layer types (and the last linear
    layer's output) of a Keras model via an auxiliary intermediate model."""

    def __init__(self, model: tf.keras.Model, layer_types_to_extract_inputs: List, image_granularity: ImageGranularity, image_input_manipulation: Callable, linear_layers: Tuple=(Dense, Conv2D)):
        self.model = model
        self.image_input_manipulation = image_input_manipulation
        self.image_granularity = image_granularity
        self.layer_types_to_extract_inputs = tuple(layer_types_to_extract_inputs)
        self.linear_layers = linear_layers
        # Names of all layers whose inputs will be captured.
        self.bn_layer_names = [layer.name for layer in model.layers if isinstance(layer, self.layer_types_to_extract_inputs)]
        self.num_layers = len(self.bn_layer_names)
        print(f'Number of layers = {self.num_layers}')
        self.activations = {}  # layer name -> {'layer', 'input_data'}
        self.last_linear_layer_output = None
        self.last_linear_layers = self.get_model_last_layer()
        # Capture the *inputs* of the selected layers.
        self.layer_list = [layer for layer in self.model.layers if isinstance(layer, self.layer_types_to_extract_inputs)]
        self.outputs_list = [layer.input for layer in self.layer_list]
        if (self.last_linear_layers is not None):
            self.layer_list.append(self.last_linear_layers)
            # If the last linear layer is not the model output itself, also
            # expose its output explicitly.
            if (self.last_linear_layers is not self.model.layers[(- 1)]):
                self.outputs_list.append(self.last_linear_layers.output)
        self.outputs_list.append(self.model.output)
        # Auxiliary model that emits all captured tensors in one pass.
        self.intermediate_model = tf.keras.Model(inputs=self.model.input, outputs=self.outputs_list)

    def get_layer_input_activation(self, layer_name: str) -> Dict:
        """Return the captured {'layer', 'input_data'} dict for a layer name."""
        return self.activations.get(layer_name)

    def get_extractor_layer_names(self) -> List:
        """Return the names of layers whose inputs are captured."""
        return self.bn_layer_names

    def run_on_inputs(self, inputs: tf.Tensor) -> List[tf.Tensor]:
        """Run the intermediate model, returning all captured tensors."""
        return self.intermediate_model(inputs=inputs)

    def run_model(self, inputs: tf.Tensor) -> tf.Tensor:
        """Run on inputs, populate self.activations, and return the model output.

        Relies on intermediate_outputs[i] aligning with layer_list[i], with
        the final element being the model output.
        """
        intermediate_outputs = self.run_on_inputs(inputs=inputs)
        for (i, layer) in enumerate(self.layer_list):
            if isinstance(layer, self.layer_types_to_extract_inputs):
                input_data = intermediate_outputs[i]
                self.activations[layer.name] = {'layer': layer, 'input_data': input_data}
            elif (layer == self.last_linear_layers):
                self.last_linear_layer_output = intermediate_outputs[i]
        return intermediate_outputs[(- 1)]

    def get_model_last_layer(self):
        """Find the last linear (Dense/Conv2D) layer that does not feed a
        captured layer type; returns None if no such layer exists."""
        last_layer = None
        for layer in reversed(self.model.layers):
            if isinstance(layer, self.linear_layers):
                if (not any((isinstance(node.layer, self.layer_types_to_extract_inputs) for node in layer._outbound_nodes))):
                    last_layer = layer
                    break
        return last_layer

    def remove(self):
        """Drop all captured activations."""
        self.activations = {}
def test_unknowntype_categorical():
    """Constructing UnknownType with a categorical flag must raise TypeError."""
    params = {'__categorical__': True}
    with pytest.raises(TypeError):
        UnknownType(parameters=params)
class MicroStructureProblem(Problem):
    """PCG problem that evolves a 64x64 binary (empty/solid) micro-structure
    map whose objective is maximal path tortuosity.

    NOTE(review): the original class body held per-method documentation as
    bare string expressions placed *before* each method (no-op statements at
    class level); they are now proper method docstrings. A duplicated
    `_max_path_length` assignment and a few dead locals were removed; runtime
    behavior is otherwise unchanged.
    """

    def __init__(self):
        super().__init__()
        self._width = 64
        self._height = 64
        self._prob = {'empty': 0.5, 'solid': 0.5}
        self._border_tile = 'solid'
        self._random_probs = True
        self._reward_weights = {'tortuosity': 1}
        # Upper bound on path length for a serpentine traversal of the grid.
        self._max_path_length = ((np.ceil((self._width / 2)) * self._height) + np.floor((self._height / 2)))
        self._max_tortuosity = (self._max_path_length / 2)
        self._target_path = 20
        self.path_coords = []
        self.path_length = None
        self.static_trgs = {'tortuosity': self._max_tortuosity}
        self.cond_bounds = {'path-length': (0, self._max_path_length), 'tortuosity': (0, self._max_tortuosity)}

    def get_tile_types(self):
        """Return the list of all tile names used by this problem."""
        return ['empty', 'solid']

    def adjust_param(self, **kwargs):
        """Adjust the parameters for the current problem.

        Keyword Args:
            width/height/probs: handled by the base class.
            target_path (int): path length at which an episode may end.
            random_probs (bool): re-randomize tile probabilities on reset.
            rewards (dict[str, float]): per-stat reward weights to override.
        """
        super().adjust_param(**kwargs)
        self._target_path = kwargs.get('target_path', self._target_path)
        self._random_probs = kwargs.get('random_probs', self._random_probs)
        rewards = kwargs.get('rewards')
        if (rewards is not None):
            for name in rewards:
                # Only weights that already exist may be overridden.
                if (name in self._reward_weights):
                    self._reward_weights[name] = rewards[name]

    def reset(self, start_stats):
        """Reset to the initial state, saving `start_stats`; optionally
        re-randomize the empty/solid tile probabilities."""
        super().reset(start_stats)
        if self._random_probs:
            self._prob['empty'] = self._random.random()
            self._prob['solid'] = (1 - self._prob['empty'])

    def get_stats(self, map, lenient_paths=False):
        """Return the current map stats: longest-path length and tortuosity."""
        map_locations = get_tile_locations(map, self.get_tile_types())
        # NOTE(review): `self.render_path` is not set in __init__; presumably
        # provided by the Problem base class — confirm.
        (self.tortuosity, self.path_length, self.path_coords) = calc_tortuosity(map, map_locations, ['empty'], get_path=self.render_path)
        return {'path-length': self.path_length, 'tortuosity': self.tortuosity}

    def get_reward(self, new_stats, old_stats):
        """Reward is constant 0; progress is presumably driven elsewhere."""
        return 0

    def get_episode_over(self, new_stats, old_stats):
        """Episodes never terminate early based on stats."""
        return False

    def get_debug_info(self, new_stats, old_stats):
        """Return debug information for logging.

        NOTE(review): reads 'regions' and 'F_abs', which this class's
        get_stats() does not produce — confirm against the stats provider
        actually used at runtime.
        """
        return {'regions': new_stats['regions'], 'F_abs': new_stats['F_abs'], 'path-imp': (new_stats['F_abs'] - self._start_stats['F_abs'])}

    def render(self, map):
        """Render the map as a PIL image, lazily loading tile sprites."""
        if (self._graphics == None):
            if self.GVGAI_SPRITES:
                self._graphics = {'empty': Image.open((os.path.dirname(__file__) + '/sprites/oryx/floor3.png')).convert('RGBA'), 'solid': Image.open((os.path.dirname(__file__) + '/sprites/oryx/wall3.png')).convert('RGBA'), 'path': Image.open((os.path.dirname(__file__) + '/sprites/newset/snowmanchest.png')).convert('RGBA')}
            else:
                self._graphics = {'empty': Image.open((PROB_DIR + '/common/empty.png')).convert('RGBA'), 'solid': Image.open((PROB_DIR + '/common/binary/solid.png')).convert('RGBA'), 'path': Image.open((PROB_DIR + '/common/path_g.png')).convert('RGBA')}
        return super().render(map, render_path=self.path_coords)
_module()
class DepthwiseSeparableFCNHead(FCNHead):
    """FCN head variant whose convolutions are depthwise-separable modules."""

    def __init__(self, dw_act_cfg=None, **kwargs):
        super(DepthwiseSeparableFCNHead, self).__init__(**kwargs)

        def make_conv(in_ch):
            # All convs in this head share kernel/padding/norm settings.
            return DepthwiseSeparableConvModule(in_ch, self.channels, kernel_size=self.kernel_size, padding=(self.kernel_size // 2), norm_cfg=self.norm_cfg, dw_act_cfg=dw_act_cfg)

        # Replace the base-class convs with depthwise-separable equivalents.
        self.convs[0] = make_conv(self.in_channels)
        for idx in range(1, self.num_convs):
            self.convs[idx] = make_conv(self.channels)
        if self.concat_input:
            self.conv_cat = make_conv(self.in_channels + self.channels)
class IntGELU(nn.Module):
    """Integer-only GELU approximation (I-BERT style, presumably).

    When `quant_mode` is False (or force-dequantized), falls back to the
    floating-point `nn.GELU`. Otherwise computes GELU on integer tensors via a
    second-order polynomial approximation of erf, propagating the scaling
    factor alongside the integer values.
    """

    def __init__(self, quant_mode=True, force_dequant='none'):
        super().__init__()
        self.quant_mode = quant_mode
        # Allow globally forcing this nonlinearity back to float.
        if (force_dequant in ['nonlinear', 'gelu']):
            logger.info('Force dequantize gelu')
            self.quant_mode = False
        if (not self.quant_mode):
            self.activation_fn = nn.GELU()
        self.k = 1.4142  # sqrt(2): GELU(x) = x/2 * (1 + erf(x / sqrt(2)))
        self.const = 14  # fractional bits used by the fixed-point erf below
        # Polynomial a*(x+b)^2 + c approximating erf; c is pre-divided by a.
        self.coeff = [(- 0.2888), (- 1.769), 1]
        self.coeff[2] /= self.coeff[0]

    def int_erf(self, x_int, scaling_factor):
        """Integer polynomial approximation of erf.

        Args:
            x_int: integer-valued tensor with x ≈ x_int * scaling_factor.
            scaling_factor: scale of `x_int`.

        Returns:
            (y_int, new_scaling_factor) such that y_int*new_scaling_factor ≈ erf(x).
        """
        b_int = torch.floor((self.coeff[1] / scaling_factor))
        c_int = torch.floor((self.coeff[2] / (scaling_factor ** 2)))
        sign = torch.sign(x_int)
        # erf is odd: evaluate on |x| clipped at -b (the polynomial's vertex),
        # then restore the sign.
        abs_int = torch.min(torch.abs(x_int), (- b_int))
        y_int = (sign * (((abs_int + b_int) ** 2) + c_int))
        scaling_factor = ((scaling_factor ** 2) * self.coeff[0])
        # Rescale to `const` fractional bits with a straight-through floor.
        y_int = floor_ste.apply((y_int / (2 ** self.const)))
        scaling_factor = (scaling_factor * (2 ** self.const))
        return (y_int, scaling_factor)

    def forward(self, x, scaling_factor=None):
        """Apply GELU; returns (output, output_scaling_factor).

        In float mode the scaling factor is None.
        """
        if (not self.quant_mode):
            return (self.activation_fn(x), None)
        x_int = (x / scaling_factor)
        (sigmoid_int, sigmoid_scaling_factor) = self.int_erf(x_int, (scaling_factor / self.k))
        # Integer representation of the "+1" term in (1 + erf(.)).
        shift_int = (1.0 // sigmoid_scaling_factor)
        x_int = (x_int * (sigmoid_int + shift_int))
        scaling_factor = ((scaling_factor * sigmoid_scaling_factor) / 2)
        return ((x_int * scaling_factor), scaling_factor)
_module()
class ResnetGenerator(nn.Module):
    """ResNet-style image-to-image generator (CycleGAN/Pix2Pix flavor).

    Structure: 7x7 stem conv -> 2 stride-2 downsampling convs -> `num_blocks`
    residual blocks -> 2 transposed-conv upsampling layers -> 7x7 output conv
    with Tanh activation.

    Args:
        in_channels (int): channels of input images.
        out_channels (int): channels of output images.
        base_channels (int): channels of the stem conv. Default: 64.
        norm_cfg (dict): normalization config; must contain key 'type'.
        use_dropout (bool): use dropout inside residual blocks. Default: False.
        num_blocks (int): number of residual blocks; must be >= 0. Default: 9.
        padding_mode (str): padding mode for convs. Default: 'reflect'.
        init_cfg (dict | None): init type/gain used by `init_weights`.
    """

    def __init__(self, in_channels, out_channels, base_channels=64, norm_cfg=dict(type='IN'), use_dropout=False, num_blocks=9, padding_mode='reflect', init_cfg=dict(type='normal', gain=0.02)):
        super().__init__()
        assert (num_blocks >= 0), f'Number of residual blocks must be non-negative, but got {num_blocks}.'
        # Fixed typo in the error message below ("butgot" -> "but got").
        assert isinstance(norm_cfg, dict), f"'norm_cfg' should be dict, but got {type(norm_cfg)}"
        assert ('type' in norm_cfg), "'norm_cfg' must have key 'type'"
        # InstanceNorm has no learnable shift by default, so convs keep bias.
        use_bias = (norm_cfg['type'] == 'IN')
        model = []
        model += [ConvModule(in_channels=in_channels, out_channels=base_channels, kernel_size=7, padding=3, bias=use_bias, norm_cfg=norm_cfg, padding_mode=padding_mode)]
        num_down = 2
        # Downsampling: double channels while halving spatial resolution.
        for i in range(num_down):
            multiple = (2 ** i)
            model += [ConvModule(in_channels=(base_channels * multiple), out_channels=((base_channels * multiple) * 2), kernel_size=3, stride=2, padding=1, bias=use_bias, norm_cfg=norm_cfg)]
        multiple = (2 ** num_down)
        for i in range(num_blocks):
            model += [ResidualBlockWithDropout((base_channels * multiple), padding_mode=padding_mode, norm_cfg=norm_cfg, use_dropout=use_dropout)]
        # Upsampling: transposed convs mirror the downsampling path.
        for i in range(num_down):
            multiple = (2 ** (num_down - i))
            model += [ConvModule(in_channels=(base_channels * multiple), out_channels=((base_channels * multiple) // 2), kernel_size=3, stride=2, padding=1, bias=use_bias, conv_cfg=dict(type='Deconv', output_padding=1), norm_cfg=norm_cfg)]
        model += [ConvModule(in_channels=base_channels, out_channels=out_channels, kernel_size=7, padding=3, bias=True, norm_cfg=None, act_cfg=dict(type='Tanh'), padding_mode=padding_mode)]
        self.model = nn.Sequential(*model)
        # Fall back to defaults when init_cfg is explicitly None.
        self.init_type = ('normal' if (init_cfg is None) else init_cfg.get('type', 'normal'))
        self.init_gain = (0.02 if (init_cfg is None) else init_cfg.get('gain', 0.02))

    def forward(self, x):
        """Map input images to generated images."""
        return self.model(x)

    def init_weights(self, pretrained=None, strict=True):
        """Initialize weights, optionally loading from a checkpoint path.

        Args:
            pretrained (str | None): checkpoint path, or None for random init.
            strict (bool): strictness of checkpoint key matching.

        Raises:
            TypeError: if `pretrained` is neither a str nor None.
        """
        if isinstance(pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=strict, logger=logger)
        elif (pretrained is None):
            generation_init_weights(self, init_type=self.init_type, init_gain=self.init_gain)
        else:
            raise TypeError(f"'pretrained' must be a str or None. But received {type(pretrained)}.")
class ExperimentalFeatureConfigNode(TreeConfigNode):
    """Config-tree node keyed by an experimental-feature name."""

    def init2(self, node_name):
        # Remember which experimental feature this subtree configures.
        self.props['experimental_feature'] = node_name

    def child_constructor(self):
        """Dispatch to the child node class for the stored feature name."""
        feature = self.find_prop('experimental_feature')
        dispatch = {
            'asan': AsanConfigNode,
            'xla': XlaConfigNode,
            'vulkan': VulkanConfigNode,
            'parallel_tbb': ParallelTBBConfigNode,
            'parallel_native': ParallelNativeConfigNode,
            'onnx': ONNXConfigNode,
            'libtorch': LibTorchConfigNode,
            'important': ImportantConfigNode,
            'build_only': BuildOnlyConfigNode,
            'cuda_gcc_override': CudaGccOverrideConfigNode,
            'coverage': CoverageConfigNode,
            'pure_torch': PureTorchConfigNode,
        }
        return dispatch[feature]
def get_grid_search_configs(config, excluded_keys=None):
    """Expand list-valued entries of a (nested) config into a grid of configs.

    Args:
        config: nested mapping; list values represent grid axes.
        excluded_keys: flattened key paths whose list value is kept as one
            '+'-joined option instead of being expanded (split back into a
            list per experiment). Defaults to no exclusions — replaces the
            former mutable default argument (`[]`), which is shared across
            calls in Python.

    Returns:
        tuple: (experiments, hyper_params) where `experiments` are the
        unflattened config dicts of the cartesian product and `hyper_params`
        lists the key paths that actually vary (more than one option).
    """
    if excluded_keys is None:
        excluded_keys = []

    def bool_to_string(x: Union[(List[bool], bool)]) -> Union[(List[str], str)]:
        # Booleans are stringified so they survive the product/join round trip.
        if isinstance(x, bool):
            return [str(x)]
        for (i, j) in enumerate(x):
            x[i] = str(j)
        return x
    flattened_config_dict = flatten(config, reducer='path')
    hyper_params = []
    for (k, v) in flattened_config_dict.items():
        if isinstance(v, list):
            if (k in excluded_keys):
                # Collapse to a single joined option; split back per-experiment below.
                flattened_config_dict[k] = ['+'.join(v)]
            elif (len(v) > 1):
                hyper_params += [k]
            # Guard empty lists before peeking at v[0] (was an IndexError).
            if (isinstance(v, list) and v and isinstance(v[0], bool)):
                flattened_config_dict[k] = bool_to_string(v)
        if (not isinstance(v, list)):
            if isinstance(v, bool):
                flattened_config_dict[k] = bool_to_string(v)
            else:
                flattened_config_dict[k] = [v]
    (keys, values) = zip(*flattened_config_dict.items())
    # Cartesian product over all axes gives one dict per experiment.
    experiments = [dict(zip(keys, v)) for v in itertools.product(*values)]
    for (exp_id, exp) in enumerate(experiments):
        for param in excluded_keys:
            exp[param] = exp[param].strip().split('+')
        for (param_name, param_value) in exp.items():
            # Convert stringified booleans back to real booleans.
            if (isinstance(param_value, list) and param_value and (param_value[0] in ['True', 'False'])):
                exp[param_name] = [(True if (x == 'True') else False) for x in param_value]
            if (param_value in ['True', 'False']):
                if (param_value == 'True'):
                    exp[param_name] = True
                else:
                    exp[param_name] = False
        experiments[exp_id] = unflatten(exp, splitter='path')
    return (experiments, hyper_params)
def test_len():
    """len() on an awkward high-level Array reports its outer length."""
    values = [1.1, 2.2, 3.3, 4.4, 5.5]
    array = ak.highlevel.Array(values)

    def f1(x):
        return len(x)

    assert f1(array) == 5
class CircleLoss(GenericPairLoss):
    """Circle loss over cosine-similarity pairs (Sun et al., presumably —
    the O_p/O_n/delta_p/delta_n constants match that formulation).

    Each similarity is weighted by its detached distance from its optimum,
    then softplus(logsumexp(pos) + logsumexp(neg)) is taken per anchor row.
    """

    def __init__(self, m=0.4, gamma=80, **kwargs):
        """
        Args:
            m: relaxation margin.
            gamma: scale factor applied inside the exponentials.
        """
        super().__init__(mat_based_loss=True, **kwargs)
        # The loss is defined on similarities, so only CosineSimilarity is valid.
        c_f.assert_distance_type(self, CosineSimilarity)
        self.m = m
        self.gamma = gamma
        self.soft_plus = torch.nn.Softplus(beta=1)
        self.op = (1 + self.m)       # optimum for positive-pair similarity
        self.on = (- self.m)         # optimum for negative-pair similarity
        self.delta_p = (1 - self.m)  # decision margin for positives
        self.delta_n = self.m        # decision margin for negatives
        self.add_to_recordable_attributes(list_of_names=['m', 'gamma', 'op', 'on', 'delta_p', 'delta_n'], is_stat=False)

    def _compute_loss(self, mat, pos_mask, neg_mask):
        """Compute per-element losses from the similarity matrix and pair masks."""
        pos_mask_bool = pos_mask.bool()
        neg_mask_bool = neg_mask.bool()
        anchor_positive = mat[pos_mask_bool]
        anchor_negative = mat[neg_mask_bool]
        new_mat = torch.zeros_like(mat)
        # The detached relu terms act as self-paced weights; gradients flow
        # only through the (similarity - delta) factors.
        new_mat[pos_mask_bool] = (((- self.gamma) * torch.relu((self.op - anchor_positive.detach()))) * (anchor_positive - self.delta_p))
        new_mat[neg_mask_bool] = ((self.gamma * torch.relu((anchor_negative.detach() - self.on))) * (anchor_negative - self.delta_n))
        logsumexp_pos = lmu.logsumexp(new_mat, keep_mask=pos_mask_bool, add_one=False, dim=1)
        logsumexp_neg = lmu.logsumexp(new_mat, keep_mask=neg_mask_bool, add_one=False, dim=1)
        losses = self.soft_plus((logsumexp_pos + logsumexp_neg))
        # Zero out anchors that lack either a positive or a negative pair.
        zero_rows = torch.where(((torch.sum(pos_mask, dim=1) == 0) | (torch.sum(neg_mask, dim=1) == 0)))[0]
        final_mask = torch.ones_like(losses)
        final_mask[zero_rows] = 0
        losses = (losses * final_mask)
        return {'loss': {'losses': losses, 'indices': c_f.torch_arange_from_size(new_mat), 'reduction_type': 'element'}}

    def get_default_reducer(self):
        """Average only over anchors with a nonzero loss."""
        return AvgNonZeroReducer()

    def get_default_distance(self):
        """Circle loss requires cosine similarity (see assert in __init__)."""
        return CosineSimilarity()
def register_Ns3QueueDiscContainer_methods(root_module, cls):
    """Register constructors and methods of ns3::QueueDiscContainer on `cls`."""
    # Constructors: copy, default, and from a single queue-disc pointer.
    cls.add_constructor([param('ns3::QueueDiscContainer const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ptr< ns3::QueueDisc >', 'qDisc')])
    # Add(...) overloads: whole container, or a single queue disc.
    cls.add_method('Add', 'void', [param('ns3::QueueDiscContainer', 'other')])
    cls.add_method('Add', 'void', [param('ns3::Ptr< ns3::QueueDisc >', 'qDisc')])
    # Const accessors, registered data-driven in the original order.
    const_methods = (
        ('Begin', 'ns3::QueueDiscContainer::ConstIterator', []),
        ('End', 'ns3::QueueDiscContainer::ConstIterator', []),
        ('Get', 'ns3::Ptr< ns3::QueueDisc >', [param('uint32_t', 'i')]),
        ('GetN', 'uint32_t', []),
    )
    for method_name, retval, arg_list in const_methods:
        cls.add_method(method_name, retval, arg_list, is_const=True)
    return
class Compose_Joint(object):
    """Compose transforms that operate jointly on an (image, target) pair."""

    def __init__(self, transforms):
        # Sequence of callables, each mapping (img, target) -> (img, target).
        self.transforms = transforms

    def __call__(self, img, target):
        for transform in self.transforms:
            img, target = transform(img, target)
        return (img, target)

    def __repr__(self):
        body = ''.join('\n' + '    {0}'.format(t) for t in self.transforms)
        return self.__class__.__name__ + '(' + body + '\n)'
def mlir_tasklet_recursion(A: dace.int32[2], B: dace.int32[1]):
    # NOTE(review): DaCe program body — parsed at the AST level by DaCe, not
    # executed as plain Python. The `<<` / `>>` expressions below are DaCe
    # memlet annotations (read from A[0], write to B[0]) and must not be
    # "fixed" into ordinary shift operators.
    ('MLIR')  # presumably the tasklet language marker/code — confirm against the full test file
    def fib():
        (a << A[0])
        (b >> B[0])
def assert_strict_weak_order(a, b, c, cmp_func):
    """Verify that `cmp_func` induces a strict weak order on {a, b, c}.

    `cmp_func(x, y) == 1` is read as "x < y". Raises ValueError with a
    diagnostic message when irreflexivity, asymmetry, transitivity of <, or
    transitivity of incomparability is violated.
    """
    from sage.matrix.constructor import matrix
    from sage.combinat.permutation import Permutations
    elems = (a, b, c)
    # rel[i, j] holds whether elems[i] < elems[j] under cmp_func.
    rel = matrix(3, 3)
    for row in range(3):
        for col in range(3):
            rel[(row, col)] = (cmp_func(elems[row], elems[col]) == 1)
    msg = 'the binary relation failed to be a strict weak order on the elements \n'
    msg += ' a = {}\n b = {}\n c = {}\n'.format(a, b, c)
    msg += str(rel)
    # Irreflexivity and asymmetry.
    for i in range(3):
        if rel[(i, i)]:
            raise ValueError(msg)
        for j in range(i):
            if rel[(i, j)] and rel[(j, i)]:
                raise ValueError(msg)

    def incomparable(i, j):
        # Neither direction of the relation holds.
        return not (rel[(i, j)] or rel[(j, i)])

    # Transitivity of < and of incomparability, over all index orderings.
    for (i, j, k) in Permutations([0, 1, 2]):
        if rel[(i, j)] and rel[(j, k)] and (not rel[(i, k)]):
            raise ValueError(msg)
        if incomparable(i, j) and incomparable(j, k) and (not incomparable(i, k)):
            raise ValueError(msg)
def main(cfg):
    """Set up and run GLoT training from a config object.

    Seeds RNGs, creates loggers and a TensorBoard writer, builds data
    loaders, the GLoT generator, the motion discriminator, their optimizers
    and LR schedulers, then hands everything to Trainer(...).fit().
    """
    # Seed every RNG source for reproducibility; skipped when SEED_VALUE < 0.
    if (cfg.SEED_VALUE >= 0):
        print(f'Seed value for the experiment {cfg.SEED_VALUE}')
        os.environ['PYTHONHASHSEED'] = str(cfg.SEED_VALUE)
        random.seed(cfg.SEED_VALUE)
        torch.manual_seed(cfg.SEED_VALUE)
        np.random.seed(cfg.SEED_VALUE)
        torch.cuda.manual_seed(cfg.SEED_VALUE)
        torch.cuda.manual_seed_all(cfg.SEED_VALUE)
    logger = create_logger(cfg.LOGDIR, phase='train')
    logger.info(f'GPU name -> {torch.cuda.get_device_name()}')
    logger.info(f"GPU feat -> {torch.cuda.get_device_properties('cuda')}")
    logger.info(pprint.pformat(cfg))
    # cuDNN behavior from config (benchmark speed vs. determinism trade-off).
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED
    writer = SummaryWriter(log_dir=cfg.LOGDIR)
    writer.add_text('config', pprint.pformat(cfg), 0)
    data_loaders = get_data_loaders(cfg)
    # Combined generator loss: 2D/3D keypoints, SMPL pose/shape, adversarial
    # motion term, and velocity/acceleration consistency.
    loss = GLoTLoss(e_loss_weight=cfg.LOSS.KP_2D_W, e_3d_loss_weight=cfg.LOSS.KP_3D_W, e_pose_loss_weight=cfg.LOSS.POSE_W, e_shape_loss_weight=cfg.LOSS.SHAPE_W, d_motion_loss_weight=cfg.LOSS.D_MOTION_LOSS_W, vel_or_accel_2d_weight=cfg.LOSS.vel_or_accel_2d_weight, vel_or_accel_3d_weight=cfg.LOSS.vel_or_accel_3d_weight, use_accel=cfg.LOSS.use_accel)
    # Dynamically import the generator module named in the config.
    model_module = importlib.import_module(('.%s' % cfg.MODEL.MODEL_NAME), 'lib.models')
    generator = model_module.GLoT(seqlen=cfg.DATASET.SEQLEN, batch_size=cfg.TRAIN.BATCH_SIZE, n_layers=cfg.MODEL.n_layers, d_model=cfg.MODEL.d_model, num_head=cfg.MODEL.num_head, dropout=cfg.MODEL.dropout, drop_path_r=cfg.MODEL.drop_path_r, atten_drop=cfg.MODEL.atten_drop, mask_ratio=cfg.MODEL.mask_ratio, short_n_layers=cfg.MODEL.short_n_layers, short_d_model=cfg.MODEL.short_d_model, short_num_head=cfg.MODEL.short_num_head, short_dropout=cfg.MODEL.short_dropout, short_drop_path_r=cfg.MODEL.short_drop_path_r, short_atten_drop=cfg.MODEL.short_atten_drop, stride_short=cfg.MODEL.stride_short, drop_reg_short=cfg.MODEL.drop_reg_short, pretrained=cfg.TRAIN.PRETRAINED_REGRESSOR).to(cfg.DEVICE)
    logger.info(f'net: {generator}')
    net_params = sum(map((lambda x: x.numel()), generator.parameters()))
    logger.info(f'params num: {net_params}')
    gen_optimizer = get_optimizer(model=generator, optim_type=cfg.TRAIN.GEN_OPTIM, lr=cfg.TRAIN.GEN_LR, weight_decay=cfg.TRAIN.GEN_WD, momentum=cfg.TRAIN.GEN_MOMENTUM)
    # Adversarial motion discriminator; attention settings only apply when
    # FEATURE_POOL == 'attention'.
    motion_discriminator = MotionDiscriminator(rnn_size=cfg.TRAIN.MOT_DISCR.HIDDEN_SIZE, input_size=69, num_layers=cfg.TRAIN.MOT_DISCR.NUM_LAYERS, output_size=1, feature_pool=cfg.TRAIN.MOT_DISCR.FEATURE_POOL, attention_size=(None if (cfg.TRAIN.MOT_DISCR.FEATURE_POOL != 'attention') else cfg.TRAIN.MOT_DISCR.ATT.SIZE), attention_layers=(None if (cfg.TRAIN.MOT_DISCR.FEATURE_POOL != 'attention') else cfg.TRAIN.MOT_DISCR.ATT.LAYERS), attention_dropout=(None if (cfg.TRAIN.MOT_DISCR.FEATURE_POOL != 'attention') else cfg.TRAIN.MOT_DISCR.ATT.DROPOUT)).to(cfg.DEVICE)
    dis_motion_optimizer = get_optimizer(model=motion_discriminator, optim_type=cfg.TRAIN.MOT_DISCR.OPTIM, lr=cfg.TRAIN.MOT_DISCR.LR, weight_decay=cfg.TRAIN.MOT_DISCR.WD, momentum=cfg.TRAIN.MOT_DISCR.MOMENTUM)
    # Plateau schedule for the discriminator, cosine warmup restarts for the generator.
    motion_lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(dis_motion_optimizer, mode='min', factor=0.1, patience=cfg.TRAIN.LR_PATIENCE, verbose=True)
    lr_scheduler = CosineAnnealingWarmupRestarts(gen_optimizer, first_cycle_steps=cfg.TRAIN.END_EPOCH, max_lr=cfg.TRAIN.GEN_LR, min_lr=(cfg.TRAIN.GEN_LR * 0.1), warmup_steps=cfg.TRAIN.LR_PATIENCE)
    Trainer(cfg=cfg, data_loaders=data_loaders, generator=generator, motion_discriminator=motion_discriminator, criterion=loss, dis_motion_optimizer=dis_motion_optimizer, gen_optimizer=gen_optimizer, writer=writer, lr_scheduler=lr_scheduler, motion_lr_scheduler=motion_lr_scheduler, val_epoch=cfg.TRAIN.val_epoch).fit()
.parametrize('dtype', [ti.i32, ti.i64])
_utils.test(arch=supported_archs_taichi_ndarray)
def test_default_ip_ndarray(dtype):
    """Vector ndarrays built with plain `int` pick up the default integer precision."""
    current_arch = ti.lang.impl.current_cfg().arch
    ti.reset()
    ti.init(arch=current_arch, default_ip=dtype)
    vec = ti.Vector.ndarray(2, int, ())
    assert vec.dtype == impl.get_runtime().default_ip
def bbox(img):
    """Return (rmin, rmax, cmin, cmax): the inclusive bounding box of the
    nonzero region of a 2D array. Assumes at least one nonzero entry."""
    row_hits = np.any(img, axis=1)
    col_hits = np.any(img, axis=0)
    row_idx = np.where(row_hits)[0]
    col_idx = np.where(col_hits)[0]
    return (row_idx[0], row_idx[-1], col_idx[0], col_idx[-1])
def test_clean_custom(df_text: pd.DataFrame) -> None:
    """Check clean_text() against a hand-built operator pipeline on the
    `text` column of the fixture dataframe."""
    pipeline: List[Dict[(str, Any)]] = [{'operator': 'lowercase'}, {'operator': 'remove_html'}, {'operator': 'replace_bracketed', 'parameters': {'brackets': 'square', 'value': '**spoilers**'}}, {'operator': 'replace_bracketed', 'parameters': {'brackets': 'curly', 'value': 'in every aspect'}}, {'operator': 'remove_whitespace'}]
    df_clean = clean_text(df_text, 'text', pipeline=pipeline)
    df_check = df_text.copy()
    # NOTE(review): the literal below appears corrupted — the final string
    # starting "come join..." is never closed, so `np.nan, 'null'` fall inside
    # it and the line does not parse. Recover the expected values from the
    # original fixture before relying on this test.
    df_check['text'] = ["'zzzzz!' if imdb would allow one-word reviews, that's what mine would be.", 'the cast played shakespeare.shakespeare lost.', 'simon of the desert (simon del desierto) is a 1965 film directed by luis bunuel.', "**spoilers** i don't think i've seen a film this bad before in every aspect", 'cannes 1968: a video essay', 'recap thread for excellent panel, hosted by with _nyc and ', '#gameofthrones: season 8 is #rotten at 54% on the #tomatometer. but does it deserve to be?', "come join and share your thoughts on this week's episode: '123', np.nan, 'null']
    assert df_check.equals(df_clean)
def test_superb_sd():
    """End-to-end smoke test of the SUPERB speaker-diarization problem on a
    tiny synthetic corpus (5 pseudo wavs), on CPU, with fbank features and a
    4-step training schedule."""
    with tempfile.TemporaryDirectory() as tempdir:
        secs = [10, 2, 1, 8, 5]
        with pseudo_audio(secs) as (wav_paths, num_samples):

            class TestSD(SuperbSD):
                # Subclass overriding data preparation to emit a synthetic corpus.

                def default_config(self) -> dict:
                    # Drop the real prepare_data settings; our override ignores them.
                    config = super().default_config()
                    config['prepare_data'] = {}
                    return config

                def prepare_data(self, prepare_data: dict, target_dir: str, cache_dir: str, get_path_only=False):
                    """Write the same tiny dataframe as train/valid/test CSVs."""
                    record_id = [Path(path).stem for path in wav_paths]
                    durations = secs
                    speaker = ['a', 'b', 'a', 'a', 'b']
                    utt_id = record_id
                    start_secs = [0.0, 0.1, 0.2, 0.3, 0.0]
                    end_secs = [5.2, 1.0, 0.3, 5.4, 4.9]
                    df = pd.DataFrame(data={'record_id': record_id, 'wav_path': wav_paths, 'duration': durations, 'utt_id': utt_id, 'speaker': speaker, 'start_sec': start_secs, 'end_sec': end_secs})
                    train_csv = (Path(target_dir) / 'train.csv')
                    valid_csv = (Path(target_dir) / 'valid.csv')
                    test_csv = (Path(target_dir) / 'test.csv')
                    df.to_csv(train_csv)
                    df.to_csv(valid_csv)
                    df.to_csv(test_csv)
                    return (train_csv, valid_csv, [test_csv])
            problem = TestSD()
            config = problem.default_config()
            # Minimal, fast run configuration: 4 total steps, CPU-only, fbank upstream.
            config['target_dir'] = tempdir
            config['device'] = 'cpu'
            config['train']['total_steps'] = 4
            config['train']['log_step'] = 1
            config['train']['eval_step'] = 2
            config['train']['save_step'] = 2
            config['eval_batch'] = 2
            config['build_upstream']['name'] = 'fbank'
            problem.run(**config)
class RMSNorm(nn.Module):
    """Root-mean-square layer normalization with a learned per-dimension gain.

    Normalizes along the last dimension by ||x|| * dim**-0.5 (clamped at
    `eps`), then scales by the learned parameter `g`.
    """

    def __init__(self, dim: int, eps: float=1e-08) -> None:
        super().__init__()
        self.scale = dim ** (-0.5)
        self.eps = eps
        self.g = nn.Parameter(torch.ones(dim))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        rms = torch.norm(x, dim=(-1), keepdim=True) * self.scale
        normalized = x / rms.clamp(min=self.eps)
        return normalized * self.g
def render_dt_cat(itmdt: Intermediate, cfg: Config) -> Dict[(str, Any)]:
    """Render datetime-by-categorical plots (line and/or stacked) into a
    layout dict of panel children, titles, and the container width."""
    width = 972 if cfg.plot.width is None else cfg.plot.width
    height = 400 if cfg.plot.height is None else cfg.plot.height
    tabs: List[Panel] = []
    if cfg.line.enable:
        data, grp_cnt_stats, timeunit = itmdt['linedata']
        tabs.append(dt_multiline_viz(data, itmdt['x'], itmdt['y'], timeunit, cfg.line.yscale, width, height, grp_cnt_stats))
    if cfg.stacked.enable:
        df, grp_cnt_stats, timeunit = itmdt['stackdata']
        tabs.append(stacked_viz(df, itmdt['x'], itmdt['y'], grp_cnt_stats, width, height, timeunit))
    return {'layout': [tab.child for tab in tabs], 'meta': [tab.title for tab in tabs], 'container_width': width}
def _fix_order():
    """Pin CUDA device ordering to PCI bus order unless already configured."""
    os.environ.setdefault('CUDA_DEVICE_ORDER', 'PCI_BUS_ID')
class TestLogParserConfig():
    """Unit tests for LogParserConfig.from_dict."""

    def test_from_dict(self):
        """A dict with no algorithm params yields drain with empty custom config."""
        raw = {'parsing_algorithm': 'drain', 'parsing_algo_params': None}
        parsed = LogParserConfig.from_dict(raw)
        print(parsed)
        assert parsed.parsing_algorithm == 'drain', 'Algorithm is not the target one'
        assert parsed.custom_config is None, 'Custom config is not None'

    def test_from_dict_with_drain_params(self):
        """Drain parameters are parsed into a DrainParams instance."""
        raw = {'parsing_algorithm': 'drain', 'parsing_algo_params': {'sim_th': 0.2, 'extra_delimiters': [',']}}
        parsed = LogParserConfig.from_dict(raw)
        print(parsed)
        assert parsed.parsing_algorithm == 'drain', 'Algorithm is not the target one'
        assert isinstance(parsed.parsing_algo_params, DrainParams), 'Params is not instance of DrainParams'
        assert parsed.custom_config is None, 'Custom config is not None'
class GradientPTQBaseTest(BaseKerasFeatureNetworkTest):
    """Base test for Keras gradient-based PTQ (GPTQ): quantizes the same float
    model with plain PTQ and with GPTQ, then lets subclasses compare them."""

    def __init__(self, unit_test, quant_method=QuantizationMethod.SYMMETRIC, rounding_type=RoundingType.STE, per_channel=True, input_shape=(1, 16, 16, 3), hessian_weights=True, log_norm_weights=True, scaled_log_norm=False, quantization_parameter_learning=True):
        super().__init__(unit_test, input_shape=input_shape, experimental_exporter=True)
        self.quant_method = quant_method
        self.rounding_type = rounding_type
        self.per_channel = per_channel
        self.hessian_weights = hessian_weights
        self.log_norm_weights = log_norm_weights
        self.scaled_log_norm = scaled_log_norm
        # Quantizer parameter overrides depend on the rounding scheme.
        if (rounding_type == RoundingType.SoftQuantizer):
            self.override_params = {QUANT_PARAM_LEARNING_STR: quantization_parameter_learning}
        elif (rounding_type == RoundingType.STE):
            self.override_params = {MAX_LSB_STR: DefaultDict({}, 1)}
        else:
            self.override_params = None

    def get_tpc(self):
        """Target platform capabilities: 16-bit weights/activations for this test."""
        return get_tpc('gptq_test', 16, 16, self.quant_method)

    def get_quantization_config(self):
        """Quantization config with no clipping and no bias correction."""
        return mct.core.QuantizationConfig(activation_error_method=mct.core.QuantizationErrorMethod.NOCLIPPING, weights_error_method=mct.core.QuantizationErrorMethod.NOCLIPPING, relu_bound_to_power_of_2=True, weights_bias_correction=False, weights_per_channel_threshold=self.per_channel)

    def get_gptq_config(self):
        """GPTQ config: 5 iterations, Adam optimizers, per-tensor MSE loss."""
        return GradientPTQConfig(5, optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001), optimizer_rest=tf.keras.optimizers.Adam(learning_rate=0.0001), loss=multiple_tensors_mse_loss, train_bias=True, rounding_type=self.rounding_type, use_hessian_based_weights=self.hessian_weights, hessian_weights_config=GPTQHessianScoresConfig(log_norm=self.log_norm_weights, scale_log_norm=self.scaled_log_norm), gptq_quantizer_params_override=self.override_params)

    def create_networks(self):
        """Build the float model under test from the configured input shape."""
        in_shape = self.get_input_shapes()[0][1:]
        return build_model(in_shape)

    def compare(self, ptq_model, model_float, input_x=None, quantization_info: UserInformation=None):
        """Subclasses must implement the PTQ-vs-GPTQ comparison."""
        raise NotImplementedError(f'{self.__class__} did not implement compare')

    def run_test(self):
        """Quantize the float model with PTQ and GPTQ, then call compare()."""
        x = self.generate_inputs()

        def representative_data_gen():
            # Yields the same calibration batch num_calibration_iter times.
            for _ in range(self.num_calibration_iter):
                (yield x)
        model_float = self.create_networks()
        tpc = self.get_tpc()
        core_config = self.get_core_config()
        (ptq_model, quantization_info) = mct.ptq.keras_post_training_quantization_experimental(model_float, representative_data_gen, target_kpi=self.get_kpi(), core_config=core_config, target_platform_capabilities=tpc, new_experimental_exporter=self.experimental_exporter)
        (ptq_gptq_model, quantization_info) = mct.gptq.keras_gradient_post_training_quantization_experimental(model_float, representative_data_gen, gptq_config=GradientPTQConfigV2.from_v1(self.num_calibration_iter, self.get_gptq_config()), target_kpi=self.get_kpi(), core_config=core_config, target_platform_capabilities=tpc, new_experimental_exporter=self.experimental_exporter)
        # NOTE(review): `compare` receives (ptq_model, ptq_gptq_model) — the
        # second argument is named `model_float` in the signature but here
        # carries the GPTQ model, not the float one.
        self.compare(ptq_model, ptq_gptq_model, input_x=x, quantization_info=quantization_info)
def exec_file(program_name, args=()):
    """Run `program_name` (resolved to an absolute path) with `args`, without a shell."""
    cmd = [os.path.abspath(program_name)]
    cmd.extend(args)
    runcmd(cmd, shell=False)
def _raise_for_params(params, owner, method):
    """Reject extra metadata-routing kwargs when routing is not enabled.

    Raises ValueError naming the calling class (and method, if given) when
    `params` is non-empty while metadata routing is disabled.
    """
    if method:
        caller = f'{owner.__class__.__name__}.{method}'
    else:
        caller = owner.__class__.__name__
    if (not _routing_enabled()) and params:
        raise ValueError(f'Passing extra keyword arguments to {caller} is only supported if enable_metadata_routing=True, which you can set using `sklearn.set_config`. See the User Guide < for more details. Extra parameters passed are: {set(params)}')
def complex_init(in_features, out_features, kernel_size=None, criterion='glorot'):
    """Initialize complex-valued weights as (real, imag) arrays.

    Draws a Rayleigh-distributed modulus (scale set by `criterion`: 'glorot'
    uses 1/(fan_in+fan_out), anything else 1/fan_in) and a uniform phase in
    [-pi, pi), then returns (modulus*cos(phase), modulus*sin(phase)).
    """
    if kernel_size is None:
        # Dense layer: fans are just the feature counts.
        fan_in, fan_out = in_features, out_features
    else:
        receptive_field = np.prod(kernel_size)
        fan_in = in_features * receptive_field
        fan_out = out_features * receptive_field

    scale = (1.0 / (fan_in + fan_out)) if criterion == 'glorot' else (1.0 / fan_in)

    if kernel_size is None:
        shape = (in_features, out_features)
    elif type(kernel_size) is int:
        shape = (out_features, in_features) + (kernel_size,)
    else:
        shape = (out_features, in_features) + tuple(kernel_size)

    modulus = np.random.rayleigh(scale=scale, size=shape)
    phase = np.random.uniform(-np.pi, np.pi, shape)
    return (modulus * np.cos(phase), modulus * np.sin(phase))
class BertLayer(metaclass=DummyObject):
    """Auto-generated placeholder used when the torch backend is unavailable;
    instantiation raises via `requires_backends`."""
    # Backends that must be importable for the real class to be usable.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def probe_description(name, ctx=None):
    """Return the description string of the named Z3 probe (default context if None)."""
    resolved_ctx = _get_ctx(ctx)
    return Z3_probe_get_descr(resolved_ctx.ref(), name)
class SDECData():
def __init__(self, last_interaction_type, last_line_interaction_in_id, last_line_interaction_out_id, last_line_interaction_in_nu, lines_df, packet_nus, packet_energies, r_inner, spectrum_delta_frequency, spectrum_frequency_bins, spectrum_luminosity_density_lambda, spectrum_wavelength, t_inner, time_of_simulation):
self.packets_df = pd.DataFrame({'nus': packet_nus, 'lambdas': packet_nus.to('angstrom', u.spectral()), 'energies': packet_energies, 'last_interaction_type': last_interaction_type, 'last_line_interaction_out_id': last_line_interaction_out_id, 'last_line_interaction_in_id': last_line_interaction_in_id, 'last_line_interaction_in_nu': last_line_interaction_in_nu})
self.lines_df = lines_df
self.r_inner = r_inner
self.spectrum_delta_frequency = spectrum_delta_frequency
self.spectrum_frequency_bins = spectrum_frequency_bins
self.spectrum_frequency = spectrum_frequency_bins[:(- 1)]
self.spectrum_luminosity_density_lambda = spectrum_luminosity_density_lambda
self.spectrum_wavelength = spectrum_wavelength
self.t_inner = t_inner
self.time_of_simulation = time_of_simulation
line_mask = ((self.packets_df['last_interaction_type'] > (- 1)) & (self.packets_df['last_line_interaction_in_id'] > (- 1)))
self.packets_df_line_interaction = self.packets_df.loc[line_mask].copy()
self.packets_df_line_interaction['last_line_interaction_atom'] = self.lines_df['atomic_number'].iloc[self.packets_df_line_interaction['last_line_interaction_out_id']].to_numpy()
self.packets_df_line_interaction['last_line_interaction_species'] = ((self.lines_df['atomic_number'].iloc[self.packets_df_line_interaction['last_line_interaction_out_id']].to_numpy() * 100) + self.lines_df['ion_number'].iloc[self.packets_df_line_interaction['last_line_interaction_out_id']].to_numpy())
def from_simulation(cls, sim, packets_mode):
lines_df = sim.plasma.atomic_data.lines.reset_index().set_index('line_id')
r_inner = sim.simulation_state.r_inner
t_inner = sim.simulation_state.t_inner
time_of_simulation = sim.transport.time_of_simulation
if (packets_mode == 'virtual'):
return cls(last_interaction_type=sim.transport.virt_packet_last_interaction_type, last_line_interaction_in_id=sim.transport.virt_packet_last_line_interaction_in_id, last_line_interaction_out_id=sim.transport.virt_packet_last_line_interaction_out_id, last_line_interaction_in_nu=sim.transport.virt_packet_last_interaction_in_nu, lines_df=lines_df, packet_nus=u.Quantity(sim.transport.virt_packet_nus, 'Hz'), packet_energies=u.Quantity(sim.transport.virt_packet_energies, 'erg'), r_inner=r_inner, spectrum_delta_frequency=sim.transport.spectrum_virtual.delta_frequency, spectrum_frequency_bins=sim.transport.spectrum_virtual._frequency, spectrum_luminosity_density_lambda=sim.transport.spectrum_virtual.luminosity_density_lambda, spectrum_wavelength=sim.transport.spectrum_virtual.wavelength, t_inner=t_inner, time_of_simulation=time_of_simulation)
elif (packets_mode == 'real'):
return cls(last_interaction_type=sim.transport.last_interaction_type[sim.transport.emitted_packet_mask], last_line_interaction_in_id=sim.transport.last_line_interaction_in_id[sim.transport.emitted_packet_mask], last_line_interaction_out_id=sim.transport.last_line_interaction_out_id[sim.transport.emitted_packet_mask], last_line_interaction_in_nu=sim.transport.last_interaction_in_nu[sim.transport.emitted_packet_mask], lines_df=lines_df, packet_nus=sim.transport.output_nu[sim.transport.emitted_packet_mask], packet_energies=sim.transport.output_energy[sim.transport.emitted_packet_mask], r_inner=r_inner, spectrum_delta_frequency=sim.transport.spectrum.delta_frequency, spectrum_frequency_bins=sim.transport.spectrum._frequency, spectrum_luminosity_density_lambda=sim.transport.spectrum.luminosity_density_lambda, spectrum_wavelength=sim.transport.spectrum.wavelength, t_inner=t_inner, time_of_simulation=time_of_simulation)
else:
raise ValueError("Invalid value passed to packets_mode. Only allowed values are 'virtual' or 'real'")
def from_hdf(cls, hdf_fpath, packets_mode):
    """Build an instance from a TARDIS simulation HDF store.

    NOTE(review): takes ``cls`` as first parameter — presumably decorated with
    ``@classmethod`` just above this view; confirm in the full file.

    Parameters
    ----------
    hdf_fpath : str
        Path to the HDF file written by the simulation.
    packets_mode : str
        'virtual' to load virtual-packet data, 'real' to load emitted real packets.

    Raises
    ------
    ValueError
        If ``packets_mode`` is neither 'virtual' nor 'real'.
    """
    with pd.HDFStore(hdf_fpath, 'r') as hdf:
        # Quantities common to both packet modes.
        lines_df = hdf['/simulation/plasma/lines'].reset_index().set_index('line_id')
        r_inner = u.Quantity(hdf['/simulation/simulation_state/r_inner'].to_numpy(), 'cm')
        t_inner = u.Quantity(hdf['/simulation/simulation_state/scalars'].t_inner, 'K')
        time_of_simulation = u.Quantity(hdf['/simulation/transport/scalars'].time_of_simulation, 's')
        if (packets_mode == 'virtual'):
            # Virtual packets are stored unmasked; units are attached here from the known HDF layout.
            return cls(last_interaction_type=hdf['/simulation/transport/virt_packet_last_interaction_type'], last_line_interaction_in_id=hdf['/simulation/transport/virt_packet_last_line_interaction_in_id'], last_line_interaction_out_id=hdf['/simulation/transport/virt_packet_last_line_interaction_out_id'], last_line_interaction_in_nu=u.Quantity(hdf['/simulation/transport/virt_packet_last_interaction_in_nu'].to_numpy(), 'Hz'), lines_df=lines_df, packet_nus=u.Quantity(hdf['/simulation/transport/virt_packet_nus'].to_numpy(), 'Hz'), packet_energies=u.Quantity(hdf['/simulation/transport/virt_packet_energies'].to_numpy(), 'erg'), r_inner=r_inner, spectrum_delta_frequency=u.Quantity(hdf['/simulation/transport/spectrum_virtual/scalars'].delta_frequency, 'Hz'), spectrum_frequency_bins=u.Quantity(hdf['/simulation/transport/spectrum_virtual/_frequency'].to_numpy(), 'Hz'), spectrum_luminosity_density_lambda=u.Quantity(hdf['/simulation/transport/spectrum_virtual/luminosity_density_lambda'].to_numpy(), 'erg / s cm').to('erg / s AA'), spectrum_wavelength=u.Quantity(hdf['/simulation/transport/spectrum_virtual/wavelength'].to_numpy(), 'cm').to('AA'), t_inner=t_inner, time_of_simulation=time_of_simulation)
        elif (packets_mode == 'real'):
            # Real packets are filtered to those that actually escaped (emitted_packet_mask).
            emitted_packet_mask = hdf['/simulation/transport/emitted_packet_mask'].to_numpy()
            return cls(last_interaction_type=hdf['/simulation/transport/last_interaction_type'].to_numpy()[emitted_packet_mask], last_line_interaction_in_id=hdf['/simulation/transport/last_line_interaction_in_id'].to_numpy()[emitted_packet_mask], last_line_interaction_out_id=hdf['/simulation/transport/last_line_interaction_out_id'].to_numpy()[emitted_packet_mask], last_line_interaction_in_nu=u.Quantity(hdf['/simulation/transport/last_interaction_in_nu'].to_numpy()[emitted_packet_mask], 'Hz'), lines_df=lines_df, packet_nus=u.Quantity(hdf['/simulation/transport/output_nu'].to_numpy()[emitted_packet_mask], 'Hz'), packet_energies=u.Quantity(hdf['/simulation/transport/output_energy'].to_numpy()[emitted_packet_mask], 'erg'), r_inner=r_inner, spectrum_delta_frequency=u.Quantity(hdf['/simulation/transport/spectrum/scalars'].delta_frequency, 'Hz'), spectrum_frequency_bins=u.Quantity(hdf['/simulation/transport/spectrum/_frequency'].to_numpy(), 'Hz'), spectrum_luminosity_density_lambda=u.Quantity(hdf['/simulation/transport/spectrum/luminosity_density_lambda'].to_numpy(), 'erg / s cm').to('erg / s AA'), spectrum_wavelength=u.Quantity(hdf['/simulation/transport/spectrum/wavelength'].to_numpy(), 'cm').to('AA'), t_inner=t_inner, time_of_simulation=time_of_simulation)
        else:
            raise ValueError("Invalid value passed to packets_mode. Only allowed values are 'virtual' or 'real'")
def select_device(device='', batch_size=None):
    """Select and return the torch device to run on, logging the choice.

    Parameters
    ----------
    device : str
        '' (auto), 'cpu', or a comma-separated GPU index list like '0' or '0,1'.
    batch_size : int, optional
        If given with multiple GPUs, must be divisible by the GPU count.

    Returns
    -------
    torch.device
        'cuda:0' when CUDA is used, otherwise 'cpu'.
    """
    s = f'YOLOR {(git_describe() or date_modified())} torch {torch.__version__} '
    cpu = (device.lower() == 'cpu')
    if cpu:
        # Must hide GPUs via the env var BEFORE torch.cuda.is_available() is consulted.
        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
    elif device:
        # Restrict visible GPUs to the requested indices, then verify CUDA works.
        os.environ['CUDA_VISIBLE_DEVICES'] = device
        assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested'
    cuda = ((not cpu) and torch.cuda.is_available())
    if cuda:
        n = torch.cuda.device_count()
        if ((n > 1) and batch_size):
            assert ((batch_size % n) == 0), f'batch-size {batch_size} not multiple of GPU count {n}'
        space = (' ' * len(s))  # aligns per-device lines under the header
        for (i, d) in enumerate((device.split(',') if device else range(n))):
            p = torch.cuda.get_device_properties(i)
            s += f'''{('' if (i == 0) else space)}CUDA:{d} ({p.name}, {(p.total_memory / (1024 ** 2))}MB)
'''
    else:
        s += 'CPU\n'
    # Windows consoles may choke on non-ASCII (e.g. emoji) — strip it there.
    logger.info((s.encode().decode('ascii', 'ignore') if (platform.system() == 'Windows') else s))
    return torch.device(('cuda:0' if cuda else 'cpu'))
def setup_warn_with_traceback():
    """Install a warnings hook that also prints a stack trace for every warning."""
    import warnings
    from returnn.util.better_exchook import print_tb

    def _show_warning_with_tb(message, category, filename, lineno, file=None, line=None):
        # Mirror the stdlib behavior: fall back to stderr when `file` is unusable.
        stream = file if hasattr(file, 'write') else sys.stderr
        stream.write(warnings.formatwarning(message, category, filename, lineno, line))
        print_tb(sys._getframe(), file=stream)

    warnings.showwarning = _show_warning_with_tb
def _make_explanation(a, b):
return (['--- actual / +++ expected'] + [line.strip('\n') for line in difflib.ndiff(a, b)]) |
class BgpLookingGlassService(Service):
    """Service layer that installs BGP looking-glass servers on nodes."""

    __emulator: Emulator

    def __init__(self):
        """Create the looking-glass service; it depends on the Routing layer."""
        super().__init__()
        self.addDependency('Routing', False, False)

    def _createServer(self) -> Server:
        """Instantiate the per-node looking-glass server object."""
        return BgpLookingGlassServer()

    def _doConfigure(self, node: Node, server: BgpLookingGlassServer):
        """Run base configuration, then hand the emulator reference to the server."""
        super()._doConfigure(node, server)
        server.bind(self.__emulator)

    def configure(self, emulator: Emulator):
        """Remember the emulator for later binding, then defer to the base class."""
        self.__emulator = emulator
        return super().configure(emulator)

    def getName(self) -> str:
        return 'BgpLookingGlassService'

    def print(self, indent: int) -> str:
        """Render an indented one-line description of this layer."""
        return '{}BgpLookingGlassServiceLayer\n'.format(' ' * indent)
class CatModel(torch.nn.Module):
    """Module that concatenates its input with itself along the first dimension."""

    def __init__(self):
        super(CatModel, self).__init__()

    def forward(self, x):
        # Duplicate x along dim 0 (torch.cat default).
        return torch.cat((x, x))
class BaseANN(object):
    """Base class for approximate-nearest-neighbor benchmark wrappers."""

    def done(self):
        """Cleanup hook after a run; no-op by default."""
        pass

    def get_memory_usage(self):
        """Return this process's resident set size in kilobytes."""
        return psutil.Process().memory_info().rss / 1024

    def fit(self, X):
        """Build the index from dataset X; no-op by default."""
        pass

    def query(self, q, n):
        """Return up to n neighbor ids for query q; default finds none."""
        return []

    def batch_query(self, X, n):
        """Run query() over every row of X in a thread pool; stores results in self.res."""
        pool = ThreadPool()
        self.res = pool.map(lambda q: self.query(q, n), X)

    def delete(self, id, X):
        """Rebuild the index from X with row `id` removed."""
        self.fit(numpy.delete(X, id, axis=0))

    def get_batch_results(self):
        return self.res

    def get_additional(self):
        """Extra per-run statistics to report; none by default."""
        return {}

    def __str__(self):
        return self.name
def get_prediction_json_path(prediction_dir: str) -> str:
    """Locate the single .json prediction file inside prediction_dir and return its path."""
    json_files = [name for name in os.listdir(prediction_dir) if name.endswith('.json')]
    assert (len(json_files) == 1), 'Error: The submission .zip file must contain exactly one .json file.'
    json_path = os.path.join(prediction_dir, json_files[0])
    assert os.path.exists(json_path), 'Error: JSON result file {} does not exist!'.format(json_path)
    return json_path
def register_Ns3UanMacRcGw_methods(root_module, cls):
    """Register constructors and methods of ns3::UanMacRcGw on the binding class.

    Generated pybindgen registration code — each add_method mirrors the C++
    declaration (name, return type, parameters, virtual/const/static flags).
    Do not hand-edit individual entries; regenerate from the ns-3 headers.
    """
    cls.add_constructor([param('ns3::UanMacRcGw const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')], is_virtual=True)
    cls.add_method('AttachPhy', 'void', [param('ns3::Ptr< ns3::UanPhy >', 'phy')], is_virtual=True)
    cls.add_method('Clear', 'void', [], is_virtual=True)
    cls.add_method('Enqueue', 'bool', [param('ns3::Ptr< ns3::Packet >', 'pkt'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_virtual=True)
    cls.add_method('GetAddress', 'ns3::Address', [], is_virtual=True)
    cls.add_method('GetBroadcast', 'ns3::Address', [], is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('SetAddress', 'void', [param('ns3::UanAddress', 'addr')], is_virtual=True)
    cls.add_method('SetForwardUpCb', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Packet >, ns3::UanAddress const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_virtual=True)
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    return
def do_generate_api(targets, sources):
    """Generate the multiarray C-API header, C source, and ReST docs.

    Parameters
    ----------
    targets : sequence
        [header_file, c_file, doc_file] output paths.
    sources : sequence
        [global_vars, scalar_bool_values, types_api, multiarray_funcs] — parsed
        API dictionaries mapping names to (index, ...) entries.

    Returns
    -------
    sequence
        The `targets` argument, unchanged.

    Raises
    ------
    AssertionError
        If the merged API index and the constructed API dict disagree.
    """
    header_file = targets[0]
    c_file = targets[1]
    doc_file = targets[2]
    global_vars = sources[0]
    scalar_bool_values = sources[1]
    types_api = sources[2]
    multiarray_funcs = sources[3]
    # NOTE(review): copies ALL source dicts (not a tail slice) so the merged
    # index covers globals, bool values, types, and functions — presumably
    # intentional; confirm against genapi.merge_api_dicts expectations.
    multiarray_api = sources[:]
    module_list = []
    extension_list = []
    init_list = []
    multiarray_api_index = genapi.merge_api_dicts(multiarray_api)
    genapi.check_api_dict(multiarray_api_index)
    numpyapi_list = genapi.get_api_functions('NUMPY_API', multiarray_funcs)
    ordered_funcs_api = genapi.order_dict(multiarray_funcs)
    api_name = 'PyArray_API'
    # Build one *_Api wrapper per exported symbol, keyed by name.
    multiarray_api_dict = {}
    for f in numpyapi_list:
        name = f.name
        index = multiarray_funcs[name][0]
        annotations = multiarray_funcs[name][1:]
        multiarray_api_dict[f.name] = FunctionApi(f.name, index, annotations, f.return_type, f.args, api_name)
    for (name, val) in global_vars.items():
        (index, type) = val
        multiarray_api_dict[name] = GlobalVarApi(name, index, type, api_name)
    for (name, val) in scalar_bool_values.items():
        index = val[0]
        multiarray_api_dict[name] = BoolValuesApi(name, index, api_name)
    for (name, val) in types_api.items():
        index = val[0]
        multiarray_api_dict[name] = TypeApi(name, index, 'PyTypeObject', api_name)
    # Sanity check: every indexed symbol must have exactly one API wrapper.
    if (len(multiarray_api_dict) != len(multiarray_api_index)):
        keys_dict = set(multiarray_api_dict.keys())
        keys_index = set(multiarray_api_index.keys())
        raise AssertionError('Multiarray API size mismatch - index has extra keys {}, dict has extra keys {}'.format((keys_index - keys_dict), (keys_dict - keys_index)))
    extension_list = []
    # Emit declarations in slot-index order so the API table layout is stable.
    for (name, index) in genapi.order_dict(multiarray_api_index):
        api_item = multiarray_api_dict[name]
        extension_list.append(api_item.define_from_array_api_string())
        init_list.append(api_item.array_api_define())
        module_list.append(api_item.internal_define())
    # Render the header / C source from the module-level templates.
    s = (h_template % ('\n'.join(module_list), '\n'.join(extension_list)))
    genapi.write_file(header_file, s)
    s = (c_template % ',\n'.join(init_list))
    genapi.write_file(c_file, s)
    # ReST documentation: one entry per exported function.
    s = c_api_header
    for func in numpyapi_list:
        s += func.to_ReST()
        s += '\n\n'
    genapi.write_file(doc_file, s)
    return targets
def expand_input_by_factor(n, divisible_by=8):
    """Return a callable that scales `num_inputs` by n, rounded to a divisible depth."""
    def multiplier(num_inputs, **_):
        return _make_divisible(num_inputs * n, divisible_by)
    return multiplier
def test_get_aggregated_tensor_privileged_function(tensor_db):
    """A privileged aggregation function should receive and sum the raw local tensors."""
    weights = {'col1': 0.1, 'col2': 0.9}

    class PrivilegedSum(AggregationFunction):
        """Sums local tensors; marked privileged to access raw tensor data."""

        def __init__(self):
            super().__init__()
            self._privileged = True

        def call(self, local_tensors, *_):
            stacked = [lt.tensor for lt in local_tensors]
            return np.sum(stacked, axis=0)

    key = TensorKey('tensor_name', 'agg', 0, False, ())
    result = tensor_db.get_aggregated_tensor(key, weights, PrivilegedSum())
    # Weights are ignored by the privileged sum: the result is the plain element-wise sum.
    assert np.array_equal(result, np.array([2, 4, 6, 8, 10]))
def hook_linear(m, x, y):
    """FLOP-count hook for a Linear layer: in_features (+1 if biased) per output element."""
    per_element = m.in_features + (1 if m.bias is not None else 0)
    return int(per_element * y.numel())
def _draw_cross(img, pt, color, size=4, thickness=2):
    """Draw an X-shaped cross centered at pt by drawing its two diagonals."""
    x, y = pt[0], pt[1]
    top_left = (x - size, y - size)
    bottom_right = (x + size, y + size)
    top_right = (x + size, y - size)
    bottom_left = (x - size, y + size)
    _draw_line(img, top_left, bottom_right, color, thickness)
    _draw_line(img, top_right, bottom_left, color, thickness)
class AgentBoxesWithFadedHistory(AgentRepresentation):
    """Renders agents as boxes, with past positions drawn as a faded history,
    in an image cropped and rotated around a center agent."""

    def __init__(self, helper: PredictHelper, seconds_of_history: float=2, frequency_in_hz: float=2, resolution: float=0.1, meters_ahead: float=40, meters_behind: float=10, meters_left: float=25, meters_right: float=25, color_mapping: Callable[([str], Tuple[(int, int, int)])]=None):
        """Store rendering parameters.

        resolution is meters per pixel and must be positive; color_mapping maps
        an agent category name to an RGB tuple (defaults to default_colors).
        """
        self.helper = helper
        self.seconds_of_history = seconds_of_history
        self.frequency_in_hz = frequency_in_hz
        if (not (resolution > 0)):
            raise ValueError(f'Resolution must be positive. Received {resolution}.')
        self.resolution = resolution
        self.meters_ahead = meters_ahead
        self.meters_behind = meters_behind
        self.meters_left = meters_left
        self.meters_right = meters_right
        if (not color_mapping):
            color_mapping = default_colors
        self.color_mapping = color_mapping

    def make_representation(self, instance_token: str, sample_token: str) -> np.ndarray:
        """Render the scene around (instance_token, sample_token) as a uint8 RGB array."""
        # Buffer is twice the largest crop extent so the rotation never clips content.
        buffer = (max([self.meters_ahead, self.meters_behind, self.meters_left, self.meters_right]) * 2)
        image_side_length = int((buffer / self.resolution))
        central_track_pixels = ((image_side_length / 2), (image_side_length / 2))
        base_image = np.zeros((image_side_length, image_side_length, 3))
        # Past annotations in global frame, ordered oldest -> newest, with the
        # present-time annotations appended last.
        history = self.helper.get_past_for_sample(sample_token, self.seconds_of_history, in_agent_frame=False, just_xy=False)
        history = reverse_history(history)
        present_time = self.helper.get_annotations_for_sample(sample_token)
        history = add_present_time_to_history(present_time, history)
        center_agent_annotation = self.helper.get_sample_annotation(instance_token, sample_token)
        draw_agent_boxes(center_agent_annotation, central_track_pixels, history, base_image, resolution=self.resolution, get_color=self.color_mapping)
        # Rotate so the center agent faces "up", then crop to the requested extents.
        center_agent_yaw = quaternion_yaw(Quaternion(center_agent_annotation['rotation']))
        rotation_mat = get_rotation_matrix(base_image.shape, center_agent_yaw)
        rotated_image = cv2.warpAffine(base_image, rotation_mat, (base_image.shape[1], base_image.shape[0]))
        (row_crop, col_crop) = get_crops(self.meters_ahead, self.meters_behind, self.meters_left, self.meters_right, self.resolution, image_side_length)
        return rotated_image[(row_crop, col_crop)].astype('uint8')
class TruncatedNormal(pyd.Normal):
    """Normal distribution whose samples are clamped to [low, high], with
    straight-through gradients so the clamp does not block backprop."""

    def __init__(self, loc, scale, low=(- 1.0), high=1.0, eps=1e-06):
        super().__init__(loc, scale, validate_args=False)
        self.low = low
        self.high = high
        self.eps = eps

    def _clamp(self, x):
        """Clamp x into (low+eps, high-eps) while letting gradients flow through x."""
        hard = torch.clamp(x, self.low + self.eps, self.high - self.eps)
        # Forward value is `hard`; gradient is taken w.r.t. the unclamped x.
        return x - x.detach() + hard.detach()

    def sample(self, clip=None, sample_shape=torch.Size()):
        """Draw a reparameterized sample; optionally clip the noise to [-clip, clip]."""
        shape = self._extended_shape(sample_shape)
        noise = _standard_normal(shape, dtype=self.loc.dtype, device=self.loc.device)
        noise = noise * self.scale
        if clip is not None:
            noise = torch.clamp(noise, -clip, clip)
        return self._clamp(self.loc + noise)
# NOTE(review): the bare call below looks like a decompiled decorator that lost
# its '@' and part of its name (likely '@register_model_architecture(...)' from
# fairseq). As written it registers nothing — confirm against upstream source.
_model_architecture('transformer_lm', 'transformer_lm_gpt')
def transformer_lm_gpt(args):
    """Fill in GPT-style defaults on args, then apply the base LM architecture."""
    args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 768)
    args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 3072)
    args.decoder_layers = getattr(args, 'decoder_layers', 12)
    args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 12)
    args.dropout = getattr(args, 'dropout', 0.1)
    args.attention_dropout = getattr(args, 'attention_dropout', 0.1)
    args.activation_fn = getattr(args, 'activation_fn', 'gelu_accurate')
    base_lm_architecture(args)
class RunMode(Enum):
    """Execution-mode tags (semantics inferred from member names — confirm
    against the code that dispatches on this enum)."""
    TPU_STATIC = 0
    TPU_DYNAMIC = 1
    CPU = 2
    LOOP = 3
    SWITCH = 4
    MERGE = 5
    UNKNOWN = 9  # note: values 6-8 are intentionally unassigned here
def register_Ns3UanTransducerHd_methods(root_module, cls):
    """Register constructors and methods of ns3::UanTransducerHd on the binding class.

    Generated pybindgen registration code — each add_method mirrors the C++
    declaration (name, return type, parameters, virtual/const/static flags).
    Do not hand-edit individual entries; regenerate from the ns-3 headers.
    """
    cls.add_constructor([param('ns3::UanTransducerHd const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('AddPhy', 'void', [param('ns3::Ptr< ns3::UanPhy >', 'arg0')], is_virtual=True)
    cls.add_method('ApplyRxGainDb', 'double', [param('double', 'rxPowerDb'), param('ns3::UanTxMode', 'mode')], is_virtual=True)
    cls.add_method('Clear', 'void', [], is_virtual=True)
    cls.add_method('GetArrivalList', 'ns3::UanTransducer::ArrivalList const &', [], is_const=True, is_virtual=True)
    cls.add_method('GetChannel', 'ns3::Ptr< ns3::UanChannel >', [], is_const=True, is_virtual=True)
    cls.add_method('GetPhyList', 'ns3::UanTransducer::UanPhyList const &', [], is_const=True, is_virtual=True)
    cls.add_method('GetRxGainDb', 'double', [], is_virtual=True)
    cls.add_method('GetState', 'ns3::UanTransducer::State', [], is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('IsRx', 'bool', [], is_const=True, is_virtual=True)
    cls.add_method('IsTx', 'bool', [], is_const=True, is_virtual=True)
    cls.add_method('Receive', 'void', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('double', 'rxPowerDb'), param('ns3::UanTxMode', 'txMode'), param('ns3::UanPdp', 'pdp')], is_virtual=True)
    cls.add_method('SetChannel', 'void', [param('ns3::Ptr< ns3::UanChannel >', 'chan')], is_virtual=True)
    cls.add_method('SetRxGainDb', 'void', [param('double', 'gainDb')], is_virtual=True)
    cls.add_method('Transmit', 'void', [param('ns3::Ptr< ns3::UanPhy >', 'src'), param('ns3::Ptr< ns3::Packet >', 'packet'), param('double', 'txPowerDb'), param('ns3::UanTxMode', 'txMode')], is_virtual=True)
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    return
class ProjectiveConic_number_field(ProjectiveConic_field):
    """Projective conic over a number field, with local-obstruction machinery
    for deciding existence of rational points (Hasse principle)."""

    def __init__(self, A, f):
        """Initialize the conic and its obstruction caches (all initially unknown)."""
        ProjectiveConic_field.__init__(self, A, f)
        self._local_obstruction = None
        self._finite_obstructions = None
        self._infinite_obstructions = None

    def has_rational_point(self, point=False, obstruction=False, algorithm='default', read_cache=True):
        """Decide whether the conic has a rational point.

        If ``point`` is True, also return a point when one exists; if
        ``obstruction`` is True, also return a local obstruction (a place)
        when no point exists. ``algorithm`` is one of 'default', 'local',
        'rnfisnorm'; 'qfsolve' is rejected over number fields.
        """
        if read_cache:
            if (self._rational_point is not None):
                if (point or obstruction):
                    return (True, self._rational_point)
                else:
                    return True
            if (self._local_obstruction is not None):
                if (point or obstruction):
                    return (False, self._local_obstruction)
                else:
                    return False
            # Empty obstruction lists at all places imply solvability (Hasse principle).
            if ((not point) and (self._finite_obstructions == []) and (self._infinite_obstructions == [])):
                if obstruction:
                    return (True, None)
                return True
        # A singular point is in particular a rational point.
        if self.has_singular_point():
            if point:
                return self.has_singular_point(point=True)
            if obstruction:
                return (True, None)
            return True
        B = self.base_ring()
        if (algorithm == 'default'):
            # Try to find a point first; fall back to local obstructions.
            ret = self.has_rational_point(point=True, obstruction=False, algorithm='rnfisnorm', read_cache=False)
            if ret[0]:
                if (point or obstruction):
                    return ret
                return True
            if obstruction:
                ret = self.has_rational_point(point=False, obstruction=True, algorithm='local', read_cache=False)
                if ret[0]:
                    # rnfisnorm said "no point" but local said "solvable": contradiction.
                    raise RuntimeError(('Outputs of algorithms in has_rational_point disagree for conic %s' % self))
                return ret
            if point:
                return (False, None)
            return False
        if (algorithm == 'local'):
            if point:
                raise ValueError("Algorithm 'local' cannot be combined with point = True in has_rational_point")
            # Check infinite (real) places first — they are cheap.
            obs = self.local_obstructions(infinite=True, finite=False, read_cache=read_cache)
            if obs:
                if obstruction:
                    return (False, obs[0])
                return False
            obs = self.local_obstructions(read_cache=read_cache)
            if (not obs):
                if obstruction:
                    return (True, None)
                return True
            if obstruction:
                return (False, obs[0])
            return False
        if (algorithm == 'rnfisnorm'):
            from sage.modules.free_module_element import vector
            if obstruction:
                raise ValueError('Algorithm rnfisnorm cannot be combined with obstruction = True in has_rational_point')
            (D, T) = self.diagonal_matrix()
            abc = [D[(0, 0)], D[(1, 1)], D[(2, 2)]]
            # Degenerate diagonal entry: a coordinate point maps to a rational point.
            for j in range(3):
                if (abc[j] == 0):
                    pt = self.point((T * vector({2: 0, j: 1})))
                    if (point or obstruction):
                        return (True, pt)
                    return True
            # Easy square cases avoid the expensive norm computation.
            if ((- abc[1]) / abc[0]).is_square():
                pt = self.point((T * vector([((- abc[1]) / abc[0]).sqrt(), 1, 0])))
                if (point or obstruction):
                    return (True, pt)
                return True
            if ((- abc[2]) / abc[0]).is_square():
                pt = self.point((T * vector([((- abc[2]) / abc[0]).sqrt(), 0, 1])))
                if (point or obstruction):
                    return (True, pt)
                return True
            # Reduce to a relative norm equation over an absolute field K.
            if is_RationalField(B):
                K = B
                [KtoB, BtoK] = [K.hom(K) for i in range(2)]
            else:
                K = B.absolute_field('Y')
                [KtoB, BtoK] = K.structure()
            X = PolynomialRing(K, 'X').gen()
            d = BtoK(((- abc[1]) / abc[0]))
            den = d.denominator()
            # Scale by den^2 so the quadratic extension is defined by an integral element.
            L = K.extension(((X ** 2) - (d * (den ** 2))), names='y')
            isnorm = BtoK(((- abc[2]) / abc[0])).is_norm(L, element=True)
            if isnorm[0]:
                pt = self.point((T * vector([KtoB(isnorm[1][0]), KtoB((isnorm[1][1] * den)), 1])))
                if point:
                    return (True, pt)
                return True
            if point:
                return (False, None)
            return False
        if (algorithm == 'qfsolve'):
            raise TypeError(('Algorithm qfsolve in has_rational_point only for conics over QQ, not over %s' % B))
        if obstruction:
            raise ValueError(('Invalid combination: obstruction=True and algorithm=%s' % algorithm))
        return ProjectiveConic_field.has_rational_point(self, point=point, algorithm=algorithm, read_cache=False)

    def is_locally_solvable(self, p):
        """Return whether the conic is solvable at the place ``p`` (a prime
        ideal or an embedding), via the Hilbert symbol of the diagonalized form."""
        (D, T) = self.diagonal_matrix()
        abc = [D[(j, j)] for j in range(3)]
        # A zero diagonal entry gives a point everywhere locally.
        for a in abc:
            if (a == 0):
                return True
        a = ((- abc[0]) / abc[2])
        b = ((- abc[1]) / abc[2])
        ret = self.base_ring().hilbert_symbol(a, b, p)
        if (ret == (- 1)):
            if (self._local_obstruction is None):
                from sage.categories.map import Map
                from sage.categories.rings import Rings
                from sage.rings.qqbar import AA
                from sage.rings.real_lazy import RLF
                # Only cache obstructions given by a prime or by a real embedding
                # into AA/RLF; other ring maps are not canonical places.
                if ((not (isinstance(p, Map) and p.category_for().is_subcategory(Rings()))) or (p.codomain() is AA) or (p.codomain() is RLF)):
                    self._local_obstruction = p
            return False
        return True

    def local_obstructions(self, finite=True, infinite=True, read_cache=True):
        """Return the list of places where the conic is not locally solvable.

        ``finite``/``infinite`` select which kinds of places to examine;
        results are cached per kind. For a conic, the number of obstructed
        places is always even (product formula), which is asserted when both
        kinds are requested.
        """
        obs0 = []
        obs1 = []
        B = self.base_ring()
        if infinite:
            if (read_cache and (self._infinite_obstructions is not None)):
                obs0 = self._infinite_obstructions
            else:
                from sage.rings.qqbar import AA
                for b in B.embeddings(AA):
                    if (not self.is_locally_solvable(b)):
                        obs0.append(b)
                self._infinite_obstructions = obs0
        if finite:
            if (read_cache and (self._finite_obstructions is not None)):
                obs1 = self._finite_obstructions
            else:
                # Only primes dividing a coefficient denominator or 2*det can obstruct.
                candidates = []
                if (self.determinant() != 0):
                    O = B.maximal_order()
                    for a in self.symmetric_matrix().list():
                        if (a != 0):
                            for f in O.fractional_ideal(a).factor():
                                if ((f[1] < 0) and (f[0] not in candidates)):
                                    candidates.append(f[0])
                    for f in O.fractional_ideal((2 * self.determinant())).factor():
                        if ((f[1] > 0) and (f[0] not in candidates)):
                            candidates.append(f[0])
                for b in candidates:
                    if (not self.is_locally_solvable(b)):
                        obs1.append(b)
                # BUGFIX: previously wrote to self._infinite_obstructions, which
                # both failed to cache the finite result (the read above checks
                # _finite_obstructions) and corrupted the infinite cache.
                self._finite_obstructions = obs1
        obs = (obs1 + obs0)
        if (finite and infinite):
            assert ((len(obs) % 2) == 0)
        return obs
class TestNormalizeOp(hu.HypothesisTestCase):
    """Hypothesis-driven checks for the Normalize / NormalizeL1 operators.

    NOTE(review): in the (decompiled) original, the decorator expressions below
    appeared as bare parenthesized keyword tuples — a syntax error. Restored as
    @given/@settings, assuming the standard ``from hypothesis import given,
    settings`` import at the top of the file (the usual pattern alongside
    ``hypothesis_test_util as hu``) — confirm against upstream.
    """

    @given(X=hu.tensor(min_dim=1, max_dim=5, elements=hu.floats(min_value=0.5, max_value=1.0)), **hu.gcs)
    @settings(max_examples=10, deadline=None)
    def test_normalize(self, X, gc, dc):
        """L2 Normalize must match the reference, run on all devices, and have correct gradients."""
        def ref_normalize(X, axis):
            # Clamp the norm at 1e-12 to avoid division by zero.
            x_normed = (X / np.maximum(np.sqrt((X ** 2).sum(axis=axis, keepdims=True)), 1e-12))
            return (x_normed,)
        # Exercise every axis, including negative indexing.
        for axis in range((- X.ndim), X.ndim):
            x = copy.copy(X)
            op = core.CreateOperator('Normalize', 'X', 'Y', axis=axis)
            self.assertReferenceChecks(gc, op, [x], functools.partial(ref_normalize, axis=axis))
            self.assertDeviceChecks(dc, op, [x], [0])
            self.assertGradientChecks(gc, op, [x], 0, [0])

    @given(X=hu.tensor(min_dim=1, max_dim=5, elements=hu.floats(min_value=0.5, max_value=1.0)), **hu.gcs)
    @settings(max_examples=10, deadline=None)
    def test_normalize_L1(self, X, gc, dc):
        """NormalizeL1 must match the reference and run on all devices."""
        def ref(X, axis):
            norm = abs(X).sum(axis=axis, keepdims=True)
            return ((X / norm),)
        for axis in range((- X.ndim), X.ndim):
            print('axis: ', axis)
            op = core.CreateOperator('NormalizeL1', 'X', 'Y', axis=axis)
            self.assertReferenceChecks(gc, op, [X], functools.partial(ref, axis=axis))
            self.assertDeviceChecks(dc, op, [X], [0])
def _add_category_id_to_contiguous_id_maps_to_metadata(dataset_names: Iterable[str]):
    """Build one category-id -> contiguous-id map shared across datasets and
    store the derived lookup tables in each dataset's metadata.

    Raises ValueError if two datasets give the same category id different names.
    """
    merged_categories = {}
    for dataset_name in dataset_names:
        meta = MetadataCatalog.get(dataset_name)
        for cat_id, cat_name in meta.categories.items():
            if cat_id in merged_categories:
                known_name, known_dataset = merged_categories[cat_id]
                if known_name != cat_name:
                    raise ValueError(f'Incompatible categories for category ID {cat_id}: dataset {dataset_name} value "{cat_name}", dataset {known_dataset} value "{known_name}"')
            else:
                merged_categories[cat_id] = (cat_name, dataset_name)
    # Contiguous ids are assigned in sorted category-id order.
    merged_cat_id_to_cont_id = {cat_id: idx for idx, cat_id in enumerate(sorted(merged_categories))}
    for dataset_name in dataset_names:
        meta = MetadataCatalog.get(dataset_name)
        categories = meta.get('categories')
        ordered_ids = sorted(categories.keys())
        meta.thing_classes = [categories[cid] for cid in ordered_ids]
        meta.thing_dataset_id_to_contiguous_id = {cid: merged_cat_id_to_cont_id[cid] for cid in ordered_ids}
        meta.thing_contiguous_id_to_dataset_id = {merged_cat_id_to_cont_id[cid]: cid for cid in ordered_ids}
# NOTE(review): the bare call below looks like a decompiled decorator that lost
# its '@' and part of its name (likely '@add_start_docstrings(...)' from
# transformers). As written it has no effect on the function — confirm upstream.
_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    """Thin wrapper around AutoModelForMaskedLM.from_pretrained."""
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)
def zeros(shape, dtype=None, order='C'):
    """Return a matrix of the given shape filled with zeros.

    dtype defaults to the matrix default (float64); order selects C or
    Fortran memory layout.
    """
    out = ndarray.__new__(matrix, shape, dtype, order=order)
    out.fill(0)
    return out
class Euclidean(Module):
    """Legacy torch.legacy-style layer computing the Euclidean distance from
    the input to each of `outputSize` learned weight columns.

    The underscore-prefixed fields are lazily allocated scratch buffers reused
    across forward/backward calls to avoid reallocations.
    """

    def __init__(self, inputSize, outputSize):
        super(Euclidean, self).__init__()
        self.weight = torch.Tensor(inputSize, outputSize)
        self.gradWeight = torch.Tensor(inputSize, outputSize)
        self.gradInput.resize_(inputSize)
        self.output.resize_(outputSize)
        self.fastBackward = True
        self.reset()
        # Scratch buffers, allocated on first use in updateOutput/updateGradInput.
        self._input = None
        self._weight = None
        self._expand = None
        self._expand2 = None
        self._repeat = None
        self._repeat2 = None
        self._div = None
        self._output = None
        self._gradOutput = None
        self._expand3 = None
        self._sum = None

    def reset(self, stdv=None):
        """Re-initialize weights uniformly in [-stdv, stdv] (default: 1/sqrt(inputSize))."""
        if (stdv is not None):
            stdv = (stdv * math.sqrt(3))
        else:
            stdv = (1.0 / math.sqrt(self.weight.size(0)))
        self.weight.uniform_((- stdv), stdv)

    def _view(self, res, src, *args):
        """Set `res` to a view of `src` with the given shape, copying only if needed."""
        if src.is_contiguous():
            res.set_(src.view(*args))
        else:
            res.set_(src.contiguous().view(*args))

    def updateOutput(self, input):
        """Forward pass: output[b, o] = || input[b] - weight[:, o] ||_2 for a 2D batch input."""
        if (self._input is None):
            self._input = input.new()
        if (self._weight is None):
            self._weight = self.weight.new()
        if (self._expand is None):
            self._expand = self.output.new()
        if (self._expand2 is None):
            self._expand2 = self.output.new()
        if (self._repeat is None):
            self._repeat = self.output.new()
        if (self._repeat2 is None):
            self._repeat2 = self.output.new()
        (inputSize, outputSize) = (self.weight.size(0), self.weight.size(1))
        assert (input.dim() == 2)
        batchSize = input.size(0)
        self._view(self._input, input, batchSize, inputSize, 1)
        self._expand = self._input.expand(batchSize, inputSize, outputSize)
        # Materialize the expanded input so it can be modified in place below.
        self._repeat.resize_as_(self._expand).copy_(self._expand)
        self._weight = self.weight.view(1, inputSize, outputSize)
        self._expand2 = self._weight.expand_as(self._repeat)
        if (torch.typename(input) == 'torch.cuda.FloatTensor'):
            # CUDA path: copy first — in-place add with a broadcasted expand is
            # not supported/efficient on this legacy backend.
            self._repeat2.resize_as_(self._expand2).copy_(self._expand2)
            self._repeat.add_((- 1), self._repeat2)
        else:
            self._repeat.add_((- 1), self._expand2)
        torch.norm(self._repeat, 2, 1, True, out=self.output)
        self.output.resize_(batchSize, outputSize)
        return self.output

    def updateGradInput(self, input, gradOutput):
        """Backward pass w.r.t. input: gradInput = sum_o gradOutput/output * (input - w_o)."""
        if (self.gradInput is None):
            return
        if (self._div is None):
            self._div = input.new()
        if (self._output is None):
            self._output = self.output.new()
        if (self._gradOutput is None):
            self._gradOutput = input.new()
        if (self._expand3 is None):
            self._expand3 = input.new()
        if (not self.fastBackward):
            self.updateOutput(input)
        (inputSize, outputSize) = (self.weight.size(0), self.weight.size(1))
        # Add a small epsilon to the norms to avoid division by zero.
        self._output.resize_as_(self.output).copy_(self.output).add_(1e-07)
        self._view(self._gradOutput, gradOutput, gradOutput.size())
        torch.div(gradOutput, self._output, out=self._div)
        assert (input.dim() == 2)
        batchSize = input.size(0)
        self._div.resize_(batchSize, 1, outputSize)
        self._expand3 = self._div.expand(batchSize, inputSize, outputSize)
        if (torch.typename(input) == 'torch.cuda.FloatTensor'):
            # CUDA path: same copy-before-inplace workaround as in updateOutput.
            self._repeat2.resize_as_(self._expand3).copy_(self._expand3)
            self._repeat2.mul_(self._repeat)
        else:
            torch.mul(self._repeat, self._expand3, out=self._repeat2)
        torch.sum(self._repeat2, 2, True, out=self.gradInput)
        self.gradInput.resize_as_(input)
        return self.gradInput

    def accGradParameters(self, input, gradOutput, scale=1):
        """Accumulate weight gradients; relies on _repeat2 computed in updateGradInput."""
        (inputSize, outputSize) = (self.weight.size(0), self.weight.size(1))
        assert (input.dim() == 2)
        if (self._sum is None):
            self._sum = input.new()
        torch.sum(self._repeat2, 0, True, out=self._sum)
        self._sum.resize_(inputSize, outputSize)
        # Negative sign: d||x - w||/dw = -(x - w)/||x - w||.
        self.gradWeight.add_((- scale), self._sum)

    def type(self, type=None, tensorCache=None):
        if type:
            # Scratch buffers hold the old dtype; drop them before converting.
            self.clearState()
        return super(Euclidean, self).type(type, tensorCache)

    def clearState(self):
        """Free all lazily-allocated scratch buffers."""
        clear(self, ['_input', '_output', '_gradOutput', '_weight', '_div', '_sum', '_expand', '_expand2', '_expand3', '_repeat', '_repeat2'])
        return super(Euclidean, self).clearState()
def get_female_dominant_sources(topicsDF, delta=1):
    """Return rows where female sources outnumber male sources by at least `delta`,
    with the topicDistribution column dropped."""
    condition = 'sourcesFemaleCount - sourcesMaleCount >= {}'.format(delta)
    return topicsDF.drop('topicDistribution').filter(condition)
def VI_Block(X, S1, S2, config):
    """Build a Value Iteration Network block (TF1 graph construction).

    Parameters
    ----------
    X : tensor
        Input observation maps (NHWC).
    S1, S2 : tensor
        Per-sample state coordinates used to gather Q-values at specific cells.
    config : object
        Provides k (VI iterations), ch_i/ch_h/ch_q (channel sizes) and
        statebatchsize (states queried per sample).

    Returns
    -------
    (logits, output)
        Action logits and their softmax, for the gathered states.
    """
    k = config.k
    ch_i = config.ch_i
    ch_h = config.ch_h
    ch_q = config.ch_q
    state_batch_size = config.statebatchsize
    bias = tf.Variable((np.random.randn(1, 1, 1, ch_h) * 0.01), dtype=tf.float32)
    w0 = tf.Variable((np.random.randn(3, 3, ch_i, ch_h) * 0.01), dtype=tf.float32)
    w1 = tf.Variable((np.random.randn(1, 1, ch_h, 1) * 0.01), dtype=tf.float32)
    w = tf.Variable((np.random.randn(3, 3, 1, ch_q) * 0.01), dtype=tf.float32)
    w_fb = tf.Variable((np.random.randn(3, 3, 1, ch_q) * 0.01), dtype=tf.float32)
    w_o = tf.Variable((np.random.randn(ch_q, 8) * 0.01), dtype=tf.float32)
    # Reward map r from the observation, then initial Q and V.
    h = (conv2d_flipkernel(X, w0, name='h0') + bias)
    r = conv2d_flipkernel(h, w1, name='r')
    q = conv2d_flipkernel(r, w, name='q')
    v = tf.reduce_max(q, axis=3, keep_dims=True, name='v')
    # k-1 value-iteration updates: convolve [r, v] with the shared kernels.
    for i in range(0, (k - 1)):
        rv = tf.concat([r, v], 3)
        wwfb = tf.concat([w, w_fb], 2)
        q = conv2d_flipkernel(rv, wwfb, name='q')
        v = tf.reduce_max(q, axis=3, keep_dims=True, name='v')
    # One final Q update (the k-th application).
    q = conv2d_flipkernel(tf.concat([r, v], 3), tf.concat([w, w_fb], 2), name='q')
    # NCHW transpose so gather_nd below can index (row, col, batch, channel).
    q = tf.transpose(q, perm=[0, 3, 1, 2])
    bs = tf.shape(q)[0]
    # Repeat each batch index state_batch_size times to pair with S1/S2 coords.
    rprn = tf.reshape(tf.tile(tf.reshape(tf.range(bs), [(- 1), 1]), [1, state_batch_size]), [(- 1)])
    ins1 = tf.cast(tf.reshape(S1, [(- 1)]), tf.int32)
    ins2 = tf.cast(tf.reshape(S2, [(- 1)]), tf.int32)
    idx_in = tf.transpose(tf.stack([ins1, ins2, rprn]), [1, 0])
    q_out = tf.gather_nd(tf.transpose(q, [2, 3, 0, 1]), idx_in, name='q_out')
    # Map gathered Q-values to 8 discrete actions.
    logits = tf.matmul(q_out, w_o)
    output = tf.nn.softmax(logits, name='output')
    return (logits, output)
class Noisy_Agent(RandomScriptAgent):
    """Scripted Overcooked agent mixing useful onion/soup tasks with a
    noise_ratio fraction of random placements."""

    def __init__(self, onion_ratio, soup_ratio, noise_ratio):
        clean = 1.0 - noise_ratio
        periods = {
            'pickup_onion_and_place_in_pot': dict(prob=(onion_ratio * clean), args=dict()),
            'pickup_onion_and_place_random': dict(prob=(onion_ratio * noise_ratio), args=dict()),
            'pickup_soup_and_deliver': dict(prob=(soup_ratio * clean), args=dict()),
            'pickup_soup_and_place_random': dict(prob=(soup_ratio * noise_ratio), args=dict()),
        }
        super().__init__(periods)

    def step(self, mdp, state, player_idx):
        """Advance one step, switching task period when the current one has become impossible."""
        player = state.players[player_idx]
        if 'pickup_onion' in self._current_period_name:
            # No pot can accept an onion -> switch to one of the soup periods.
            if not utils.exists(mdp, state, player_idx, terrain_type='P', obj=['empty', 'unfull_soup']):
                self._current_period_name, self._current_period = self.make_new_period(i=[2, 3])
                self._current_period.reset(mdp, state, player_idx)
        if self._current_period_name == 'pickup_soup_and_deliver':
            holding_soup = player.has_object() and (player.get_object().name == 'soup')
            if not holding_soup:
                # Not holding soup and none is ready/cooking -> go back to onions.
                if not utils.exists(mdp, state, player_idx, terrain_type='P', obj=['soup', 'cooking_soup']):
                    self._current_period_name, self._current_period = self.make_new_period(i=0)
                    self._current_period.reset(mdp, state, player_idx)
        return super(Noisy_Agent, self).step(mdp, state, player_idx)
class PoincareParticles(MutableMapping):
    """Read-only mapping view over a Poincare system's particles.

    Index 0 is a NaN placeholder for the star; indices 1..N-1 are built on the
    fly from the system's flat canonical-variable vector.
    """

    def __init__(self, poincare):
        self.poincare = poincare

    def __getitem__(self, i):
        if (i == 0):
            # The central body has no canonical variables of its own.
            return PoincareParticle(G=np.nan, m=np.nan, Mstar=np.nan, l=np.nan, eta=np.nan, rho=np.nan, Lambda=np.nan, kappa=np.nan, sigma=np.nan)
        p = self.poincare
        if isinstance(i, slice):
            return [self[i] for i in range(*i.indices(p.N))]
        if (i < 0):
            i += p.N
        if ((i < 0) or (i >= p.N)):
            raise AttributeError('Index {0} used to access particles out of range.'.format(i))
        if (p.masses[i] == 0):
            raise AttributeError('Current implementation of Poincare does not work with test particles')
        # Canonical variables are stored flat: 3 coordinates per particle, with
        # the conjugate momenta starting at offset N_dof.
        val = p.values
        j = (i - 1)
        l = val[(j * 3)]
        eta = val[((j * 3) + 1)]
        rho = val[((j * 3) + 2)]
        Lambda = val[(p.N_dof + (j * 3))]
        kappa = val[((p.N_dof + (j * 3)) + 1)]
        sigma = val[((p.N_dof + (j * 3)) + 2)]
        return PoincareParticle(coordinates=p.coordinates, G=p.G, m=p.masses[i], Mstar=p.masses[0], l=l, eta=eta, rho=rho, Lambda=Lambda, kappa=kappa, sigma=sigma)

    def __setitem__(self, key, value):
        raise AttributeError("Can't set Poincare particle attributes")

    def __delitem__(self, key):
        raise AttributeError('deleting variables not implemented.')

    def __iter__(self):
        for p in self[:self.poincare.N]:
            (yield p)

    def __len__(self):
        return self.poincare.N
def test_flux_nu(spectrum):
    """flux_nu should equal luminosity_to_flux (with a deprecation warning) when a
    distance is set, and raise AttributeError otherwise."""
    distance = getattr(spectrum, 'distance', None)
    if distance is None:
        with pytest.raises(AttributeError):
            spectrum.flux_nu
    else:
        with pytest.warns(DeprecationWarning):
            test_helper.assert_quantity_allclose(spectrum.flux_nu, spectrum.luminosity_to_flux(spectrum.luminosity_density_nu, spectrum.distance))
def str2list(s, out_type=None):
    """Parse a list-repr-like string (e.g. "[1, 2, 3]" or "['a', 'b']") into a list.

    Items are split on ', '; if out_type is given, each item is converted with it.
    """
    cleaned = s.replace('[', '').replace(']', '').replace("'", '')
    items = cleaned.split(', ')
    if out_type is None:
        return items
    return [out_type(item) for item in items]
def build_transformer(args):
    """Construct a Transformer from the 'NN' section of the config mapping.

    Always requests intermediate decoder outputs (return_intermediate_dec=True).
    """
    nn_cfg = args['NN']
    return Transformer(
        d_model=nn_cfg['hidden_dim'],
        dropout=nn_cfg['dropout'],
        nhead=nn_cfg['nheads'],
        dim_feedforward=nn_cfg['dim_feedforward'],
        num_encoder_layers=nn_cfg['enc_layers'],
        num_decoder_layers=nn_cfg['dec_layers'],
        normalize_before=nn_cfg['pre_norm'],
        return_intermediate_dec=True,
    )
def main():
    """Mine hard-negative image candidates for each caption embedding.

    Loads precomputed image and caption embeddings, scores each caption
    against every image by dot product, masks out the caption's ground-truth
    image (five captions per image, so caption i maps to image i // 5), and
    saves the indices of the `totalsavecandi` highest-scoring non-matching
    images per caption.  `totalsavecandi` is a module-level constant -- TODO
    confirm its definition site.
    """
    img_embs = np.load('../data/img_embstrain.npy')
    cap_embs = np.load('../data/cap_embstrain.npy')
    print(np.shape(img_embs), np.shape(cap_embs))
    # Half precision on the GPU keeps the full similarity sweep in memory.
    img_embs = torch.from_numpy(img_embs).half().cuda()
    cap_embs = torch.from_numpy(cap_embs).half().cuda()
    t2ihn = np.zeros((len(cap_embs), totalsavecandi)).astype('int32')
    for i in range(len(cap_embs)):
        cap = cap_embs[i:(i + 1)]
        # Dot-product similarity of caption i against every image embedding.
        simi = (img_embs * cap).sum(1)
        if (i % 50) == 0:
            # Progress check: where does the ground-truth image currently rank?
            (scoret, topt) = torch.sort(simi, descending=True)
            topt = topt.cpu().numpy().copy()
            print(i, np.where(topt == (i // 5)))
        # BUG FIX: `i / 5` is float division in Python 3 and a float is not a
        # valid tensor index; caption i's ground-truth image is i // 5.  Also
        # use an explicit -inf literal instead of relying on a global `inf`.
        simi[i // 5] = float('-inf')
        (score, top) = torch.topk(simi, totalsavecandi)
        t2ihn[i] = top.cpu().numpy().copy().astype('int32')
    np.save('../offlinecandidates/t2i_coco.npy', t2ihn)
def test_arraytype_9():
    """Round trip: the type string of a tuple-record array must parse back
    to a datashape whose string form is identical."""
    original = str(ak.Array([(1, 1.1), (2, 2.2), (3, 3.3)]).type)
    reparsed = ak.types.from_datashape(original, highlevel=False)
    assert str(reparsed) == original
def bert2vit_ckpt_rename(state_dict, layerCount=8):
    """Convert a BERT encoder checkpoint into the key layout of a ViT-style model.

    For each of the first `layerCount` layers, the separate query/key/value
    projection weights and biases are concatenated (dim 0) into a single
    fused `blocks.<i>.attn.qkv` tensor.  All other attention/MLP/LayerNorm
    tensors are copied under renamed keys, with the 'bert.encoder.layer'
    prefix mapped to 'blocks'.  Returns a new OrderedDict; the input is not
    modified.
    """
    converted = OrderedDict({})
    for layer_idx in range(0, layerCount):
        prefix = 'bert.encoder.layer.' + str(layer_idx) + '.attention.self.'
        # Fuse the three projection matrices along the output dimension.
        qkv_weight = torch.cat(
            (state_dict[prefix + 'query.weight'],
             state_dict[prefix + 'key.weight'],
             state_dict[prefix + 'value.weight']), 0)
        qkv_bias = torch.cat(
            (state_dict[prefix + 'query.bias'],
             state_dict[prefix + 'key.bias'],
             state_dict[prefix + 'value.bias']), 0)
        converted['blocks.' + str(layer_idx) + '.attn.qkv.weight'] = qkv_weight
        converted['blocks.' + str(layer_idx) + '.attn.qkv.bias'] = qkv_bias
    for key in state_dict.keys():
        # Translate the remaining per-layer tensors; keys that match none of
        # the patterns (e.g. the raw q/k/v tensors handled above) are skipped.
        if 'attention.output.dense' in key:
            new_key = key.replace('attention.output.dense', 'attn.proj')
        elif 'attention.output.LayerNorm' in key:
            new_key = key.replace('attention.output.LayerNorm', 'norm1')
        elif 'intermediate.dense' in key:
            new_key = key.replace('intermediate.dense', 'mlp.fc1')
        elif ('output.dense' in key) and ('attention' not in key):
            new_key = key.replace('output.dense', 'mlp.fc2')
        elif 'output.LayerNorm' in key:
            new_key = key.replace('output.LayerNorm', 'norm2')
        else:
            continue
        converted[new_key.replace('bert.encoder.layer', 'blocks')] = state_dict[key]
    return converted
def remove_index_types(G, to_track, to_reverse_track):
    """Replace integer index types with '<TYP>' in extract/insertelement stmts.

    Walks every edge of G; for statements matching '<%ID> = extractelement'
    or '<%ID> = insertelement', rewrites the 'i<bits> ' type token to
    '<TYP> '.  The `to_track` statement is followed through the rewrite (its
    updated form is returned) and matches of `to_reverse_track` are reported.
    Debug progress is printed to stdout.
    """
    tracking = False
    for edge in G.edges(data=True):
        data = edge[2]
        if data['stmt'] == to_track:
            print('Found ', to_track)
            tracking = True
        matches_pattern = (
            re.match('<%ID> = extractelement', data['stmt']) is not None
            or re.match('<%ID> = insertelement', data['stmt']) is not None
        )
        if matches_pattern:
            data['stmt'] = re.sub('i\\d+ ', '<TYP> ', data['stmt'])
        if tracking:
            # Report whether the tracked statement changed, then follow it.
            if data['stmt'] == to_track:
                print('... remained unchanged by "remove index types"')
            else:
                print('became', data['stmt'], 'in "remove index types"')
            to_track = data['stmt']
            tracking = False
        if data['stmt'] == to_reverse_track:
            print('Found ', data['stmt'])
    return (G, to_track)
class TFMultipleChoiceModelOutput(ModelOutput):
    """Output container for TensorFlow multiple-choice models.

    A plain field bundle in the ModelOutput style: an optional loss, the
    per-choice classification logits, and optional per-layer hidden states
    and attention weights.
    """
    # Classification loss; presumably only populated when labels are supplied -- verify against the model's call().
    loss: Optional[tf.Tensor] = None
    # Classification scores, one score per answer choice (exact shape not derivable here).
    logits: tf.Tensor = None
    # Per-layer hidden states; NOTE(review): likely gated by output_hidden_states -- confirm.
    hidden_states: Optional[Tuple[tf.Tensor]] = None
    # Per-layer attention weights; NOTE(review): likely gated by output_attentions -- confirm.
    attentions: Optional[Tuple[tf.Tensor]] = None
class Hypothesis():
    """A single decoding hypothesis: a target sentence plus its scores.

    Hypotheses order by `total_score` (via `<`), so they can be sorted or
    kept in a heap during search.
    """

    def __init__(self, trgt_sentence, total_score, score_breakdown=None, base_score=0.0, statistics=None):
        """Store the sentence and its scores.

        Args:
            trgt_sentence: sequence of target tokens/ids.
            total_score: overall score used for comparison.
            score_breakdown: optional per-component scores (defaults to a
                fresh empty list -- fixed from a shared mutable default `[]`,
                which leaked appended entries across instances).
            base_score: starting score before component contributions.
            statistics: optional opaque extra data.
        """
        self.trgt_sentence = trgt_sentence
        self.total_score = total_score
        self.score_breakdown = [] if score_breakdown is None else score_breakdown
        self.base_score = base_score
        self.statistics = statistics

    def __repr__(self):
        """Render as 'tok tok ... (total_score)'."""
        return ('%s (%f)' % (' '.join((str(w) for w in self.trgt_sentence)), self.total_score))

    def __len__(self):
        """Length of the target sentence."""
        return len(self.trgt_sentence)

    def __lt__(self, other):
        """Order hypotheses by total score (ascending)."""
        return (self.total_score < other.total_score)
def clean_no_orgnr(df: Union[(pd.DataFrame, dd.DataFrame)], column: str, output_format: str='standard', inplace: bool=False, errors: str='coerce', progress: bool=True) -> pd.DataFrame:
    """Clean a column of Norwegian organisation numbers (orgnr) in a DataFrame.

    Each value in `column` is reformatted by the module-level `_format`
    helper and the result is stored in a new `<column>_clean` column.  With
    `inplace=True`, the cleaned values overwrite the original column, which
    is then itself renamed to `<column>_clean`.

    Args:
        df: pandas or dask DataFrame containing the column to clean.
        column: name of the column holding orgnr values.
        output_format: 'compact' or 'standard' (anything else raises).
        inplace: replace the original column instead of adding a new one.
        errors: error-handling mode forwarded to `_format` -- presumably
            'coerce'/'raise'/'ignore' semantics; verify against `_format`.
        progress: show a dask progress bar while computing.

    Returns:
        The cleaned DataFrame, computed back to pandas.

    Raises:
        ValueError: if `output_format` is neither 'compact' nor 'standard'.
    """
    if (output_format not in {'compact', 'standard'}):
        raise ValueError(f'output_format {output_format} is invalid. It needs to be "compact" or "standard".')
    # Work on a dask frame so the per-partition map below parallelizes.
    df = to_dask(df)
    # Each element becomes a tuple; item 0 is the cleaned value.
    df['clean_code_tup'] = df[column].map_partitions((lambda srs: [_format(x, output_format, errors) for x in srs]), meta=object)
    df = df.assign(_temp_=df['clean_code_tup'].map(itemgetter(0)))
    df = df.rename(columns={'_temp_': f'{column}_clean'})
    df = df.drop(columns=['clean_code_tup'])
    if inplace:
        # Overwrite the source column with the cleaned values, drop the helper
        # column, then rename the source column itself to '<column>_clean'.
        # NOTE(review): the final rename means even the inplace result exposes
        # the '<column>_clean' name -- looks deliberate, but confirm upstream.
        df[column] = df[f'{column}_clean']
        df = df.drop(columns=f'{column}_clean')
        df = df.rename(columns={column: f'{column}_clean'})
    # Materialize back to pandas, optionally with a progress bar.
    with ProgressBar(minimum=1, disable=(not progress)):
        df = df.compute()
    return df
def mkdirs(config, trial_idx):
    """Create the per-trial eval/log/save directory tree and record it on config.

    The layout is <root>/<model_name>/<mid>/<task-config-run-trial> for each
    of the roots 'evals', 'logs' and 'saves', where mid is `config.lang`
    (suffixed '-10k' when `config.large`).  The resulting subdirectory paths
    are stored on `config.eval_dir`, `config.log_dir` and `config.save_dir`.
    Eval and log subdirectories are always created; the save subdirectory
    only when `config.train` is set.  When this is a fresh training run
    (`config.train` and not `config.load`), an existing subdirectory is wiped
    and recreated.

    Fixes the original's inconsistent os.mkdir/os.makedirs usage between the
    eval and log branches by centralizing creation in one helper.
    """
    # A fresh training run starts from clean subdirectories.
    reset = config.train and (not config.load)

    def _prepare(parent, name):
        """Ensure `parent` exists and create `parent/name`, wiping it first on reset."""
        os.makedirs(parent, exist_ok=True)
        subdir = os.path.join(parent, name)
        if os.path.exists(subdir):
            if reset:
                shutil.rmtree(subdir)
                os.mkdir(subdir)
        else:
            os.mkdir(subdir)
        return subdir

    # The three top-level roots are always created, even for eval-only runs
    # (matches the original behavior).
    for root in ('evals', 'logs', 'saves'):
        os.makedirs(root, exist_ok=True)

    # Identifier components, zero-padded to two characters.
    config_id = str(config.config_id).zfill(2)
    run_id = str(config.run_id).zfill(2)
    trial_id = str(trial_idx).zfill(2)
    task = config.task.zfill(2)
    mid = config.lang
    if config.large:
        mid += '-10k'
    subdir_name = '-'.join([task, config_id, run_id, trial_id])

    model_name = config.model_name
    config.eval_dir = _prepare(os.path.join('evals', model_name, mid), subdir_name)
    config.log_dir = _prepare(os.path.join('logs', model_name, mid), subdir_name)
    # The save path is always recorded, but only created for training runs.
    save_parent = os.path.join('saves', model_name, mid)
    config.save_dir = os.path.join(save_parent, subdir_name)
    if config.train:
        _prepare(save_parent, subdir_name)
def test_arit3():
    """Repeating a plain str by a Symbol must raise TypeError."""
    x = Symbol('x')
    y = Symbol('y')
    raises(TypeError, lambda: 'x' * x)
def register_types(module):
root_module = module.get_root()
module.add_enum('QueueSizeUnit', ['PACKETS', 'BYTES'], import_from_module='ns.network')
module.add_enum('LogLevel', ['LOG_NONE', 'LOG_ERROR', 'LOG_LEVEL_ERROR', 'LOG_WARN', 'LOG_LEVEL_WARN', 'LOG_DEBUG', 'LOG_LEVEL_DEBUG', 'LOG_INFO', 'LOG_LEVEL_INFO', 'LOG_FUNCTION', 'LOG_LEVEL_FUNCTION', 'LOG_LOGIC', 'LOG_LEVEL_LOGIC', 'LOG_ALL', 'LOG_LEVEL_ALL', 'LOG_PREFIX_FUNC', 'LOG_PREFIX_TIME', 'LOG_PREFIX_NODE', 'LOG_PREFIX_LEVEL', 'LOG_PREFIX_ALL'], import_from_module='ns.core')
module.add_class('Address', import_from_module='ns.network')
module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network')
module.add_class('AsciiTraceHelper', import_from_module='ns.network')
module.add_class('AsciiTraceHelperForDevice', allow_subclassing=True, import_from_module='ns.network')
module.add_class('AttributeConstructionList', import_from_module='ns.core')
module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
typehandlers.add_type_alias(u'std::list< ns3::AttributeConstructionList::Item > const_iterator', u'ns3::AttributeConstructionList::CIterator')
typehandlers.add_type_alias(u'std::list< ns3::AttributeConstructionList::Item > const_iterator*', u'ns3::AttributeConstructionList::CIterator*')
typehandlers.add_type_alias(u'std::list< ns3::AttributeConstructionList::Item > const_iterator&', u'ns3::AttributeConstructionList::CIterator&')
module.add_class('Buffer', import_from_module='ns.network')
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer'])
module.add_class('ByteTagIterator', import_from_module='ns.network')
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator'])
module.add_class('ByteTagList', import_from_module='ns.network')
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList'])
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator'])
module.add_class('CallbackBase', import_from_module='ns.core')
module.add_class('DataRate', import_from_module='ns.network')
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor'])
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeChecker'])
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeValue'])
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase'])
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::EventImpl'])
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation'])
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::NixVector'])
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::OutputStreamWrapper'])
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::Packet'])
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::QueueItem'])
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor'])
module.add_class('EventId', import_from_module='ns.core')
module.add_class('Hasher', import_from_module='ns.core')
module.add_class('Ipv4Address', import_from_module='ns.network')
root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address'])
module.add_class('Ipv4Mask', import_from_module='ns.network')
module.add_class('Ipv6Address', import_from_module='ns.network')
root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address'])
module.add_class('Ipv6Prefix', import_from_module='ns.network')
module.add_class('LogComponent', import_from_module='ns.core')
typehandlers.add_type_alias(u'std::map< std::string, ns3::LogComponent * >', u'ns3::LogComponent::ComponentList')
typehandlers.add_type_alias(u'std::map< std::string, ns3::LogComponent * >*', u'ns3::LogComponent::ComponentList*')
typehandlers.add_type_alias(u'std::map< std::string, ns3::LogComponent * >&', u'ns3::LogComponent::ComponentList&')
module.add_class('Mac48Address', import_from_module='ns.network')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Mac48Address )', u'ns3::Mac48Address::TracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Mac48Address )*', u'ns3::Mac48Address::TracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Mac48Address )&', u'ns3::Mac48Address::TracedCallback&')
root_module['ns3::Mac48Address'].implicitly_converts_to(root_module['ns3::Address'])
module.add_class('Mac8Address', import_from_module='ns.network')
root_module['ns3::Mac8Address'].implicitly_converts_to(root_module['ns3::Address'])
module.add_class('NetDeviceContainer', import_from_module='ns.network')
typehandlers.add_type_alias(u'std::vector< ns3::Ptr< ns3::NetDevice > > const_iterator', u'ns3::NetDeviceContainer::Iterator')
typehandlers.add_type_alias(u'std::vector< ns3::Ptr< ns3::NetDevice > > const_iterator*', u'ns3::NetDeviceContainer::Iterator*')
typehandlers.add_type_alias(u'std::vector< ns3::Ptr< ns3::NetDevice > > const_iterator&', u'ns3::NetDeviceContainer::Iterator&')
module.add_class('NodeContainer', import_from_module='ns.network')
typehandlers.add_type_alias(u'std::vector< ns3::Ptr< ns3::Node > > const_iterator', u'ns3::NodeContainer::Iterator')
typehandlers.add_type_alias(u'std::vector< ns3::Ptr< ns3::Node > > const_iterator*', u'ns3::NodeContainer::Iterator*')
typehandlers.add_type_alias(u'std::vector< ns3::Ptr< ns3::Node > > const_iterator&', u'ns3::NodeContainer::Iterator&')
module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
module.add_class('ObjectDeleter', import_from_module='ns.core')
module.add_class('ObjectFactory', import_from_module='ns.core')
module.add_class('PacketMetadata', import_from_module='ns.network')
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
module.add_enum('ItemType', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network')
module.add_class('ItemIterator', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
module.add_class('PacketTagIterator', import_from_module='ns.network')
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagIterator'])
module.add_class('PacketTagList', import_from_module='ns.network')
module.add_class('TagData', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagList'])
module.add_class('ParameterLogger', import_from_module='ns.core')
module.add_class('PcapFile', import_from_module='ns.network')
module.add_class('PcapHelper', import_from_module='ns.network')
module.add_enum('DataLinkType', ['DLT_NULL', 'DLT_EN10MB', 'DLT_PPP', 'DLT_RAW', 'DLT_IEEE802_11', 'DLT_LINUX_SLL', 'DLT_PRISM_HEADER', 'DLT_IEEE802_11_RADIO', 'DLT_IEEE802_15_4', 'DLT_NETLINK'], outer_class=root_module['ns3::PcapHelper'], import_from_module='ns.network')
module.add_class('PcapHelperForDevice', allow_subclassing=True, import_from_module='ns.network')
module.add_class('PointToPointHelper', parent=[root_module['ns3::PcapHelperForDevice'], root_module['ns3::AsciiTraceHelperForDevice']])
module.add_class('QueueSize', import_from_module='ns.network')
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('Simulator', destructor_visibility='private', import_from_module='ns.core')
module.add_enum('', ['NO_CONTEXT'], outer_class=root_module['ns3::Simulator'], import_from_module='ns.core')
module.add_class('Tag', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
module.add_class('TagBuffer', import_from_module='ns.network')
module.add_class('TimeWithUnit', import_from_module='ns.core')
module.add_class('TracedValue', import_from_module='ns.core', template_parameters=['unsigned int'])
module.add_class('TypeId', import_from_module='ns.core')
module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
module.add_enum('SupportLevel', ['SUPPORTED', 'DEPRECATED', 'OBSOLETE'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
typehandlers.add_type_alias(u'uint32_t', u'ns3::TypeId::hash_t')
typehandlers.add_type_alias(u'uint32_t*', u'ns3::TypeId::hash_t*')
typehandlers.add_type_alias(u'uint32_t&', u'ns3::TypeId::hash_t&')
module.add_class('empty', import_from_module='ns.core')
module.add_class('int64x64_t', import_from_module='ns.core')
module.add_enum('impl_type', ['int128_impl', 'cairo_impl', 'ld_impl'], outer_class=root_module['ns3::int64x64_t'], import_from_module='ns.core')
module.add_class('Chunk', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
module.add_class('Header', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
module.add_class('PcapFileWrapper', import_from_module='ns.network', parent=root_module['ns3::Object'])
module.add_class('PppHeader', parent=root_module['ns3::Header'])
module.add_class('QueueBase', import_from_module='ns.network', parent=root_module['ns3::Object'])
module.add_class('RandomVariableStream', import_from_module='ns.core', parent=root_module['ns3::Object'])
module.add_class('SequentialRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::EventImpl', 'ns3::empty', 'ns3::DefaultDeleter<ns3::EventImpl>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::NixVector', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NixVector>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::OutputStreamWrapper', 'ns3::empty', 'ns3::DefaultDeleter<ns3::OutputStreamWrapper>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Packet', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Packet>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::QueueItem', 'ns3::empty', 'ns3::DefaultDeleter<ns3::QueueItem>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('Time', import_from_module='ns.core')
module.add_enum('Unit', ['Y', 'D', 'H', 'MIN', 'S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Time )', u'ns3::Time::TracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Time )*', u'ns3::Time::TracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Time )&', u'ns3::Time::TracedCallback&')
root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t'])
module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
module.add_class('Trailer', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
module.add_class('TriangularRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
module.add_class('UniformRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
module.add_class('WeibullRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
module.add_class('ZetaRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
module.add_class('ZipfRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
module.add_class('BooleanChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
module.add_class('BooleanValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
module.add_class('Channel', import_from_module='ns.network', parent=root_module['ns3::Object'])
module.add_class('ConstantRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
module.add_class('DataRateChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
module.add_class('DataRateValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
module.add_class('DeterministicRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
module.add_class('DoubleValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
module.add_class('EmpiricalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
module.add_class('EmptyAttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::AttributeAccessor'])
module.add_class('EmptyAttributeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
module.add_class('EnumChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
module.add_class('EnumValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
module.add_class('ErlangRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
module.add_class('ErrorModel', import_from_module='ns.network', parent=root_module['ns3::Object'])
module.add_class('EventImpl', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
module.add_class('ExponentialRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
module.add_class('GammaRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
module.add_class('IntegerValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
module.add_class('Ipv4AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
module.add_class('Ipv4MaskValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
module.add_class('Ipv6PrefixChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
module.add_class('Ipv6PrefixValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
module.add_class('ListErrorModel', import_from_module='ns.network', parent=root_module['ns3::ErrorModel'])
module.add_class('LogNormalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
module.add_class('Mac48AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
module.add_class('Mac48AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
module.add_class('NetDevice', import_from_module='ns.network', parent=root_module['ns3::Object'])
module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice'], import_from_module='ns.network')
typehandlers.add_type_alias(u'void ( * ) ( )', u'ns3::NetDevice::LinkChangeTracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( )*', u'ns3::NetDevice::LinkChangeTracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( )&', u'ns3::NetDevice::LinkChangeTracedCallback&')
typehandlers.add_type_alias(u'ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', u'ns3::NetDevice::ReceiveCallback')
typehandlers.add_type_alias(u'ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >*', u'ns3::NetDevice::ReceiveCallback*')
typehandlers.add_type_alias(u'ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >&', u'ns3::NetDevice::ReceiveCallback&')
typehandlers.add_type_alias(u'ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', u'ns3::NetDevice::PromiscReceiveCallback')
typehandlers.add_type_alias(u'ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >*', u'ns3::NetDevice::PromiscReceiveCallback*')
typehandlers.add_type_alias(u'ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >&', u'ns3::NetDevice::PromiscReceiveCallback&')
module.add_class('NixVector', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
module.add_class('Node', import_from_module='ns.network', parent=root_module['ns3::Object'])
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', u'ns3::Node::ProtocolHandler')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >*', u'ns3::Node::ProtocolHandler*')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >&', u'ns3::Node::ProtocolHandler&')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', u'ns3::Node::DeviceAdditionListener')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >*', u'ns3::Node::DeviceAdditionListener*')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >&', u'ns3::Node::DeviceAdditionListener&')
module.add_class('NormalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
module.add_class('OutputStreamWrapper', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >'])
module.add_class('Packet', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const > )', u'ns3::Packet::TracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const > )*', u'ns3::Packet::TracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const > )&', u'ns3::Packet::TracedCallback&')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Address const & )', u'ns3::Packet::AddressTracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Address const & )*', u'ns3::Packet::AddressTracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Address const & )&', u'ns3::Packet::AddressTracedCallback&')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const > const, ns3::Address const &, ns3::Address const & )', u'ns3::Packet::TwoAddressTracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const > const, ns3::Address const &, ns3::Address const & )*', u'ns3::Packet::TwoAddressTracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const > const, ns3::Address const &, ns3::Address const & )&', u'ns3::Packet::TwoAddressTracedCallback&')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Mac48Address )', u'ns3::Packet::Mac48AddressTracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Mac48Address )*', u'ns3::Packet::Mac48AddressTracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Mac48Address )&', u'ns3::Packet::Mac48AddressTracedCallback&')
typehandlers.add_type_alias(u'void ( * ) ( uint32_t, uint32_t )', u'ns3::Packet::SizeTracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( uint32_t, uint32_t )*', u'ns3::Packet::SizeTracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( uint32_t, uint32_t )&', u'ns3::Packet::SizeTracedCallback&')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, double )', u'ns3::Packet::SinrTracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, double )*', u'ns3::Packet::SinrTracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, double )&', u'ns3::Packet::SinrTracedCallback&')
module.add_class('ParetoRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
module.add_class('PointToPointChannel', parent=root_module['ns3::Channel'])
module.add_class('PointToPointNetDevice', parent=root_module['ns3::NetDevice'])
module.add_class('PointToPointRemoteChannel', parent=root_module['ns3::PointToPointChannel'])
module.add_class('Queue', import_from_module='ns.network', template_parameters=['ns3::Packet'], parent=root_module['ns3::QueueBase'])
typehandlers.add_type_alias(u'ns3::Packet', u'ns3::Queue< ns3::Packet > ItemType')
typehandlers.add_type_alias(u'ns3::Packet*', u'ns3::Queue< ns3::Packet > ItemType*')
typehandlers.add_type_alias(u'ns3::Packet&', u'ns3::Queue< ns3::Packet > ItemType&')
module.add_typedef(root_module['ns3::Packet'], 'ItemType')
module.add_class('Queue', import_from_module='ns.network', template_parameters=['ns3::QueueDiscItem'], parent=root_module['ns3::QueueBase'])
typehandlers.add_type_alias(u'ns3::QueueDiscItem', u'ns3::Queue< ns3::QueueDiscItem > ItemType')
typehandlers.add_type_alias(u'ns3::QueueDiscItem*', u'ns3::Queue< ns3::QueueDiscItem > ItemType*')
typehandlers.add_type_alias(u'ns3::QueueDiscItem&', u'ns3::Queue< ns3::QueueDiscItem > ItemType&')
module.add_class('QueueItem', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> >'])
module.add_enum('Uint8Values', ['IP_DSFIELD'], outer_class=root_module['ns3::QueueItem'], import_from_module='ns.network')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::QueueItem const > )', u'ns3::QueueItem::TracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::QueueItem const > )*', u'ns3::QueueItem::TracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::QueueItem const > )&', u'ns3::QueueItem::TracedCallback&')
module.add_class('QueueSizeChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
module.add_class('QueueSizeValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
module.add_class('RateErrorModel', import_from_module='ns.network', parent=root_module['ns3::ErrorModel'])
module.add_enum('ErrorUnit', ['ERROR_UNIT_BIT', 'ERROR_UNIT_BYTE', 'ERROR_UNIT_PACKET'], outer_class=root_module['ns3::RateErrorModel'], import_from_module='ns.network')
module.add_class('ReceiveListErrorModel', import_from_module='ns.network', parent=root_module['ns3::ErrorModel'])
module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
module.add_class('UintegerValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
module.add_class('AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
module.add_class('AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
module.add_class('BinaryErrorModel', import_from_module='ns.network', parent=root_module['ns3::ErrorModel'])
module.add_class('BurstErrorModel', import_from_module='ns.network', parent=root_module['ns3::ErrorModel'])
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['ns3::ObjectBase *', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<const ns3::Packet>', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Time', 'ns3::Time', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<const ns3::Packet>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<const ns3::QueueDiscItem>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<const ns3::Packet>', 'unsigned short', 'const ns3::Address &', 'const ns3::Address &', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'unsigned int', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
module.add_class('QueueDiscItem', import_from_module='ns.network', parent=root_module['ns3::QueueItem'])
module.add_container('std::map< std::string, ns3::LogComponent * >', ('std::string', 'ns3::LogComponent *'), container_type=u'map')
module.add_container('std::list< unsigned int >', 'unsigned int', container_type=u'list')
typehandlers.add_type_alias(u'void ( * ) ( std::ostream & )', u'ns3::TimePrinter')
typehandlers.add_type_alias(u'void ( * ) ( std::ostream & )*', u'ns3::TimePrinter*')
typehandlers.add_type_alias(u'void ( * ) ( std::ostream & )&', u'ns3::TimePrinter&')
typehandlers.add_type_alias(u'void ( * ) ( std::ostream & )', u'ns3::NodePrinter')
typehandlers.add_type_alias(u'void ( * ) ( std::ostream & )*', u'ns3::NodePrinter*')
typehandlers.add_type_alias(u'void ( * ) ( std::ostream & )&', u'ns3::NodePrinter&')
nested_module = module.add_cpp_namespace('FatalImpl')
register_types_ns3_FatalImpl(nested_module)
nested_module = module.add_cpp_namespace('Hash')
register_types_ns3_Hash(nested_module)
nested_module = module.add_cpp_namespace('TracedValueCallback')
register_types_ns3_TracedValueCallback(nested_module)
nested_module = module.add_cpp_namespace('internal')
register_types_ns3_internal(nested_module) |
def run_job(logger, opt, output_dir, output_dir_ckpt, train):
    """Run one training job in a subprocess on a freshly allocated device.

    Allocates a device, spawns `train` in a `multiprocessing.Process` with
    per-job output/checkpoint directories, waits for it to finish, and returns
    the stats the child placed in the shared dict. The device is always freed,
    even on failure.

    Args:
        logger: logger used for progress messages.
        opt: job options dict; must contain 'job_id'. A copy is made with
            'device' set to the allocated device id (the caller's dict is
            not mutated).
        output_dir: base directory for job outputs.
        output_dir_ckpt: base directory for job checkpoints.
        train: callable run in the child process with signature
            (opt, output_dir, output_dir_thread, output_dir_thread_ckpt,
            return_dict).

    Returns:
        The value stored by `train` under return_dict['stats'].
    """
    device_id = allocate_device()
    # Copy + override so the caller's opt dict is left untouched.
    opt = {**opt, 'device': device_id}
    logger.info('new job: job_id={}, device_id={}'.format(opt['job_id'], opt['device']))
    try:
        logger.info('spawning process: job_id={}, device_id={}'.format(opt['job_id'], opt['device']))
        output_dir_thread = os.path.join(output_dir, str(opt['job_id']))
        os.makedirs(output_dir_thread, exist_ok=True)
        output_dir_thread_ckpt = os.path.join(output_dir_ckpt, str(opt['job_id']))
        os.makedirs(output_dir_thread_ckpt, exist_ok=True)
        # Serialize Manager creation / process start across concurrent jobs.
        # Using `with` (instead of acquire/release in try/finally) guarantees
        # we never attempt to release a lock we failed to acquire — the old
        # code released in a finally that also covered the makedirs calls.
        with run_job_lock:
            manager = multiprocessing.Manager()
            return_dict = manager.dict()
            p = multiprocessing.Process(
                target=train,
                args=(opt, output_dir, output_dir_thread, output_dir_thread_ckpt, return_dict))
            p.start()
        p.join()
        logger.info('finished process: job_id={}, device_id={}'.format(opt['job_id'], opt['device']))
        return return_dict['stats']
    finally:
        # Release the device no matter how the job ended.
        free_device(device_id)
class TestSolveLyapunov:
    """Shared test cases for solve_continuous_lyapunov / solve_discrete_lyapunov.

    Each entry of `cases` is an (a, q) pair fed to both solvers; the checks
    verify the defining Lyapunov equations rather than a precomputed solution.
    """

    # 11x11 companion-style complex matrix: decaying weights in the first row,
    # shifted identity below (a[i+1, i] = 1).
    _a11 = np.zeros((11, 11), dtype=complex)
    _a11[0] = [0.1, 0.091, 0.082, 0.073, 0.064, 0.055,
               0.046, 0.037, 0.028, 0.019, 0.01]
    _a11[1:, :-1] = np.eye(10)

    cases = [
        # real a and q
        (np.array([[1, 2], [3, 4]]),
         np.array([[9, 10], [11, 12]])),
        # complex a and q
        (np.array([[1.0 + 1j, 2.0], [3.0 - 4j, 5.0]]),
         np.array([[2.0 - 2j, 2.0 + 2j], [-1.0 - 1j, 2.0]])),
        # real a, complex q
        (np.array([[1.0, 2.0], [3.0, 5.0]]),
         np.array([[2.0 - 2j, 2.0 + 2j], [-1.0 - 1j, 2.0]])),
        # complex a, real q
        (np.array([[1.0 + 1j, 2.0], [3.0 - 4j, 5.0]]),
         np.array([[2.0, 2.0], [-1.0, 2.0]])),
        # larger integer-valued system
        (np.array([[3, 9, 5, 1, 4], [1, 2, 3, 8, 4], [4, 6, 6, 6, 3],
                   [1, 5, 2, 0, 7], [5, 3, 3, 1, 5]]),
         np.array([[2, 4, 1, 0, 1], [4, 1, 0, 2, 0], [1, 0, 3, 0, 3],
                   [0, 2, 0, 1, 0], [1, 0, 3, 0, 4]])),
        (_a11, np.eye(11)),
        # np.matrix inputs; q is the rank-1 outer product [[0,0],[0,9]].
        # (The original text had lost the `*` operators here, which was a
        # syntax error.)
        (matrix([[0, 1], [-1 / 2, -1]]),
         matrix([0, 3]).T * matrix([0, 3]).T.T),
        # same system, q converted to a plain ndarray
        (matrix([[0, 1], [-1 / 2, -1]]),
         np.array(matrix([0, 3]).T * matrix([0, 3]).T.T)),
    ]
    del _a11  # keep the class namespace clean

    def test_continuous_squareness_and_shape(self):
        """Non-square or shape-mismatched inputs must raise ValueError."""
        nsq = np.ones((3, 2))
        sq = np.eye(3)
        assert_raises(ValueError, solve_continuous_lyapunov, nsq, sq)
        assert_raises(ValueError, solve_continuous_lyapunov, sq, nsq)
        assert_raises(ValueError, solve_continuous_lyapunov, sq, np.eye(2))

    def check_continuous_case(self, a, q):
        """Verify a@x + x@a^H == q for the continuous solver."""
        x = solve_continuous_lyapunov(a, q)
        assert_array_almost_equal(np.dot(a, x) + np.dot(x, a.conj().transpose()), q)

    def check_discrete_case(self, a, q, method=None):
        """Verify a@x@a^H - x == -q for the discrete solver."""
        x = solve_discrete_lyapunov(a, q, method=method)
        assert_array_almost_equal(np.dot(np.dot(a, x), a.conj().transpose()) - x, -1.0 * q)

    def test_cases(self):
        """Run every (a, q) pair through both solvers and all discrete methods."""
        for a, q in self.cases:
            self.check_continuous_case(a, q)
            self.check_discrete_case(a, q)
            self.check_discrete_case(a, q, method='direct')
            self.check_discrete_case(a, q, method='bilinear')
class DeformConv(nn.Module):
    """Deformable convolution layer.

    Holds only the convolution weight; the sampling offsets are produced
    externally and passed to :meth:`forward`. Bias is not supported by the
    underlying ``deform_conv`` op.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, deformable_groups=1, bias=False):
        super(DeformConv, self).__init__()
        # The deform_conv op has no bias term.
        assert (not bias)
        # Fixed assertion messages: they previously read "cannot be divisible",
        # which stated the opposite of the failure condition.
        assert ((in_channels % groups) == 0), 'in_channels {} is not divisible by groups {}'.format(in_channels, groups)
        assert ((out_channels % groups) == 0), 'out_channels {} is not divisible by groups {}'.format(out_channels, groups)
        self.in_channels = in_channels
        self.out_channels = out_channels
        # Normalize scalar hyper-parameters to (h, w) pairs.
        self.kernel_size = _pair(kernel_size)
        self.stride = _pair(stride)
        self.padding = _pair(padding)
        self.dilation = _pair(dilation)
        self.groups = groups
        self.deformable_groups = deformable_groups
        # Weight layout: (out_ch, in_ch // groups, kh, kw), as for nn.Conv2d.
        self.weight = nn.Parameter(torch.Tensor(out_channels, (in_channels // self.groups), *self.kernel_size))
        self.reset_parameters()

    def reset_parameters(self):
        """Uniform init in [-1/sqrt(fan_in), 1/sqrt(fan_in)]."""
        n = self.in_channels
        for k in self.kernel_size:
            n *= k
        stdv = (1.0 / math.sqrt(n))
        self.weight.data.uniform_((- stdv), stdv)

    def forward(self, x, offset):
        """Apply deformable convolution to `x` using externally supplied `offset`."""
        return deform_conv(x, offset, self.weight, self.stride, self.padding, self.dilation, self.groups, self.deformable_groups)
class SlopeTrigger(MetricTrigger, StatefulTriggerMixin):
    """Fires when the least-squares slope of the last `window_size` metric
    values falls inside the closed interval `range`."""

    def __init__(self, range, window_size=10):
        # `range` is an inclusive (low, high) interval for the fitted slope.
        self.range = range
        self.window_size = window_size
        # Bounded history; old values fall off automatically.
        self.vals = deque(maxlen=window_size)

    def __call__(self, new_value):
        self.vals.append(new_value)
        # Never fire until a full window of history has accumulated.
        if len(self.vals) < self.window_size:
            return False
        return self.range[0] <= self.slope() <= self.range[1]

    def slope(self):
        """Slope of the degree-1 polynomial fitted to the current window."""
        xs = range(self.window_size)
        fitted_slope, _intercept = np.polyfit(xs, self.vals, 1)
        return fitted_slope

    def reset(self):
        # Drop all accumulated history.
        self.vals = deque(maxlen=self.window_size)
def conv2d_transpose(inputs, num_output_channels, kernel_size, scope, stride=[1, 1], padding='SAME', use_xavier=True, stddev=0.001, weight_decay=0.0, activation_fn=tf.nn.relu, bn=False, bn_decay=None, is_training=None, is_dist=False):
    """2D transposed convolution (deconvolution) with bias and optional batch norm.

    Args:
        inputs: 4-D NHWC tensor with a fully static shape.
        num_output_channels: number of output feature channels.
        kernel_size: (kernel_h, kernel_w).
        scope: variable scope name.
        stride: (stride_h, stride_w).
        padding: 'SAME' or 'VALID'.
        use_xavier, stddev, weight_decay: weight-initialization options
            forwarded to `_variable_with_weight_decay`.
        activation_fn: applied last; pass None to disable.
        bn, bn_decay, is_training, is_dist: batch-normalization options.

    Returns:
        The 4-D output tensor.
    """
    with tf.variable_scope(scope):
        kernel_h, kernel_w = kernel_size
        in_channels = inputs.get_shape()[-1].value
        # NOTE: transposed-conv kernels are laid out [h, w, out_ch, in_ch],
        # i.e. output channels come before input channels.
        kernel = _variable_with_weight_decay(
            'weights',
            shape=[kernel_h, kernel_w, num_output_channels, in_channels],
            use_xavier=use_xavier,
            stddev=stddev,
            wd=weight_decay)
        stride_h, stride_w = stride

        def _deconv_dim(dim_size, stride_size, k, pad):
            # Inverse of the forward-conv output-size formula for one spatial dim.
            dim_size *= stride_size
            if pad == 'VALID' and dim_size is not None:
                dim_size += max(k - stride_size, 0)
            return dim_size

        static_shape = inputs.get_shape()
        batch_size = static_shape[0].value
        out_h = _deconv_dim(static_shape[1].value, stride_h, kernel_h, padding)
        out_w = _deconv_dim(static_shape[2].value, stride_w, kernel_w, padding)
        outputs = tf.nn.conv2d_transpose(
            inputs, kernel,
            [batch_size, out_h, out_w, num_output_channels],
            [1, stride_h, stride_w, 1],
            padding=padding)
        biases = _variable_on_cpu('biases', [num_output_channels], tf.constant_initializer(0.0))
        outputs = tf.nn.bias_add(outputs, biases)
        if bn:
            outputs = batch_norm_for_conv2d(outputs, is_training, bn_decay=bn_decay, scope='bn', is_dist=is_dist)
        if activation_fn is not None:
            outputs = activation_fn(outputs)
        return outputs
class E(nn.Module):
    """Encoder producing a pooled embedding plus per-batch mean/std statistics.

    Input has 6 channels (presumably two stacked 3-channel images -- TODO
    confirm against callers). Returns (e, mean, std): `e` is the batch-mean
    of the pooled features, `mean` and `std` are (1, 256, 1, 1) tensors from
    two spectrally-normalized linear heads, each averaged over the batch.
    """

    def __init__(self, num_features=64, activation=F.relu):
        super(E, self).__init__()
        self.num_features = num_features
        self.activation = activation
        # Five downsampling stages: 6 -> nf -> 2nf -> 4nf -> 8nf -> 8nf.
        self.block1 = OptimizedBlock(6, num_features)
        self.block2 = Block(num_features, num_features * 2, activation=activation, downsample=True)
        self.block3 = Block(num_features * 2, num_features * 4, activation=activation, downsample=True)
        self.block4 = Block(num_features * 4, num_features * 8, activation=activation, downsample=True)
        self.block5 = Block(num_features * 8, num_features * 8, activation=activation, downsample=True)
        # Two heads: l6 feeds `mean`, l7 feeds `std`.
        self.l6 = utils.spectral_norm(nn.Linear(num_features * 8, 256))
        self.l7 = utils.spectral_norm(nn.Linear(num_features * 8, 256))
        self._initialize()

    def _initialize(self):
        init.xavier_uniform_(self.l6.weight.data)
        # NOTE(review): no `l_y` attribute is defined on this class, so this
        # branch is a no-op here; it appears carried over from a conditional
        # (class-projected) variant of the encoder. l7 is left at its default
        # initialization -- confirm whether that is intentional.
        maybe_l_y = getattr(self, 'l_y', None)
        if maybe_l_y is not None:
            init.xavier_uniform_(maybe_l_y.weight.data)

    def forward(self, x):
        feat = x
        for stage in (self.block1, self.block2, self.block3, self.block4, self.block5):
            feat = stage(feat)
        feat = self.activation(feat)
        # Global sum-pool over the spatial dimensions.
        feat = torch.sum(feat, dim=(2, 3))
        feat = self.activation(feat)
        # Batch-averaged embedding.
        e = torch.mean(feat, dim=0, keepdim=True)
        head_mean = self.l6(feat)
        shape = head_mean.size()
        assert (len(shape) == 2), 'size is not right'
        mean = torch.mean(head_mean.view(shape[0], shape[1], 1, 1), dim=0, keepdim=True)
        head_std = self.l7(feat)
        std = torch.mean(head_std.view(shape[0], shape[1], 1, 1), dim=0, keepdim=True)
        return (e, mean, std)
class MLPDuelingModel(Model):
    """Dueling-architecture MLP Q-network.

    Builds separate action-advantage and state-value MLP streams over the same
    input and combines them as Q(s, a) = V(s) + (A(s, a) - mean_a A(s, a)).
    """

    def __init__(self, output_dim, name=None, hidden_sizes=(32, 32), hidden_nonlinearity=tf.nn.relu, hidden_w_init=tf.contrib.layers.xavier_initializer, hidden_b_init=tf.zeros_initializer, output_nonlinearity=None, output_w_init=tf.contrib.layers.xavier_initializer, output_b_init=tf.zeros_initializer, layer_normalization=False):
        super().__init__(name)
        self._output_dim = output_dim
        self._hidden_sizes = hidden_sizes
        self._hidden_nonlinearity = hidden_nonlinearity
        self._hidden_w_init = hidden_w_init
        self._hidden_b_init = hidden_b_init
        self._output_nonlinearity = output_nonlinearity
        self._output_w_init = output_w_init
        self._output_b_init = output_b_init
        self._layer_normalization = layer_normalization

    def _build(self, state_input, name=None):
        """Build the dueling graph over `state_input` and return the Q values."""
        # Both streams share every hyper-parameter except output_dim and name.
        shared = dict(
            input_var=state_input,
            hidden_sizes=self._hidden_sizes,
            hidden_nonlinearity=self._hidden_nonlinearity,
            hidden_w_init=self._hidden_w_init,
            hidden_b_init=self._hidden_b_init,
            output_nonlinearity=self._output_nonlinearity,
            output_w_init=self._output_w_init,
            output_b_init=self._output_b_init,
            layer_normalization=self._layer_normalization)
        action_out = mlp(output_dim=self._output_dim, name='action_value', **shared)
        state_out = mlp(output_dim=1, name='state_value', **shared)
        # Center the advantages so the decomposition is identifiable.
        mean_advantage = tf.reduce_mean(action_out, 1)
        advantage = action_out - tf.expand_dims(mean_advantage, 1)
        return state_out + advantage
# Register the builder under the key 'modelnet40_cls'. The previous code
# called `_builder('modelnet40_cls')` as a bare statement, discarding the
# returned decorator, so the class was never actually registered.
@_builder('modelnet40_cls')
class ModelNetClassificationBuilder(MultiModalDatasetBuilder):
    """Dataset builder for ModelNet40 shape classification."""

    # Same dataset class serves both the train and eval splits.
    train_dataset_cls = ModelNetClassificationDataset
    eval_dataset_cls = ModelNetClassificationDataset

    DATASET_CONFIG_DICT = {'default': 'configs/datasets/modelnet40/defaults_cls.yaml'}
class Concatenate(Job):
    """Concatenate several (possibly gzipped) files into one gzipped output.

    With a single input, the input path itself is reused as the output and no
    work is needed; otherwise `zcat -f` merges all inputs into `out.gz`.
    """

    def __init__(self, inputs):
        assert inputs
        if isinstance(inputs, set):
            # Sets have no stable order; sort by string form for determinism.
            inputs = sorted(inputs, key=str)
        assert isinstance(inputs, list)
        if len(inputs) == 1:
            # Single file: pass it through unchanged (and drop it from the
            # remaining input list, matching the original behavior).
            self.out = inputs.pop()
        else:
            self.out = self.output_path('out.gz')
        for candidate in inputs:
            assert isinstance(candidate, (Path, str)), 'input to Concatenate is not a valid path'
        self.inputs = inputs

    def run(self):
        # zcat -f transparently handles both compressed and plain inputs.
        self.f_list = ' '.join(str(p) for p in self.inputs)
        self.sh('zcat -f {f_list} | gzip > {out}')

    def tasks(self):
        yield Task('run', rqmt={'mem': 3, 'time': 3})
def compose_system_compile_flags(is_posix: bool) -> list:
    """Return the interpreter's configured C compile flags, split into tokens.

    On non-POSIX platforms there is nothing to collect, so an empty list is
    returned. On POSIX, CFLAGS plus the configure-time CPPFLAGS/CFLAGS are
    joined and whitespace-split into individual flag strings.
    """
    if not is_posix:
        return []
    flag_vars = sysconfig.get_config_vars('CFLAGS', 'CONFIGURE_CPPFLAGS', 'CONFIGURE_CFLAGS')
    return ' '.join(flag_vars).split()
def habitat_to_mp3d(pt_habitat: np.ndarray) -> np.ndarray:
    """Rotate a point from the Habitat coordinate frame into the MP3D frame.

    Builds the quaternion that maps the +y axis onto the +z axis (Habitat is
    y-up, MP3D is z-up -- presumably; confirm against the dataset docs) and
    applies it to the point.
    """
    y_up = np.array([0.0, 1.0, 0.0])
    z_up = np.array([0.0, 0.0, 1.0])
    rotation = quat_from_two_vectors(y_up, z_up)
    return quat_rotate_vector(rotation, pt_habitat)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.