code stringlengths 101 5.91M |
|---|
def test_olsq_depth_normal_circstr_devtmp():
    """OLSQ in depth/normal mode on device_tmp with circuit_str should reach depth 14."""
    solver = OLSQ('depth', 'normal')
    solver.setdevice(device_tmp)
    solver.setprogram(circuit_str)
    # solve() returns a tuple; index 2 carries the resulting circuit depth.
    solution = solver.solve()
    assert solution[2] == 14
def splev(x, tck, der=0, ext=0):
    """Evaluate a B-spline or its derivative at the points in *x*.

    Parameters follow the FITPACK convention:
    x   : array-like evaluation points.
    tck : (t, c, k) tuple of knots, coefficients and spline degree.
    der : derivative order; must satisfy 0 <= der <= k.
    ext : out-of-domain behavior code, validated to be one of 0-3 below.

    Raises ValueError/TypeError on invalid input, mirroring the error
    codes returned by the underlying ``_fitpack._spl_`` routine.
    """
    (t, c, k) = tck
    try:
        # EAFP probe: a 2-D coefficient array means the spline is
        # parametric (one coefficient row per output dimension).
        c[0][0]
        parametric = True
    except Exception:
        parametric = False
    if parametric:
        # Parametric case: evaluate each coefficient row independently and
        # return the per-dimension results as a list.
        return list(map((lambda c, x=x, t=t, k=k, der=der: splev(x, [t, c, k], der, ext)), c))
    else:
        if (not (0 <= der <= k)):
            raise ValueError(('0<=der=%d<=k=%d must hold' % (der, k)))
        if (ext not in (0, 1, 2, 3)):
            raise ValueError(('ext = %s not in (0, 1, 2, 3) ' % ext))
        x = asarray(x)
        # Remember the input shape; FITPACK operates on a flat 1-D array,
        # and the result is reshaped back at the end.
        shape = x.shape
        x = atleast_1d(x).ravel()
        (y, ier) = _fitpack._spl_(x, der, t, c, k, ext)
        # Translate FITPACK error codes into Python exceptions.
        if (ier == 10):
            raise ValueError('Invalid input data')
        if (ier == 1):
            raise ValueError('Found x value not in the domain')
        if ier:
            raise TypeError('An error occurred')
        return y.reshape(shape)
def _prepare_gradient_if_op(fwd_op, input_names, output_names, then_grad_net, else_grad_net):
    """Build the gradient 'If' operator definition from the forward op.

    Copies the forward op, swaps in the gradient inputs/outputs and the
    gradient then/else subnets, and marks the result as a gradient op.
    """
    grad_op = caffe2_pb2.OperatorDef()
    grad_op.CopyFrom(fwd_op)
    # Replace the forward op's inputs and outputs with the gradient ones.
    del grad_op.input[:]
    grad_op.input.extend(input_names)
    del grad_op.output[:]
    grad_op.output.extend(output_names)
    # Rebuild the 'then_net' (and optional 'else_net') net arguments.
    then_arg = caffe2_pb2.Argument()
    then_arg.name = 'then_net'
    then_arg.n.CopyFrom(then_grad_net)
    new_args = [then_arg]
    if else_grad_net:
        else_arg = caffe2_pb2.Argument()
        else_arg.name = 'else_net'
        else_arg.n.CopyFrom(else_grad_net)
        new_args.append(else_arg)
    del grad_op.arg[:]
    grad_op.arg.extend(new_args)
    if grad_op.name:
        grad_op.name += '_grad'
    # Control inputs of the forward op do not apply to the gradient op.
    del grad_op.control_input[:]
    grad_op.is_gradient_op = True
    return grad_op
def Cifar100(home_path, model_name):
    """Load CIFAR-100 plus a pre-trained teacher and a preprocessing function.

    Returns (train_images, train_labels, val_images, val_labels,
    pre_processing, teacher) where `teacher` is the .mat file loaded from
    <home_path>/pre_trained/<model_name>.mat.
    """
    from tensorflow.keras.datasets.cifar100 import load_data
    ((train_images, train_labels), (val_images, val_labels)) = load_data()
    # Teacher weights stored as a MATLAB file under pre_trained/.
    teacher = sio.loadmat((home_path + ('/pre_trained/%s.mat' % model_name)))
    def pre_processing(image, is_training):
        # Per-channel mean/std normalization on the 0-255 scale.
        # NOTE(review): the constants presumably are CIFAR-100 channel
        # statistics — confirm against the training pipeline.
        with tf.variable_scope('preprocessing'):
            image = tf.cast(image, tf.float32)
            image = ((image - np.array([112.4776, 124.1058, 129.3773])) / np.array([70.4587, 65.4312, 68.2094]))
            def augmentation(image):
                # Standard CIFAR augmentation: random horizontal flip,
                # 4-pixel reflect padding, random crop back to original size.
                image = tf.image.random_flip_left_right(image)
                sz = tf.shape(image)
                image = tf.pad(image, [[0, 0], [4, 4], [4, 4], [0, 0]], 'REFLECT')
                image = tf.random_crop(image, sz)
                return image
            # Augment only in training mode; evaluation keeps the plain image.
            image = tf.cond(is_training, (lambda : augmentation(image)), (lambda : image))
            return image
    return (train_images, train_labels, val_images, val_labels, pre_processing, teacher)
_module()
class BasePartSeg(BaseSeg):
    """Part-segmentation head on top of BaseSeg: forwards per-shape class
    labels (cls0) to the decoder."""

    def __init__(self, encoder_args=None, decoder_args=None, cls_args=None, **kwargs):
        super().__init__(encoder_args, decoder_args, cls_args, **kwargs)

    def forward(self, p0, f0=None, cls0=None):
        # Accept either a dict-like batch or explicit tensors.
        if hasattr(p0, 'keys'):
            p0, f0, cls0 = p0['pos'], p0['x'], p0['cls']
        elif f0 is None:
            # Default features: transposed coordinates, made contiguous.
            f0 = p0.transpose(1, 2).contiguous()
        pts, feats = self.encoder.forward_seg_feat(p0, f0)
        if self.decoder is not None:
            out = self.decoder(pts, feats, cls0).squeeze(-1)
        elif isinstance(feats, list):
            # No decoder: use the last stage's features.
            out = feats[-1]
        else:
            out = feats
        if self.head is not None:
            out = self.head(out)
        return out
def str2bool(v):
    """argparse type converter: parse common true/false spellings to bool.

    Raises argparse.ArgumentTypeError for unrecognized strings.
    """
    if isinstance(v, bool):
        return v
    token = v.lower()
    if token in {'yes', 'true', 't', 'y', '1'}:
        return True
    if token in {'no', 'false', 'f', 'n', '0'}:
        return False
    raise argparse.ArgumentTypeError('boolean value expected')
def test_get_init_msa(msa_sampler):
    """get_init_msa should tile the seed sequences into a padded token batch."""
    seed = ['AAA', 'ACC', 'ACDE']
    batch_size = 2
    max_len = 5
    # Use the named constants instead of repeating the magic literals 5 and 2,
    # so the call visibly matches the shape assertions below.
    result = msa_sampler.get_init_msa(seed, max_len, batch_size)
    assert result.shape[0] == batch_size
    assert result.shape[1] == len(seed)
    # +1 for the leading token (id 0 below — presumably BOS; verify against
    # the sampler's vocabulary).
    assert result.shape[2] == (max_len + 1)
    # Trailing 32s pad sequences shorter than max_len.
    assert result[0][0].tolist() == [0, 5, 5, 5, 32, 32]
    assert result[0][1].tolist() == [0, 5, 23, 23, 32, 32]
    assert result[0][2].tolist() == [0, 5, 23, 13, 9, 32]
def clear_parameters():
    """Remove every entry from the module-level ``current_scope`` registry.

    Uses ``dict.clear()`` instead of deleting keys one by one; clearing in
    place (rather than rebinding a new dict) keeps any external references
    to ``current_scope`` valid, matching the original behavior.
    """
    current_scope.clear()
def process_base_case(header_contents):
    """Label header/content fragments as heading ('H') or content ('C').

    With 0 or 1 elements the answer is direct. Otherwise the elements are
    clustered into two groups by word count; within the span between the
    first and last member of the wordier cluster, only each element's
    trailing ``max_count`` headers keep the 'H' label — everything else
    becomes 'C'.

    Returns a list of (label, text) tuples with label in {'H', 'C'}.
    """
    if not header_contents:
        return []
    if len(header_contents) == 1:
        # Single element: headers collapse to one 'H' entry, contents to one 'C'.
        only = header_contents[0]
        return [('H', ''.join(only.header)), ('C', ''.join(only.content))]
    # Cluster elements into two groups by word count (1-D average linkage).
    word_nums = [elem.get_num_words() for elem in header_contents]
    agg = AgglomerativeClustering(n_clusters=2, linkage='average').fit([[x] for x in word_nums])
    cluster_word_arrs = {0: [], 1: []}
    for label, count in zip(agg.labels_, word_nums):
        cluster_word_arrs[label].append(count)
    mean_0 = sum(cluster_word_arrs[0]) / len(cluster_word_arrs[0])
    mean_1 = sum(cluster_word_arrs[1]) / len(cluster_word_arrs[1])
    # The cluster with the larger mean word count is the "content-like" one.
    greater_cluster = 0 if mean_0 > mean_1 else 1
    labels_agg = list(agg.labels_)
    first_occ = labels_agg.index(greater_cluster)
    last_occ = (len(labels_agg) - 1) - labels_agg[::-1].index(greater_cluster)
    # Modal header count across all elements (`l` renamed for clarity).
    header_counts = [len(x.header) for x in header_contents]
    max_count = max(set(header_counts), key=header_counts.count)
    retval = []
    for idx, elem in enumerate(header_contents):
        headers = elem.header
        if first_occ <= idx <= last_occ:
            # Inside the span only the trailing max_count headers stay 'H'.
            start_header_index = len(headers) - max_count
            for index, h in enumerate(headers):
                retval.append(('C' if index < start_header_index else 'H', h))
        else:
            retval.extend(('C', h) for h in headers)
        retval.extend(('C', c) for c in elem.content)
    return retval
def load_checkpoint(train_config, path, map_location='cuda', strict=True):
    """Build the training model from config and restore weights from `path`.

    The raw checkpoint dict is also handed to the model via
    on_load_checkpoint so it can restore any extra state.
    """
    model: torch.nn.Module = make_training_model(train_config)
    checkpoint = torch.load(path, map_location=map_location)
    model.load_state_dict(checkpoint['state_dict'], strict=strict)
    model.on_load_checkpoint(checkpoint)
    return model
class Bottleneck(nn.Module):
    """ResNet bottleneck block (1x1 reduce -> 3x3 -> 1x1 expand) using a
    frozen batch-norm variant; the 3x3 conv carries stride and dilation."""

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1):
        super(Bottleneck, self).__init__()
        width = planes * 4
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = FixedBatchNorm(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=dilation, bias=False, dilation=dilation)
        self.bn2 = FixedBatchNorm(planes)
        self.conv3 = nn.Conv2d(planes, width, kernel_size=1, bias=False)
        self.bn3 = FixedBatchNorm(width)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation

    def forward(self, x):
        # Shortcut branch: identity, or a projection when shape changes.
        identity = self.downsample(x) if self.downsample is not None else x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += identity
        return self.relu(out)
def test_get_nrows():
    """get_nrows parses positive integer strings; anything else maps to None."""
    assert get_nrows('1000') == 1000
    assert get_nrows('1') == 1
    # Non-numeric strings — including the literal 'None' — yield None.
    assert get_nrows('None') is None
    assert get_nrows('asdf') is None
def pre_user_cohort_demo(indir, patient_list):
    """Read <indir>/demo.csv and map each id in patient_list to (db, sex).

    The file is expected to have a header row, then comma-separated rows
    whose first three fields are id, db, sex.
    """
    demo_by_patient = {}
    path = '{}/demo.csv'.format(indir)
    with open(path, 'r') as f:
        next(f)  # skip the header row
        for line in f:
            # BUGFIX: strip the trailing newline before splitting — with
            # exactly three columns the third field used to keep the '\n'.
            # (Also renamed `id`/`file`, which shadowed builtins.)
            fields = line.rstrip('\n').split(',')
            patient_id, db, sex = fields[0], fields[1], fields[2]
            if patient_id in patient_list:
                demo_by_patient[patient_id] = (db, sex)
    return demo_by_patient
_properties
class StreamingMemory(xf.SingleStateTransformation):
    """Transformation that converts an array access into FIFO stream traffic.

    Matches an access node that is only read (access -> entry) or only
    written (exit -> access) and routes the data through newly created
    transient streams; optionally packs elements into vectors for wider
    memory transactions ("memory buffering").

    NOTE(review): the line above this class in the original file is a
    mangled decorator (`_properties`) — presumably `@make_properties`;
    confirm against the upstream source.
    """

    # Pattern nodes matched by this transformation.
    access = xf.PatternNode(nodes.AccessNode)
    entry = xf.PatternNode(nodes.EntryNode)
    exit = xf.PatternNode(nodes.ExitNode)
    # Configuration of the generated stream(s).
    buffer_size = properties.Property(dtype=int, default=1, desc='Set buffer size for the newly-created stream')
    storage = properties.EnumProperty(dtype=dtypes.StorageType, desc='Set storage type for the newly-created stream', default=dtypes.StorageType.Default)
    use_memory_buffering = properties.Property(dtype=bool, default=False, desc='Set if memory buffering should be used.')
    memory_buffering_target_bytes = properties.Property(dtype=int, default=64, desc='Set bytes read/written from memory if memory buffering is enabled.')

    # NOTE(review): takes `cls` but no @classmethod decorator is visible
    # here — presumably stripped during extraction; confirm upstream.
    def expressions(cls) -> List[gr.SubgraphView]:
        # Two patterns: access feeding an entry node (reads) and an exit
        # node feeding an access (writes).
        return [sdutil.node_path_graph(cls.access, cls.entry), sdutil.node_path_graph(cls.exit, cls.access)]

    def can_be_applied(self, graph: SDFGState, expr_index: int, sdfg: SDFG, permissive: bool=False) -> bool:
        """Check structural (and optionally memory-buffering) preconditions."""
        access = self.access
        # The access node must be a pure source or pure sink.
        if ((graph.out_degree(access) > 0) and (graph.in_degree(access) > 0)):
            return False
        # Only arrays (not already-streams) in off-chip/heap storage qualify.
        if isinstance(sdfg.arrays[access.data], data.Stream):
            return False
        if (sdfg.arrays[access.data].storage not in [dtypes.StorageType.CPU_Heap, dtypes.StorageType.CPU_Pinned, dtypes.StorageType.GPU_Global, dtypes.StorageType.FPGA_Global]):
            return False
        # The access node must not reside inside any map scope, walking up
        # through the nested-SDFG hierarchy.
        curstate = graph
        node = access
        while (curstate is not None):
            if (curstate.entry_node(node) is not None):
                return False
            if (curstate.parent.parent_nsdfg_node is None):
                break
            node = curstate.parent.parent_nsdfg_node
            curstate = curstate.parent.parent
        edges = (graph.out_edges(access) if (expr_index == 0) else graph.in_edges(access))
        for edge in edges:
            mpath = graph.memlet_path(edge)
            # Memlet trees (i.e., branching paths) are not supported.
            if (len(mpath) != len(list(graph.memlet_tree(edge)))):
                return False
            innermost_edge = (mpath[(- 1)] if (expr_index == 0) else mpath[0])
            dtype = sdfg.arrays[innermost_edge.data.data].dtype
            # The innermost memlet must move exactly one element (or one
            # vector of matching length) and be static.
            if ((innermost_edge.data.subset.num_elements() != 1) or innermost_edge.data.dynamic or ((innermost_edge.data.volume != 1) and (not (isinstance(dtype, dtypes.vector) and (innermost_edge.data.volume == dtype.veclen))))):
                return False
            # Dynamic map ranges along the path are not supported.
            for pe in mpath:
                node = (pe.dst if (expr_index == 0) else graph.entry_node(pe.src))
                if (isinstance(node, nodes.MapEntry) and sdutil.has_dynamic_map_inputs(graph, node)):
                    return False
        if (expr_index == 0):
            other_node = self.entry
        else:
            other_node = self.exit
            other_node = graph.entry_node(other_node)
        # Avoid re-applying to maps this transformation created ('__s' prefix).
        if other_node.label.startswith('__s'):
            return False
        if self.use_memory_buffering:
            access = self.access
            desc = sdfg.arrays[access.data]
            # Memory buffering is FPGA-global-memory only, and the target
            # byte width must be a multiple of (and at least) one element.
            if (desc.storage != dtypes.StorageType.FPGA_Global):
                return False
            if ((self.memory_buffering_target_bytes % desc.dtype.bytes) != 0):
                return False
            if (self.memory_buffering_target_bytes < desc.dtype.bytes):
                return False
            strides = list(desc.strides)
            # The array must be contiguous in its last dimension.
            if (strides[(- 1)] != 1):
                return False
            vector_size = int((self.memory_buffering_target_bytes / desc.dtype.bytes))
            strides.pop()
            # All outer strides must be divisible by the vector length.
            for stride in strides:
                if (is_int(stride) and ((stride % vector_size) != 0)):
                    return False
            state = sdfg.node(self.state_id)
            dnode: nodes.AccessNode = self.access
            if (self.expr_index == 0):
                edges = state.out_edges(dnode)
            else:
                edges = state.in_edges(dnode)
            # Group edges by the (symbolic) ranges of the maps they traverse.
            mapping: Dict[(Tuple[subsets.Range], List[gr.MultiConnectorEdge[mm.Memlet]])] = defaultdict(list)
            ranges = {}
            for edge in edges:
                mpath = state.memlet_path(edge)
                ranges[edge] = _collect_map_ranges(state, mpath)
                mapping[tuple((r[1] for r in ranges[edge]))].append(edge)
            for edges_with_same_range in mapping.values():
                for edge in edges_with_same_range:
                    mpath = state.memlet_path(edge)
                    innermost_edge = copy.deepcopy((mpath[(- 1)] if (self.expr_index == 0) else mpath[0]))
                    edge_subset = [a_tuple[0] for a_tuple in list(innermost_edge.data.subset)]
                    if (self.expr_index == 0):
                        map_subset = innermost_edge.src.map.params.copy()
                        ranges = list(innermost_edge.src.map.range)
                    else:
                        map_subset = innermost_edge.dst.map.params.copy()
                        ranges = list(innermost_edge.dst.map.range)
                    # Innermost map extent must divide evenly by the vector
                    # length and use unit stride.
                    if (is_int(ranges[(- 1)][1]) and (((ranges[(- 1)][1] + 1) % vector_size) != 0)):
                        return False
                    if (ranges[(- 1)][2] != 1):
                        return False
                    # Last subset index must be exactly the innermost map
                    # parameter, possibly inside a sum containing it once.
                    if (isinstance(edge_subset[(- 1)], symbol) and (str(edge_subset[(- 1)]) == map_subset[(- 1)])):
                        pass
                    elif isinstance(edge_subset[(- 1)], sympy.core.add.Add):
                        counter: int = 0
                        for arg in edge_subset[(- 1)].args:
                            if (isinstance(arg, symbol) and (str(arg) == map_subset[(- 1)])):
                                counter += 1
                        if (counter != 1):
                            return False
                    else:
                        return False
        return True

    def apply(self, state: SDFGState, sdfg: SDFG) -> nodes.AccessNode:
        """Perform the streaming conversion; returns the created I/O nodes."""
        dnode: nodes.AccessNode = self.access
        if (self.expr_index == 0):
            edges = state.out_edges(dnode)
        else:
            edges = state.in_edges(dnode)
        # Group edges by the ranges of the maps along their memlet paths.
        mapping: Dict[(Tuple[subsets.Range], List[gr.MultiConnectorEdge[mm.Memlet]])] = defaultdict(list)
        ranges = {}
        for edge in edges:
            mpath = state.memlet_path(edge)
            ranges[edge] = _collect_map_ranges(state, mpath)
            mapping[tuple((r[1] for r in ranges[edge]))].append(edge)
        # Edges whose canonicalized innermost memlets agree share a component.
        components_to_create: Dict[(Tuple[symbolic.SymbolicType], List[gr.MultiConnectorEdge[mm.Memlet]])] = defaultdict(list)
        for edges_with_same_range in mapping.values():
            for edge in edges_with_same_range:
                mpath = state.memlet_path(edge)
                innermost_edge = copy.deepcopy((mpath[(- 1)] if (self.expr_index == 0) else mpath[0]))
                expr = _canonicalize_memlet(innermost_edge.data, ranges[edge])
                components_to_create[expr].append((innermost_edge, edge))
        components = list(components_to_create.values())
        if (self.expr_index == 0):
            # Split apart component members whose destinations depend on
            # each other, to avoid cyclic streaming.
            ccs_to_add = []
            for (i, component) in enumerate(components):
                edges_to_remove = set()
                for cedge in component:
                    if any((nx.has_path(state.nx, o[1].dst, cedge[1].dst) for o in component if (o is not cedge))):
                        ccs_to_add.append([cedge])
                        edges_to_remove.add(cedge)
                if edges_to_remove:
                    components[i] = [c for c in component if (c not in edges_to_remove)]
            components.extend(ccs_to_add)
        desc = sdfg.arrays[dnode.data]
        # Create one stream (or gearbox pair) per edge and reroute memlets.
        streams = {}
        mpaths = {}
        for edge in edges:
            if self.use_memory_buffering:
                arrname = str(self.access)
                total_size = edge.data.volume
                vector_size = int((self.memory_buffering_target_bytes / desc.dtype.bytes))
                if (not is_int(sdfg.arrays[dnode.data].shape[(- 1)])):
                    warnings.warn('Using the MemoryBuffering transformation is potential unsafe since {sym} is not an integer. There should be no issue if {sym} % {vec} == 0'.format(sym=sdfg.arrays[dnode.data].shape[(- 1)], vec=vector_size))
                for i in sdfg.arrays[dnode.data].strides:
                    if (not is_int(i)):
                        warnings.warn('Using the MemoryBuffering transformation is potential unsafe since {sym} is not an integer. There should be no issue if {sym} % {vec} == 0'.format(sym=i, vec=vector_size))
                # Reads unpack vectors into scalars; writes pack the reverse.
                if (self.expr_index == 0):
                    edges = state.out_edges(dnode)
                    gearbox_input_type = dtypes.vector(desc.dtype, vector_size)
                    gearbox_output_type = desc.dtype
                    gearbox_read_volume = (total_size / vector_size)
                    gearbox_write_volume = total_size
                else:
                    edges = state.in_edges(dnode)
                    gearbox_input_type = desc.dtype
                    gearbox_output_type = dtypes.vector(desc.dtype, vector_size)
                    gearbox_read_volume = total_size
                    gearbox_write_volume = (total_size / vector_size)
                (input_gearbox_name, input_gearbox_newdesc) = sdfg.add_stream('gearbox_input', gearbox_input_type, buffer_size=self.buffer_size, storage=self.storage, transient=True, find_new_name=True)
                (output_gearbox_name, output_gearbox_newdesc) = sdfg.add_stream('gearbox_output', gearbox_output_type, buffer_size=self.buffer_size, storage=self.storage, transient=True, find_new_name=True)
                read_to_gearbox = state.add_read(input_gearbox_name)
                write_from_gearbox = state.add_write(output_gearbox_name)
                gearbox = Gearbox((total_size / vector_size))
                state.add_node(gearbox)
                state.add_memlet_path(read_to_gearbox, gearbox, dst_conn='from_memory', memlet=Memlet((input_gearbox_name + '[0]'), volume=gearbox_read_volume))
                state.add_memlet_path(gearbox, write_from_gearbox, src_conn='to_kernel', memlet=Memlet((output_gearbox_name + '[0]'), volume=gearbox_write_volume))
                if (self.expr_index == 0):
                    streams[edge] = input_gearbox_name
                    name = output_gearbox_name
                    newdesc = output_gearbox_newdesc
                else:
                    streams[edge] = output_gearbox_name
                    name = input_gearbox_name
                    newdesc = input_gearbox_newdesc
            else:
                # Plain streaming: one transient stream per edge.
                stream_name = ('stream_' + dnode.data)
                (name, newdesc) = sdfg.add_stream(stream_name, desc.dtype, buffer_size=self.buffer_size, storage=self.storage, transient=True, find_new_name=True)
                streams[edge] = name
                output_gearbox_name = name
                input_gearbox_name = name
            # Rewrite every memlet on the path to point at the stream.
            mpath = state.memlet_path(edge)
            mpaths[edge] = mpath
            for e in mpath:
                e.data = mm.Memlet(data=name, subset='0', other_subset=e.data.other_subset)
                if isinstance(e.src, nodes.NestedSDFG):
                    e.data.dynamic = True
                    _streamify_recursive(e.src, e.src_conn, newdesc)
                if isinstance(e.dst, nodes.NestedSDFG):
                    e.data.dynamic = True
                    _streamify_recursive(e.dst, e.dst_conn, newdesc)
            # Replace the array access with a stream access node.
            if (self.expr_index == 0):
                replacement = state.add_read(output_gearbox_name)
                state.remove_edge(edge)
                state.add_edge(replacement, edge.src_conn, edge.dst, edge.dst_conn, edge.data)
            else:
                replacement = state.add_write(input_gearbox_name)
                state.remove_edge(edge)
                state.add_edge(edge.src, edge.src_conn, replacement, edge.dst_conn, edge.data)
        if self.use_memory_buffering:
            # Vectorize the underlying array: dtype, shape and strides all
            # shrink by the vector length along the contiguous dimension.
            arrname = str(self.access)
            vector_size = int((self.memory_buffering_target_bytes / desc.dtype.bytes))
            dtype = sdfg.arrays[arrname].dtype
            sdfg.arrays[arrname].dtype = dtypes.vector(dtype, vector_size)
            new_shape = list(sdfg.arrays[arrname].shape)
            contigidx = sdfg.arrays[arrname].strides.index(1)
            new_shape[contigidx] /= vector_size
            try:
                new_shape[contigidx] = int(new_shape[contigidx])
            except TypeError:
                # Symbolic shape: leave as a symbolic expression.
                pass
            sdfg.arrays[arrname].shape = new_shape
            new_strides: List = list(sdfg.arrays[arrname].strides)
            for i in range(len(new_strides)):
                if (i == (len(new_strides) - 1)):
                    continue
                new_strides[i] = (new_strides[i] / vector_size)
            sdfg.arrays[arrname].strides = new_strides
            # Fix up memlets in the following state that still use the old
            # (unvectorized) subset extents.
            post_state = get_post_state(sdfg, state)
            if (post_state != None):
                for e in post_state.edges():
                    if (e.data.data == self.access.data):
                        new_subset = list(e.data.subset)
                        (i, j, k) = new_subset[(- 1)]
                        new_subset[(- 1)] = (i, (((j + 1) / vector_size) - 1), k)
                        e.data = mm.Memlet(data=str(e.src), subset=subsets.Range(new_subset))
        # Build one read/write tasklet pipeline per component.
        ionodes = []
        for component in components:
            (innermost_edge, outermost_edge) = component[0]
            mpath = mpaths[outermost_edge]
            mapname = streams[outermost_edge]
            innermost_edge.data.other_subset = None
            if (self.expr_index == 0):
                # Read side: one tasklet reads the array and fans out to the
                # component's streams.
                opname = 'read'
                path = [e.dst for e in mpath[:(- 1)]]
                rmemlets = [(dnode, '__inp', innermost_edge.data)]
                wmemlets = []
                for (i, (_, edge)) in enumerate(component):
                    name = streams[edge]
                    ionode = state.add_write(name)
                    ionodes.append(ionode)
                    wmemlets.append((ionode, ('__out%d' % i), mm.Memlet(data=name, subset='0')))
                code = '\n'.join((('__out%d = __inp' % i) for i in range(len(component))))
            else:
                # Write side: a single stream feeds the array.
                if (len(component) > 1):
                    warnings.warn(f'More than one input found for the same index for {dnode.data}')
                opname = 'write'
                path = [state.entry_node(e.src) for e in reversed(mpath[1:])]
                wmemlets = [(dnode, '__out', innermost_edge.data)]
                rmemlets = []
                for (i, (_, edge)) in enumerate(component):
                    name = streams[edge]
                    ionode = state.add_read(name)
                    ionodes.append(ionode)
                    rmemlets.append((ionode, ('__inp%d' % i), mm.Memlet(data=name, subset='0')))
                code = '__out = __inp0'
            # Recreate the map nesting around the new tasklet, shrinking the
            # innermost range by the vector length when buffering.
            maps = []
            for entry in path:
                map: nodes.Map = entry.map
                ranges = [(p, (r[0], r[1], r[2])) for (p, r) in zip(map.params, map.range)]
                if self.use_memory_buffering:
                    edge_subset = [a_tuple[0] for a_tuple in list(innermost_edge.data.subset)]
                    if (isinstance(edge_subset[(- 1)], symbol) and (str(edge_subset[(- 1)]) == map.params[(- 1)])):
                        if (not is_int(ranges[(- 1)][1][1])):
                            warnings.warn('Using the MemoryBuffering transformation is potential unsafe since {sym} is not an integer. There should be no issue if {sym} % {vec} == 0'.format(sym=ranges[(- 1)][1][1].args[1], vec=vector_size))
                        ranges[(- 1)] = (ranges[(- 1)][0], (ranges[(- 1)][1][0], (((ranges[(- 1)][1][1] + 1) / vector_size) - 1), ranges[(- 1)][1][2]))
                    elif isinstance(edge_subset[(- 1)], sympy.core.add.Add):
                        for arg in edge_subset[(- 1)].args:
                            if (isinstance(arg, symbol) and (str(arg) == map.params[(- 1)])):
                                if (not is_int(ranges[(- 1)][1][1])):
                                    warnings.warn('Using the MemoryBuffering transformation is potential unsafe since {sym} is not an integer. There should be no issue if {sym} % {vec} == 0'.format(sym=ranges[(- 1)][1][1].args[1], vec=vector_size))
                                ranges[(- 1)] = (ranges[(- 1)][0], (ranges[(- 1)][1][0], (((ranges[(- 1)][1][1] + 1) / vector_size) - 1), ranges[(- 1)][1][2]))
                maps.append(state.add_map(f'__s{opname}_{mapname}', ranges, map.schedule))
            tasklet = state.add_tasklet(f'{opname}_{mapname}', {m[1] for m in rmemlets}, {m[1] for m in wmemlets}, code)
            for (node, cname, memlet) in rmemlets:
                state.add_memlet_path(node, *(me for (me, _) in maps), tasklet, dst_conn=cname, memlet=memlet)
            for (node, cname, memlet) in wmemlets:
                state.add_memlet_path(tasklet, *(mx for (_, mx) in reversed(maps)), node, src_conn=cname, memlet=memlet)
        return ionodes
def process_root_test():
    """Smoke-test utils.process_root on a home-relative root path."""
    module_dir = utils.process_root('~/test', module_name='test_module')
    print(module_dir)
class TestBidafPredictor(TestCase):
    """End-to-end checks for the BiDAF machine-comprehension predictor."""

    @staticmethod
    def _load_predictor():
        # Both tests use the same serialized fixture model.
        archive = load_archive('tests/fixtures/bidaf/serialization/model.tar.gz')
        return Predictor.from_archive(archive, 'machine-comprehension')

    def _assert_valid_result(self, result):
        # Span indices: a two-element [start, end] list of ints, ordered.
        best_span = result.get('best_span')
        assert best_span is not None
        assert isinstance(best_span, list)
        assert len(best_span) == 2
        assert all(isinstance(x, int) for x in best_span)
        assert best_span[0] <= best_span[1]
        # The extracted answer must be a non-empty string.
        best_span_str = result.get('best_span_str')
        assert isinstance(best_span_str, str)
        assert best_span_str != ''
        # Start/end probability vectors must be floats summing to one.
        for probs_key in ('span_start_probs', 'span_end_probs'):
            probs = result.get(probs_key)
            assert probs is not None
            assert all(isinstance(x, float) for x in probs)
            assert sum(probs) == approx(1.0)

    def test_uses_named_inputs(self):
        inputs = {'question': 'What kind of test succeeded on its first attempt?', 'passage': 'One time I was writing a unit test, and it succeeded on the first attempt.'}
        predictor = self._load_predictor()
        self._assert_valid_result(predictor.predict_json(inputs))

    def test_batch_prediction(self):
        inputs = [{'question': 'What kind of test succeeded on its first attempt?', 'passage': 'One time I was writing a unit test, and it succeeded on the first attempt.'}, {'question': 'What kind of test succeeded on its first attempt at batch processing?', 'passage': 'One time I was writing a unit test, and it always failed!'}]
        predictor = self._load_predictor()
        results = predictor.predict_batch_json(inputs)
        assert len(results) == 2
        for result in results:
            self._assert_valid_result(result)
def _copyto(a, val, mask):
if isinstance(a, np.ndarray):
np.copyto(a, val, where=mask, casting='unsafe')
else:
a = a.dtype.type(val)
return a |
class BatchPermutationOpTest(unittest.TestCase):
    """CUDA tests for the Caffe2 BatchPermutation operator (Y = X[I])."""

    def _run_op_test(self, X, I, check_grad=False):
        """Run BatchPermutation on GPU 0 and compare against numpy indexing.

        Optionally numerically checks the gradient with respect to X.
        """
        with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA, 0)):
            op = core.CreateOperator('BatchPermutation', ['X', 'I'], ['Y'])
            workspace.FeedBlob('X', X)
            workspace.FeedBlob('I', I)
            workspace.RunOperatorOnce(op)
            Y = workspace.FetchBlob('Y')
            if check_grad:
                gc = gradient_checker.GradientChecker(stepsize=0.1, threshold=0.001, device_option=core.DeviceOption(caffe2_pb2.CUDA, 0))
                (res, grad, grad_estimated) = gc.CheckSimple(op, [X, I], 0, [0])
                self.assertTrue(res, 'Grad check failed')
            # Reference: permuting the batch dimension is plain fancy indexing.
            Y_ref = X[I]
            np.testing.assert_allclose(Y, Y_ref, rtol=1e-05, atol=1e-08)

    def _run_speed_test(self, iters=5, N=1024):
        """Profile BatchPermutation (forward + backward) under 'prof_dag'."""
        net = core.Net('test')
        net.Proto().type = 'prof_dag'
        net.Proto().num_workers = 2
        Y = net.BatchPermutation(['X', 'I'], 'Y')
        Y_flat = net.FlattenToVec([Y], 'Y_flat')
        loss = net.AveragedLoss([Y_flat], 'loss')
        net.AddGradientOperators([loss])
        workspace.CreateNet(net)
        X = np.random.randn(N, 256, 14, 14)
        for _i in range(iters):
            # Fresh permutation each iteration; result verified every run.
            I = np.random.permutation(N)
            workspace.FeedBlob('X', X.astype(np.float32))
            workspace.FeedBlob('I', I.astype(np.int32))
            workspace.RunNet(net.Proto().name)
            np.testing.assert_allclose(workspace.FetchBlob('Y'), X[I], rtol=1e-05, atol=1e-08)

    def test_forward_and_gradient(self):
        # Identity, swap, and a random 10-element permutation.
        A = np.random.randn(2, 3, 5, 7).astype(np.float32)
        I = np.array([0, 1], dtype=np.int32)
        self._run_op_test(A, I, check_grad=True)
        A = np.random.randn(2, 3, 5, 7).astype(np.float32)
        I = np.array([1, 0], dtype=np.int32)
        self._run_op_test(A, I, check_grad=True)
        A = np.random.randn(10, 3, 5, 7).astype(np.float32)
        I = np.array(np.random.permutation(10), dtype=np.int32)
        self._run_op_test(A, I, check_grad=True)

    def test_size_exceptions(self):
        # Index vector longer than the batch dimension must raise.
        A = np.random.randn(2, 256, 42, 86).astype(np.float32)
        I = np.array(np.random.permutation(10), dtype=np.int32)
        with self.assertRaises(RuntimeError):
            self._run_op_test(A, I)
def solc_wrapper(solc_binary: Union[Path, str] = None, stdin: str = None, source_files: Union[List, Path, str] = None, import_remappings: Union[Dict, List, str] = None, success_return_code: int = None, **kwargs: Any) -> Tuple[str, str, List, subprocess.Popen]:
    """Invoke the `solc` compiler as a subprocess.

    Builds a command line from the given sources, remappings and keyword
    flags, pipes `stdin` to the process, and returns
    (stdout, stderr, command, proc).

    Raises UnknownOption / UnknownValue for recognizable solc usage errors
    and SolcError for any other non-success exit code.
    """
    if solc_binary:
        solc_binary = Path(solc_binary)
    else:
        solc_binary = install.get_executable()
    solc_version = _get_solc_version(solc_binary)
    command: List = [str(solc_binary)]
    if success_return_code is None:
        # `solc --help` exits with code 1 on some versions.
        success_return_code = 1 if 'help' in kwargs else 0
    if source_files is not None:
        if isinstance(source_files, (str, Path)):
            command.append(_to_string('source_files', source_files))
        else:
            command.extend([_to_string('source_files', i) for i in source_files])
    if import_remappings is not None:
        if isinstance(import_remappings, str):
            command.append(import_remappings)
        else:
            if isinstance(import_remappings, dict):
                import_remappings = [f'{k}={v}' for k, v in import_remappings.items()]
            command.extend(import_remappings)
    # kwargs become --flag [value] pairs; False/None means "omit".
    for key, value in kwargs.items():
        if value is None or value is False:
            continue
        key = f"--{key.replace('_', '-')}"
        if value is True:
            command.append(key)
        else:
            command.extend([key, _to_string(key, value)])
    if 'standard_json' not in kwargs and not source_files:
        # No source files: instruct solc to read from stdin.
        command.append('-')
    if stdin is not None:
        stdin = str(stdin)
    proc = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf8')
    stdoutdata, stderrdata = proc.communicate(stdin)
    if proc.returncode != success_return_code:
        if stderrdata.startswith('unrecognised option'):
            # Message shape: unrecognised option '--foo'
            flag = stderrdata.split("'")[1]
            raise UnknownOption(f"solc {solc_version} does not support the '{flag}' option'")
        if stderrdata.startswith('Invalid option'):
            # BUGFIX: the option value may itself contain ': ', which made the
            # two-target unpack raise ValueError; split only on the first one.
            flag, option = stderrdata.split(': ', maxsplit=1)
            flag = flag.split(' ')[-1]
            raise UnknownValue(f"solc {solc_version} does not accept '{option}' as an option for the '{flag}' flag")
        raise SolcError(command=command, return_code=proc.returncode, stdin_data=stdin, stdout_data=stdoutdata, stderr_data=stderrdata)
    return (stdoutdata, stderrdata, command, proc)
class HyperParam():
    """Specification of one tunable hyper-parameter.

    Either categorical (``classes``: an ordered list of choices) or numeric
    (``dtype`` in {float, int, bool}, optional ``bounds``), optionally
    sampled in log space. A ``selected`` fraction in [0, 1] is mapped to a
    concrete value by :meth:`get_value`. Each instance receives a
    process-wide unique index for deterministic ordering of usages.
    """

    # Class-level counter backing per-instance unique indices.
    _unique_idx = 0

    def __init__(self, dtype=None, bounds=None, classes=None, log=False, default=None):
        # Shorthand: passing a list/tuple as `dtype` means categorical.
        if isinstance(dtype, (list, tuple)):
            assert classes is None
            assert bounds is None
            classes = dtype
            dtype = None
        if dtype is None:
            assert classes is not None
        elif dtype == 'float':
            dtype = float
        elif dtype == 'int':
            dtype = int
        elif dtype == 'bool':
            dtype = bool
        assert dtype in (float, int, bool, None)
        if bounds is not None:
            assert dtype in (int, float)
            assert isinstance(bounds, (list, tuple))
            assert len(bounds) == 2
            assert dtype(bounds[0]) < dtype(bounds[1])
        if classes is not None:
            assert isinstance(classes, (list, tuple)), 'should be with a defined order'
            assert len(classes) > 0
        self.dtype = dtype
        self.bounds = bounds
        self.classes = classes
        self.log_space = log
        self.default = default
        self.unique_idx = HyperParam._get_next_unique_idx()
        self.usages = []

    @classmethod
    def _get_next_unique_idx(cls):
        """Return a new process-wide unique index (1, 2, 3, ...).

        BUGFIX: restored the ``@classmethod`` decorator — without it the
        unbound call ``HyperParam._get_next_unique_idx()`` in ``__init__``
        raises TypeError because ``cls`` is never supplied.
        """
        cls._unique_idx += 1
        return cls._unique_idx

    def __repr__(self):
        if self.classes is not None:
            return 'HyperParam(%r)' % self.classes
        dtype_name = self.dtype.__name__
        ext = ''
        if self.log_space:
            ext += ', log=True'
        if self.default is not None:
            ext += ', default=%r' % self.default
        if self.bounds is not None:
            return 'HyperParam(%s, %s%s)' % (dtype_name, self.bounds, ext)
        assert self.bounds is None
        return 'HyperParam(%s%s)' % (dtype_name, ext)

    def get_canonical_usage(self):
        """Return the first usage after sorting (shortest chain first)."""
        return self.get_sorted_usages()[0]

    def get_sorted_usages(self):
        # Chains of length >= 2 compare equal; shorter chains sort first.
        return sorted(self.usages, key=(lambda chain: min(2, len(chain.chain))))

    def description(self):
        """Human-readable "usage: spec" string for logging."""
        if len(self.usages) == 0:
            usage_str = '<no usage>'
        elif len(self.usages) == 1:
            usage_str = str(self.usages[0])
        else:
            usage_str = str(self.get_canonical_usage()) + '|...'
        return usage_str + (': %s' % self)

    def get_num_instances(self, upper_limit=100):
        """Number of distinct values this parameter can take, capped at upper_limit."""
        assert upper_limit >= 2
        if self.classes is not None:
            return min(len(self.classes), upper_limit)
        if self.dtype is bool:
            return 2
        if self.dtype is float:
            # Continuous range: only the cap is meaningful.
            return upper_limit
        if self.dtype is int:
            (x1, x2) = self.bounds
            x1 = numpy.ceil(x1)
            x2 = numpy.floor(x2)
            assert x1 < x2
            return min((x2 - x1) + 1, upper_limit)
        raise Exception('invalid dtype %r' % self.dtype)

    def merge_values(self, value1, value2):
        """Return a value "between" value1 and value2.

        Arithmetic mean for linear params, geometric mean (with sign
        handling and clamping away from zero) for log-space params.
        """
        if self.dtype is bool:
            # No sensible midpoint for booleans; keep the first value.
            return value1
        if self.log_space:
            (x0, x1) = (value1, value2)
            if x0 > x1:
                (x0, x1) = (x1, x0)
            # Work on magnitudes; remember the sign for all-negative ranges.
            if (x0 < 0) or (x1 < 0):
                assert x0 <= x1 <= 0
                sign = -1
                (x0, x1) = (-x1, -x0)
            else:
                sign = 1
            assert x1 >= x0 >= 0
            x0o = x0
            # Clamp away from zero so log() is defined.
            if x0 < (Eps * 0.5):
                x0 = Eps * 0.5
            if x1 < Eps:
                x1 = Eps
            x0 = numpy.log(float(x0))
            x1 = numpy.log(float(x1))
            y = numpy.exp(x0 + ((x1 - x0) * 0.5))
            if y <= Eps:
                # Degenerate result: fall back to the smaller input magnitude.
                y = x0o
            return self.dtype(y) * sign
        if self.dtype is int:
            return (value1 + value2) // 2
        return self.dtype((value1 + value2) * 0.5)

    def get_value(self, selected, eps=Eps):
        """Map `selected` in [0, 1] to a concrete parameter value."""
        assert 0 < eps
        assert 0 <= selected <= 1
        if self.classes:
            return self.classes[int(len(self.classes) * selected)]
        if self.dtype is bool:
            return selected > 0.5
        if self.bounds:
            if (self.dtype is int) and (not self.log_space):
                return self.bounds[0] + int((self.bounds[1] - self.bounds[0]) * selected)
            if self.log_space:
                (x0, x1) = self.bounds
                # Same sign/clamping scheme as merge_values.
                if (x0 < 0) or (x1 < 0):
                    assert x0 < x1 <= 0
                    sign = -1
                    (x0, x1) = (-x1, -x0)
                else:
                    sign = 1
                assert x1 > x0 >= 0
                (x0b, x1b) = (x0, x1)
                if x0b < (eps * 0.5):
                    x0b = eps * 0.5
                if x1b < eps:
                    x1b = eps
                x0l = numpy.log(float(x0b))
                x1l = numpy.log(float(x1b))
                y = numpy.exp(x0l + ((x1l - x0l) * selected))
                if y <= eps:
                    y = x0
                return self.dtype(y) * sign
            return self.dtype(self.bounds[0] + ((self.bounds[1] - self.bounds[0]) * selected))
        # Unbounded numeric: interpret `selected` as a quantile of the
        # standard normal distribution (inverse CDF).
        x = selected
        if x < eps:
            x = eps
        if x > (1.0 - eps):
            x = 1.0 - eps
        import scipy.special
        return self.dtype(scipy.special.ndtri(x))

    def get_initial_value(self):
        """Midpoint value (selected=0.5)."""
        return self.get_value(selected=0.5)

    def get_default_value(self):
        """Explicit default if given, otherwise the midpoint value."""
        if self.default is not None:
            return self.dtype(self.default)
        return self.get_initial_value()

    def get_random_value(self, seed, eps=Eps):
        """Deterministically sample a value from the given RNG seed."""
        rnd = numpy.random.RandomState(seed=seed)
        x = rnd.uniform(0.0, 1.0)
        # Snap near-boundary draws exactly onto the boundary.
        if x < eps:
            x = 0.0
        if x > (1.0 - eps):
            x = 1.0
        return self.get_value(x, eps=eps)

    def get_random_value_by_idx(self, iteration_idx, individual_idx):
        """Sample reproducibly from (canonical usage, iteration, individual)."""
        seed = hash_obj((self.get_canonical_usage(), iteration_idx, individual_idx))
        return self.get_random_value(seed=seed)
class B(FairseqDataclass):
    """Config dataclass embedding a nested A config."""

    # BUGFIX: use default_factory instead of a single shared A() instance —
    # field(default=A()) makes every B share (and possibly mutate) the same
    # nested config object.
    bar: A = field(default_factory=A)
    foo: int = field(default=0, metadata={'help': 'not a bar'})
def main():
    """Entry point: build the test dataloader, load the segmentor checkpoint,
    run single- or multi-GPU inference, then dump/evaluate/format results.

    Behavior is driven entirely by the CLI flags from ``parse_args``: at least
    one of --out/--eval/--format-only/--show/--show-dir must be given.
    """
    args = parse_args()
    assert (args.out or args.eval or args.format_only or args.show or args.show_dir), 'Please specify at least one operation (save/eval/format/show the results / save the results) with the argument "--out", "--eval", "--format-only", "--show" or "--show-dir"'
    if (args.eval and args.format_only):
        raise ValueError('--eval and --format_only cannot be both specified')
    if ((args.out is not None) and (not args.out.endswith(('.pkl', '.pickle')))):
        raise ValueError('The output file must be a pkl file.')
    # --- config setup ---------------------------------------------------
    cfg = mmcv.Config.fromfile(args.config)
    if (args.cfg_options is not None):
        cfg.merge_from_dict(args.cfg_options)
    setup_multi_processes(cfg)
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    if args.aug_test:
        # Multi-scale + flip test-time augmentation.
        cfg.data.test.pipeline[1].img_ratios = [0.5, 0.75, 1.0, 1.25, 1.5, 1.75]
        cfg.data.test.pipeline[1].flip = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True
    if (args.gpu_id is not None):
        cfg.gpu_ids = [args.gpu_id]
    # --- distributed environment ----------------------------------------
    if (args.launcher == 'none'):
        cfg.gpu_ids = [args.gpu_id]
        distributed = False
        if (len(cfg.gpu_ids) > 1):
            warnings.warn(f'The gpu-ids is reset from {cfg.gpu_ids} to {cfg.gpu_ids[0:1]} to avoid potential error in non-distribute testing time.')
            cfg.gpu_ids = cfg.gpu_ids[0:1]
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    (rank, _) = get_dist_info()
    # --- work dir / eval json path (rank 0 only) ------------------------
    if ((args.work_dir is not None) and (rank == 0)):
        mmcv.mkdir_or_exist(osp.abspath(args.work_dir))
        timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
        if args.aug_test:
            json_file = osp.join(args.work_dir, f'eval_multi_scale_{timestamp}.json')
        else:
            json_file = osp.join(args.work_dir, f'eval_single_scale_{timestamp}.json')
    elif (rank == 0):
        # No work dir given: derive one from the config file name.
        work_dir = osp.join('./work_dirs', osp.splitext(osp.basename(args.config))[0])
        mmcv.mkdir_or_exist(osp.abspath(work_dir))
        timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
        if args.aug_test:
            json_file = osp.join(work_dir, f'eval_multi_scale_{timestamp}.json')
        else:
            json_file = osp.join(work_dir, f'eval_single_scale_{timestamp}.json')
    # --- data loader -----------------------------------------------------
    dataset = build_dataset(cfg.data.test)
    loader_cfg = dict(num_gpus=len(cfg.gpu_ids), dist=distributed, shuffle=False)
    # Carry over loader-level keys from cfg.data, excluding the split configs.
    loader_cfg.update({k: v for (k, v) in cfg.data.items() if (k not in ['train', 'val', 'test', 'train_dataloader', 'val_dataloader', 'test_dataloader'])})
    test_loader_cfg = {**loader_cfg, 'samples_per_gpu': 1, 'shuffle': False, **cfg.data.get('test_dataloader', {})}
    data_loader = build_dataloader(dataset, **test_loader_cfg)
    # --- model -----------------------------------------------------------
    cfg.model.train_cfg = None
    model = build_segmentor(cfg.model, test_cfg=cfg.get('test_cfg'))
    fp16_cfg = cfg.get('fp16', None)
    if (fp16_cfg is not None):
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    # Prefer class/palette metadata stored in the checkpoint; fall back to the dataset.
    if ('CLASSES' in checkpoint.get('meta', {})):
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        print('"CLASSES" not found in meta, use dataset.CLASSES instead')
        model.CLASSES = dataset.CLASSES
    if ('PALETTE' in checkpoint.get('meta', {})):
        model.PALETTE = checkpoint['meta']['PALETTE']
    else:
        print('"PALETTE" not found in meta, use dataset.PALETTE instead')
        model.PALETTE = dataset.PALETTE
    torch.cuda.empty_cache()
    # --- evaluation options ----------------------------------------------
    eval_kwargs = ({} if (args.eval_options is None) else args.eval_options)
    efficient_test = eval_kwargs.get('efficient_test', False)
    if efficient_test:
        warnings.warn('``efficient_test=True`` does not have effect in tools/test.py, the evaluation and format results are CPU memory efficient by default')
    # Cityscapes evaluation works on formatted result files, not raw outputs.
    eval_on_format_results = ((args.eval is not None) and ('cityscapes' in args.eval))
    if eval_on_format_results:
        assert (len(args.eval) == 1), 'eval on format results is not applicable for metrics other than cityscapes'
    if (args.format_only or eval_on_format_results):
        if ('imgfile_prefix' in eval_kwargs):
            tmpdir = eval_kwargs['imgfile_prefix']
        else:
            tmpdir = '.format_cityscapes'
            eval_kwargs.setdefault('imgfile_prefix', tmpdir)
        mmcv.mkdir_or_exist(tmpdir)
    else:
        tmpdir = None
    # --- run inference ---------------------------------------------------
    cfg.device = get_device()
    if (not distributed):
        warnings.warn('SyncBN is only supported with DDP. To be compatible with DP, we convert SyncBN to BN. Please use dist_train.sh which can avoid this error.')
        if (not torch.cuda.is_available()):
            assert (digit_version(mmcv.__version__) >= digit_version('1.4.4')), 'Please use MMCV >= 1.4.4 for CPU training!'
        model = revert_sync_batchnorm(model)
        model = build_dp(model, cfg.device, device_ids=cfg.gpu_ids)
        results = single_gpu_test(model, data_loader, args.show, args.show_dir, False, args.opacity, pre_eval=((args.eval is not None) and (not eval_on_format_results)), format_only=(args.format_only or eval_on_format_results), format_args=eval_kwargs)
    else:
        model = build_ddp(model, cfg.device, device_ids=[int(os.environ['LOCAL_RANK'])], broadcast_buffers=False)
        results = multi_gpu_test(model, data_loader, args.tmpdir, args.gpu_collect, False, pre_eval=((args.eval is not None) and (not eval_on_format_results)), format_only=(args.format_only or eval_on_format_results), format_args=eval_kwargs)
    # --- post-processing, rank 0 only ------------------------------------
    (rank, _) = get_dist_info()
    if (rank == 0):
        if args.out:
            warnings.warn('The behavior of ``args.out`` has been changed since MMSeg v0.16, the pickled outputs could be seg map as type of np.array, pre-eval results or file paths for ``dataset.format_results()``.')
            print(f'''
writing results to {args.out}''')
            mmcv.dump(results, args.out)
        if args.eval:
            eval_kwargs.update(metric=args.eval)
            metric = dataset.evaluate(results, **eval_kwargs)
            metric_dict = dict(config=args.config, metric=metric)
            mmcv.dump(metric_dict, json_file, indent=4)
        if ((tmpdir is not None) and eval_on_format_results):
            # Remove the temporary cityscapes-format directory.
            shutil.rmtree(tmpdir)
class C(nn.Module):
    """Plain 2-D convolution wrapper: 'same'-style padding, no bias."""

    def __init__(self, nIn, nOut, kSize, stride=1, groups=1):
        super().__init__()
        # Symmetric padding that preserves spatial size for stride 1.
        padding = (kSize - 1) // 2
        self.conv = nn.Conv2d(nIn, nOut, kSize, stride=stride,
                              padding=padding, bias=False, groups=groups)

    def forward(self, input):
        """Apply the convolution to ``input`` (N, C, H, W)."""
        return self.conv(input)
def isotopism(p):
    """Coerce ``p`` (int, group element, list, or tuple(s)) to a Permutation."""
    if isinstance(p, (Integer, int)):
        # n -> identity permutation on {1, ..., n}.
        return Permutation(range(1, p + 1))
    if isinstance(p, PermutationGroupElement):
        return Permutation(list(p.tuple()))
    if isinstance(p, list):
        # 0-based list of images -> 1-based permutation.
        return Permutation([entry + 1 for entry in p])
    if isinstance(p, tuple):
        if isinstance(p[0], Integer):
            return Permutation(tuple(entry + 1 for entry in p))
        if isinstance(p[0], tuple):
            # Compose the cycles left-to-right.
            result = isotopism(p[0])
            for cycle in p[1:]:
                result = result._left_to_right_multiply_on_left(isotopism(cycle))
            return result
    raise TypeError('unable to convert {!r} to isotopism'.format(p))
class EarlyStopping():
    """Stop training once accuracy fails to improve for ``patience`` epochs,
    checkpointing the best weights to ``models/<name>.best.save`` along the way."""

    def __init__(self, name, patience=8):
        self.patience = patience
        self.best_model = None
        self.best_score = None
        self.best_epoch = 0
        self.epoch = 0
        self.name = name
        self.logger = LogHelper.get_logger(EarlyStopping.__name__)

    def __call__(self, model, acc):
        """Record one epoch's accuracy; return True when training should stop."""
        self.epoch += 1
        if self.best_score is None:
            # First call: seed the best score so this epoch counts as a save.
            self.best_score = acc
        if acc < self.best_score:
            # No improvement this round.
            if self.epoch > (self.best_epoch + self.patience):
                self.logger.info('Early stopping: Terminate')
                return True
            self.logger.info('Early stopping: Worse Round')
            return False
        # New best (or tie): checkpoint the weights.
        torch.save(model.state_dict(), 'models/{0}.best.save'.format(self.name))
        self.best_score = acc
        self.best_epoch = self.epoch
        self.logger.info('Saving best weights from round {0}'.format(self.epoch))
        return False

    def set_best_state(self, model):
        """Load the best checkpointed weights back into ``model``."""
        self.logger.info('Loading weights from round {0}'.format(self.best_epoch))
        model.load_state_dict(torch.load('models/{0}.best.save'.format(self.name)))
def flatten_and_concat(Xs: List[torch.Tensor]) -> torch.Tensor:
    """Flatten each tensor to 1-D and concatenate them into a single vector."""
    flats = [tensor.reshape(-1) for tensor in Xs]
    return torch.cat(flats, dim=0)
def upsample2(input, data_format):
    """2x spatial upsampling for NHWC tensors via depth_to_space.

    Each channel is duplicated four times so depth_to_space scatters the
    copies into 2x2 spatial blocks — i.e. every pixel is repeated 2x2
    (nearest-neighbour upsampling).
    """
    assert (data_format == 'NHWC')
    nchw = tf.transpose(input, [0, 3, 1, 2])
    nchw = tf.concat([nchw] * 4, axis=1)
    nhwc = tf.transpose(nchw, [0, 2, 3, 1])
    return tf.depth_to_space(nhwc, 2)
def WDLEstimator(linear_feature_columns, dnn_feature_columns, dnn_hidden_units=(256, 128, 64), l2_reg_linear=1e-05, l2_reg_embedding=1e-05, l2_reg_dnn=0, seed=1024, dnn_dropout=0, dnn_activation='relu', task='binary', model_dir=None, config=None, linear_optimizer='Ftrl', dnn_optimizer='Adagrad', training_chief_hooks=None):
    """Build a Wide & Deep Learning ``tf.estimator.Estimator``.

    The wide part is a linear model over ``linear_feature_columns``; the deep
    part embeds ``dnn_feature_columns`` and feeds them through a DNN.  The two
    logit contributions are summed and wrapped by ``deepctr_model_fn`` for the
    given ``task`` ('binary' or 'regression' — per deepctr conventions).
    """
    def _model_fn(features, labels, mode, config):
        # Dropout (and similar layers) must know whether we are training.
        train_flag = (mode == tf.estimator.ModeKeys.TRAIN)
        linear_logits = get_linear_logit(features, linear_feature_columns, l2_reg_linear=l2_reg_linear)
        with variable_scope(DNN_SCOPE_NAME):
            (sparse_embedding_list, dense_value_list) = input_from_feature_columns(features, dnn_feature_columns, l2_reg_embedding=l2_reg_embedding)
            dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)
            dnn_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, False, seed=seed)(dnn_input, training=train_flag)
            dnn_logits = Dense(1, use_bias=False, kernel_initializer=tf.keras.initializers.glorot_normal(seed))(dnn_out)
        # Wide + deep: final score is the sum of both logits.
        logits = (linear_logits + dnn_logits)
        return deepctr_model_fn(features, mode, logits, labels, task, linear_optimizer, dnn_optimizer, training_chief_hooks=training_chief_hooks)
    return tf.estimator.Estimator(_model_fn, model_dir=model_dir, config=config)
def _write_signal(sim, signal_name, signal_value):
    """Resolve ``signal_name`` within ``sim`` and drive it with ``signal_value``."""
    resolved = _find_signal(sim, signal_name)
    sim.io[resolved] = signal_value
def parse_args():
    """Parse command-line options for re-evaluating detection results."""
    ap = argparse.ArgumentParser(description='Re-evaluate results')
    ap.add_argument('output_dir', nargs=1, help='results directory', type=str)
    ap.add_argument('--dataset', dest='dataset_name', help='dataset to re-evaluate', default='voc_2007_test', type=str)
    ap.add_argument('--matlab', dest='matlab_eval', help='use matlab for evaluation', action='store_true')
    ap.add_argument('--comp', dest='comp_mode', help='competition mode', action='store_true')
    ap.add_argument('--cfg', dest='cfg_file', help='optional config file', default=None, type=str)
    # With no arguments at all, show usage and bail out.
    if len(sys.argv) == 1:
        ap.print_help()
        sys.exit(1)
    return ap.parse_args()
def handleEntity(ctxObj, publish):
    """Placeholder entity handler: log the context object and forward it.

    Args:
        ctxObj: context object to process.
        publish: callable invoked with ``ctxObj`` to forward it downstream.
    """
    # TODO: replace this stub with real handling logic.
    print('Implement logic')  # fixed typo: message previously read 'Implement losic'
    print(ctxObj)
    publish(ctxObj)
class EntangleNode(Node):
    """Measurement node holding a fock-interference BSM detector pair."""

    def __init__(self, name: str, timeline: 'Timeline', src_list: List[str]):
        super().__init__(name, timeline)
        self.bsm_name = (name + '.bsm')
        bsm = QSDetectorFockInterference(self.bsm_name, timeline, src_list)
        self.add_component(bsm)
        bsm.attach(self)
        self.set_first_component(self.bsm_name)
        # Coarsest detector resolution bounds the time-matching tolerance below.
        self.resolution = max([d.time_resolution for d in bsm.detectors])
        bsm.set_detector(0, efficiency=BSM_DET1_EFFICIENCY, count_rate=SPDC_FREQUENCY, dark_count=BSM_DET1_DARK)
        # NOTE(review): detector 1 also uses BSM_DET1_DARK; if a separate
        # BSM_DET2_DARK constant exists this may be a copy-paste slip — confirm.
        bsm.set_detector(1, efficiency=BSM_DET2_EFFICIENCY, count_rate=SPDC_FREQUENCY, dark_count=BSM_DET1_DARK)

    def receive_qubit(self, src: str, qubit) -> None:
        """Forward an incoming qubit to the BSM component."""
        self.components[self.first_component_name].get(qubit, src=src)

    def get_detector_entries(self, detector_name: str, start_time: int, num_bins: int, frequency: float):
        """Histogram photon detections into ``num_bins`` time bins.

        Detector 0 adds +1 to a bin, detector 1 adds +2, so a bin value of 3
        means both detectors fired.  ``frequency`` is in Hz; times are in
        picoseconds (hence the 1e-12 / 1e12 conversion factors).
        """
        trigger_times = self.components[detector_name].get_photon_times()
        return_res = ([0] * num_bins)
        for time in trigger_times[0]:
            closest_bin = int(round((((time - start_time) * frequency) * 1e-12)))
            # Invert the bin mapping (1e12 converts seconds back to ps).  The
            # original multiplied by ``.0``, which forced expected_time to
            # always equal start_time and defeated the resolution check below.
            expected_time = (((float(closest_bin) * 1e12) / frequency) + start_time)
            if ((abs((expected_time - time)) < self.resolution) and (0 <= closest_bin < num_bins)):
                return_res[closest_bin] += 1
        for time in trigger_times[1]:
            closest_bin = int(round((((time - start_time) * frequency) * 1e-12)))
            expected_time = (((float(closest_bin) * 1e12) / frequency) + start_time)
            if ((abs((expected_time - time)) < self.resolution) and (0 <= closest_bin < num_bins)):
                return_res[closest_bin] += 2
        return return_res
def read_cifar10(filename_queue):
    """Read one CIFAR-10 binary record (1 label byte + 32x32x3 image bytes).

    Returns a record object with ``key``, ``label`` (int32) and
    ``uint8image`` (HWC uint8 tensor) fields.
    """
    class CIFAR10Record(object):
        pass

    result = CIFAR10Record()
    label_bytes = 1
    result.height = 32
    result.width = 32
    result.depth = 3
    image_bytes = result.height * result.width * result.depth
    record_len = label_bytes + image_bytes
    reader = tf.FixedLengthRecordReader(record_bytes=record_len)
    (result.key, value) = reader.read(filename_queue)
    raw = tf.decode_raw(value, tf.uint8)
    # First byte is the label; the remainder is the image in CHW order.
    result.label = tf.cast(tf.slice(raw, [0], [label_bytes]), tf.int32)
    depth_major = tf.reshape(tf.slice(raw, [label_bytes], [image_bytes]),
                             [result.depth, result.height, result.width])
    result.uint8image = tf.transpose(depth_major, [1, 2, 0])
    return result
def mobilenet_v2(pretrained=False, progress=True, device='cpu', **kwargs):
    """Build a MobileNetV2, optionally loading the bundled pretrained weights."""
    model = MobileNetV2(**kwargs)
    if not pretrained:
        return model
    # Weights ship next to this module under state_dicts/.
    weights_path = os.path.dirname(__file__) + '/state_dicts/mobilenet_v2.pt'
    model.load_state_dict(torch.load(weights_path, map_location=device))
    return model
def pytorch_func(a, b, c, d, e, f, tensor_kwargs=None):
    """Re-stack the trailing axes of c..f component-by-component; pass a, b through."""
    if tensor_kwargs is None:
        tensor_kwargs = {'device': a.device, 'dtype': a.dtype}
    _a_out = a
    _b_out = b
    # Vectors: stack each trailing-axis component.
    _c_out = _broadcast_and_stack([c[..., i] for i in range(3)], dim=-1)
    _e_out = _broadcast_and_stack([e[..., i] for i in range(5)], dim=-1)
    # Matrices: stack the rows of each column, then stack the columns.
    _d_out = _broadcast_and_stack(
        [_broadcast_and_stack([d[..., i, j] for i in range(2)], dim=-1) for j in range(2)],
        dim=-1)
    _f_out = _broadcast_and_stack(
        [_broadcast_and_stack([f[..., i, j] for i in range(6)], dim=-1) for j in range(6)],
        dim=-1)
    return (_a_out, _b_out, _c_out, _d_out, _e_out, _f_out)
def read_json(filename, encoding='utf-8'):
    """Load and parse a JSON file, returning the decoded object."""
    return json.loads(get_file_contents(filename, encoding=encoding))
class EventString():
    """A timed string event: ``value`` occurring at position ``at``."""

    def __init__(self, at=0, value=''):
        # at: event position/offset; value: the payload text.
        self.at = at
        self.value = value

    def __repr__(self):
        # Debug aid: show both fields explicitly.
        return '{}(at={!r}, value={!r})'.format(type(self).__name__, self.at, self.value)
def decapitalize(tok):
    """Lower-case the leading letter of ``tok``, prefixing a CAP marker.

    A leading HALF marker is stripped and re-attached in front of the output.
    Tokens that already start lower-case, or whose second character is also
    upper-case (acronym-like), are returned unchanged apart from HALF handling.
    """
    if (len(tok) == 0):
        return tok
    (pre, tok) = ((HALF, tok[1:]) if (tok[0] == HALF) else ('', tok))
    if (len(tok) == 0):
        # Token was exactly the HALF marker: nothing left to decapitalize.
        # (Previously this fell through and raised IndexError on tok[0].)
        return pre
    if (tok[0] == tok[0].lower()):
        return (pre + tok)
    if ((tok[0] == tok[0].upper()) and ((len(tok) == 1) or (tok[1] != tok[1].upper()))):
        return (((CAP + pre) + tok[0].lower()) + tok[1:])
    return (pre + tok)
def test_precision_macro_3d_np_array():
    """Macro-averaged precision on 3-D label arrays (with '' used as padding)."""
    y_true = np.array([[['human', 'mermaid'], ['', '']], [['human', 'minotaur'], ['bull', 'minotaur']]])
    y_pred = np.array([[['human', 'mermaid'], ['fish', 'mermaid']], [['human', 'minotaur'], ['bull', 'minotaur']]])
    # The spurious 'fish'/'mermaid' predictions lower macro precision to ~0.8333.
    assert (0.8333 == approx(precision(y_true, y_pred, 'macro'), rel=0.001))
    # Perfect predictions must give precision exactly 1.
    assert (1 == precision(y_true, y_true, 'macro'))
def batchnorm_args_preprocessor(args, kwargs):
    """Reject positional BatchNormalization arguments; pass everything through.

    Returns ``(args, kwargs, conversions)`` where ``conversions`` is always
    empty — this preprocessor only validates, it never rewrites.
    """
    if len(args) > 1:
        raise TypeError('The `BatchNormalization` layer does not accept positional arguments. Use keyword arguments instead.')
    return (args, kwargs, [])
# NOTE(review): the decorator name was corrupted to a bare '(message=...)'
# line; restored as np.deprecate per scipy 1.3 deprecation conventions —
# confirm against the upstream scipy.misc source.
@np.deprecate(message='scipy.misc.replace_notes_in_docstring is deprecated in Scipy 1.3.0')
def replace_notes_in_docstring(cls, notes):
    """Deprecated shim: delegate to the internal doccer helper ``_ld``."""
    return _ld.replace_notes_in_docstring(cls, notes)
def upload_file_to_s3_with_backoff(local_filename, key, *, bucket, num_tries=5, initial_delay=1.0, delay_factor=math.sqrt(2.0), thread_local=None):
    """Upload ``local_filename`` to s3://<bucket>/<key> with exponential backoff.

    Retries up to ``num_tries`` times, sleeping ``delay`` seconds between
    failures (multiplied by ``delay_factor`` after each attempt).
    ``thread_local`` may carry a cached per-thread S3 client.

    Raises:
        Exception: when every attempt fails (chained from the last boto error).
    """
    assert pathlib.Path(local_filename).is_file()
    if (thread_local is None):
        client = get_s3_client()
    else:
        # Lazily create and cache one client per thread.
        if (not hasattr(thread_local, 's3_client')):
            thread_local.s3_client = get_s3_client()
        client = thread_local.s3_client
    delay = initial_delay
    num_tries_left = num_tries
    while (num_tries_left >= 1):
        try:
            client.upload_file(local_filename, bucket, key, ExtraArgs={'ACL': 'bucket-owner-full-control'})
            return
        except Exception as err:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
            # are no longer swallowed; chain the cause for debuggability.
            if (num_tries_left == 1):
                raise Exception((((('upload backoff failed ' + ' ') + str(key)) + ' ') + str(delay))) from err
            time.sleep(delay)
            delay *= delay_factor
            num_tries_left -= 1
def get_marg_probs(filename='BSSG_input.txt'):
    """Run the Gurobi MILP script on ``filename`` (side effect only)."""
    cmd = ['/opt/gurobi701/linux64/bin/gurobi.sh', 'BSG_multi_milp.py', filename]
    subprocess.call(cmd)
def test_BBPSSW_phi_minus_psi_minus():
    """BBPSSW purification on a |phi-> x |psi-> input over 100 seeded trials.

    Each trial asserts: the kept memories lose their entanglement records, the
    two measurement results disagree, and the kept states end up as separate
    single-qubit kets.  The measurement outcome distribution is checked only
    loosely (~50% zeros).
    """
    counter = 0
    for i in range(100):
        (tl, kept1, kept2, meas1, meas2, ep1, ep2) = create_scenario(phi_minus, psi_minus, i)
        # Entanglement records must be cleared on both kept memories.
        assert (kept1.entangled_memory == kept2.entangled_memory == {'node_id': None, 'memo_id': None})
        assert (ep1.meas_res != ep2.meas_res)
        ket1 = tl.quantum_manager.get(kept1.qstate_key)
        ket2 = tl.quantum_manager.get(kept2.qstate_key)
        # The two kept states must be distinct single-qubit states.
        assert (id(ket1) != id(ket2))
        assert (len(ket1.keys) == len(ket2.keys) == 1)
        if (ep1.meas_res == 0):
            counter += 1
    # Loose statistical bound: outcome 0 should occur about half the time.
    assert (abs((counter - 50)) < 10)
def valid_string_length(label, trailing_dot):
    """Check total name length: up to 254 chars with a trailing dot, else 253."""
    limit = 254 if trailing_dot else 253
    return len(label) <= limit
# NOTE(review): the decorator was corrupted to a bare '.gpu' line (the same
# stripping that left '.parametrize(...)' elsewhere in this file); restored
# as @pytest.mark.gpu — confirm against the upstream test suite.
@pytest.mark.gpu
def test_gpu_access_on_device_interstate_edge_default():
    """An interstate-edge assignment reading GPU-global memory must validate."""
    sdfg = dace.SDFG('tester')
    sdfg.add_array('A', [20], dace.float64, storage=dace.StorageType.GPU_Global)
    state = sdfg.add_state()
    (me, mx) = state.add_map('test', dict(i='0:20'))
    # Nested SDFG whose interstate edge reads A[4] directly from GPU memory.
    nsdfg = dace.SDFG('nester')
    nsdfg.add_array('A', [20], dace.float64, storage=dace.StorageType.GPU_Global)
    state1 = nsdfg.add_state()
    state2 = nsdfg.add_state()
    nsdfg.add_edge(state1, state2, dace.InterstateEdge(assignments=dict(s='A[4]')))
    nsdfg_node = state.add_nested_sdfg(nsdfg, None, {'A'}, {})
    state.add_memlet_path(state.add_read('A'), me, nsdfg_node, dst_conn='A', memlet=dace.Memlet('A[0:20]'))
    state.add_nedge(nsdfg_node, mx, dace.Memlet())
    sdfg.validate()
def fuse_four_images(img_paths, image_size):
    """Fuse four images: two pairwise fusions concatenated along axis 1."""
    first_pair = fuse_two_images(img_paths[0:2], image_size)
    second_pair = fuse_two_images(img_paths[2:4], image_size)
    return np.concatenate([first_pair, second_pair], axis=1)
class LifelongAntEnv(mujoco_env.MujocoEnv, utils.EzPickle):
    """Mujoco Ant variant rewarding a target x-velocity and target height.

    NOTE(review): the ``@property`` decorators below were missing, which made
    ``not self.is_healthy`` always False (a bound method is truthy) and broke
    ``np.square(self.contact_forces)``; restored to match the standard Gym
    ``AntEnv`` (ant_v3) definitions — confirm no caller invokes these with ().
    """

    def __init__(self, xml_file='ant.xml', gear_ratio=30, ctrl_cost_weight=0.01, contact_cost_weight=0.0005, healthy_reward=1.0, terminate_when_unhealthy=True, healthy_z_range=(0.2, 1.2), contact_force_range=((- 1.0), 1.0), reset_noise_scale=0.1, exclude_current_positions_from_observation=True, target_vel=DEFAULT_VEL, height_cost=3, target_height=0.7, rgb_rendering_tracking=True, action_noise=0.0):
        utils.EzPickle.__init__(**locals())
        self._ctrl_cost_weight = ctrl_cost_weight
        self._contact_cost_weight = contact_cost_weight
        self._healthy_reward = healthy_reward
        self._terminate_when_unhealthy = terminate_when_unhealthy
        self._healthy_z_range = healthy_z_range
        self._contact_force_range = contact_force_range
        self._reset_noise_scale = reset_noise_scale
        self._exclude_current_positions_from_observation = exclude_current_positions_from_observation
        self._target_vel = target_vel
        self._target_vel_reward_weight = 1
        self._height_cost = height_cost
        self._target_height = target_height
        self.action_noise = action_noise
        xml_path = 'lifelong_rl/envs/environments/assets_llrl/'
        model_path = os.path.abspath(os.path.join(xml_path, xml_file))
        mujoco_env.MujocoEnv.__init__(self, model_path, 5)
    '\n    Required for compatibility with lifelong_rl lifelong environment setting\n    '

    def get_env_state(self):
        return self.sim.get_state()

    def set_env_state(self, state):
        self.sim.set_state(state)
    '\n    \n    '

    @property
    def healthy_reward(self):
        # Constant bonus while healthy (or always, when termination is disabled).
        return (float((self.is_healthy or self._terminate_when_unhealthy)) * self._healthy_reward)

    def control_cost(self, action):
        control_cost = (self._ctrl_cost_weight * np.sum(np.square(action)))
        return control_cost

    @property
    def contact_forces(self):
        # External contact forces, clipped to the configured range.
        raw_contact_forces = self.sim.data.cfrc_ext
        (min_value, max_value) = self._contact_force_range
        contact_forces = np.clip(raw_contact_forces, min_value, max_value)
        return contact_forces

    @property
    def contact_cost(self):
        contact_cost = (self._contact_cost_weight * np.sum(np.square(self.contact_forces)))
        return contact_cost

    def set_target_vel(self, vel):
        self._target_vel = vel

    def get_target_vel(self):
        if (self._target_vel is not None):
            return self._target_vel
        else:
            return DEFAULT_VEL

    @property
    def is_healthy(self):
        # Healthy = finite state and torso z within the allowed band.
        state = self.state_vector()
        (min_z, max_z) = self._healthy_z_range
        is_healthy = (np.isfinite(state).all() and (min_z <= state[2] <= max_z))
        return is_healthy

    @property
    def done(self):
        done = ((not self.is_healthy) if self._terminate_when_unhealthy else False)
        return done

    def step(self, action):
        """Advance one control step; reward = target_vel - (vel/height/action costs)."""
        action += (np.random.randn(*action.shape) * self.action_noise)
        action = action.clip((- 1.0), 1.0)
        xy_position_before = self.get_body_com('torso')[:2].copy()
        self.do_simulation(action, self.frame_skip)
        xy_position_after = self.get_body_com('torso')[:2].copy()
        xy_velocity = ((xy_position_after - xy_position_before) / self.dt)
        (x_velocity, y_velocity) = xy_velocity
        z = self.state_vector()[2]
        rewards = self.get_target_vel()
        vel_cost = abs((x_velocity - self.get_target_vel()))
        height_cost = (self._height_cost * ((z - self._target_height) ** 2))
        action_cost = (0.01 * np.sum((action ** 2)))
        costs = ((vel_cost + height_cost) + action_cost)
        reward = (rewards - costs)
        done = (not self.is_healthy)
        observation = self._get_obs()
        info = {'x velocity': x_velocity, 'target velocity': self.get_target_vel(), 'z': z, 'x': self.state_vector()[0], 'y': self.state_vector()[1], 'height cost': height_cost}
        return (observation, reward, done, info)

    def _get_obs(self):
        # Optionally drop the absolute x/y position from the observation.
        if self._exclude_current_positions_from_observation:
            return np.concatenate([self.sim.data.qpos.flat[2:], self.sim.data.qvel.flat])
        else:
            return np.concatenate([self.sim.data.qpos.flat, self.sim.data.qvel.flat])

    def get_obs(self):
        return self._get_obs()

    def reset_model(self):
        # Randomize initial pose/velocity within the configured noise scale.
        noise_low = (- self._reset_noise_scale)
        noise_high = self._reset_noise_scale
        qpos = (self.init_qpos + self.np_random.uniform(low=noise_low, high=noise_high, size=self.model.nq))
        qvel = (self.init_qvel + (self._reset_noise_scale * self.np_random.randn(self.model.nv)))
        self.set_state(qpos, qvel)
        observation = self._get_obs()
        return observation

    def viewer_setup(self):
        for (key, value) in DEFAULT_CAMERA_CONFIG.items():
            if isinstance(value, np.ndarray):
                getattr(self.viewer.cam, key)[:] = value
            else:
                setattr(self.viewer.cam, key, value)
def DFG_python(root_node, index_to_code, states):
    """Extract a data-flow graph from a Python tree-sitter parse tree.

    Recursively walks ``root_node`` and returns ``(DFG, states)`` where DFG is
    a list of tuples ``(code, idx, edge_type, [src_codes], [src_idxs])`` with
    edge_type 'comesFrom' or 'computedFrom', sorted by token index, and
    ``states`` maps a variable's code string to the indices that last defined
    it.  ``index_to_code`` maps (start_point, end_point) spans to (idx, code).
    """
    assignment = ['assignment', 'augmented_assignment', 'for_in_clause']
    if_statement = ['if_statement']
    for_statement = ['for_statement']
    while_statement = ['while_statement']
    do_first_statement = ['for_in_clause']
    def_statement = ['default_parameter']
    # Copy so sibling branches don't mutate the caller's state map.
    states = states.copy()
    if (((len(root_node.children) == 0) or (root_node.type == 'string')) and (root_node.type != 'comment')):
        # Leaf token (identifiers, literals, strings).
        (idx, code) = index_to_code[(root_node.start_point, root_node.end_point)]
        if ((root_node.type == code) or (root_node.type == 'string')):
            # Keywords/punctuation/strings carry no data flow.
            return ([], states)
        elif (code in states):
            # Known variable: flows from its last definition sites.
            return ([(code, idx, 'comesFrom', [code], states[code].copy())], states)
        elif ((root_node.type == 'identifier') and (root_node.parent.type == 'parameters')):
            # Function parameter: a fresh definition with no source.
            states[code] = [idx]
            return ([(code, idx, 'comesFrom', [], [])], states)
        else:
            return ([], states)
    elif (root_node.type in def_statement):
        # Default parameter: name (optionally) flows from its default value.
        name = root_node.child_by_field_name('name')
        value = root_node.child_by_field_name('value')
        DFG = []
        if (value is None):
            indexs = tree_to_variable_index(name, index_to_code)
            for index in indexs:
                (idx, code) = index_to_code[index]
                DFG.append((code, idx, 'comesFrom', [], []))
                states[code] = [idx]
            return (sorted(DFG, key=(lambda x: x[1])), states)
        else:
            name_indexs = tree_to_variable_index(name, index_to_code)
            value_indexs = tree_to_variable_index(value, index_to_code)
            (temp, states) = DFG_python(value, index_to_code, states)
            DFG += temp
            for index1 in name_indexs:
                (idx1, code1) = index_to_code[index1]
                for index2 in value_indexs:
                    (idx2, code2) = index_to_code[index2]
                    DFG.append((code1, idx1, 'comesFrom', [code2], [idx2]))
                states[code1] = [idx1]
            return (sorted(DFG, key=(lambda x: x[1])), states)
    elif (root_node.type in assignment):
        # Assignment-like nodes: pair left targets with right sources.
        if (root_node.type == 'for_in_clause'):
            right_nodes = [root_node.children[(- 1)]]
            left_nodes = [root_node.child_by_field_name('left')]
        else:
            if (root_node.child_by_field_name('right') is None):
                return ([], states)
            left_nodes = [x for x in root_node.child_by_field_name('left').children if (x.type != ',')]
            right_nodes = [x for x in root_node.child_by_field_name('right').children if (x.type != ',')]
            # Fall back to whole-node pairing when tuple arities don't line up.
            if (len(right_nodes) != len(left_nodes)):
                left_nodes = [root_node.child_by_field_name('left')]
                right_nodes = [root_node.child_by_field_name('right')]
            if (len(left_nodes) == 0):
                left_nodes = [root_node.child_by_field_name('left')]
            if (len(right_nodes) == 0):
                right_nodes = [root_node.child_by_field_name('right')]
        DFG = []
        # Process right-hand sides first so their reads use the pre-assignment state.
        for node in right_nodes:
            (temp, states) = DFG_python(node, index_to_code, states)
            DFG += temp
        for (left_node, right_node) in zip(left_nodes, right_nodes):
            left_tokens_index = tree_to_variable_index(left_node, index_to_code)
            right_tokens_index = tree_to_variable_index(right_node, index_to_code)
            temp = []
            for token1_index in left_tokens_index:
                (idx1, code1) = index_to_code[token1_index]
                temp.append((code1, idx1, 'computedFrom', [index_to_code[x][1] for x in right_tokens_index], [index_to_code[x][0] for x in right_tokens_index]))
                states[code1] = [idx1]
            DFG += temp
        return (sorted(DFG, key=(lambda x: x[1])), states)
    elif (root_node.type in if_statement):
        # Branches fork the state; afterwards all branch states are merged.
        DFG = []
        current_states = states.copy()
        others_states = []
        tag = False
        if ('else' in root_node.type):
            tag = True
        for child in root_node.children:
            if ('else' in child.type):
                tag = True
            if (child.type not in ['elif_clause', 'else_clause']):
                (temp, current_states) = DFG_python(child, index_to_code, current_states)
                DFG += temp
            else:
                # elif/else branches start from the pre-if state.
                (temp, new_states) = DFG_python(child, index_to_code, states)
                DFG += temp
                others_states.append(new_states)
        others_states.append(current_states)
        if (tag is False):
            # No else branch: the if may be skipped entirely, so keep the
            # original state as one of the merge inputs.
            others_states.append(states)
        new_states = {}
        for dic in others_states:
            for key in dic:
                if (key not in new_states):
                    new_states[key] = dic[key].copy()
                else:
                    new_states[key] += dic[key]
        for key in new_states:
            new_states[key] = sorted(list(set(new_states[key])))
        return (sorted(DFG, key=(lambda x: x[1])), new_states)
    elif (root_node.type in for_statement):
        DFG = []
        # Two passes to approximate loop-carried data flow (fixed point).
        for i in range(2):
            right_nodes = [x for x in root_node.child_by_field_name('right').children if (x.type != ',')]
            left_nodes = [x for x in root_node.child_by_field_name('left').children if (x.type != ',')]
            if (len(right_nodes) != len(left_nodes)):
                left_nodes = [root_node.child_by_field_name('left')]
                right_nodes = [root_node.child_by_field_name('right')]
            if (len(left_nodes) == 0):
                left_nodes = [root_node.child_by_field_name('left')]
            if (len(right_nodes) == 0):
                right_nodes = [root_node.child_by_field_name('right')]
            for node in right_nodes:
                (temp, states) = DFG_python(node, index_to_code, states)
                DFG += temp
            for (left_node, right_node) in zip(left_nodes, right_nodes):
                left_tokens_index = tree_to_variable_index(left_node, index_to_code)
                right_tokens_index = tree_to_variable_index(right_node, index_to_code)
                temp = []
                for token1_index in left_tokens_index:
                    (idx1, code1) = index_to_code[token1_index]
                    temp.append((code1, idx1, 'computedFrom', [index_to_code[x][1] for x in right_tokens_index], [index_to_code[x][0] for x in right_tokens_index]))
                    states[code1] = [idx1]
                DFG += temp
            if (root_node.children[(- 1)].type == 'block'):
                (temp, states) = DFG_python(root_node.children[(- 1)], index_to_code, states)
                DFG += temp
        # De-duplicate edges produced by the two passes, merging their sources.
        dic = {}
        for x in DFG:
            if ((x[0], x[1], x[2]) not in dic):
                dic[(x[0], x[1], x[2])] = [x[3], x[4]]
            else:
                dic[(x[0], x[1], x[2])][0] = list(set((dic[(x[0], x[1], x[2])][0] + x[3])))
                dic[(x[0], x[1], x[2])][1] = sorted(list(set((dic[(x[0], x[1], x[2])][1] + x[4]))))
        DFG = [(x[0], x[1], x[2], y[0], y[1]) for (x, y) in sorted(dic.items(), key=(lambda t: t[0][1]))]
        return (sorted(DFG, key=(lambda x: x[1])), states)
    elif (root_node.type in while_statement):
        DFG = []
        # Same two-pass fixed-point approximation as for-loops.
        for i in range(2):
            for child in root_node.children:
                (temp, states) = DFG_python(child, index_to_code, states)
                DFG += temp
        dic = {}
        for x in DFG:
            if ((x[0], x[1], x[2]) not in dic):
                dic[(x[0], x[1], x[2])] = [x[3], x[4]]
            else:
                dic[(x[0], x[1], x[2])][0] = list(set((dic[(x[0], x[1], x[2])][0] + x[3])))
                dic[(x[0], x[1], x[2])][1] = sorted(list(set((dic[(x[0], x[1], x[2])][1] + x[4]))))
        DFG = [(x[0], x[1], x[2], y[0], y[1]) for (x, y) in sorted(dic.items(), key=(lambda t: t[0][1]))]
        return (sorted(DFG, key=(lambda x: x[1])), states)
    else:
        # Generic node: recurse into children, visiting for_in_clause first
        # (comprehension iterables define names used by earlier children).
        DFG = []
        for child in root_node.children:
            if (child.type in do_first_statement):
                (temp, states) = DFG_python(child, index_to_code, states)
                DFG += temp
        for child in root_node.children:
            if (child.type not in do_first_statement):
                (temp, states) = DFG_python(child, index_to_code, states)
                DFG += temp
        return (sorted(DFG, key=(lambda x: x[1])), states)
def to_video(ema_model, arg):
    """Render 200 saved sample frames into an mp4, then delete the frame files."""
    import matplotlib.pyplot as plt
    import matplotlib.cm as cm
    import matplotlib.animation as animation
    import matplotlib.image as mpimg
    ema_model.eval()
    # CIFAR-10 grids use 100 samples; other datasets use 25.
    num = 100 if arg.dataset == 'cifar10' else 25
    save_sample_q(ema_model, 0, arg, num=num, video=True)
    fig = plt.figure()
    artists = []
    for frame_idx in range(1, 201):
        frame = plt.imshow(mpimg.imread('iter-%d.png' % frame_idx), animated=True)
        artists.append([frame])
    ani = animation.ArtistAnimation(fig, artists, interval=200, blit=True, repeat_delay=0)
    ani.save('%s-generate.mp4' % arg.dataset)
    print('save a video to show the sampling')
    # Clean up the intermediate frame images.
    for frame_idx in range(1, 201):
        os.remove('iter-%d.png' % frame_idx)
def ComputeRHS(rhs, w_hat, solver, work, Tp, VT, VTp, K, K2, K_over_K2, Source, u_dealias, mask, **context):
    """Assemble the spectral right-hand side for the vorticity equation.

    Computes the (dealiased) convection term from ``w_hat``, optionally
    zeroes Nyquist modes, then adds the linear (viscous + source) part.
    NOTE: reads ``params.nu`` from module-level solver parameters.
    """
    rhs = solver.conv(rhs, w_hat, work, Tp, VTp, K, K_over_K2, u_dealias)
    if (mask is not None):
        # Remove Nyquist frequencies to avoid aliasing artefacts.
        rhs.mask_nyquist(mask)
    rhs = solver.add_linear(rhs, w_hat, params.nu, K2, Source)
    return rhs
def test_pooling1d(device):
    """Pooling1d smoke test: max over [1,3,2] is 3, avg is 2, and the module
    must be traceable with torch.jit."""
    from speechbrain.nnet.pooling import Pooling1d
    # Shape (batch=1, time=3, channels=1) after the unsqueezes.
    input = torch.tensor([1, 3, 2], device=device).unsqueeze(0).unsqueeze((- 1)).float()
    pool = Pooling1d('max', 3).to(device)
    output = pool(input)
    assert (output == 3)
    pool = Pooling1d('avg', 3).to(device)
    output = pool(input)
    assert (output == 2)
    # Tracing must succeed and return a truthy traced module.
    assert torch.jit.trace(pool, input)
def supported_graphbuilder_generator():
    """Yield LabelCooccurrenceGraphBuilder instances for every supported
    (weighted, include_self_edges, normalize_self_edges) combination.

    Normalization is only meaningful for weighted graphs with self edges, so
    normalize=True is yielded only in that case.
    """
    for weighted in (True, False):
        for include_self_edges in (True, False):
            if weighted and include_self_edges:
                norm_options = [False, True]
            else:
                norm_options = [False]
            for normalize_self_edges in norm_options:
                yield LabelCooccurrenceGraphBuilder(weighted=weighted, include_self_edges=include_self_edges, normalize_self_edges=normalize_self_edges)
class Helper(HelperBase):
    """Numpy-array model helper: running average plus text (de)serialization."""

    def increment_average(self, model, model_next, n):
        """Fold ``model_next`` into the running mean ``model`` as its n-th sample."""
        delta = (model_next - model) / n
        return np.add(model, delta)

    def save(self, model, path=None):
        """Write ``model`` to ``path`` (a fresh temp file when omitted); return the path."""
        if not path:
            (_, path) = tempfile.mkstemp()
        np.savetxt(path, model)
        return path

    def load(self, path):
        """Read a model array back from ``path``."""
        return np.loadtxt(path)
def test_register_cached(auth_storage, auth_provider_class):
    """Registering via the decorator must wrap the provider in CachingAuthProvider."""
    auth_storage.register()(auth_provider_class)
    assert auth_storage.providers
    # The stored entry is the caching wrapper, which holds the real provider.
    assert isinstance(auth_storage.providers[0], CachingAuthProvider)
    assert isinstance(auth_storage.providers[0].provider, auth_provider_class)
def test_two_tag_training_backprop(pretrain_file, tmp_path):
    """Finetuning must backprop into both tag classifier heads (weights change)."""
    trainer = run_two_tag_training(pretrain_file, tmp_path)
    trainer.save(os.path.join(trainer.args['save_dir'], trainer.args['save_name']))
    new_trainer = run_two_tag_training(pretrain_file, tmp_path, '--finetune')
    assert (len(trainer.model.tag_clfs) == 2)
    assert (len(new_trainer.model.tag_clfs) == 2)
    # Every head's weights must have moved during the finetuning run.
    for (old_clf, new_clf) in zip(trainer.model.tag_clfs, new_trainer.model.tag_clfs):
        assert (not torch.allclose(old_clf.weight, new_clf.weight))
class TestNagFCompilerVersions(object):
    """Check NAG Fortran compiler version-string parsing against known samples."""

    def test_version_match(self):
        # nag_version_strings: (compiler name, raw version output, expected version).
        for (comp, vs, version) in nag_version_strings:
            fc = numpy.distutils.fcompiler.new_fcompiler(compiler=comp)
            v = fc.version_match(vs)
            assert_((v == version))
def rotate_pose_msg_by_euler_angles(pose, r, p, y):
    """Post-multiply ``pose`` by the rotation given by euler angles (r, p, y)."""
    rotation = quaternion_matrix(quaternion_from_euler(r, p, y))
    combined = concatenate_matrices(matrix_from_pose_msg(pose), rotation)
    return pose_msg_from_matrix(combined)
def dict_to_json(dict, fname):
    """Append ``dict`` as one JSON line to ``fname`` (JSONL-style)."""
    with open(fname, 'a') as f:
        f.write(json.dumps(dict))
        f.write('\n')
class Code(io.StringIO):
    """StringIO specialized for emitting indented, %-formatted code lines."""

    def start(self, indent, fmt, *args):
        """Begin a line: write ``indent`` spaces then a formatted fragment."""
        self.write(u' ' * indent)
        self.add(fmt, *args)

    def add(self, fmt, *args):
        """Append a formatted fragment to the current line."""
        self.write(self._format(fmt, args))

    def end(self, fmt, *args):
        """Append a final fragment and terminate the line."""
        self.add(fmt, *args)
        self.write(u'\n')

    def _format(self, fmt, args):
        # Accept bytes format strings by decoding them first.
        if isinstance(fmt, bytes):
            fmt = fmt.decode('utf-8')
        return fmt % args

    def __call__(self, indent, fmt, *args):
        """Emit a complete line: indent, formatted text, newline."""
        self.start(indent, fmt, *args)
        self.write(u'\n')
def readJSONLine(path, verbose=False):
    """Read a JSON-lines file (one JSON document per line) and return the parsed list.

    The local previously named ``input`` shadowed the builtin; renamed to ``lines``.
    """
    lines = readTXTFile(path)
    data = []
    for raw_line in lines:
        data.append(json.loads(raw_line.strip()))
    if verbose:
        print('[I] file read complete')
    return data
def test_BinIOUSegmLoss():
    """Smoke-test BinIOUSegmLoss on random binary masks, with and without gating."""
    reset_seed(0, check_cudnn=False)
    instance = BinIOUSegmLoss(smooth=1.0)
    announce_msg('Testing {}'.format(instance))
    cuda = 0
    DEVICE = torch.device('cuda:{}'.format(cuda) if torch.cuda.is_available() else 'cpu')
    if torch.cuda.is_available():
        torch.cuda.set_device(int(cuda))
    instance.to(DEVICE)
    h, w = 32, 64
    b = 10
    # Random binary prediction / target masks of shape (b, 1, h, w).
    pred_m = torch.randint(low=0, high=2, size=(b, 1, h, w), dtype=torch.float).to(DEVICE)
    true_m = torch.randint(low=0, high=2, size=(b, 1, h, w), dtype=torch.float).to(DEVICE)
    probs = 0.0
    loss = instance(pred_m.view(b, -1), true_m.view(b, -1))
    print('Props = {}. Loss value: {}'.format(probs, loss))
    print('sum loss {}'.format(loss.sum()))
    print(loss.shape)
    # Second pass: gate out ~90% of the pixels with a Bernoulli mask.
    probs = 0.9
    gater = bernoulli.Bernoulli(probs=torch.tensor([1.0 - probs]))
    gate = gater.sample(sample_shape=true_m.view(b, -1).shape).squeeze(dim=-1)
    gate = gate.to(DEVICE)
    loss = instance(pred_m.view(b, -1), true_m.view(b, -1), gate)
    print('Props = {}. Loss value: {}'.format(probs, loss))
    print('sum loss {}'.format(loss.sum()))
def ideal_to_gfan_format(input_ring, polys):
    """Render an ideal as gfan input: the ring description followed by {p1,...,pk}."""
    # Strip spaces and quotes from each polynomial's string form.
    cleaned = (str(poly).replace(' ', '').replace("'", '') for poly in polys)
    ideal_gen_str = '{' + ','.join(cleaned) + '}'
    return ring_to_gfan_format(input_ring) + ideal_gen_str
# NOTE(review): the line below looks truncated — presumably "@pytest.mark"
# precedes ".parametrize" in the original file.
.parametrize('key, mat, quad', some_mats_and_quads)
def test_isub(key, mat, quad):
    """In-place subtraction of a matrix copy from itself should leave ~zero data."""
    test = key[0]
    trial = key[1]
    measure = 1
    # A 3-element key optionally carries a custom measure.
    if (len(key) == 3):
        measure = key[2]
    # GL quadrature is skipped by this test.
    if (quad == 'GL'):
        return
    t0 = test[0]
    t1 = trial[0]
    # Boundary-condition bases need a randomly chosen bc before instantiation.
    if (trial[0] in bcbases):
        t1 = functools.partial(t1, bc=bcs[np.random.randint(0, 6)])
    testfunction = (t0(N, quad=quad), test[1])
    trialfunction = (t1(N, quad=quad), trial[1])
    try:
        m = mat(testfunction, trialfunction, measure=measure)
    except AssertionError:
        # Combination not supported by this matrix class — nothing to test.
        return
    mc = m.copy()
    m -= mc
    # m - m must be numerically zero.
    assert (np.linalg.norm(m.diags('csr').data) < 1e-08)
    # Same check via the generic SparseMatrix in-place subtraction.
    m1 = SparseMatrix(deepcopy(dict(mc)), m.shape)
    m2 = SparseMatrix(deepcopy(dict(mc)), m.shape)
    m1 -= m2
    assert (np.linalg.norm(m1.diags('csr').data) < 1e-08)
def dctn(x, type=2, shape=None, axes=None, norm=None, overwrite_x=False):
    """Multidimensional DCT wrapper: resolve the target shape, then delegate to pocketfft."""
    resolved_shape = _good_shape(x, shape, axes)
    return _pocketfft.dctn(x, type, resolved_shape, axes, norm, overwrite_x)
class SchurTensorModule(CombinatorialFreeModule_Tensor):
    """The r-fold tensor power of an n-dimensional free module over R.

    Carries a right action of the symmetric group algebra S_r (permuting
    tensor factors) and a left action of the Schur algebra S(n, r).
    """

    def __init__(self, R, n, r):
        # Base free module with basis indexed by 1..n.
        C = CombinatorialFreeModule(R, list(range(1, (n + 1))))
        self._n = n
        self._r = r
        self._sga = SymmetricGroupAlgebra(R, r)
        self._schur = SchurAlgebra(R, n, r)
        cat = ModulesWithBasis(R).TensorProducts().FiniteDimensional()
        CombinatorialFreeModule_Tensor.__init__(self, tuple(([C] * r)), category=cat)
        # Left action of the Schur algebra, extended linearly from basis elements.
        g = self._schur.module_morphism(self._monomial_product, codomain=self)
        self._schur_action = self.module_morphism(g, codomain=self, position=1)

    def _repr_(self):
        msg = 'The {}-fold tensor product of a free module of dimension {}'
        msg += ' over {}'
        return msg.format(self._r, self._n, self.base_ring())

    def construction(self):
        # No functorial construction is registered for this module.
        return None

    def _monomial_product(self, xi, v):
        """Return the image of basis key ``v`` under the Schur-algebra basis element ``xi``."""
        ret = []
        # Enumerate all index tuples whose Schur representative matches xi.
        for i in itertools.product(list(range(1, (self._n + 1))), repeat=self._r):
            if (schur_representative_from_index(i, v) == xi):
                ret.append(tuple(i))
        return self.sum_of_monomials(ret)

    class Element(CombinatorialFreeModule_Tensor.Element):
        def _acted_upon_(self, elt, self_on_left=False):
            """Dispatch the S_r right action and the Schur-algebra left action."""
            P = self.parent()
            if self_on_left:
                # Symmetric group algebra element: permute factors, weight by coefficients.
                if (elt in P._sga):
                    return P.sum_of_terms(((tuple([m[(i - 1)] for i in me]), (c * ce)) for (m, c) in self for (me, ce) in elt))
                # A bare permutation (index of the group algebra basis).
                if (elt in P._sga._indices):
                    return P.sum_of_terms(((tuple([m[(i - 1)] for i in elt]), c) for (m, c) in self))
            elif (elt in P._schur):
                return P._schur_action(elt, self)
            return super()._acted_upon_(elt, self_on_left)
class GroupedEpochBatchIterator(EpochBatchIterator):
    """Epoch batch iterator over multiple batch groups (one sampler list per group).

    Each group is truncated to a multiple of ``step_size = mult_rate * num_shards``
    so every shard draws the same number of batches from each group; shuffling
    permutes whole step_size-sized mini-shards to keep groups aligned across shards.
    """

    def __init__(self, dataset, collate_fn, batch_samplers, seed=1, num_shards=1, shard_id=0, num_workers=0, epoch=0, mult_rate=1, buffer_size=0):
        super().__init__(dataset, collate_fn, batch_samplers, seed, num_shards, shard_id, num_workers, epoch, buffer_size)
        # Immutable snapshot of each group's batches; epochs reshuffle this pool.
        self._frozen_batches = tuple([tuple(sub_batch) for sub_batch in batch_samplers])
        self.step_size = (mult_rate * num_shards)
        # Usable batch count per group, rounded down to a multiple of step_size.
        self.lengths = [((len(x) // self.step_size) * self.step_size) for x in self.frozen_batches]

    def __len__(self):
        return sum(self.lengths)

    def first_batch(self):
        """Return the first collated batch, or 'DUMMY' when it must come from the loader."""
        if (len(self.frozen_batches) == 0):
            raise Exception('The dataset is empty. This could indicate that all elements in the dataset have been skipped. Try increasing the max number of allowed tokens or using a larger dataset.')
        if self.dataset.supports_fetch_outside_dataloader:
            return self.collate_fn([self.dataset[i] for i in self.frozen_batches[0][0]])
        else:
            return 'DUMMY'

    def _get_iterator_for_epoch(self, epoch, shuffle, fix_batches_to_gpus=False, offset=0):
        def shuffle_batches(batches, seed):
            # Deterministic shuffle: same seed gives the same order on every worker.
            with data_utils.numpy_seed(seed):
                np.random.shuffle(batches)
            return batches

        def return_full_batches(batch_sets, seed, shuffle):
            if shuffle:
                batch_sets = [shuffle_batches(list(x), seed) for x in batch_sets]
            # Truncate each group to its step_size-aligned length, then flatten.
            batch_sets = [batch_sets[i][:self.lengths[i]] for i in range(len(batch_sets))]
            batches = list(itertools.chain.from_iterable(batch_sets))
            if shuffle:
                with data_utils.numpy_seed(seed):
                    # Permute whole mini-shards of step_size batches, not single batches.
                    idx = np.random.permutation((len(batches) // self.step_size))
                if ((len(idx) * self.step_size) != len(batches)):
                    raise ValueError(('ERROR: %d %d %d %d' % (len(idx), self.step_size, len(batches), self.shard_id)), ':'.join([('%d' % x) for x in self.lengths]))
                mini_shards = [batches[(i * self.step_size):((i + 1) * self.step_size)] for i in idx]
                batches = list(itertools.chain.from_iterable(mini_shards))
            return batches

        if self._supports_prefetch:
            raise NotImplementedError('To be implemented')
        else:
            batches = return_full_batches(self.frozen_batches, (self.seed + epoch), shuffle)
            batches = list(ShardedIterator(batches, self.num_shards, self.shard_id, fill_value=[]))
        if ((offset > 0) and (offset >= len(batches))):
            return None
        if (self.num_workers > 0):
            # Silence noisy multiprocessing semaphore_tracker warnings in workers.
            os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning'
        itr = torch.utils.data.DataLoader(self.dataset, collate_fn=self.collate_fn, batch_sampler=batches[offset:], num_workers=self.num_workers)
        if (self.buffer_size > 0):
            itr = BufferedIterator(self.buffer_size, itr)
        return CountingIterator(itr, start=offset)
def update_shard_values_for_worker(num_workers, worker_id):
    """Rewrite sharding constants in the default TF graph for multi-worker runs.

    Multiplies every NUM_SHARDS constant by ``num_workers`` and offsets every
    SHARD_ID by ``num_shards_per_worker * worker_id`` so each worker consumes a
    disjoint data slice.  Also patches the corresponding constants embedded
    inside ``dataset_factory`` functions whose filter predicate is registered in
    SHARD_FILTER_PRED, re-serializing and re-registering the edited function.
    """
    num_shards_per_worker = 1
    for num_shards in tf.get_collection(shard.NUM_SHARDS):
        # Constants live in the op's value tensor; edit it in place.
        num_shards_tensor = num_shards.op.node_def.attr['value'].tensor
        num_shards_per_worker = num_shards_tensor.int64_val[0]
        num_shards_tensor.int64_val[0] *= num_workers
        num_shards.op._set_attr('value', attr_value_pb2.AttrValue(tensor=num_shards_tensor))
        assert (num_shards.op.node_def.attr['value'].tensor.int64_val[0] == (num_shards_per_worker * num_workers))
    for shard_id in tf.get_collection(shard.SHARD_ID):
        shard_id_tensor = shard_id.op.node_def.attr['value'].tensor
        shard_id_tensor.int64_val[0] += (num_shards_per_worker * worker_id)
        shard_id.op._set_attr('value', attr_value_pb2.AttrValue(tensor=shard_id_tensor))
        assert (shard_id.op.node_def.attr['value'].tensor.int64_val[0] == shard_id_tensor.int64_val[0])
    if (len(tf.get_collection(shard.SHARD_FILTER_PRED)) > 0):
        shard_filter_pred_names = [v.decode('ascii') for v in tf.get_collection(shard.SHARD_FILTER_PRED)]
        for op in tf.get_default_graph().get_operations():
            if ('dataset_factory' not in op.node_def.attr):
                continue
            func_name = op.node_def.attr['dataset_factory'].func.name
            dataset_factory_func = tf.get_default_graph()._get_function(func_name)
            dataset_factory_func_def = dataset_factory_func.definition
            node_name_to_node = {}
            for node in dataset_factory_func_def.node_def:
                node_name_to_node[node.name] = node
                # Only patch nodes whose filter predicate was registered above.
                if (('predicate' in node.attr) and (node.attr['predicate'].func.name in shard_filter_pred_names)):
                    # Inputs are "<node>:output:0" references; recover the node names.
                    num_shards_node_name = node.input[shard.FILTER_DATASET_NUM_SHARDS_POS].split(':output:0')[0]
                    shard_id_node_name = node.input[shard.FILTER_DATASET_SHARD_ID_POS].split(':output:0')[0]
                    num_shards_node = node_name_to_node[num_shards_node_name]
                    shard_id_node = node_name_to_node[shard_id_node_name]
                    num_shards_per_worker = num_shards_node.attr['value'].tensor.int64_val[0]
                    num_shards_node.attr['value'].tensor.int64_val[0] *= num_workers
                    shard_id_node.attr['value'].tensor.int64_val[0] += (num_shards_per_worker * worker_id)
                    if dataset_factory_func._c_func:
                        # Re-register the edited function under a worker-unique name.
                        func_name = ('%s_%d' % (func_name, shard_id_node.attr['value'].tensor.int64_val[0]))
                        dataset_factory_func._func_name = func_name
                        dataset_factory_func_def.signature.name = func_name
                        serialized = dataset_factory_func_def.SerializeToString()
                        c_func = c_api.TF_FunctionImportFunctionDef(serialized)
                        dataset_factory_func._c_func = c_api_util.ScopedTFFunction(c_func)
                        tf.get_default_graph()._add_function(dataset_factory_func)
                        # Point the op at the renamed function.
                        op_func = op.node_def.attr['dataset_factory'].func
                        op_func.name = func_name
                        op._set_attr('dataset_factory', attr_value_pb2.AttrValue(func=op_func))
                    break
        assert (dataset_factory_func == tf.get_default_graph()._get_function(func_name))
# NOTE(review): truncated decorator — presumably "@dataclass" precedes "(frozen=True)".
(frozen=True)
class DecodeRequest():
    """Request payload for decoding token ids back to text."""
    # Token ids to decode.
    tokens: List[int]
    # "<organization>/<name>" identifier of the tokenizer to use.
    tokenizer: str = 'huggingface/gpt2'
    clean_up_tokenization_spaces: bool = False

    def tokenizer_organization(self):
        # Part of the tokenizer id before the first '/'.
        return self.tokenizer.split('/')[0]

    def tokenizer_name(self):
        # Part of the tokenizer id after the '/'.
        return self.tokenizer.split('/')[1]
def find_matching_trees(docs, num_sentences, accepted_trees, tag_pipe, parser_pipes, shuffle=True, chunk_size=10, max_len=140, min_len=10, output_ptb=False):
    """Parse raw documents with every parser pipe and collect trees all parsers agree on.

    Processes ``docs`` in chunks: tags each chunk, drops empty/over-long docs,
    runs every parser, and keeps a sentence's tree only when all parsers produce
    the identical tree and it is not already in ``accepted_trees``.
    Returns a set of tree strings; stops early once ``num_sentences`` new trees
    are collected (a negative ``num_sentences`` means "use all docs").
    """
    if (num_sentences < 0):
        tqdm_total = len(docs)
    else:
        tqdm_total = num_sentences
    # '{}' keeps PTB bracket format; '{:L}' is the labeled alternative.
    if output_ptb:
        output_format = '{}'
    else:
        output_format = '{:L}'
    with tqdm(total=tqdm_total, leave=False) as pbar:
        if shuffle:
            random.shuffle(docs)
        new_trees = set()
        for chunk_start in range(0, len(docs), chunk_size):
            chunk = docs[chunk_start:(chunk_start + chunk_size)]
            chunk = [stanza.Document([], text=t) for t in chunk]
            if (num_sentences < 0):
                # Progress is per-document when consuming everything.
                pbar.update(len(chunk))
            tag_pipe(chunk)
            chunk = [d for d in chunk if (len(d.sentences) > 0)]
            if (max_len is not None):
                # Drop documents containing any sentence at or above max_len words.
                chunk = [d for d in chunk if (max((len(s.words) for s in d.sentences)) < max_len)]
            if (len(chunk) == 0):
                continue
            parses = []
            try:
                for pipe in parser_pipes:
                    pipe(chunk)
                    trees = [output_format.format(sent.constituency) for doc in chunk for sent in doc.sentences if (len(sent.words) >= min_len)]
                    parses.append(trees)
            except TextTooLongError as e:
                # Skip the whole chunk if any parser rejects it for length.
                continue
            # zip aligns the i-th sentence's tree across all parsers.
            for tree in zip(*parses):
                if (len(set(tree)) != 1):
                    # Parsers disagree — discard.
                    continue
                tree = tree[0]
                if (tree in accepted_trees):
                    continue
                if (tree not in new_trees):
                    new_trees.add(tree)
                    if (num_sentences >= 0):
                        pbar.update(1)
            if ((num_sentences >= 0) and (len(new_trees) >= num_sentences)):
                return new_trees
    return new_trees
def test_BitMaskedArray_NumpyArray():
    """Fancy-index BitMaskedArray in every (valid_when, lsb_order) configuration."""
    content_values = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6]
    # (mask bits, valid_when, lsb_order) — all four encode the same validity pattern.
    cases = [
        ([1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1], True, False),
        ([0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0], False, False),
        ([0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1], True, True),
        ([1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0], False, True),
    ]
    for bits, valid_when, lsb_order in cases:
        layout = ak.contents.bitmaskedarray.BitMaskedArray(
            ak.index.Index(np.packbits(np.array(bits, np.uint8))),
            ak.contents.numpyarray.NumpyArray(np.array(content_values)),
            valid_when=valid_when,
            length=13,
            lsb_order=lsb_order,
        )
        picks = np.array([0, 1, 4], np.int64)
        result = layout[picks]
        assert to_list(result) == [0.0, 1.0, None]
        assert layout.to_typetracer()[picks].form == result.form
def v2w(signal, n):
    """Build the comma-separated wire list "<signal><n-1>, ..., <signal>0".

    Uses ``str.join`` instead of quadratic string concatenation.  The original
    emitted "<signal>0" even for n <= 0; that behavior is preserved.
    """
    joined = ', '.join(signal + str(i) for i in range(n - 1, -1, -1))
    return joined or (signal + '0')
def test_observers(short_test_case):
    """Each observer hook must fire the expected number of times during execution."""
    tracer = ExecutionTracer()
    tracer.current_thread_identifier = threading.current_thread().ident
    executor = TestCaseExecutor(tracer)
    observer = MagicMock()
    # The hook must hand back the (possibly replaced) second argument.
    observer.before_statement_execution.side_effect = lambda a, b, c: b
    executor.add_observer(observer)
    executor.execute(short_test_case)
    expected_counts = {
        'before_test_case_execution': 1,
        'before_statement_execution': 2,
        'after_statement_execution': 2,
        'after_test_case_execution_inside_thread': 1,
        'after_test_case_execution_outside_thread': 1,
    }
    for hook, count in expected_counts.items():
        assert getattr(observer, hook).call_count == count
def QuaternionMatrixGroupGF3():
    """Return the quaternion group realized as a group of 2x2 matrices over GF(3)."""
    from sage.rings.finite_rings.finite_field_constructor import FiniteField
    from sage.matrix.matrix_space import MatrixSpace
    space = MatrixSpace(FiniteField(3), 2)
    # Two generators playing the roles of i and j.
    gen_i = space([1, 1, 1, 2])
    gen_j = space([2, 1, 1, 1])
    return MatrixGroup([gen_i, gen_j])
def to_mkldnn(module):
    """Recursively replace supported torch.nn layers in `module` with MKL-DNN wrappers."""
    # (source class(es), converter) pairs — checked with isinstance, so subclasses convert too.
    conversions = (
        (torch.nn.Linear, MkldnnLinear),
        (torch.nn.Conv1d, MkldnnConv1d),
        (torch.nn.Conv2d, MkldnnConv2d),
        (torch.nn.Conv3d, MkldnnConv3d),
        ((torch.nn.BatchNorm2d, torch.nn.BatchNorm3d), MkldnnBatchNorm),
    )

    def convert_one(m):
        for source, target in conversions:
            if isinstance(m, source):
                return target(m)
        return m

    def convert_tree(m):
        converted = convert_one(m)
        for name, child in m.named_children():
            setattr(converted, name, convert_tree(child))
        return converted

    return convert_tree(module)
def read_directory(dirname, broken_ok=False, tree_callback=None):
    """Read every tree file in `dirname` (sorted by filename) and return all trees."""
    trees = []
    for filename in sorted(os.listdir(dirname)):
        path = os.path.join(dirname, filename)
        trees.extend(read_tree_file(path, broken_ok, tree_callback))
    return trees
def selectTrainData(tweets, targets):
    """Return the indices of tweets that literally mention a keyword of their target topic."""
    inv_topics = {v: k for k, v in preprocess.TOPICS_LONG.items()}
    inlist = []
    outcnt = 0
    for i, tweet in enumerate(tweets):
        keywords = preprocess.KEYWORDS.get(inv_topics.get(targets[i]))
        lowered = tweet.lower()
        # Keep the tweet if any topic keyword occurs (case-insensitively).
        if any(key.lower() in lowered for key in keywords):
            inlist.append(i)
        else:
            outcnt += 1
    print('Incnt', len(inlist), 'Outcnt', outcnt)
    return inlist
# NOTE(review): truncated decorator — presumably "@pytest.mark" precedes ".parametrize".
.parametrize('channel_axis', [0, 1, 2, (- 1), (- 2), (- 3)])
def test_build_laplacian_pyramid_rgb(channel_axis):
    """Pyramid layers must keep the channel dim and halve spatial dims each level."""
    image = data.astronaut()
    (rows, cols, dim) = image.shape
    # Move channels to the axis under test.
    image = np.moveaxis(image, source=(- 1), destination=channel_axis)
    pyramid = pyramids.pyramid_laplacian(image, downscale=2, channel_axis=channel_axis)
    for (layer, out) in enumerate(pyramid):
        # Expected spatial size at this level, with the channel dim re-inserted.
        layer_shape = [(rows / (2 ** layer)), (cols / (2 ** layer))]
        layer_shape.insert((channel_axis % image.ndim), dim)
        assert (out.shape == tuple(layer_shape))
class Attention(nn.Module):
    """Two-layer MLP over a style code: Linear -> ReLU -> Linear, dimension preserved."""

    def __init__(self, style_dim=64):
        super().__init__()
        self.layers = nn.Sequential(
            nn.Linear(style_dim, style_dim),
            nn.ReLU(),
            nn.Linear(style_dim, style_dim),
        )

    def forward(self, s):
        # Shape in == shape out: (..., style_dim).
        return self.layers(s)
class SawyerPlateSlideSideEnv(SawyerXYZEnv):
    """Robot task: slide a plate sideways toward a cabinet goal position.

    NOTE(review): ``model_name`` reads like a stripped ``@property``, and the
    bare ``_assert_task_is_set`` line below looks like a truncated decorator.
    """

    def __init__(self):
        # Workspace bounds (x, y, z) for goal, hand, and object sampling.
        goal_low = ((- 0.3), 0.6, 0.02)
        goal_high = ((- 0.25), 0.7, 0.02)
        hand_low = ((- 0.5), 0.4, 0.05)
        hand_high = (0.5, 1, 0.5)
        obj_low = (0.0, 0.6, 0.015)
        obj_high = (0.0, 0.6, 0.015)
        super().__init__(self.model_name, hand_low=hand_low, hand_high=hand_high)
        self.init_config = {'obj_init_angle': 0.3, 'obj_init_pos': np.array([0.0, 0.6, 0.015], dtype=np.float32), 'hand_init_pos': np.array((0, 0.6, 0.2), dtype=np.float32)}
        self.goal = np.array([(- 0.25), 0.6, 0.02])
        self.obj_init_pos = self.init_config['obj_init_pos']
        self.obj_init_angle = self.init_config['obj_init_angle']
        self.hand_init_pos = self.init_config['hand_init_pos']
        # Joint sampling space: object position concatenated with goal position.
        self._random_reset_space = Box(np.hstack((obj_low, goal_low)), np.hstack((obj_high, goal_high)))
        self.goal_space = Box(np.array(goal_low), np.array(goal_high))

    def model_name(self):
        return full_v1_path_for('sawyer_xyz/sawyer_plate_slide_sideway.xml')

    _assert_task_is_set
    def step(self, action):
        """Advance one step; success when the plate is within 0.08 of the goal (x, y)."""
        ob = super().step(action)
        (reward, reachDist, pullDist) = self.compute_reward(action, ob)
        self.curr_path_length += 1
        info = {'reachDist': reachDist, 'goalDist': pullDist, 'epRew': reward, 'pickRew': None, 'success': float((pullDist <= 0.08))}
        return (ob, reward, False, info)

    def _get_pos_objects(self):
        return self.data.get_geom_xpos('objGeom')

    def _set_objCOM_marker(self):
        # Mirror the handle position onto the objSite marker for visualization.
        objPos = self.data.get_geom_xpos('handle')
        self.data.site_xpos[self.model.site_name2id('objSite')] = objPos

    def _set_obj_xyz(self, pos):
        # The plate occupies qpos slots 9:11 (planar position).
        qpos = self.data.qpos.flat.copy()
        qvel = self.data.qvel.flat.copy()
        qpos[9:11] = pos
        self.set_state(qpos, qvel)

    def reset_model(self):
        """Reset hand, object, and (optionally random) goal; precompute reward scale."""
        self._reset_hand()
        self._target_pos = self.goal.copy()
        self.obj_init_pos = self.init_config['obj_init_pos']
        self.objHeight = self.data.get_geom_xpos('objGeom')[2]
        if self.random_init:
            obj_pos = self._get_state_rand_vec()
            self.obj_init_pos = obj_pos[:3]
            goal_pos = obj_pos[3:]
            self._target_pos = goal_pos
        self.sim.model.body_pos[self.model.body_name2id('cabinet')] = self._target_pos
        self._set_obj_xyz(np.zeros(2))
        # maxDist is the initial object-goal distance in the x-y plane.
        self.maxDist = np.linalg.norm((self.obj_init_pos[:(- 1)] - self._target_pos[:(- 1)]))
        self.target_reward = ((1000 * self.maxDist) + (1000 * 2))
        return self._get_obs()

    def _reset_hand(self):
        super()._reset_hand(10)
        (rightFinger, leftFinger) = (self._get_site_pos('rightEndEffector'), self._get_site_pos('leftEndEffector'))
        # Gripper center = midpoint of the two fingertips.
        self.init_fingerCOM = ((rightFinger + leftFinger) / 2)

    def compute_reward(self, actions, obs):
        """Return [reward, reachDist, pullDist]: reach penalty plus a pull bonus near the object."""
        del actions
        objPos = obs[3:6]
        (rightFinger, leftFinger) = (self._get_site_pos('rightEndEffector'), self._get_site_pos('leftEndEffector'))
        fingerCOM = ((rightFinger + leftFinger) / 2)
        pullGoal = self._target_pos
        reachDist = np.linalg.norm((objPos - fingerCOM))
        # Pull distance measured in the x-y plane only.
        pullDist = np.linalg.norm((objPos[:(- 1)] - pullGoal[:(- 1)]))
        c1 = 1000
        c2 = 0.01
        c3 = 0.001
        if (reachDist < 0.05):
            # Shaped pull reward: linear progress plus two sharp exponential bonuses.
            pullRew = ((1000 * (self.maxDist - pullDist)) + (c1 * (np.exp(((- (pullDist ** 2)) / c2)) + np.exp(((- (pullDist ** 2)) / c3)))))
            pullRew = max(pullRew, 0)
        else:
            pullRew = 0
        reward = ((- reachDist) + pullRew)
        return [reward, reachDist, pullDist]
# NOTE(review): the bare tuples below look like truncated click decorators —
# presumably "@click.command()", "@click.argument(...)" and "@click.option(...)".
()
('dump_db_file', type=click.Path(exists=True))
('tokenizer_name')
('entity_vocab_file', type=click.Path(exists=True))
('output_dir', type=click.Path(file_okay=False))
('--language', type=str)
('--sentence-splitter', default='en')
('--max-seq-length', default=512)
('--max-entity-length', default=128)
('--max-mention-length', default=30)
('--min-sentence-length', default=5)
('--abstract-only', is_flag=True)
('--include-sentences-without-entities', is_flag=True)
('--include-unk-entities/--skip-unk-entities', default=False)
('--pool-size', default=multiprocessing.cpu_count())
('--chunk-size', default=100)
('--max-num-documents', default=None, type=int)
('--predefined-entities-only', is_flag=True)
def build_wikipedia_pretraining_dataset(dump_db_file: str, tokenizer_name: str, entity_vocab_file: str, output_dir: str, language: Optional[str], sentence_splitter: str, **kwargs):
    """CLI entry point: build a Wikipedia pretraining dataset from a dump DB,
    a tokenizer, and an entity vocabulary; remaining options pass through in kwargs."""
    dump_db = DumpDB(dump_db_file)
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_name, use_fast=False)
    sentence_splitter = SentenceSplitter.from_name(sentence_splitter)
    if (not os.path.exists(output_dir)):
        os.makedirs(output_dir)
    entity_vocab = EntityVocab(entity_vocab_file)
    WikipediaPretrainingDataset.build(dump_db, tokenizer, sentence_splitter, entity_vocab, output_dir, language, **kwargs)
def rot6d_to_quat(rotation_6d: Union[(torch.Tensor, numpy.ndarray)]) -> Union[(torch.Tensor, numpy.ndarray)]:
    """Convert a 6D rotation representation (last dim == 6) to a quaternion."""
    if rotation_6d.shape[-1] != 6:
        raise ValueError(f'Invalid input rotation_6d shape f{rotation_6d.shape}.')
    # Pipeline: 6D representation -> rotation matrix -> quaternion.
    pipeline = Compose([rotation_6d_to_matrix, matrix_to_quaternion])
    return pipeline(rotation_6d)
class hypsecant_gen(rv_continuous):
    """Hyperbolic secant distribution: pdf(x) = 1 / (pi * cosh(x))."""

    def _shape_info(self):
        # No shape parameters.
        return []

    def _pdf(self, x):
        return 1.0 / (np.pi * np.cosh(x))

    def _cdf(self, x):
        return (2.0 / np.pi) * np.arctan(np.exp(x))

    def _ppf(self, q):
        # Inverse of _cdf.
        return np.log(np.tan(np.pi * q / 2.0))

    def _sf(self, x):
        # Survival function; by symmetry sf(x) = cdf(-x).
        return (2.0 / np.pi) * np.arctan(np.exp(-x))

    def _isf(self, q):
        return -np.log(np.tan(np.pi * q / 2.0))

    def _stats(self):
        # mean, variance, skewness, excess kurtosis
        return (0, np.pi * np.pi / 4, 0, 2)

    def _entropy(self):
        return np.log(2 * np.pi)
def register_Ns3UanMacAloha_methods(root_module, cls):
    """Register constructors and methods of ns3::UanMacAloha on its pybindgen wrapper.

    Auto-generated binding code: each add_method mirrors one C++ member signature.
    """
    cls.add_constructor([param('ns3::UanMacAloha const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')], is_virtual=True)
    cls.add_method('AttachPhy', 'void', [param('ns3::Ptr< ns3::UanPhy >', 'phy')], is_virtual=True)
    cls.add_method('Clear', 'void', [], is_virtual=True)
    cls.add_method('Enqueue', 'bool', [param('ns3::Ptr< ns3::Packet >', 'pkt'), param('uint16_t', 'protocolNumber'), param('ns3::Address const &', 'dest')], is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('SetForwardUpCb', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Packet >, unsigned short, ns3::Mac8Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_virtual=True)
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    return
def get_root_logger(log_file=None, log_level=logging.INFO):
    """Return the project's root 'mmseg' logger, optionally logging to `log_file`."""
    return get_logger(name='mmseg', log_file=log_file, log_level=log_level)
def G_adv_loss(pred_fake, w=None):
    """Generator adversarial loss: weighted mean of -pred_fake."""
    weight = match_size(w, pred_fake)
    return (weight * (-pred_fake)).mean()
class SAM(torch.optim.Optimizer):
    """Sharpness-Aware Minimization wrapper around a base optimizer.

    ``first_step`` perturbs the weights to the local worst case w + e(w);
    ``second_step`` restores them and applies the base optimizer with the
    sharpness-aware gradient.  ``step`` requires a closure that re-computes
    the loss at the perturbed point.

    NOTE(review): the bare ``_grad()`` lines below look like truncated
    ``@torch.no_grad()`` decorators.
    """

    def __init__(self, params, base_optimizer, rho=0.05, adaptive=False, **kwargs):
        assert (rho >= 0.0), f'Invalid rho, should be non-negative: {rho}'
        defaults = dict(rho=rho, adaptive=adaptive, **kwargs)
        super(SAM, self).__init__(params, defaults)
        self.base_optimizer = base_optimizer(self.param_groups, **kwargs)
        # Share param groups so schedulers etc. see a single source of truth.
        self.param_groups = self.base_optimizer.param_groups

    _grad()
    def first_step(self, zero_grad=False):
        """Perturb each parameter by e(w) along the (scaled) gradient direction."""
        grad_norm = self._grad_norm()
        for group in self.param_groups:
            # Perturbation scale: rho / ||g|| (epsilon avoids division by zero).
            scale = (group['rho'] / (grad_norm + 1e-12))
            for p in group['params']:
                if (p.grad is None):
                    continue
                # 'adaptive' variant scales by p^2 (ASAM).
                e_w = (((torch.pow(p, 2) if group['adaptive'] else 1.0) * p.grad) * scale.to(p))
                p.add_(e_w)
                # Remember the perturbation so second_step can undo it.
                self.state[p]['e_w'] = e_w
        if zero_grad:
            self.zero_grad()

    _grad()
    def second_step(self, zero_grad=False):
        """Undo the perturbation, then take the base optimizer's step."""
        for group in self.param_groups:
            for p in group['params']:
                if (p.grad is None):
                    continue
                p.sub_(self.state[p]['e_w'])
        self.base_optimizer.step()
        if zero_grad:
            self.zero_grad()

    _grad()
    def step(self, closure=None):
        """Full SAM step: ascend (first_step), re-evaluate loss, descend (second_step)."""
        assert (closure is not None), 'Sharpness Aware Minimization requires closure, but it was not provided'
        # Re-enable grad inside the closure so backward() works.
        closure = torch.enable_grad()(closure)
        self.first_step(zero_grad=True)
        closure()
        self.second_step()

    def _grad_norm(self):
        """Return the global L2 norm of all gradients (on one shared device)."""
        shared_device = self.param_groups[0]['params'][0].device
        norm = torch.norm(torch.stack([((torch.abs(p) if group['adaptive'] else 1.0) * p.grad).norm(p=2).to(shared_device) for group in self.param_groups for p in group['params'] if (p.grad is not None)]), p=2)
        return norm
def ref_linear_interpolate_2d(x, output_size, align_corners, half_pixel):
    """Reference (numpy) bilinear resize of the last two axes of ``x`` to ``output_size``.

    Leading axes are flattened into a batch, interpolation indices/weights are
    computed per output row and column, and the result is reshaped back.
    """
    oshape = output_size
    ishape = x.shape[(- 2):]
    # Flatten all leading dims into a single batch axis.
    xx = x.reshape((- 1), *ishape)
    ib = np.arange(xx.shape[0])
    # Per-axis input->output scale factors.
    scale = (compute_scale(ishape[0], oshape[0], align_corners), compute_scale(ishape[1], oshape[1], align_corners))
    # Fractional source coordinates for every output row / column.
    index = (get_source_index(scale[0], np.arange(oshape[0]), half_pixel), get_source_index(scale[1], np.arange(oshape[1]), half_pixel))
    # Integer (floor) coordinates and their clipped right/bottom neighbours.
    index_1 = (index[0].astype(np.int32), index[1].astype(np.int32))
    index_2 = (np.minimum((index_1[0] + 1), (ishape[0] - 1)), np.minimum((index_1[1] + 1), (ishape[1] - 1)))
    # Fractional distances, reshaped to broadcast over (batch, rows, cols).
    dist_1 = ((index[0] - index_1[0]).reshape(1, (- 1), 1), (index[1] - index_1[1]).reshape(1, 1, (- 1)))
    dist_2 = ((1.0 - dist_1[0]), (1.0 - dist_1[1]))
    # Horizontal blends at the two source rows...
    val0 = (dist_2[1] * xx[np.ix_(ib, index_1[0], index_1[1])])
    val1 = (dist_1[1] * xx[np.ix_(ib, index_1[0], index_2[1])])
    val2 = (dist_2[1] * xx[np.ix_(ib, index_2[0], index_1[1])])
    val3 = (dist_1[1] * xx[np.ix_(ib, index_2[0], index_2[1])])
    # ...then blend vertically between the two rows.
    yy = ((dist_2[0] * (val0 + val1)) + (dist_1[0] * (val2 + val3)))
    # Restore the original leading dims with the new spatial shape.
    return yy.reshape((x.shape[:(- len(oshape))] + oshape))
class StripTokenDataset(BaseWrapperDataset):
    """Dataset wrapper that removes every occurrence of one token id from each item."""

    def __init__(self, dataset, id_to_strip):
        super().__init__(dataset)
        # Token id filtered out of every returned item.
        self.id_to_strip = id_to_strip

    def __getitem__(self, index):
        item = self.dataset[index]
        keep = item.ne(self.id_to_strip)
        return item[keep]
class ViewNet(nn.Module):
    """Render an RGB image (and a 2D embedding) from a 3D feature volume.

    Projects the volume into the target camera, decodes it with a 3D->2D
    encoder, and supervises with an L1 RGB loss plus a smoothness loss.
    """

    def __init__(self):
        super(ViewNet, self).__init__()
        print('ViewNet...')
        # 3D->2D decoder followed by separate RGB and embedding heads.
        self.net = encoder3D2D.Net3D2D(hyp.feat3D_dim, 64, 32, hyp.view_depth, depth_pool=8).cuda()
        self.rgb_layer = nn.Sequential(nn.LeakyReLU(), nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1), nn.LeakyReLU(), nn.Conv2d(32, 3, kernel_size=1, stride=1, padding=0)).cuda()
        self.emb_layer = nn.Sequential(nn.LeakyReLU(), nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1), nn.LeakyReLU(), nn.Conv2d(32, hyp.feat2D_dim, kernel_size=1, stride=1, padding=0)).cuda()
        print(self.net)

    def forward(self, pix_T_cam0, cam0_T_cam1, feat_mem1, rgb_g, vox_util, valid=None, summ_writer=None, test=False, suffix=''):
        """Return (total_loss, rgb, emb); in test mode only the RGB prediction is produced."""
        total_loss = torch.tensor(0.0).cuda()
        (B, C, H, W) = list(rgb_g.shape)
        (PH, PW) = (hyp.PH, hyp.PW)
        # Render at the (smaller) PH x PW resolution: rescale intrinsics and targets.
        if ((PH < H) or (PW < W)):
            sy = (float(PH) / float(H))
            sx = (float(PW) / float(W))
            pix_T_cam0 = utils_geom.scale_intrinsics(pix_T_cam0, sx, sy)
            if (valid is not None):
                valid = F.interpolate(valid, scale_factor=0.5, mode='nearest')
            rgb_g = F.interpolate(rgb_g, scale_factor=0.5, mode='bilinear')
        # Project the 3D feature volume into the target view.
        feat_proj = vox_util.apply_pixX_T_memR_to_voxR(pix_T_cam0, cam0_T_cam1, feat_mem1, hyp.view_depth, PH, PW)
        feat = self.net(feat_proj)
        rgb = self.rgb_layer(feat)
        emb = self.emb_layer(feat)
        emb = utils_basic.l2_normalize(emb, dim=1)
        if test:
            return (None, rgb, None)
        # Per-pixel L1 reconstruction loss, optionally masked by `valid`.
        loss_im = utils_basic.l1_on_axis((rgb - rgb_g), 1, keepdim=True)
        if (valid is not None):
            rgb_loss = utils_basic.reduce_masked_mean(loss_im, valid)
        else:
            rgb_loss = torch.mean(loss_im)
        total_loss = utils_misc.add_loss('view/rgb_l1_loss', total_loss, rgb_loss, hyp.view_l1_coeff, summ_writer)
        # Smoothness regularizer: mean absolute image gradients of the prediction.
        (dy, dx) = utils_basic.gradient2D(rgb, absolute=True)
        smooth_im = torch.mean((dy + dx), dim=1, keepdims=True)
        if (summ_writer is not None):
            summ_writer.summ_oned('view/smooth_loss', smooth_im)
        smooth_loss = torch.mean(smooth_im)
        total_loss = utils_misc.add_loss('view/smooth_loss', total_loss, smooth_loss, hyp.view_smooth_coeff, summ_writer)
        # Tensorboard-style visual summaries of predictions and targets.
        if (summ_writer is not None):
            summ_writer.summ_oned('view/rgb_loss', loss_im)
            summ_writer.summ_rgbs('view/rgb', [rgb.clamp((- 0.5), 0.5), rgb_g])
            summ_writer.summ_rgb('view/rgb_e', rgb.clamp((- 0.5), 0.5))
            summ_writer.summ_rgb('view/rgb_g', rgb_g.clamp((- 0.5), 0.5))
            summ_writer.summ_feat('view/emb', emb, pca=True)
            if (valid is not None):
                summ_writer.summ_rgb('view/rgb_e_valid', (valid * rgb.clamp((- 0.5), 0.5)))
                summ_writer.summ_rgb('view/rgb_g_valid', (valid * rgb_g.clamp((- 0.5), 0.5)))
        return (total_loss, rgb, emb)
def check_or_download_inception(inception_path):
    """Return the local path to the Inception graph-def, downloading it if missing.

    NOTE(review): the URL string literal below is truncated/unterminated in this
    copy of the file — it should contain the Inception model tarball URL.
    """
    INCEPTION_URL = '
    # Default cache location when none is supplied.
    if (inception_path is None):
        inception_path = '/tmp'
    inception_path = pathlib.Path(inception_path)
    model_file = (inception_path / 'classify_image_graph_def.pb')
    if (not model_file.exists()):
        print('Downloading Inception model')
        from urllib import request
        import tarfile
        (fn, _) = request.urlretrieve(INCEPTION_URL)
        # Extract only the graph-def from the downloaded tarball.
        with tarfile.open(fn, mode='r') as f:
            f.extract('classify_image_graph_def.pb', str(model_file.parent))
    return str(model_file)
# NOTE(review): truncated decorator — presumably "@pytest.mark" precedes ".parametrize".
.parametrize('in_shape', [(1, 2, 3)])
def test_swish(in_shape: Sequence[int]) -> None:
    """Swish must preserve shape and compute x * sigmoid(x) elementwise."""
    x = torch.rand(in_shape)
    swish = Swish()
    y = swish(x)
    assert (y.shape == in_shape)
    assert torch.allclose(y, (x * torch.sigmoid(x)))
class SpatialReflectionPadding(Module):
    """Legacy (lua-style) 2D reflection-padding module over 4D (N, C, H, W) tensors."""

    def __init__(self, pad_l, pad_r=None, pad_t=None, pad_b=None):
        super(SpatialReflectionPadding, self).__init__()
        # Sides left unspecified default to the left padding amount.
        self.pad_l = pad_l
        self.pad_r = pad_l if pad_r is None else pad_r
        self.pad_t = pad_l if pad_t is None else pad_t
        self.pad_b = pad_l if pad_b is None else pad_b

    def updateOutput(self, input):
        assert input.dim() == 4
        self._backend.SpatialReflectionPadding_updateOutput(
            self._backend.library_state, input, self.output,
            self.pad_l, self.pad_r, self.pad_t, self.pad_b)
        return self.output

    def updateGradInput(self, input, gradOutput):
        assert input.dim() == 4 and gradOutput.dim() == 4
        # gradOutput must have the padded spatial shape of input.
        assert (input.size(0) == gradOutput.size(0)
                and input.size(1) == gradOutput.size(1)
                and input.size(2) + self.pad_t + self.pad_b == gradOutput.size(2)
                and input.size(3) + self.pad_l + self.pad_r == gradOutput.size(3))
        self._backend.SpatialReflectionPadding_updateGradInput(
            self._backend.library_state, input, gradOutput, self.gradInput,
            self.pad_l, self.pad_r, self.pad_t, self.pad_b)
        return self.gradInput

    def __repr__(self):
        base = super(SpatialReflectionPadding, self).__repr__()
        return base + '({}, {}, {}, {})'.format(self.pad_l, self.pad_r, self.pad_t, self.pad_b)
class UnionType(LayoutBuilderType):
    """Numba type object for an awkward Union layout builder (tags + index + contents).

    NOTE(review): ``tags``/``index``/``contents`` read like ``@property`` accessors;
    the decorators appear to have been stripped from this copy of the file.
    """

    def __init__(self, tags_dtype, index_dtype, contents, parameters):
        super().__init__(name=f'ak.lb.Union({tags_dtype}, {index_dtype}, {contents}, parameters={parameters!r})')
        self._tags_dtype = tags_dtype
        self._index_dtype = index_dtype
        self._contents = contents
        self._init(parameters)

    def tags(self):
        # Growable buffer type holding the union tag of each element.
        return ak.numba.GrowableBufferType(self._tags_dtype)

    def index(self):
        # Growable buffer type holding the per-tag index of each element.
        return ak.numba.GrowableBufferType(self._index_dtype)

    def contents(self):
        # Numba tuple of the content builders' types.
        return numba.types.Tuple([to_numbatype(it) for it in self._contents])
def _build_vocabulary(input_files):
    """Load FLAGS.vocab_file if given; otherwise build a vocabulary from input_files.

    Returns an OrderedDict mapping word -> id.  When building, the vocab contains
    EOS/UNK plus the ``FLAGS.num_words - 2`` most frequent words, and vocab.txt /
    word_counts.txt are written to FLAGS.output_dir.

    Fixes Python-3 incompatibilities in the original: dict views are not
    indexable and np.argsort cannot consume them, and GFile text-mode lines are
    str (no .decode).
    """
    if FLAGS.vocab_file:
        tf.logging.info('Loading existing vocab file.')
        vocab = collections.OrderedDict()
        with tf.gfile.GFile(FLAGS.vocab_file, mode='r') as f:
            for (i, line) in enumerate(f):
                # GFile in text mode yields str under Python 3; only decode bytes.
                word = line.decode('utf-8').strip() if isinstance(line, bytes) else line.strip()
                assert (word not in vocab), ('Attempting to add word twice: %s' % word)
                vocab[word] = i
        tf.logging.info('Read vocab of size %d from %s', len(vocab), FLAGS.vocab_file)
        return vocab
    tf.logging.info('Creating vocabulary.')
    num = 0
    wordcount = collections.Counter()
    for input_file in input_files:
        tf.logging.info('Processing file: %s', input_file)
        for sentence in tf.gfile.FastGFile(input_file):
            wordcount.update(sentence.split())
            num += 1
            if ((num % 1000000) == 0):
                tf.logging.info('Processed %d sentences', num)
    tf.logging.info('Processed %d sentences total', num)
    # Materialize keys/values: Python 3 dict views are neither indexable nor
    # directly consumable by np.argsort.
    words = list(wordcount.keys())
    freqs = list(wordcount.values())
    sorted_indices = np.argsort(freqs)[::(- 1)]
    vocab = collections.OrderedDict()
    vocab[special_words.EOS] = special_words.EOS_ID
    vocab[special_words.UNK] = special_words.UNK_ID
    # Two slots are reserved for EOS/UNK; real-word ids start at 2.
    for (w_id, w_index) in enumerate(sorted_indices[0:(FLAGS.num_words - 2)]):
        vocab[words[w_index]] = (w_id + 2)
    tf.logging.info('Created vocab with %d words', len(vocab))
    vocab_file = os.path.join(FLAGS.output_dir, 'vocab.txt')
    with tf.gfile.FastGFile(vocab_file, 'w') as f:
        f.write('\n'.join(vocab.keys()))
    tf.logging.info('Wrote vocab file to %s', vocab_file)
    word_counts_file = os.path.join(FLAGS.output_dir, 'word_counts.txt')
    with tf.gfile.FastGFile(word_counts_file, 'w') as f:
        for i in sorted_indices:
            f.write(('%s %d\n' % (words[i], freqs[i])))
    tf.logging.info('Wrote word counts file to %s', word_counts_file)
    return vocab
class ModuleDict(Module):
    """Holds submodules in a dictionary.

    Contained modules are registered via `add_module`, so they are visible
    to all Module methods; insertion order is preserved. `update` merges
    entries from a mapping or an iterable of (key, module) pairs.

    Note: the original text had the `_copy_to_script_wrapper` decorators
    stripped to bare expression statements (no-ops); they are restored
    here so TorchScript sees the wrapped accessors.
    """

    _modules: Dict[(str, Module)]

    def __init__(self, modules: Optional[Mapping[(str, Module)]]=None) -> None:
        super(ModuleDict, self).__init__()
        if (modules is not None):
            self.update(modules)

    @_copy_to_script_wrapper
    def __getitem__(self, key: str) -> Module:
        return self._modules[key]

    def __setitem__(self, key: str, module: Module) -> None:
        # add_module registers the child so parameters/buffers are tracked.
        self.add_module(key, module)

    def __delitem__(self, key: str) -> None:
        del self._modules[key]

    @_copy_to_script_wrapper
    def __len__(self) -> int:
        return len(self._modules)

    @_copy_to_script_wrapper
    def __iter__(self) -> Iterator[str]:
        return iter(self._modules)

    @_copy_to_script_wrapper
    def __contains__(self, key: str) -> bool:
        return (key in self._modules)

    def clear(self) -> None:
        """Remove all modules from the ModuleDict."""
        self._modules.clear()

    def pop(self, key: str) -> Module:
        """Remove key from the ModuleDict and return its module."""
        v = self[key]
        del self[key]
        return v

    @_copy_to_script_wrapper
    def keys(self) -> Iterable[str]:
        """Return an iterable of the ModuleDict keys."""
        return self._modules.keys()

    @_copy_to_script_wrapper
    def items(self) -> Iterable[Tuple[(str, Module)]]:
        """Return an iterable of the ModuleDict key/module pairs."""
        return self._modules.items()

    @_copy_to_script_wrapper
    def values(self) -> Iterable[Module]:
        """Return an iterable of the ModuleDict values (modules)."""
        return self._modules.values()

    def update(self, modules: Mapping[(str, Module)]) -> None:
        """Update the ModuleDict from a mapping or iterable of pairs.

        Mappings keep their own iteration order; for a plain iterable each
        element must be a (key, module) pair of length 2.
        """
        if (not isinstance(modules, container_abcs.Iterable)):
            raise TypeError(('ModuleDict.update should be called with an iterable of key/value pairs, but got ' + type(modules).__name__))
        if isinstance(modules, (OrderedDict, ModuleDict, container_abcs.Mapping)):
            for (key, module) in modules.items():
                self[key] = module
        else:
            for (j, m) in enumerate(modules):
                if (not isinstance(m, container_abcs.Iterable)):
                    raise TypeError(((('ModuleDict update sequence element #' + str(j)) + ' should be Iterable; is') + type(m).__name__))
                if (not (len(m) == 2)):
                    raise ValueError((((('ModuleDict update sequence element #' + str(j)) + ' has length ') + str(len(m))) + '; 2 is required'))
                self[m[0]] = m[1]
class FlaxResNetPreTrainedModel(metaclass=DummyObject):
    """Placeholder object that errors out unless the flax backend is installed."""

    _backends = ['flax']

    def __init__(self, *args, **kwargs):
        # Raises an informative ImportError when flax is unavailable.
        requires_backends(self, self._backends)
class Normalize():
    """Channel-wise normalization: (x - mean) / std on NCHW tensors.

    mean and std are stored as (1, C, 1, 1) tensors so they broadcast
    over a batch of images.
    """

    def __init__(self, mean, std, device='cpu'):
        """mean/std: per-channel sequences of equal length C."""
        self.mean = torch.tensor(mean, device=device).reshape(1, len(mean), 1, 1)
        # Bug fix: reshape std by its own length, not len(mean); the
        # original only worked because the two happened to match.
        self.std = torch.tensor(std, device=device).reshape(1, len(std), 1, 1)

    def __call__(self, x, seed=(- 1)):
        # `seed` is accepted for call-site compatibility but unused here.
        return ((x - self.mean) / self.std)
def J_adjoint_checkpointing(model, src_coords, wavelet, rec_coords, recin, space_order=8, is_residual=False, n_checkpoints=None, born_fwd=False, return_obj=False, ic='as', ws=None, nlind=False, f0=0.015, misfit=None, illum=False, fw=True):
    """Compute the adjoint-Jacobian (gradient) action with optimal checkpointing.

    Instead of saving the full forward wavefield history, the forward (or
    Born, when `born_fwd` is True) operator and the gradient operator are
    driven by a Revolver checkpointing schedule, trading recomputation for
    memory.

    Returns (objective, gradient, Iu, Iv) when `return_obj` is True, else
    (gradient, Iu, Iv); Iu/Iv are illumination data or None.
    NOTE(review): the exact semantics of ic/ws/nlind/f0 are defined by the
    project's operator builders — confirmed only by their pass-through here.
    """
    # Select plain forward vs Born forward operator builder.
    ffunc = op_fwd_J[born_fwd]
    # Build the forward operator without wavefield saving (checkpointing
    # replaces save=True) and the gradient operator, both with their kwargs.
    (op_f, u, rec_g, kwu) = ffunc(model, src_coords, rec_coords, wavelet, fw=fw, save=False, space_order=space_order, return_op=True, ic=ic, nlind=nlind, ws=ws, f0=f0, illum=illum)
    (op, g, kwg) = gradient(model, recin, rec_coords, u, space_order=space_order, return_op=True, ic=ic, f0=f0, save=False, illum=illum, fw=fw)
    nt = wavelet.shape[0]
    # Receiver that will carry the residual data into the reverse sweep.
    rec = Receiver(name='rec', grid=model.grid, ntime=nt, coordinates=rec_coords)
    kwg['srcv'] = rec
    # Wavefields to checkpoint; viscoacoustic models also need the memory field.
    cpwf = [uu for uu in as_tuple(u)]
    if model.is_viscoacoustic:
        cpwf += [memory_field(u)]
    cp = DevitoCheckpoint(cpwf)
    wrap_fw = CheckpointOperator(op_f, **kwu)
    wrap_rev = CheckpointOperator(op, **kwg)
    # nt - 2 time steps under the Revolver schedule; n_checkpoints=None lets
    # Revolver choose — TODO confirm against pyrevolve's default handling.
    wrp = Revolver(cp, wrap_fw, wrap_rev, n_checkpoints, (nt - 2))
    wrp.apply_forward()
    # Objective value and residual from the predicted data rec_g vs recin.
    (f, _) = Loss(rec_g, recin, model.critical_dt, is_residual=is_residual, misfit=misfit)
    # Inject the (residual) data as the adjoint source before the reverse pass.
    rec.data[:] = as_tuple(rec_g)[0].data[:]
    wrp.apply_reverse()
    # Illumination outputs, if the operators produced them (None otherwise).
    Iu = getattr(kwu.get('Iu', None), 'data', None)
    Iv = getattr(kwg.get('Iv', None), 'data', None)
    if return_obj:
        return (f, g.data, Iu, Iv)
    return (g.data, Iu, Iv)
class SawyerHandlePressEnvV2(SawyerXYZEnv):
    """Sawyer manipulation task: press a handle down to a target height.

    Note: the original text had decorators stripped to bare names;
    `@_assert_task_is_set` is restored on `step`, and `model_name` /
    `_target_site_config` are restored as properties — `__init__` consumes
    `self.model_name` as a value, which breaks if it is a plain method.
    """

    def __init__(self):
        hand_low = ((- 0.5), 0.4, 0.05)
        hand_high = (0.5, 1, 0.5)
        obj_low = ((- 0.1), 0.8, (- 0.001))
        obj_high = (0.1, 0.9, (+ 0.001))
        goal_low = ((- 0.1), 0.55, 0.04)
        goal_high = (0.1, 0.7, 0.08)
        super().__init__(self.model_name, hand_low=hand_low, hand_high=hand_high)
        self.init_config = {'obj_init_pos': np.array([0, 0.9, 0.0]), 'hand_init_pos': np.array((0, 0.6, 0.2))}
        self.goal = np.array([0, 0.8, 0.14])
        self.obj_init_pos = self.init_config['obj_init_pos']
        self.hand_init_pos = self.init_config['hand_init_pos']
        self.max_path_length = 150
        self._random_reset_space = Box(np.array(obj_low), np.array(obj_high))
        self.goal_space = Box(np.array(goal_low), np.array(goal_high))

    @property
    def model_name(self):
        """Path to the MuJoCo XML describing this task."""
        return full_v2_path_for('sawyer_xyz/sawyer_handle_press.xml')

    @_assert_task_is_set
    def step(self, action):
        """Advance one step and return (obs, reward, done, info)."""
        ob = super().step(action)
        (reward, reachDist, pressDist) = self.compute_reward(action, ob)
        self.curr_path_length += 1
        # Success when the handle is within 4 cm of the goal height.
        info = {'reachDist': reachDist, 'goalDist': pressDist, 'epRew': reward, 'pickRew': None, 'success': float((pressDist <= 0.04))}
        return (ob, reward, False, info)

    @property
    def _target_site_config(self):
        # No target sites to render for this task.
        return []

    def _get_pos_objects(self):
        return self._get_site_pos('handleStart')

    def _set_obj_xyz(self, pos):
        # The handle joint is the 10th qpos/qvel entry in this model.
        qpos = self.data.qpos.flat.copy()
        qvel = self.data.qvel.flat.copy()
        qpos[9] = pos
        qvel[9] = 0
        self.set_state(qpos, qvel)

    def reset_model(self):
        """Reset hand and handle box, place the goal, and return the first obs."""
        self._reset_hand()
        self.obj_init_pos = (self._get_state_rand_vec() if self.random_init else self.init_config['obj_init_pos'])
        self.sim.model.body_pos[self.model.body_name2id('box')] = self.obj_init_pos
        self._set_obj_xyz(0)
        self._target_pos = self._get_site_pos('goalPress')
        # Vertical distance from the handle start to the goal; used to scale reward.
        self.maxDist = np.abs((self.data.site_xpos[self.model.site_name2id('handleStart')][(- 1)] - self._target_pos[(- 1)]))
        self.target_reward = ((1000 * self.maxDist) + (1000 * 2))
        return self._get_obs()

    def compute_reward(self, actions, obs):
        """Reward = -reach distance + press bonus once the hand is near the handle."""
        del actions
        objPos = obs[3:6]
        leftFinger = self._get_site_pos('leftEndEffector')
        fingerCOM = leftFinger
        pressGoal = self._target_pos[(- 1)]
        pressDist = np.abs((objPos[(- 1)] - pressGoal))
        reachDist = np.linalg.norm((objPos - fingerCOM))
        c1 = 1000
        c2 = 0.01
        c3 = 0.001
        # Press bonus only activates within 5 cm of the handle; two
        # exponential terms sharpen the reward near the goal.
        if (reachDist < 0.05):
            pressRew = ((1000 * (self.maxDist - pressDist)) + (c1 * (np.exp(((- (pressDist ** 2)) / c2)) + np.exp(((- (pressDist ** 2)) / c3)))))
        else:
            pressRew = 0
        pressRew = max(pressRew, 0)
        reward = ((- reachDist) + pressRew)
        return [reward, reachDist, pressDist]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.