def test_exact_values():
with suppress_warnings() as sup:
sup.filter(ConstantWarning)
for key in _cd.exact_values:
            assert_((((_cd.exact_values[key][0] - value(key)) / value(key)) == 0))

class CiscoUmbrellaVerifyDomain(VirtualFunctionTool):
name = 'CiscoUmbrellaVerifyDomain'
summary = 'Verify a domain by checking if it is safe.'
parameters: List[ArgParameter] = [{'name': 'domain', 'type': 'string', 'description': 'The domain to be verified.', 'required': True}]
returns: List[ArgReturn] = [{'name': 'is_safe', 'type': 'boolean', 'description': 'Whether the domain is safe.'}]
    exceptions: List[ArgException] = [{'name': 'InvalidRequestException', 'description': "The 'domain' argument is not a valid domain."}]

def test_bool():
array = ak.Array([True, False, False, True, True, True])
assert (ak.operations.argsort(array).to_list() == [1, 2, 0, 3, 4, 5])
    assert (ak.operations.sort(array).to_list() == [False, False, True, True, True, True])

def test_keyword_while():
N.set(128)
A = np.random.rand(N.get()).astype(np.float32)
B = np.zeros((N.get(),), dtype=np.float32)
try:
keyword_while(A, B)
except Exception as e:
print(e)
return False
    assert np.allclose(A, B)

class ApproxGradientBase():
def gradient(self, x: np.ndarray) -> np.ndarray:
raise NotImplementedError()
def __call__(self, x: np.ndarray) -> np.ndarray:
        return self.gradient(x)

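# A minimal sketch of a concrete subclass, assuming the class above is meant as
# an interface for approximate gradients; `f` and `eps` are illustrative names,
# not part of the original code.
import numpy as np

class CentralDifferenceGradient(ApproxGradientBase):
    def __init__(self, f, eps: float = 1e-6):
        self.f = f      # scalar-valued function of a vector (assumed)
        self.eps = eps  # finite-difference step size (assumed parameter)

    def gradient(self, x: np.ndarray) -> np.ndarray:
        grad = np.zeros_like(x, dtype=float)
        for i in range(x.size):
            step = np.zeros_like(x, dtype=float)
            step[i] = self.eps
            grad[i] = (self.f(x + step) - self.f(x - step)) / (2 * self.eps)
        return grad

# For f(x) = sum(x**2) the gradient at [1, 2] is approximately [2, 4]:
# CentralDifferenceGradient(lambda x: float(np.sum(x ** 2)))(np.array([1.0, 2.0]))
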
def get_scores(count, pred_total, label_total):
if (pred_total != label_total):
return (0, 0, 0)
elif (count == pred_total):
return (1, 1, 1)
    return (0, 0, 0)

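# A quick illustration of the exact-match semantics above: the helper appears to
# return a (precision, recall, f1)-style triple of 1s only when prediction and
# label counts agree and every prediction matched.
print(get_scores(3, 3, 3))  # (1, 1, 1)
print(get_scores(2, 3, 3))  # (0, 0, 0): equal totals, but not all matched
print(get_scores(2, 2, 3))  # (0, 0, 0): totals disagree
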
@optplan.register_node(optplan.UniformInitializer)
class UniformDistribution():
def __init__(self, params: optplan.UniformInitializer, work: workspace.Workspace) -> None:
self._params = params
def __call__(self, shape: List[int]) -> np.ndarray:
        return np.random.uniform(self._params.min_val, self._params.max_val, shape)

class SquareBall(Ball):
asset = 'square.png'
def create_physical_entity(self):
body = self._engine.CreateDynamicBody(position=self.physical_position, fixedRotation=True)
body.CreatePolygonFixture(box=(((self.width / 2.0) / self._world.physical_scale), ((self.height / 2.0) / self._world.physical_scale)), density=1.0, friction=0.0, restitution=1.0)
        return body

def main(argv=None):
tf.reset_default_graph()
keep_prob = tf.placeholder(tf.float32, name='keep_probabilty')
image = tf.placeholder(tf.float32, shape=[None, None, None, 3], name='input_image')
Net = BuildNetVgg16.BUILD_NET_VGG16(vgg16_npy_path=model_path)
Net.build(image, NUM_CLASSES, keep_prob)
ValidReader = Data_Reader.Data_Reader(Image_Dir, GTLabelDir=Label_Dir, BatchSize=Batch_Size)
sess = tf.Session()
print('Setting up Saver...')
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
ckpt = tf.train.get_checkpoint_state(logs_dir)
if (ckpt and ckpt.model_checkpoint_path):
saver.restore(sess, ckpt.model_checkpoint_path)
print('Model restored...')
else:
        print(('ERROR: NO TRAINED MODEL IN: ' + logs_dir + '. See TRAIN.py for training'))
sys.exit()
Union = np.float64(np.zeros(len(Classes)))
Intersection = np.float64(np.zeros(len(Classes)))
fim = 0
print((('Start Evaluating intersection over union for ' + str(ValidReader.NumFiles)) + ' images'))
while (ValidReader.itr < ValidReader.NumFiles):
print((str(((fim * 100.0) / ValidReader.NumFiles)) + '%'))
fim += 1
(Images, GTLabels) = ValidReader.ReadNextBatchClean()
PredictedLabels = sess.run(Net.Pred, feed_dict={image: Images, keep_prob: 1.0})
(CIOU, CU) = IOU.GetIOU(PredictedLabels, GTLabels.squeeze(), len(Classes), Classes)
Intersection += (CIOU * CU)
Union += CU
print('Mean Prediction')
    print('IOU=Intersection Over Union')
for i in range(len(Classes)):
if (Union[i] > 0):
            print(((Classes[i] + '\t') + str((Intersection[i] / Union[i]))))

class QuantizedPyTorchModel(PytorchModel):
def __init__(self, graph: common.Graph, append2output=None):
super().__init__(graph, append2output)
def _quantize_node_activations(self, node: BaseNode, input_tensors: List[torch.Tensor]) -> List[torch.Tensor]:
if node.is_activation_quantization_enabled():
if isinstance(input_tensors, list):
input_tensors = torch.cat(input_tensors, dim=0)
return node.final_activation_quantization_cfg.quantize_node_output(input_tensors)
        return input_tensors

@pytest.mark.parametrize('flatlist_as_rvec', [False, True])
def test_RegularArray_NumpyArray(flatlist_as_rvec):
v2a = ak.contents.regulararray.RegularArray(ak.contents.numpyarray.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5])), 3)
layout = v2a
generator = ak._connect.cling.togenerator(layout.form, flatlist_as_rvec=flatlist_as_rvec)
lookup = ak._lookup.Lookup(layout, generator)
generator.generate(compiler)
ROOT.gInterpreter.Declare(f'''
void roottest_RegularArray_NumpyArray_v2a_{flatlist_as_rvec}(double* out, ssize_t length, ssize_t* ptrs) {{
auto obj = {generator.dataset()};
out[0] = obj.size();
out[1] = obj[0][0];
out[2] = obj[0][1];
out[3] = obj[1][0];
out[4] = obj[1][1];
out[5] = obj[1].size();
}}
''')
out = np.zeros(6, dtype=np.float64)
getattr(ROOT, f'roottest_RegularArray_NumpyArray_v2a_{flatlist_as_rvec}')(out, len(layout), lookup.arrayptrs)
    assert (out.tolist() == [2.0, 0.0, 1.1, 3.3, 4.4, 3.0])

class DBPedia(XiangZhangDataset):
dirname = 'dbpedia_csv'
    columns = ['class_index', 'title', 'content']

def register_Ns3LteRrcSapAntennaInfoUl_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::LteRrcSap::AntennaInfoUl const &', 'arg0')])
cls.add_instance_attribute('transmissionMode', 'uint8_t', is_const=False)
    return

class ProfileEncoder(nn.Module):
def __init__(self):
super(ProfileEncoder, self).__init__()
self.embed_dim = ENCODER_CONFIG['embed_dim']
self.bbox_embed = Embedder((2 ** BIT), 32)
self.bbox_fc = nn.Sequential(nn.Linear((32 * PROFILE_PARAM_SEQ), self.embed_dim), nn.BatchNorm1d(self.embed_dim), nn.LeakyReLU())
self.pos_embed = PositionalEncoding(max_len=MAX_PROFILE, d_model=self.embed_dim)
encoder_layers = TransformerEncoderLayerImproved(d_model=self.embed_dim, nhead=ENCODER_CONFIG['num_heads'], dim_feedforward=ENCODER_CONFIG['hidden_dim'], dropout=ENCODER_CONFIG['dropout_rate'])
encoder_norm = LayerNorm(self.embed_dim)
self.encoder = TransformerEncoder(encoder_layers, ENCODER_CONFIG['num_layers'], encoder_norm)
commitment_cost = 0.25
decay = 0.99
self.code_dim = self.embed_dim
self.codebook = VectorQuantizerEMA(PROFILE_CODEBOOK_DIM, self.code_dim, commitment_cost, decay)
self.bottleneck = nn.Sequential(nn.Linear(self.embed_dim, self.embed_dim), nn.BatchNorm1d(self.embed_dim), nn.Tanh())
def forward(self, coord, seq_mask):
p_embed = self.bbox_embed(coord).flatten(start_dim=2, end_dim=3)
coord_embed = self.bbox_fc(p_embed.flatten(0, 1)).unflatten(0, (p_embed.shape[0], p_embed.shape[1]))
encoder_input = self.pos_embed(coord_embed.transpose(0, 1))
outputs = self.encoder(src=encoder_input, src_key_padding_mask=seq_mask)
z_mask = (~ seq_mask).transpose(0, 1).unsqueeze(2).repeat(1, 1, self.embed_dim)
avg_z = ((outputs * z_mask).sum(dim=0, keepdim=False) / z_mask.sum(dim=0, keepdim=False))
code_encoded = self.bottleneck(avg_z).unsqueeze(0)
(vq_loss, quantized, encodings_flat, selection) = self.codebook(code_encoded)
latent_code = quantized.transpose(0, 1)
return (latent_code, vq_loss, selection, encodings_flat)
def count_code(self, coord, seq_mask):
p_embed = self.bbox_embed(coord).flatten(start_dim=2, end_dim=3)
coord_embed = self.bbox_fc(p_embed.flatten(0, 1)).unflatten(0, (p_embed.shape[0], p_embed.shape[1]))
encoder_input = self.pos_embed(coord_embed.transpose(0, 1))
outputs = self.encoder(src=encoder_input, src_key_padding_mask=seq_mask)
z_mask = (~ seq_mask).transpose(0, 1).unsqueeze(2).repeat(1, 1, self.embed_dim)
avg_z = ((outputs * z_mask).sum(dim=0, keepdim=False) / z_mask.sum(dim=0, keepdim=False))
code_encoded = self.bottleneck(avg_z).unsqueeze(0)
code_dist = self.codebook.count_code(code_encoded)
        return (code_dist, code_encoded)

class PartitionTuples(UniqueRepresentation, Parent):
def __classcall_private__(klass, level=None, size=None, regular=None):
if ((level is not None) and ((not isinstance(level, (int, Integer))) or (level < 1))):
raise ValueError('the level must be a positive integer')
if ((size is not None) and ((not isinstance(size, (int, Integer))) or (size < 0))):
raise ValueError('the size must be a non-negative integer')
if isinstance(regular, (list, tuple)):
if (level is None):
raise ValueError('When no level is specified, regular must be a positive integer')
if (len(regular) != level):
raise ValueError('regular must be a list of length {}, got {}'.format(level, regular))
if (regular == 0):
raise ValueError('regular must be a positive integer or a tuple of non-negative integers')
if (level is None):
if (size is None):
if (regular is None):
return PartitionTuples_all()
return RegularPartitionTuples_all(regular)
if (regular is None):
return PartitionTuples_size(size)
return RegularPartitionTuples_size(size, regular)
elif (level == 1):
if isinstance(regular, (list, tuple)):
regular = regular[0]
if (size is None):
if ((regular is None) or (regular == 0)):
return _Partitions
return RegularPartitions_all(regular)
if ((regular is None) or (regular == 0)):
return Partitions_n(size)
return RegularPartitions_n(size, regular)
if (regular is not None):
if (not isinstance(regular, (list, tuple))):
regular = ((regular,) * level)
else:
regular = tuple(regular)
if (size is None):
if (regular is None):
return PartitionTuples_level(level)
return RegularPartitionTuples_level(level, regular)
if (regular is None):
return PartitionTuples_level_size(level, size)
return RegularPartitionTuples_level_size(level, size, regular)
Element = PartitionTuple
options = Partitions.options
_level = None
_size = None
def _element_constructor_(self, mu):
if ((mu == []) or (mu == ()) or (mu == [[]])):
if (mu not in self):
raise ValueError('{} is not a {}'.format(mu, self))
return self.element_class(self, [_Partitions([])])
try:
mu = [_Partitions(mu)]
except ValueError:
try:
mu = [_Partitions(nu) for nu in mu]
except ValueError:
raise ValueError('{} is not a {}'.format(mu, self))
if (mu not in self):
raise ValueError('{} is not a {}'.format(mu, self))
return self.element_class(self, mu)
def __contains__(self, mu):
if isinstance(mu, (PartitionTuple, Partition)):
return True
if isinstance(mu, (tuple, list)):
if (not mu):
return True
if (mu[0] in ZZ):
return (mu in _Partitions)
return all(((m in _Partitions) for m in mu))
return False
def __getitem__(self, r):
if isinstance(r, (int, Integer)):
return self.unrank(r)
elif isinstance(r, slice):
start = (0 if (r.start is None) else r.start)
stop = r.stop
if ((stop is None) and (not self.is_finite())):
raise ValueError('infinite set')
else:
raise ValueError('r must be an integer or a slice')
count = 0
parts = []
for t in self:
if (count == stop):
break
if (count >= start):
parts.append(t)
count += 1
if ((count == stop) or (stop is None)):
return parts
raise IndexError('value out of range')
def level(self):
return self._level
def size(self):
return self._size
def _an_element_(self):
        return PartitionTuple(([1, 1, 1, 1], [2, 1, 1], [3, 1], [4]))

class MapFission(transformation.SingleStateTransformation):
map_entry = transformation.PatternNode(nodes.EntryNode)
nested_sdfg = transformation.PatternNode(nodes.NestedSDFG)
    @staticmethod
    def annotates_memlets():
        return False

    @classmethod
    def expressions(cls):
        return [sdutil.node_path_graph(cls.map_entry), sdutil.node_path_graph(cls.map_entry, cls.nested_sdfg)]
    @staticmethod
    def _components(subgraph: gr.SubgraphView) -> List[Tuple[(nodes.Node, nodes.Node)]]:
graph = (subgraph if isinstance(subgraph, sd.SDFGState) else subgraph.graph)
schildren = subgraph.scope_children()
ns = [((n, graph.exit_node(n)) if isinstance(n, nodes.EntryNode) else (n, n)) for n in schildren[None] if isinstance(n, (nodes.CodeNode, nodes.EntryNode))]
return ns
    @staticmethod
    def _border_arrays(sdfg, parent, subgraph):
nested = isinstance(parent, sd.SDFGState)
schildren = subgraph.scope_children()
subset = gr.SubgraphView(parent, schildren[None])
if nested:
return set((node.data for node in subset.nodes() if (isinstance(node, nodes.AccessNode) and sdfg.arrays[node.data].transient)))
else:
return set((node.data for node in subset.nodes() if isinstance(node, nodes.AccessNode)))
    @staticmethod
    def _internal_border_arrays(total_components, subgraphs):
inputs = set()
outputs = set()
for (components, subgraph) in zip(total_components, subgraphs):
for (component_in, component_out) in components:
for e in subgraph.in_edges(component_in):
if isinstance(e.src, nodes.AccessNode):
inputs.add(e.src.data)
for e in subgraph.out_edges(component_out):
if isinstance(e.dst, nodes.AccessNode):
outputs.add(e.dst.data)
return (inputs & outputs)
    @staticmethod
    def _outside_map(node, scope_dict, entry_nodes):
while (scope_dict[node] is not None):
if (scope_dict[node] in entry_nodes):
return False
node = scope_dict[node]
return True
def can_be_applied(self, graph, expr_index, sdfg, permissive=False):
map_node = self.map_entry
nsdfg_node = None
if sd.has_dynamic_map_inputs(graph, map_node):
return False
if (expr_index == 0):
subgraphs = [graph.scope_subgraph(map_node, include_entry=False, include_exit=False)]
else:
nsdfg_node = dcpy(self.nested_sdfg)
if (len(set((e.dst for e in graph.out_edges(map_node)))) > 1):
return False
cf_comp = helpers.find_sdfg_control_flow(nsdfg_node.sdfg)
if (len(cf_comp) == 1):
child = list(cf_comp.values())[0][1]
conditions = []
if isinstance(child, (cf.ForScope, cf.WhileScope, cf.IfScope)):
conditions.append((child.condition if isinstance(child, (cf.ForScope, cf.IfScope)) else child.test))
for cond in conditions:
if any(((p in cond.get_free_symbols()) for p in map_node.map.params)):
return False
for s in cond.get_free_symbols():
for e in graph.edges_by_connector(self.nested_sdfg, s):
if any(((p in e.data.free_symbols) for p in map_node.map.params)):
return False
if any(((p in cond.get_free_symbols()) for p in map_node.map.params)):
return False
helpers.nest_sdfg_control_flow(nsdfg_node.sdfg, cf_comp)
subgraphs = list(nsdfg_node.sdfg.nodes())
border_arrays = set()
total_components = []
for sg in subgraphs:
components = self._components(sg)
snodes = sg.nodes()
if ((expr_index == 0) and (len(snodes) > 0) and (len(components) <= 1)):
return False
border_arrays |= self._border_arrays((nsdfg_node.sdfg if (expr_index == 1) else sdfg), (sg if (expr_index == 1) else graph), sg)
total_components.append(components)
for array in border_arrays:
if (expr_index == 0):
ndesc = sdfg.arrays[array]
else:
ndesc = nsdfg_node.sdfg.arrays[array]
if (ndesc.transient is False):
return False
if (expr_index == 0):
not_subgraph = set((n.data for n in graph.nodes() if ((n not in snodes) and isinstance(n, nodes.AccessNode))))
not_subgraph.update(set((n.data for s in sdfg.nodes() if (s != graph) for n in s.nodes() if isinstance(n, nodes.AccessNode))))
for (_, component_out) in components:
for e in sg.out_edges(component_out):
if isinstance(e.dst, nodes.AccessNode):
if (e.dst.data in not_subgraph):
return False
return True
def apply(self, graph: sd.SDFGState, sdfg: sd.SDFG):
map_entry = self.map_entry
map_exit = graph.exit_node(map_entry)
nsdfg_node: Optional[nodes.NestedSDFG] = None
if (self.expr_index == 0):
subgraphs = [(graph, graph.scope_subgraph(map_entry, include_entry=False, include_exit=False))]
parent = sdfg
else:
nsdfg_node = self.nested_sdfg
helpers.nest_sdfg_control_flow(nsdfg_node.sdfg)
subgraphs = [(state, state) for state in nsdfg_node.sdfg.nodes()]
parent = nsdfg_node.sdfg
parent_sdfg = parent.parent_sdfg
modified_arrays = set()
outer_map: nodes.Map = map_entry.map
mapsize = outer_map.range.size()
if (self.expr_index == 1):
map_syms = outer_map.range.free_symbols
for edge in graph.out_edges(map_entry):
if edge.data.data:
map_syms.update(edge.data.subset.free_symbols)
if (edge.data.data in parent_sdfg.arrays):
map_syms.update(parent_sdfg.arrays[edge.data.data].free_symbols)
for edge in graph.in_edges(map_exit):
if edge.data.data:
map_syms.update(edge.data.subset.free_symbols)
if (edge.data.data in parent_sdfg.arrays):
map_syms.update(parent_sdfg.arrays[edge.data.data].free_symbols)
for sym in map_syms:
symname = str(sym)
if (symname in outer_map.params):
continue
if (symname not in nsdfg_node.symbol_mapping.keys()):
nsdfg_node.symbol_mapping[symname] = sym
nsdfg_node.sdfg.symbols[symname] = graph.symbols_defined_at(nsdfg_node)[symname]
for name in outer_map.params:
if (str(name) in nsdfg_node.symbol_mapping):
del nsdfg_node.symbol_mapping[str(name)]
if (str(name) in nsdfg_node.sdfg.symbols):
del nsdfg_node.sdfg.symbols[str(name)]
for (state, subgraph) in subgraphs:
components = MapFission._components(subgraph)
sources = subgraph.source_nodes()
sinks = subgraph.sink_nodes()
if (self.expr_index == 0):
external_edges_entry = list(state.out_edges(map_entry))
external_edges_exit = list(state.in_edges(map_exit))
else:
external_edges_entry = [e for e in subgraph.edges() if (isinstance(e.src, nodes.AccessNode) and (not nsdfg_node.sdfg.arrays[e.src.data].transient))]
external_edges_exit = [e for e in subgraph.edges() if (isinstance(e.dst, nodes.AccessNode) and (not nsdfg_node.sdfg.arrays[e.dst.data].transient))]
edge_to_outer = {}
for edge in external_edges_entry:
if (self.expr_index == 0):
path = state.memlet_path(edge)
eindex = path.index(edge)
edge_to_outer[edge] = path[(eindex - 1)]
else:
outer_edge = next((e for e in graph.in_edges(nsdfg_node) if (e.dst_conn == edge.src.data)))
edge_to_outer[edge] = outer_edge
for edge in external_edges_exit:
if (self.expr_index == 0):
path = state.memlet_path(edge)
eindex = path.index(edge)
edge_to_outer[edge] = path[(eindex + 1)]
else:
outer_edge = next((e for e in graph.out_edges(nsdfg_node) if (e.src_conn == edge.dst.data)))
edge_to_outer[edge] = outer_edge
arrays = MapFission._border_arrays((nsdfg_node.sdfg if (self.expr_index == 1) else sdfg), state, subgraph)
scalars = defaultdict(list)
for (_, component_out) in components:
for e in subgraph.out_edges(component_out):
if isinstance(e.dst, nodes.CodeNode):
scalars[e.data.data].append(e)
for (scalar, edges) in scalars.items():
desc = parent.arrays[scalar]
del parent.arrays[scalar]
(name, newdesc) = parent.add_transient(scalar, mapsize, desc.dtype, desc.storage, lifetime=desc.lifetime, debuginfo=desc.debuginfo, allow_conflicts=desc.allow_conflicts, find_new_name=True)
for edge in edges:
anode = state.add_access(name)
sbs = subsets.Range.from_string(','.join(outer_map.params))
sbs.offset([r[0] for r in outer_map.range], True)
state.add_edge(edge.src, edge.src_conn, anode, None, mm.Memlet.simple(name, sbs, num_accesses=outer_map.range.num_elements()))
state.add_edge(anode, None, edge.dst, edge.dst_conn, mm.Memlet.simple(name, sbs, num_accesses=outer_map.range.num_elements()))
state.remove_edge(edge)
new_map_entries = []
for (component_in, component_out) in components:
(me, mx) = state.add_map((outer_map.label + '_fission'), [(p, '0:1') for p in outer_map.params], outer_map.schedule, unroll=outer_map.unroll, debuginfo=outer_map.debuginfo)
for conn in map_entry.in_connectors:
if (not conn.startswith('IN_')):
me.add_in_connector(conn)
me.map.range = dcpy(outer_map.range)
new_map_entries.append(me)
conn_idx = 0
for e in state.in_edges(component_in):
if e.data.data:
in_conn = f'IN_{conn_idx}'
out_conn = f'OUT_{conn_idx}'
conn_idx += 1
me.add_in_connector(in_conn)
me.add_out_connector(out_conn)
else:
in_conn = None
out_conn = None
state.add_edge(me, out_conn, e.dst, e.dst_conn, dcpy(e.data))
if ((self.expr_index == 0) and (e in external_edges_entry)):
state.add_edge(edge_to_outer[e].src, edge_to_outer[e].src_conn, me, in_conn, dcpy(edge_to_outer[e].data))
else:
state.add_edge(e.src, e.src_conn, me, in_conn, dcpy(e.data))
state.remove_edge(e)
if (state.in_degree(component_in) == 0):
state.add_edge(me, None, component_in, None, mm.Memlet())
conn_idx = 0
for e in state.out_edges(component_out):
if e.data.data:
in_conn = f'IN_{conn_idx}'
out_conn = f'OUT_{conn_idx}'
conn_idx += 1
mx.add_in_connector(in_conn)
mx.add_out_connector(out_conn)
else:
in_conn = None
out_conn = None
state.add_edge(e.src, e.src_conn, mx, in_conn, dcpy(e.data))
if ((self.expr_index == 0) and (e in external_edges_exit)):
state.add_edge(mx, out_conn, edge_to_outer[e].dst, edge_to_outer[e].dst_conn, dcpy(edge_to_outer[e].data))
else:
state.add_edge(mx, out_conn, e.dst, e.dst_conn, dcpy(e.data))
state.remove_edge(e)
if (state.out_degree(component_out) == 0):
state.add_edge(component_out, None, mx, None, mm.Memlet())
if (self.expr_index == 0):
for node in sources:
if isinstance(node, nodes.AccessNode):
for edge in state.in_edges(node):
outer_edge = edge_to_outer[edge]
memlet = dcpy(edge.data)
memlet.subset = subsets.Range((outer_map.range.ranges + memlet.subset.ranges))
state.add_edge(outer_edge.src, outer_edge.src_conn, edge.dst, edge.dst_conn, memlet)
for node in sinks:
if isinstance(node, nodes.AccessNode):
for edge in state.out_edges(node):
outer_edge = edge_to_outer[edge]
state.add_edge(edge.src, edge.src_conn, outer_edge.dst, outer_edge.dst_conn, dcpy(outer_edge.data))
for array in arrays:
if (array in modified_arrays):
continue
desc = parent.arrays[array]
if isinstance(desc, dt.Scalar):
desc = dt.Array(desc.dtype, desc.shape, desc.transient, desc.allow_conflicts, desc.storage, desc.location, desc.strides, desc.offset, False, desc.lifetime, 0, desc.debuginfo, desc.total_size, desc.start_offset)
parent.arrays[array] = desc
for sz in reversed(mapsize):
desc.strides = ([desc.total_size] + list(desc.strides))
desc.total_size = (desc.total_size * sz)
desc.shape = (mapsize + list(desc.shape))
desc.offset = (([0] * len(mapsize)) + list(desc.offset))
modified_arrays.add(array)
state.fill_scope_connectors()
if (self.expr_index == 1):
scope_dict = state.scope_dict()
for (k, v) in scope_dict.items():
if (isinstance(k, nodes.MapEntry) and (k in new_map_entries) and (v is None)):
scope_dict[k] = k
to_correct = ([(e, e.src) for e in external_edges_entry] + [(e, e.dst) for e in external_edges_exit])
corrected_nodes = set()
for (edge, node) in to_correct:
if isinstance(node, nodes.AccessNode):
if (node in corrected_nodes):
continue
corrected_nodes.add(node)
outer_edge = edge_to_outer[edge]
desc = parent.arrays[node.data]
outer_desc = sdfg.arrays[outer_edge.data.data]
if isinstance(desc, dt.Scalar):
parent.arrays[node.data] = dcpy(outer_desc)
desc = parent.arrays[node.data]
desc.transient = False
elif isinstance(desc, dt.Array):
desc.shape = outer_desc.shape
desc.strides = outer_desc.strides
desc.total_size = outer_desc.total_size
for internal_edge in state.all_edges(node):
for e in state.memlet_tree(internal_edge):
e.data.subset.offset(desc.offset, False)
e.data.subset = helpers.unsqueeze_memlet(e.data, outer_edge.data).subset
if (not (scope_dict[e.src] and scope_dict[e.dst])):
e.data = propagate_subset([e.data], desc, outer_map.params, outer_map.range)
if isinstance(desc, dt.Array):
desc.offset = outer_desc.offset
for node in subgraph.nodes():
if (isinstance(node, nodes.AccessNode) and (node.data in arrays)):
for edge in state.all_edges(node):
for e in state.memlet_tree(edge):
if (e.data.data == node.data):
if e.data.subset:
e.data.subset = subsets.Range(([((pystr_to_symbolic(d) - r[0]), (pystr_to_symbolic(d) - r[0]), 1) for (d, r) in zip(outer_map.params, outer_map.range)] + e.data.subset.ranges))
elif e.data.other_subset:
e.data.other_subset = subsets.Range(([((pystr_to_symbolic(d) - r[0]), (pystr_to_symbolic(d) - r[0]), 1) for (d, r) in zip(outer_map.params, outer_map.range)] + e.data.other_subset.ranges))
if (self.expr_index == 1):
for edge in graph.in_edges(map_entry):
if ((not edge.dst_conn) or (not edge.dst_conn.startswith('IN_'))):
continue
desc = sdfg.arrays[edge.data.data]
edge.data.subset = subsets.Range.from_array(desc)
edge.data.num_accesses = edge.data.subset.num_elements()
for inner_edge in graph.out_edges_by_connector(map_entry, f'OUT_{edge.dst_conn[3:]}'):
graph.add_edge(edge.src, edge.src_conn, nsdfg_node, inner_edge.dst_conn, dcpy(edge.data))
for edge in graph.out_edges(map_exit):
desc = sdfg.arrays[edge.data.data]
edge.data.subset = subsets.Range.from_array(desc)
for inner_edge in graph.in_edges_by_connector(map_exit, f'IN_{edge.src_conn[4:]}'):
graph.add_edge(nsdfg_node, inner_edge.src_conn, edge.dst, edge.dst_conn, dcpy(edge.data))
graph.remove_nodes_from([map_entry, map_exit])
        propagate_memlets_state(sdfg, graph)

def _sympysage_fresnelc(self):
from sage.functions.error import fresnel_cos
    return fresnel_cos(self.args[0]._sage_())

def dist_loss(points):
P = points
Pb = P.roll(1, dims=2)
D = ((P - Pb) ** 2)
    return torch.sum(D, dim=[(- 2), (- 1)]).mean()

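# A small usage sketch for dist_loss, assuming `points` holds batches of point
# sequences with the sequence along the last axis, e.g. (batch, 2, n_points);
# roll(1, dims=2) pairs each point with its predecessor around the closed loop.
import torch

pts = torch.rand(4, 2, 16)  # 4 closed polylines of 16 2-D points each
print(dist_loss(pts))       # mean over the batch of summed squared segment lengths
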
class Blur(BaseAugmentation):
def _augment(self, img):
        return img.filter(ImageFilter.BLUR)

def train_one_epoch(model: torch.nn.Module, criterion: DistillationLoss, data_loader: Iterable, optimizer: torch.optim.Optimizer, device: torch.device, epoch: int, loss_scaler, max_norm: float=0, model_ema: Optional[ModelEma]=None, mixup_fn: Optional[Mixup]=None, set_training_mode=True):
model.train(set_training_mode)
metric_logger = utils.MetricLogger(delimiter=' ')
metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
header = 'Epoch: [{}]'.format(epoch)
print_freq = 10
for (samples, targets) in metric_logger.log_every(data_loader, print_freq, header):
samples = samples.to(device, non_blocking=True)
targets = targets.to(device, non_blocking=True)
if (mixup_fn is not None):
(samples, targets) = mixup_fn(samples, targets)
with torch.cuda.amp.autocast():
outputs = model(samples)
loss = criterion(samples, outputs, targets)
loss_value = loss.item()
if (not math.isfinite(loss_value)):
print('Loss is {}, stopping training'.format(loss_value))
sys.exit(1)
optimizer.zero_grad()
is_second_order = (hasattr(optimizer, 'is_second_order') and optimizer.is_second_order)
loss_scaler(loss, optimizer, clip_grad=max_norm, parameters=model.parameters(), create_graph=is_second_order)
torch.cuda.synchronize()
if (model_ema is not None):
model_ema.update(model)
metric_logger.update(loss=loss_value)
metric_logger.update(lr=optimizer.param_groups[0]['lr'])
metric_logger.synchronize_between_processes()
print('Averaged stats:', metric_logger)
    return {k: meter.global_avg for (k, meter) in metric_logger.meters.items()}

def video_aug(videos, video_transform, byte=False):
if byte:
videos = videos.permute(1, 0, 2, 3).byte()
else:
videos = videos.permute(1, 0, 2, 3)
global_videos_tensor = []
(global_transform, local_transform) = video_transform
for i in range(2):
global_videos_tensor.append(global_transform(videos).permute(1, 0, 2, 3))
    return global_videos_tensor

def test_named_record_fields_int32_parameters():
t = RecordType([NumpyType('int32')], ['one'], {'__record__': 'Name', 'p': [123]})
    assert (str(parser.parse(str(t))) == str(t))

def _match(qs, ks):
qts = tuple(map((lambda x: re.compile((x + '$'))), qs))
for i in range(((len(ks) - len(qs)) + 1)):
matches = [x.match(y) for (x, y) in zip(qts, ks[i:])]
if (matches and all(matches)):
return True
    return False

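# A usage sketch: `qs` holds regex fragments (anchored with `$` inside `_match`)
# and `ks` a list of strings; the function tests whether the patterns match a
# contiguous window of `ks`. The example names are illustrative.
print(_match(['conv.*', 'bn.*'], ['stem', 'conv1', 'bn1', 'fc']))  # True
print(_match(['conv.*', 'fc'], ['stem', 'conv1', 'bn1', 'fc']))    # False: not contiguous
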
def get_rule_from_children(p, pos, children):
    phrase = p.text.split(' ')
    for (i, c) in enumerate(children):
        start_idx = (c.start_idx - p.start_idx)
        end_idx = ((start_idx + c.end_idx) - c.start_idx)
        for j in range(start_idx, end_idx):
            if (i == 0):
                phrase[j] = 'X'
            else:
                phrase[j] = 'Y'
            pos[j] = c.label
    for i in range(len(children)):
        if (i == 0):
            idxs = [j for (j, x) in enumerate(phrase) if (x == 'X')]
        else:
            idxs = [j for (j, x) in enumerate(phrase) if (x == 'Y')]
        phrase = (phrase[:idxs[0]] + phrase[idxs[(- 1)]:])
        pos = (pos[:idxs[0]] + pos[idxs[(- 1)]:])
    phrase = ' '.join(phrase)
    pos = ' '.join(pos)
    return (phrase, pos)

def format_item_feature(out_file):
print('format_item_feature', ITEMS_FILE, out_file)
item_df = pd.read_csv(ITEMS_FILE, sep='|', header=None, encoding='ISO-8859-1')
item_df = item_df.drop([1, 3, 4], axis=1)
    item_df.columns = [IID, 'i_year', 'i_Other', 'i_Action', 'i_Adventure', 'i_Animation', "i_Children's", 'i_Comedy', 'i_Crime', 'i_Documentary', 'i_Drama', 'i_Fantasy', 'i_Film-Noir', 'i_Horror', 'i_Musical', 'i_Mystery', 'i_Romance', 'i_Sci-Fi', 'i_Thriller', 'i_War', 'i_Western']
item_df['i_year'] = item_df['i_year'].apply((lambda x: (int(str(x).split('-')[(- 1)]) if pd.notnull(x) else (- 1))))
seps = ([0, 1940, 1950, 1960, 1970, 1980, 1985] + list(range(1990, int((item_df['i_year'].max() + 2)))))
year_dict = {}
for (i, sep) in enumerate(seps[:(- 1)]):
for j in range(seps[i], seps[(i + 1)]):
year_dict[j] = (i + 1)
item_df['i_year'] = item_df['i_year'].apply((lambda x: defaultdict(int, year_dict)[x]))
for c in item_df.columns[2:]:
item_df[c] = (item_df[c] + 1)
item_df.to_csv(out_file, index=False, sep='\t')
    return item_df

class ContainerIO():
def __init__(self, file, offset, length):
self.fh = file
self.pos = 0
self.offset = offset
self.length = length
self.fh.seek(offset)
def isatty(self):
return False
def seek(self, offset, mode=io.SEEK_SET):
if (mode == 1):
self.pos = (self.pos + offset)
elif (mode == 2):
self.pos = (self.length + offset)
else:
self.pos = offset
self.pos = max(0, min(self.pos, self.length))
self.fh.seek((self.offset + self.pos))
def tell(self):
return self.pos
def read(self, n=0):
if n:
n = min(n, (self.length - self.pos))
else:
n = (self.length - self.pos)
if (not n):
return (b'' if ('b' in self.fh.mode) else '')
self.pos = (self.pos + n)
return self.fh.read(n)
def readline(self):
s = (b'' if ('b' in self.fh.mode) else '')
newline_character = (b'\n' if ('b' in self.fh.mode) else '\n')
while True:
c = self.read(1)
if (not c):
break
s = (s + c)
if (c == newline_character):
break
return s
def readlines(self):
lines = []
while True:
s = self.readline()
if (not s):
break
lines.append(s)
        return lines

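# A brief usage sketch: ContainerIO exposes a byte window of a larger file as a
# file-like object. The file name, offset, and length here are illustrative.
with open('archive.bin', 'rb') as fh:
    member = ContainerIO(fh, offset=128, length=64)
    member.seek(0)           # positions are relative to the window start
    payload = member.read()  # never reads past the window's `length`
    assert member.tell() == len(payload)
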
def tfrecords_single(db):
total_path = os.path.join(DATA_DIR, 'Tfrecords_test', (str(db.base) + db.tfrecords_filename))
writer = tf.python_io.TFRecordWriter(total_path)
print(total_path)
ins_ = {}
outs_ = {}
low = int((db.base * db.base_step))
high = min(((db.base + 1) * db.base_step), db.num_instance)
print(('low:%f high:%f' % (low, high)))
    for idx in range(low, high):
print(db._ins_path['1framexyz'][idx])
ins_['1framexyz'] = load_xyz(db._ins_path['1framexyz'][idx]).astype(np.float32)
ins_['2framexyz'] = load_xyz(db._ins_path['2framexyz'][idx]).astype(np.float32)
ins_['1framergb'] = load_rgb(db._ins_path['1framergb'][idx]).astype(np.float32)
ins_['2framergb'] = load_rgb(db._ins_path['2framergb'][idx]).astype(np.float32)
outs_['2framexyz'] = load_seg(db._outs_path['2framexyz'][idx]).astype(np.float32)
outs_['2framer'] = load_boundary(db._outs_path['boundary'][idx]).astype(np.float32)
outs_['2framescore'] = load_score(db._outs_path['2framescore'][idx]).astype(np.float32)
outs_['flow'] = load_flow(db._outs_path['top_dir'][idx]).astype(np.float32)
outs_['end_center'] = load_end_center(db._outs_path['end_center'][idx]).astype(np.float32)
outs_['transl'] = load_transl(db._outs_path['transl'][idx]).astype(np.float32)
outs_['rot'] = load_rot(db._outs_path['rot'][idx]).astype(np.float32)
instance_id = int(db._ins_ids[idx])
print(('instance_id %d ' % instance_id))
ins_1frame_rgb = ins_['1framergb'].tostring()
ins_2frame_rgb = ins_['2framergb'].tostring()
ins_1frame_xyz = ins_['1framexyz'].tostring()
ins_2frame_xyz = ins_['2framexyz'].tostring()
outs_2frame_xyz = outs_['2framexyz'].tostring()
outs_2frame_r = outs_['2framer'].tostring()
outs_2frame_score = outs_['2framescore'].tostring()
outs_flow = outs_['flow'].tostring()
outs_end_center = outs_['end_center'].tostring()
outs_transl = outs_['transl'].tostring()
outs_rot = outs_['rot'].tostring()
example = tf.train.Example(features=tf.train.Features(feature={'instance_id': _int64_feature(instance_id), 'in_1frame_xyz': _bytes_feature(ins_1frame_xyz), 'in_2frame_xyz': _bytes_feature(ins_2frame_xyz), 'in_1frame_rgb': _bytes_feature(ins_1frame_rgb), 'in_2frame_rgb': _bytes_feature(ins_2frame_rgb), 'outs_2frame_xyz': _bytes_feature(outs_2frame_xyz), 'outs_2frame_r': _bytes_feature(outs_2frame_r), 'outs_2frame_score': _bytes_feature(outs_2frame_score), 'outs_end_center': _bytes_feature(outs_end_center), 'outs_transl': _bytes_feature(outs_transl), 'outs_rot': _bytes_feature(outs_rot), 'outs_flow': _bytes_feature(outs_flow)}))
writer.write(example.SerializeToString())
    writer.close()

class ScorerTest(unittest.TestCase):
def _get_labels(self) -> Tuple[(np.ndarray, np.ndarray, np.ndarray)]:
golds = np.array([1, 0, 1, 0, 1])
preds = np.array([1, 0, 1, 1, 0])
probs = np.array([0.8, 0.6, 0.9, 0.7, 0.4])
return (golds, preds, probs)
def test_scorer(self) -> None:
def pred_sum(golds, preds, probs):
return np.sum(preds)
scorer = Scorer(metrics=['accuracy', 'f1'], custom_metric_funcs=dict(pred_sum=pred_sum))
results = scorer.score(*self._get_labels())
results_expected = dict(accuracy=0.6, f1=(2 / 3), pred_sum=3)
self.assertEqual(results, results_expected)
def test_dict_metric(self) -> None:
def dict_metric(golds, preds, probs):
return dict(a=1, b=2)
scorer = Scorer(custom_metric_funcs=dict(dict_metric=dict_metric))
results = scorer.score(*self._get_labels())
results_expected = dict(a=1, b=2)
self.assertEqual(results, results_expected)
def test_invalid_metric(self) -> None:
with self.assertRaisesRegex(ValueError, 'Unrecognized metric'):
Scorer(metrics=['accuracy', 'f2'])
def test_no_metrics(self) -> None:
scorer = Scorer()
self.assertEqual(scorer.score(*self._get_labels()), {})
def test_no_labels(self) -> None:
scorer = Scorer()
with self.assertRaisesRegex(ValueError, 'Cannot score'):
scorer.score([], [], [])
def test_no_probs(self) -> None:
scorer = Scorer()
(golds, preds, probs) = self._get_labels()
self.assertEqual(scorer.score(golds, preds), scorer.score(golds, preds, probs))
def test_abstain_labels(self) -> None:
golds = np.array([1, 0, 1, 0, (- 1)])
preds = np.array([1, 0, 1, 1, 0])
probs = np.array([0.8, 0.6, 0.9, 0.7, 0.4])
scorer = Scorer(metrics=['accuracy'], abstain_label=None)
results = scorer.score(golds, preds, probs)
results_expected = dict(accuracy=0.6)
self.assertEqual(results, results_expected)
scorer = Scorer(metrics=['accuracy'], abstain_label=(- 1))
results = scorer.score(golds, preds, probs)
results_expected = dict(accuracy=0.75)
self.assertEqual(results, results_expected)
abstain_preds = np.array([(- 1), (- 1), 1, 1, 0])
results = scorer.score(golds, abstain_preds)
results_expected = dict(accuracy=0.5)
self.assertEqual(results, results_expected)
scorer = Scorer(metrics=['coverage'], abstain_label=(- 1))
results = scorer.score(golds, abstain_preds)
results_expected = dict(coverage=0.6)
self.assertEqual(results, results_expected)
scorer = Scorer(metrics=['accuracy'], abstain_label=10)
results = scorer.score(golds, preds, probs)
results_expected = dict(accuracy=0.6)
self.assertEqual(results, results_expected)
def test_score_slices(self):
DATA = [5, 10, 19, 22, 25]
        @slicing_function()
def sf(x):
return (x.num < 20)
golds = np.array([0, 1, 0, 1, 0])
preds = np.array([0, 0, 0, 0, 0])
probs = preds_to_probs(preds, 2)
data = [SimpleNamespace(num=x) for x in DATA]
S = SFApplier([sf]).apply(data)
scorer = Scorer(metrics=['accuracy'])
metrics = scorer.score(golds=golds, preds=preds, probs=probs)
self.assertEqual(metrics['accuracy'], 0.6)
slice_metrics = scorer.score_slices(S=S, golds=golds, preds=preds, probs=probs)
self.assertEqual(slice_metrics['overall']['accuracy'], 0.6)
self.assertEqual(slice_metrics['sf']['accuracy'], (2.0 / 3.0))
metrics_df = scorer.score_slices(S=S, golds=golds, preds=preds, probs=probs, as_dataframe=True)
self.assertTrue(isinstance(metrics_df, pd.DataFrame))
self.assertEqual(metrics_df['accuracy']['overall'], 0.6)
self.assertEqual(metrics_df['accuracy']['sf'], (2.0 / 3.0))
with self.assertRaisesRegex(ValueError, 'must have the same number of elements'):
            scorer.score_slices(S=S, golds=golds[:1], preds=preds, probs=probs, as_dataframe=True)

class TACC():
def __init__(self, lag):
self.lag = lag
self.k = 3
check_acc(self.lag, self.k)
def make_vec(self, input_data, phyche_index=None, all_property=False, extra_phyche_index=None):
(sequence_list, phyche_value) = ready_acc(input_data, self.k, phyche_index, all_property, extra_phyche_index)
zipped = list(zip(make_ac_vector(sequence_list, self.lag, phyche_value, self.k), make_cc_vector(sequence_list, self.lag, phyche_value, self.k)))
vector = [reduce((lambda x, y: (x + y)), e) for e in zipped]
        return vector

def build_net(inputs):
net = Conv2D(32, 3, strides=2, activation='relu')(inputs)
net = Conv2D(32, 3, strides=2, activation='relu')(net)
net = Flatten()(net)
net = Dense(128, activation='relu')(net)
net = Dense(10, activation='softmax')(net)
    return net

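# A usage sketch, assuming the Conv2D/Flatten/Dense layers above come from
# tensorflow.keras; the input shape (28, 28, 1) is an illustrative assumption.
from tensorflow.keras import Input, Model

inputs = Input(shape=(28, 28, 1))
model = Model(inputs=inputs, outputs=build_net(inputs))
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
model.summary()
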
def load_data(fr_rating):
rating_data = {}
for line in fr_rating:
lines = line.split('\t')
user = lines[0]
item = lines[1]
time = lines[3].replace('\n', '')
if (user in rating_data):
rating_data[user].update({item: time})
else:
rating_data.update({user: {item: time}})
    return rating_data

def _generate_triangle_mask(point, image, shape, random):
if ((shape[0] == 1) or (shape[1] == 1)):
raise ValueError('dimension must be > 1 for triangles')
available_side = (min((image[1] - point[1]), point[0], shape[1]) - shape[0])
side = ((shape[0] + random.integers(max(1, available_side))) - 1)
triangle_height = int(np.ceil((np.sqrt((3 / 4.0)) * side)))
triangle = draw_polygon([point[0], (point[0] - triangle_height), point[0]], [point[1], (point[1] + (side // 2)), (point[1] + side)])
label = ('triangle', (((point[0] - triangle_height), (point[0] + 1)), (point[1], ((point[1] + side) + 1))))
    return (triangle, label)

class Algo(abc.ABC):
    @abc.abstractmethod
    def train(self, batch, **kwargs): ...
    @abc.abstractmethod
    def _train_step(self, train_state, target_params, rng, batch, **kwargs): ...
    @property
    @abc.abstractmethod
    def model_keys(self): ...
    @property
    @abc.abstractmethod
    def train_states(self): ...
    @property
    def train_params(self):
        return {key: self.train_states[key].params for key in self.model_keys}
    @abc.abstractmethod
    def total_steps(self): ...

class Gamma0_class(GammaH_class):
def __init__(self, level):
CongruenceSubgroup.__init__(self, level)
def _repr_(self):
return ('Congruence Subgroup Gamma0(%s)' % self.level())
def __reduce__(self):
return (Gamma0_constructor, (self.level(),))
def _latex_(self):
return ('\\Gamma_0(%s)' % self.level())
    @cached_method
def _generators_for_H(self):
if (self.level() in [1, 2]):
return []
return [ZZ(x) for x in IntegerModRing(self.level()).unit_gens()]
    @cached_method
def _list_of_elements_in_H(self):
N = self.level()
if (N != 1):
H = [x for x in range(1, N) if (gcd(x, N) == 1)]
else:
H = [1]
return H
def divisor_subgroups(self):
return [Gamma0_constructor(M) for M in self.level().divisors()]
def is_even(self):
return True
def is_subgroup(self, right):
if (right.level() == 1):
return True
if is_Gamma0(right):
return ((self.level() % right.level()) == 0)
if is_Gamma1(right):
if (right.level() >= 3):
return False
elif (right.level() == 2):
return (self.level() == 2)
else:
return GammaH_class.is_subgroup(self, right)
def coset_reps(self):
from .all import SL2Z
N = self.level()
if (N == 1):
(yield SL2Z([1, 0, 0, 1]))
else:
for z in P1List(N):
(yield SL2Z(lift_to_sl2z(z[0], z[1], N)))
    @cached_method
def generators(self, algorithm='farey'):
if (self.level() == 1):
return [self([0, (- 1), 1, 0]), self([1, 1, 0, 1])]
elif (algorithm == 'farey'):
return self.farey_symbol().generators()
elif (algorithm == 'todd-coxeter'):
from sage.modular.modsym.p1list import P1List
from .congroup import generators_helper
level = self.level()
if (level == 1):
return [self([0, (- 1), 1, 0]), self([1, 1, 0, 1])]
gen_list = generators_helper(P1List(level), level)
return [self(g, check=False) for g in gen_list]
else:
raise ValueError(("Unknown algorithm '%s' (should be either 'farey' or 'todd-coxeter')" % algorithm))
def gamma_h_subgroups(self):
from .all import GammaH
N = self.level()
R = IntegerModRing(N)
return [GammaH(N, H) for H in R.multiplicative_subgroups()]
def _contains_sl2(self, a, b, c, d):
return ((c % self.level()) == 0)
def _find_cusps(self):
N = self.level()
s = []
for d in divisors(N):
w = gcd(d, (N // d))
if (w == 1):
if (d == 1):
s.append(Cusp(1, 0))
elif (d == N):
s.append(Cusp(0, 1))
else:
s.append(Cusp(1, d))
else:
for a in range(1, w):
if (gcd(a, w) == 1):
while (gcd(a, (d // w)) != 1):
a += w
s.append(Cusp(a, d))
return sorted(s)
def ncusps(self):
n = self.level()
return sum((euler_phi(gcd(d, (n // d))) for d in n.divisors()))
def nu2(self):
n = self.level()
if ((n % 4) == 0):
return ZZ(0)
return prod([(1 + kronecker_symbol((- 4), p)) for (p, _) in n.factor()])
def nu3(self):
n = self.level()
if ((n % 9) == 0):
return ZZ(0)
return prod([(1 + kronecker_symbol((- 3), p)) for (p, _) in n.factor()])
def index(self):
return prod([((p ** e) + (p ** (e - 1))) for (p, e) in self.level().factor()])
def dimension_new_cusp_forms(self, k=2, p=0):
N = self.level()
k = ZZ(k)
if (not ((p == 0) or (N % p))):
return (self.dimension_cusp_forms(k) - (2 * self.restrict((N // p)).dimension_new_cusp_forms(k)))
if ((k < 2) or (k % 2)):
return ZZ.zero()
factors = list(N.factor())
def s0(q, a):
if (a == 1):
return (1 - (1 / q))
elif (a == 2):
return ((1 - (1 / q)) - (1 / (q ** 2)))
else:
return ((1 - (1 / q)) * (1 - (1 / (q ** 2))))
def vinf(q, a):
if (a % 2):
return 0
elif (a == 2):
return (q - 2)
else:
return ((q ** ((a / 2) - 2)) * ((q - 1) ** 2))
def v2(q, a):
if ((q % 4) == 1):
if (a == 2):
return (- 1)
else:
return 0
elif ((q % 4) == 3):
if (a == 1):
return (- 2)
elif (a == 2):
return 1
else:
return 0
elif (a in (1, 2)):
return (- 1)
elif (a == 3):
return 1
else:
return 0
def v3(q, a):
if ((q % 3) == 1):
if (a == 2):
return (- 1)
else:
return 0
elif ((q % 3) == 2):
if (a == 1):
return (- 2)
elif (a == 2):
return 1
else:
return 0
elif (a in (1, 2)):
return (- 1)
elif (a == 3):
return 1
else:
return 0
res = ((((k - 1) / 12) * N) * prod((s0(q, a) for (q, a) in factors)))
res -= (prod((vinf(q, a) for (q, a) in factors)) / ZZ(2))
res += ((((1 - k) / 4) + (k // 4)) * prod((v2(q, a) for (q, a) in factors)))
res += ((((1 - k) / 3) + (k // 3)) * prod((v3(q, a) for (q, a) in factors)))
if (k == 2):
res += moebius(N)
        return res

class L2Loss(nn.Module):
def __init__(self, args):
super(L2Loss, self).__init__()
self.args = args
self.loss = L2()
self.loss_labels = ['L2', 'EPE']
def forward(self, output, target):
lossvalue = self.loss(output, target)
epevalue = EPE(output, target)
        return [lossvalue, epevalue]

def FiBiNET(linear_feature_columns, dnn_feature_columns, bilinear_type='interaction', reduction_ratio=3, dnn_hidden_units=(256, 128, 64), l2_reg_linear=1e-05, l2_reg_embedding=1e-05, l2_reg_dnn=0, seed=1024, dnn_dropout=0, dnn_activation='relu', task='binary'):
features = build_input_features((linear_feature_columns + dnn_feature_columns))
inputs_list = list(features.values())
linear_logit = get_linear_logit(features, linear_feature_columns, seed=seed, prefix='linear', l2_reg=l2_reg_linear)
(sparse_embedding_list, dense_value_list) = input_from_feature_columns(features, dnn_feature_columns, l2_reg_embedding, seed)
senet_embedding_list = SENETLayer(reduction_ratio, seed)(sparse_embedding_list)
senet_bilinear_out = BilinearInteraction(bilinear_type=bilinear_type, seed=seed)(senet_embedding_list)
bilinear_out = BilinearInteraction(bilinear_type=bilinear_type, seed=seed)(sparse_embedding_list)
dnn_input = combined_dnn_input([Flatten()(concat_func([senet_bilinear_out, bilinear_out]))], dense_value_list)
dnn_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, False, seed=seed)(dnn_input)
dnn_logit = Dense(1, use_bias=False)(dnn_out)
final_logit = add_func([linear_logit, dnn_logit])
output = PredictionLayer(task)(final_logit)
model = Model(inputs=inputs_list, outputs=output)
    return model

def top_level_type(model: optplan.optplan.ProblemGraphNode) -> str:
    return model.type.split('.')[0]

class InputFeatures(object):
def __init__(self, input_ids, attention_mask, token_type_ids, label, pairID=None):
self.input_ids = input_ids
self.attention_mask = attention_mask
self.token_type_ids = token_type_ids
self.label = label
self.pairID = pairID
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
        return (json.dumps(self.to_dict(), indent=2, sort_keys=True) + '\n')

def mahalanobis(u, v, VI):
u = _validate_vector(u)
v = _validate_vector(v)
VI = np.atleast_2d(VI)
delta = (u - v)
m = np.dot(np.dot(delta, VI), delta)
    return np.sqrt(m)

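# A worked example: with VI the identity, the Mahalanobis distance reduces to
# the Euclidean distance; in practice VI is the inverse covariance of the data.
# (Assumes `_validate_vector`, used above, is in scope, e.g. from
# scipy.spatial.distance.)
import numpy as np

u = np.array([1.0, 0.0, 0.0])
v = np.array([0.0, 1.0, 0.0])
print(mahalanobis(u, v, np.eye(3)))          # sqrt(2) ~ 1.4142

X = np.random.default_rng(0).normal(size=(100, 3))
VI = np.linalg.inv(np.cov(X, rowvar=False))  # inverse covariance matrix
print(mahalanobis(u, v, VI))
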
class PrettyHelpFormatter(optparse.IndentedHelpFormatter):
def __init__(self, *args, **kwargs):
kwargs['max_help_position'] = 30
kwargs['indent_increment'] = 1
kwargs['width'] = (get_terminal_size()[0] - 2)
optparse.IndentedHelpFormatter.__init__(self, *args, **kwargs)
def format_option_strings(self, option):
return self._format_option_strings(option)
def _format_option_strings(self, option, mvarfmt=' <{}>', optsep=', '):
opts = []
if option._short_opts:
opts.append(option._short_opts[0])
if option._long_opts:
opts.append(option._long_opts[0])
if (len(opts) > 1):
opts.insert(1, optsep)
if option.takes_value():
metavar = (option.metavar or option.dest.lower())
opts.append(mvarfmt.format(metavar.lower()))
return ''.join(opts)
def format_heading(self, heading):
if (heading == 'Options'):
return ''
return (heading + ':\n')
def format_usage(self, usage):
msg = '\nUsage: {}\n'.format(self.indent_lines(textwrap.dedent(usage), ' '))
return msg
def format_description(self, description):
if description:
if hasattr(self.parser, 'main'):
label = 'Commands'
else:
label = 'Description'
description = description.lstrip('\n')
description = description.rstrip()
description = self.indent_lines(textwrap.dedent(description), ' ')
description = '{}:\n{}\n'.format(label, description)
return description
else:
return ''
def format_epilog(self, epilog):
if epilog:
return epilog
else:
return ''
def indent_lines(self, text, indent):
new_lines = [(indent + line) for line in text.split('\n')]
        return '\n'.join(new_lines)

def asarray(obj, itemsize=None, unicode=None, order=None):
    return array(obj, itemsize, copy=False, unicode=unicode, order=order)

def edit_filename(filename, prefix='', suffix='', new_ext=None):
(path, filename) = os.path.split(filename)
(base, ext) = os.path.splitext(filename)
if (new_ext is None):
new_filename = (((prefix + base) + suffix) + ext)
else:
new_filename = (((prefix + base) + suffix) + new_ext)
    return os.path.join(path, new_filename)

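# Quick examples of the renaming helper above:
print(edit_filename('/data/img.png', prefix='thumb_'))  # /data/thumb_img.png
print(edit_filename('/data/img.png', suffix='_v2'))     # /data/img_v2.png
print(edit_filename('/data/img.png', new_ext='.jpg'))   # /data/img.jpg
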
def _format(val: Any, output_format: str='standard', errors: str='coarse') -> Any:
val = str(val)
result: Any = []
if (val in NULL_VALUES):
return [np.nan]
if (not validate_mu_nid(val)):
if (errors == 'raise'):
raise ValueError(f'Unable to parse value {val}')
error_result = (val if (errors == 'ignore') else np.nan)
return [error_result]
if (output_format in {'compact', 'standard'}):
result = ([nid.compact(val)] + result)
elif (output_format == 'birthdate'):
result = ([nid._get_date(val)] + result)
    return result

@test_utils.test(arch=get_host_arch_list())
def test_unpack_mismatch_tuple():
a = ti.field(ti.f32, ())
b = ti.field(ti.f32, ())
    list = [2, 3, 4]

    @ti.kernel
    def func():
        (a[None], b[None]) = list  # three values cannot unpack into two targets
with pytest.raises(ti.TaichiCompilationError):
        func()

class ArgNode(ASTNode):
def __init__(self, val, data_type, fields):
super().__init__('ARG', val, data_type, fields)
def textual_form_core(self):
prompt = ('with most' if (self.val == 'ARGMAX') else 'with least')
        return ' '.join([self.fields[0].textual_form(), prompt, self.fields[1].textual_form()])

def build_lr_scheduler(cfg, optimizer):
scheduler_args = {'optimizer': optimizer, 'warmup_factor': cfg.SOLVER.WARMUP_FACTOR, 'warmup_epochs': cfg.SOLVER.WARMUP_EPOCHS, 'warmup_method': cfg.SOLVER.WARMUP_METHOD, 'milestones': cfg.SOLVER.STEPS, 'gamma': cfg.SOLVER.GAMMA, 'max_iters': cfg.SOLVER.MAX_ITER, 'delay_iters': cfg.SOLVER.DELAY_ITERS, 'eta_min_lr': cfg.SOLVER.ETA_MIN_LR}
scheduler = getattr(lr_scheduler, cfg.SOLVER.SCHED)(**scheduler_args)
    return {'scheduler': scheduler, 'interval': cfg.SOLVER.INTERVAL}

class SimpleCustomBatch(object):
def __init__(self, data):
transposed_data = list(zip(*data))
self.inp = torch.stack(transposed_data[0], 0)
self.tgt = torch.stack(transposed_data[1], 0)
def pin_memory(self):
self.inp = self.inp.pin_memory()
self.tgt = self.tgt.pin_memory()
return self
def is_pinned(self):
        return (self.inp.is_pinned() and self.tgt.is_pinned())

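# A usage sketch: a collate_fn returning SimpleCustomBatch lets DataLoader pin
# the whole batch through the pin_memory() hook defined above.
import torch
from torch.utils.data import DataLoader, TensorDataset

dataset = TensorDataset(torch.randn(8, 3), torch.randint(0, 2, (8,)))
loader = DataLoader(dataset, batch_size=4, collate_fn=SimpleCustomBatch,
                    pin_memory=torch.cuda.is_available())
for batch in loader:
    print(batch.inp.shape, batch.tgt.shape, batch.is_pinned())
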
class InferenceModel(Pix2PixHDModel):
def forward(self, inp):
label = inp
        return self.inference(label)

def get_peft_state_non_lora(named_params) -> Dict:
to_return = {k: t for (k, t) in named_params if (('lora_' not in k) and (t.requires_grad or ('_lmm_projector' in k)))}
to_return = {k: maybe_zero_3(v, ignore_status=True).cpu() for (k, v) in to_return.items()}
    return to_return

def compute_b_mp(sigma, q, lmbd, verbose=False):
lmbd_int = int(math.ceil(lmbd))
if (lmbd_int == 0):
return 1.0
(mu0, _, mu) = distributions_mp(sigma, q)
b_lambda_fn = (lambda z: (mu0(z) * ((mu0(z) / mu(z)) ** lmbd_int)))
b_lambda = integral_inf_mp(b_lambda_fn)
m = ((sigma ** 2) * (mp.log(((2 - q) / (1 - q))) + (1 / (2 * (sigma ** 2)))))
b_fn = (lambda z: (((mu0(z) / mu(z)) ** lmbd_int) - ((mu((- z)) / mu0(z)) ** lmbd_int)))
if verbose:
print('M =', m)
print('f(-M) = {} f(M) = {}'.format(b_fn((- m)), b_fn(m)))
assert ((b_fn((- m)) < 0) and (b_fn(m) < 0))
b_lambda_int1_fn = (lambda z: (mu0(z) * ((mu0(z) / mu(z)) ** lmbd_int)))
b_lambda_int2_fn = (lambda z: (mu0(z) * ((mu(z) / mu0(z)) ** lmbd_int)))
b_int1 = integral_bounded_mp(b_lambda_int1_fn, (- m), m)
b_int2 = integral_bounded_mp(b_lambda_int2_fn, (- m), m)
a_lambda_m1 = compute_a_mp(sigma, q, (lmbd - 1))
b_bound = ((a_lambda_m1 + b_int1) - b_int2)
if verbose:
print('B by numerical integration', b_lambda)
print('B must be no more than ', b_bound)
assert (b_lambda < (b_bound + 1e-05))
    return _to_np_float64(b_lambda)

class TestReduceSum(object):
def test(self):
correct = np.array([(- 2), 2, 21])
with clean_session():
array = tf.constant([[1, (- 8), 5, 4, 9], [0, 2, 7, 8, 1], [2, (- 8), 6, 4, 9]], dtype=tf.float32)
mask = tf.constant([[1, 1, 1, 0, 0], [1, 1, 0, 0, 0], [1, 0, 1, 1, 1]], dtype=tf.float32)
result = reduce_sum(SequenceBatch(array, mask))
            assert_almost_equal(result.eval(), correct, decimal=5)

def combine_partial_results(partial_results) -> List:
records = []
for partial_result in partial_results:
records.extend(partial_result)
records = sorted(records, key=(lambda x: x['id']))
preds = [x['pred'] for x in records]
    return preds

def stretch_audio(x, rate, window_size=512):
c = lr.stft(x, n_fft=window_size, hop_length=(window_size // 4), win_length=window_size)
re = interpolation.zoom(c.real, zoom=(1, rate))
im = interpolation.zoom(c.imag, zoom=(1, rate))
w = lr.istft((re + (im * 1j)), hop_length=(window_size // 4), win_length=window_size)
    return w

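# A usage sketch, assuming `lr` is librosa and `interpolation` is
# scipy.ndimage.interpolation, as the calls above suggest; rate > 1 lengthens
# the clip, rate < 1 shortens it.
import numpy as np
import librosa as lr
from scipy.ndimage import interpolation  # deprecated alias, matching the snippet's usage

sr = 22050
x = np.sin(2 * np.pi * 440 * np.arange(sr) / sr).astype(np.float32)  # 1 s tone
y = stretch_audio(x, rate=1.5)
print(len(x), len(y))
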
def accuracy(output, target, topk=(1,), avg=False):
maxk = max(topk)
batch_size = target.size(0)
(_, pred) = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, (- 1)).expand_as(pred))
res = []
for k in topk:
        correct_k = correct[:k].reshape((- 1)).float().sum(0, keepdim=True)
if avg:
res.append(correct_k.mul_((100.0 / batch_size)))
else:
res.append(correct_k)
    return res

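# A small usage sketch: top-1 and top-5 accuracy on random logits.
import torch

output = torch.randn(32, 10)            # (batch, classes) logits
target = torch.randint(0, 10, (32,))
top1, top5 = accuracy(output, target, topk=(1, 5), avg=True)
print(top1.item(), top5.item())         # percentages in [0, 100]
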
def check_real_value(f, x1, y1, x, exact=True):
z1 = np.array([complex(x1, y1)])
if exact:
assert_equal(f(z1), x)
else:
        assert_almost_equal(f(z1), x)

def _call_torchmetrics(metric: retrieval_metrics.RetrievalMetric, scores, query2target_idx, **kwargs):
(preds, target, indexes) = _prepare_torchmetrics_input(scores, query2target_idx)
    return metric(preds, target, indexes=indexes, **kwargs).item()

def plot_likelihood_BO_limit(likelihood):
df = check_likelihood_BO_limit(likelihood)
(fig, axs) = plt.subplots(1, 3, figsize=(12, 4), sharex=True)
axs[0].plot(df['mz_hat'], df['A_BO'], '-', label='$A \\quad BO$')
axs[0].plot(df['mz_hat'], df['A_RS'], '--', label='$A \\quad RS$')
axs[0].set(xlabel='$\\widehat{m}_z^+$')
axs[0].legend()
axs[1].plot(df['mz_hat'], df['vz_BO'], '-', label='$v_z \\quad BO$')
axs[1].plot(df['mz_hat'], df['vz_RS'], '--', label='$v_z \\quad RS$')
axs[1].set(xlabel='$\\widehat{m}_z^+$')
axs[1].legend()
axs[2].plot(df['mz_hat'], df['mz_BO'], '-', label='$m_z \\quad BO$')
axs[2].plot(df['mz_hat'], df['mz_RS'], '--', label='$m_z \\quad RS$')
axs[2].plot(df['mz_hat'], df['qz_RS'], 'x', label='$q_z \\quad RS$')
axs[2].set(xlabel='$\\widehat{m}_z^+$')
axs[2].legend()
fig.suptitle(likelihood)
    fig.tight_layout(rect=[0, 0.03, 1, 0.95])

def normalize_sentence(sentence):
sentence = sentence.upper()
sentence = jiwer.RemovePunctuation()(sentence)
sentence = jiwer.RemoveWhiteSpace(replace_by_space=True)(sentence)
sentence = jiwer.RemoveMultipleSpaces()(sentence)
sentence = jiwer.Strip()(sentence)
    return sentence

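# A quick example of the jiwer-based normalization above (requires jiwer):
print(normalize_sentence('  hello,   world! '))  # HELLO WORLD
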
class ChangeAmplitude(object):
def __init__(self, amplitude_range=(0.7, 1.1)):
self.amplitude_range = amplitude_range
def __call__(self, data):
if (not should_apply_transform()):
return data
data = (data * random.uniform(*self.amplitude_range))
        return data

class StanfordURLTitleModel(Coref, StanfordModel):
def __init__(self, model, debug=False):
self.model = model
self.debug = debug
def predict(self, text, a, b, pronoun_offset, a_offset, b_offset, url, id, debug=False, **kwargs):
(doc, tokens, pronoun_offset, a_offset, b_offset, a_span, b_span, pronoun_token, a_tokens, b_tokens) = self.tokenize(text, a, b, pronoun_offset, a_offset, b_offset, **kwargs)
clusters = []
title = os.path.basename(urllib.parse.unquote(url)).replace('_', ' ')
title = [token for token in self.model.tokenize(title)]
try:
trees = self.model.parse_text(text)
graph = parse_tree_to_graph(trees, doc)
for token in a_tokens:
token.syn_dist = get_syntactical_distance_from_graph(graph, token, pronoun_token)
for token in b_tokens:
token.syn_dist = get_syntactical_distance_from_graph(graph, token, pronoun_token)
candidate_a = sorted(filter((lambda token: (token.text in title)), a_tokens), key=(lambda token: token.syn_dist))
candidate_b = sorted(filter((lambda token: (token.text in title)), b_tokens), key=(lambda token: token.syn_dist))
if (len(candidate_a) and len(candidate_b) and (candidate_a[0].syn_dist == candidate_b[0].syn_dist)):
if debug:
print('{}, Both mentions have the same syntactic distance'.format(id))
else:
candidates = sorted((candidate_a + candidate_b), key=(lambda token: token.syn_dist))
if len(candidates):
candidate = candidates[0]
clusters.append([[pronoun_offset, pronoun_offset], [candidate.i, candidate.i]])
except Exception as e:
print('{}, {}'.format(id, e))
token_to_char_mapping = [token.idx for token in doc]
        return (tokens, clusters, pronoun_offset, a_span, b_span, token_to_char_mapping)

def random_length(minlen=0, maxlen=None):
if (maxlen is None):
return (minlen + int(math.floor(random.expovariate(0.1))))
else:
        return random.randint(minlen, maxlen)

def match_vert_lists(short_list, long_list):
match_list = []
idx_short = 0
for idx_long in range(len(long_list)):
long_vertex = long_list[idx_long]
short_vertex = short_list[idx_short]
if all(np.isclose(short_vertex, long_vertex, atol=1e-05)):
match_list.append(idx_long)
idx_short += 1
if (idx_short >= len(short_list)):
break
if (len(match_list) != len(short_list)):
        raise ValueError('Vertex matching unsuccessful: matched {} of {} vertices in short list'.format(len(match_list), len(short_list)))
    return match_list

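# A small example: every vertex of the short list appears, in order, in the
# long list; the returned indices locate them.
import numpy as np

long_list = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
short_list = long_list[[1, 3]]
print(match_vert_lists(short_list, long_list))  # [1, 3]
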
class BinaryActivation(nn.Module):
def __init__(self):
super(BinaryActivation, self).__init__()
def forward(self, x):
out_forward = torch.sign(x)
mask1 = (x < (- 1))
mask2 = (x < 0)
mask3 = (x < 1)
out1 = (((- 1) * mask1.type(torch.float32)) + (((x * x) + (2 * x)) * (1 - mask1.type(torch.float32))))
out2 = ((out1 * mask2.type(torch.float32)) + ((((- x) * x) + (2 * x)) * (1 - mask2.type(torch.float32))))
out3 = ((out2 * mask3.type(torch.float32)) + (1 * (1 - mask3.type(torch.float32))))
out = ((out_forward.detach() - out3.detach()) + out3)
        return out

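# A usage sketch of the straight-through estimator above: the forward pass
# returns sign(x), while gradients flow through the piecewise polynomial out3
# rather than the (almost everywhere zero) derivative of sign.
import torch

act = BinaryActivation()
x = torch.linspace(-2, 2, 9, requires_grad=True)
y = act(x)
print(y.detach())   # values in {-1, 0, +1}
y.sum().backward()
print(x.grad)       # nonzero only where |x| < 1
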
def gen_config(seq_name, label_id):
seq_home = '../DAVIS/trainval'
save_home = '../result_davis_fig'
result_home = '../result_davis'
label_id = int(label_id)
img_dir = os.path.join(seq_home, 'JPEGImages/480p', seq_name)
img_list = sorted(glob.glob(os.path.join(img_dir, '*.jpg')))
gt_path = os.path.join(seq_home, 'Annotations/480p', seq_name, '00000.png')
init_bbox = cross2otb(np.array(get_mask_bbox((np.array(Image.open(gt_path)) == int(label_id)))))
savefig_dir = os.path.join(save_home, seq_name, str(label_id))
result_dir = os.path.join(result_home, seq_name, str(label_id))
os.makedirs(savefig_dir, exist_ok=True)
os.makedirs(result_dir, exist_ok=True)
    return (img_list, init_bbox, savefig_dir, result_dir)

def _get_box_class_field(eval_boxes: EvalBoxes) -> str:
assert (len(eval_boxes.boxes) > 0)
box = None
for val in eval_boxes.boxes.values():
if (len(val) > 0):
box = val[0]
break
if isinstance(box, DetectionBox):
class_field = 'detection_name'
elif isinstance(box, TrackingBox):
class_field = 'tracking_name'
else:
raise Exception(('Error: Invalid box type: %s' % box))
    return class_field

def run_evaluation(args, data, knowledge):
results = [{'id': data[i]['id']} for i in range(len(data))]
if args.run_factual:
print('Running Factualness Evaluation...')
(factual_s, meta_data) = factual_scores(args.factual_method, data, knowledge, args.use_cuda, args.gpu_device)
for (i, x) in enumerate(factual_s):
results[i]['factualness_score'] = x
if ('aspect_explanation' in meta_data.keys()):
results[i].setdefault('aspect_explanation', {})['factual'] = meta_data['aspect_explanation'][i]
if args.run_safety:
print('Running Safety Evaluation...')
(safety_s, meta_data) = safety_scores(args.safety_method, data, knowledge, args.batch_size, args.use_cuda)
for (i, x) in enumerate(safety_s):
results[i]['safety_score'] = x
if ('all_scores' in meta_data.keys()):
results[i]['safety_meta'] = meta_data['all_scores'][i]
if ('aspect_explanation' in meta_data.keys()):
results[i].setdefault('aspect_explanation', {})['safety'] = meta_data['aspect_explanation'][i]
if args.run_constraint:
print('Running Constraint Evaluation...')
(constraint_s, meta_data) = constraint_scores(args.constraint_method, data, knowledge)
for (i, x) in enumerate(constraint_s):
results[i]['constraint_score'] = x
if ('aspect_explanation' in meta_data.keys()):
results[i].setdefault('aspect_explanation', {})['constraint'] = meta_data['aspect_explanation'][i]
return results |
class TestTransforms(unittest.TestCase):
def setUp(self):
setup_logger()
def test_apply_rotated_boxes(self):
np.random.seed(125)
cfg = get_cfg()
is_train = True
transform_gen = detection_utils.build_transform_gen(cfg, is_train)
image = np.random.rand(200, 300)
(image, transforms) = T.apply_transform_gens(transform_gen, image)
image_shape = image.shape[:2]
assert (image_shape == (800, 1200))
annotation = {'bbox': [179, 97, 62, 40, (- 56)]}
boxes = np.array([annotation['bbox']], dtype=np.float64)
transformed_bbox = transforms.apply_rotated_box(boxes)[0]
expected_bbox = np.array([484, 388, 248, 160, 56], dtype=np.float64)
err_msg = 'transformed_bbox = {}, expected {}'.format(transformed_bbox, expected_bbox)
assert np.allclose(transformed_bbox, expected_bbox), err_msg
def test_apply_rotated_boxes_unequal_scaling_factor(self):
np.random.seed(125)
(h, w) = (400, 200)
(newh, neww) = (800, 800)
image = np.random.rand(h, w)
transform_gen = []
transform_gen.append(T.Resize(shape=(newh, neww)))
(image, transforms) = T.apply_transform_gens(transform_gen, image)
image_shape = image.shape[:2]
assert (image_shape == (newh, neww))
boxes = np.array([[150, 100, 40, 20, 0], [150, 100, 40, 20, 30], [150, 100, 40, 20, 90], [150, 100, 40, 20, (- 90)]], dtype=np.float64)
transformed_boxes = transforms.apply_rotated_box(boxes)
expected_bboxes = np.array([[600, 200, 160, 40, 0], [600, 200, 144., 52., 49.], [600, 200, 80, 80, 90], [600, 200, 80, 80, (- 90)]], dtype=np.float64)
err_msg = 'transformed_boxes = {}, expected {}'.format(transformed_boxes, expected_bboxes)
assert np.allclose(transformed_boxes, expected_bboxes), err_msg
def test_print_transform_gen(self):
t = T.RandomCrop('relative', (100, 100))
self.assertTrue((str(t) == "RandomCrop(crop_type='relative', crop_size=(100, 100))"))
t = T.RandomFlip(prob=0.5)
self.assertTrue((str(t) == 'RandomFlip(prob=0.5)'))
t = T.RandomFlip()
self.assertTrue((str(t) == 'RandomFlip()')) |
from dataclasses import dataclass

@dataclass
class JsTracerTable():
    # Columnar storage of JS tracer samples; @dataclass is assumed here, since
    # the class body consists of field annotations only.
events: list
timestamps: list
durations: list
line: list
column: list
length: int |
def optimization_command(args):
input_file = args.input_file[0]
output_file = args.output_file[0]
ext = os.path.splitext(input_file)[1]
if (ext == '.pb'):
if (os.path.splitext(output_file)[1] != '.pb'):
raise ValueError('Input or output file format error.')
optimize_pb_model_command(input_file, output_file)
elif (ext == '.nnp'):
if (os.path.splitext(output_file)[1] != '.nnp'):
raise ValueError('Input or output file format error.')
optimize_nnp_model_command(input_file, output_file)
else:
        raise ValueError(f'{ext} is an unsupported file format.')
class BertAttOutput(nn.Module):
def __init__(self, config):
super(BertAttOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm((hidden_states + input_tensor))
return hidden_states |
def test_0166_ByteMaskedArray():
content = ak.operations.from_iter([[2, 3, 5], [999], [], [7, 11], [], [13], [123, 999], [17, 19]], highlevel=False)
mask = ak.index.Index8(np.array([0, 1, 0, 0, 1, 0, 1, 0], dtype=np.int8))
array = ak.highlevel.Array(ak.contents.ByteMaskedArray(mask, content, valid_when=False))
assert (to_list(array) == [[2, 3, 5], None, [], [7, 11], None, [13], None, [17, 19]])
assert (to_list(ak.operations.prod(array, axis=(- 1))) == [30, None, 1, 77, None, 13, None, 323])
content = ak.operations.from_iter([[[2, 3], [5]], [[999]], [], [[7], [11]], [], [[13]], [[123], [999]], [[17, 19]]], highlevel=False)
mask = ak.index.Index8(np.array([0, 1, 0, 0, 1, 0, 1, 0], dtype=np.int8))
array = ak.highlevel.Array(ak.contents.ByteMaskedArray(mask, content, valid_when=False))
assert (to_list(array) == [[[2, 3], [5]], None, [], [[7], [11]], None, [[13]], None, [[17, 19]]])
assert (to_list(ak.operations.prod(array, axis=(- 1))) == [[6, 5], None, [], [7, 11], None, [13], None, [323]])
content = ak.operations.from_iter([[2, 3], [999], [5], [7], [11], [13], [], [17], [19]], highlevel=False)
mask = ak.index.Index8(np.array([0, 1, 0, 0, 0, 0, 1, 0, 0], dtype=np.int8))
bytemasked = ak.contents.ByteMaskedArray(mask, content, valid_when=False)
offsets = ak.index.Index64(np.array([0, 3, 3, 5, 6, 9], dtype=np.int64))
array = ak.highlevel.Array(ak.contents.ListOffsetArray(offsets, bytemasked))
array = ak.highlevel.Array([[[2, 3], None, [5]], [], [[7], [11]], [[13]], [None, [17], [19]]])
assert (to_list(ak.operations.prod(array, axis=(- 1))) == [[6, None, 5], [], [7, 11], [13], [None, 17, 19]])
content = ak.operations.from_iter([6, None, 5, 7, 11, 13, None, 17, 19], highlevel=False)
mask = ak.index.Index8(np.array([0, 1, 0, 0, 0, 0, 1, 0, 0], dtype=np.int8))
bytemasked = ak.contents.ByteMaskedArray.simplified(mask, content, valid_when=False)
offsets = ak.index.Index64(np.array([0, 3, 3, 5, 6, 9], dtype=np.int64))
array = ak.highlevel.Array(ak.contents.ListOffsetArray(offsets, bytemasked))
assert (to_list(array) == [[6, None, 5], [], [7, 11], [13], [None, 17, 19]])
assert (to_list(ak.operations.prod(array, axis=(- 1))) == [30, 1, 77, 13, 323]) |
class Gather(Function):
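    # Gathers tensors from several GPUs onto one target device along `dim`;
    # the backward pass scatters the gradient back to the source devices.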
    @staticmethod
    def forward(ctx, target_device, dim, *inputs):
assert all(map((lambda i: (i.device.type != 'cpu')), inputs)), 'Gather function not implemented for CPU tensors'
target_device = _get_device_index(target_device, True)
ctx.target_device = target_device
ctx.dim = dim
ctx.input_gpus = tuple(map((lambda i: i.get_device()), inputs))
if (all(((t.dim() == 0) for t in inputs)) and (dim == 0)):
inputs = tuple((t.view(1) for t in inputs))
warnings.warn('Was asked to gather along dimension 0, but all input tensors were scalars; will instead unsqueeze and return a vector.')
ctx.unsqueezed_scalar = True
else:
ctx.unsqueezed_scalar = False
ctx.input_sizes = tuple(map((lambda i: i.size(ctx.dim)), inputs))
return comm.gather(inputs, ctx.dim, ctx.target_device)
    @staticmethod
    def backward(ctx, grad_output):
scattered_grads = Scatter.apply(ctx.input_gpus, ctx.input_sizes, ctx.dim, grad_output)
if ctx.unsqueezed_scalar:
scattered_grads = tuple((g[0] for g in scattered_grads))
return ((None, None) + scattered_grads) |
def main():
args = get_args()
with open(args.from_path, 'r') as fp:
in_ = json.load(fp)
text = 'This is just a short sentence for test.'
paragraph = {'context': text, 'qas': []}
article = {'paragraphs': [paragraph], 'title': 'dummy'}
in_['data'].append(article)
with open(args.to_path, 'w') as fp:
json.dump(in_, fp) |
def map_tokenized_to_id(tokenized: List[List[str]], word_to_id: 'lightautoml.addons.interpretation.utils.WrappedVocabulary', min_k: int) -> List[torch.LongTensor]:
dataset = []
for sent in tokenized:
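        # Prepend <START>, map tokens to ids, and right-pad with <PAD> (always
        # at least one) so each sequence is at least min_k tokens long.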
sent_list = [word_to_id['<START>']]
sent_list.extend(map(word_to_id, sent))
pad_tokens = max(1, (min_k - len(sent_list)))
sent_list.extend(([word_to_id['<PAD>']] * pad_tokens))
dataset.append(torch.Tensor(sent_list).long())
return dataset |
def _evaluate_predictions_on_coco_segm(coco_gt, coco_dt, metrics, min_threshold=0.5):
coco_eval = DensePoseCocoEval(coco_gt, coco_dt, 'segm')
coco_eval.params.iouThrs = np.linspace(min_threshold, 0.95, (int(np.round(((0.95 - min_threshold) / 0.05))) + 1), endpoint=True)
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
results = {metric: float((coco_eval.stats[idx] * 100)) for (idx, metric) in enumerate(metrics)}
return results |
def _radius_of_gyration_individual(traj):
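    # Radius of gyration: root-mean-square haversine distance between the
    # individual's visited points and their center of mass.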
lats_lngs = traj[[constants.LATITUDE, constants.LONGITUDE]].values
center_of_mass = np.mean(lats_lngs, axis=0)
rg = np.sqrt(np.mean([(getDistanceByHaversine((lat, lng), center_of_mass) ** 2.0) for (lat, lng) in lats_lngs]))
return rg |
@pytest.mark.operations('flaky')
def test_flaky(testdir, openapi3_schema_url):
testdir.make_test(f'''
def api_schema():
return schemathesis.from_uri('{openapi3_schema_url}')
lazy_schema = schemathesis.from_pytest_fixture("api_schema")
@lazy_schema.parametrize()
def test_(case):
case.call_and_validate()''')
result = testdir.runpytest()
result.assert_outcomes(passed=1, failed=1)
stdout = result.stdout.str()
assert ('[500] Internal Server Error' in stdout)
assert ('def run_subtest' not in stdout)
assert ('def collecting_wrapper' not in stdout)
assert ('def __flaky' not in stdout)
assert (stdout.count('test_flaky.py:3') == 1) |
def _strong_orientations_of_a_mixed_graph(Dg, V, E):
length = len(E)
i = 0
boundEdges = []
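    # Pre-filter: if deleting one arc of an edge makes its head unreachable from
    # its tail, that edge's orientation is forced; fix it, drop it from E, and
    # stash the removed arc in boundEdges so the digraph can be restored later.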
while (i < length):
(u, v) = E[i]
Dg.delete_edge(u, v)
if (not (v in Dg.depth_first_search(u))):
E[i] = E[(- 1)]
E.pop()
length -= 1
Dg.add_edge(u, v)
Dg.delete_edge(v, u)
boundEdges.append((v, u))
else:
Dg.add_edge(u, v)
Dg.delete_edge(v, u)
if (not (u in Dg.depth_first_search(v))):
E[i] = E[(- 1)]
E.pop()
length -= 1
boundEdges.append((u, v))
Dg.delete_edge(u, v)
else:
i += 1
Dg.add_edge(v, u)
if (not E):
(yield Dg.copy())
else:
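        # Branch on one free edge: orient it u->v and recurse, then v->u and
        # recurse, restoring both arcs afterwards.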
(u, v) = E.pop()
Dg.delete_edge(v, u)
for orientation in _strong_orientations_of_a_mixed_graph(Dg, V, E):
(yield orientation)
Dg.add_edge(v, u)
Dg.delete_edge(u, v)
for orientation in _strong_orientations_of_a_mixed_graph(Dg, V, E):
(yield orientation)
Dg.add_edge(u, v)
E.append((u, v))
Dg.add_edges(boundEdges)
E.extend(boundEdges) |
def execute(file_name: str=None, voxel_offset: tuple=None, voxel_size: tuple=None, dtype: str=None, layer_type: str=None):
(arr, _) = nrrd.read(file_name)
if dtype:
arr = arr.astype(dtype)
chunk = Chunk(arr, voxel_offset=voxel_offset, voxel_size=voxel_size)
    return chunk |
@parse_args('v', 'v', 'f', 'i')
def add(g, input_a, input_b, scale, zero_point):
kwargs = {'Y_scale_f': scale, 'Y_zero_point_i': zero_point}
output = g.op('_caffe2::Int8Add', input_a, input_b, **kwargs)
sym_help._quantized_ops.add(output)
return output |
def register_methods(root_module):
register_Ns3Address_methods(root_module, root_module['ns3::Address'])
register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer'])
register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator'])
register_Ns3ByteTagIterator_methods(root_module, root_module['ns3::ByteTagIterator'])
register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item'])
register_Ns3ByteTagList_methods(root_module, root_module['ns3::ByteTagList'])
register_Ns3ByteTagListIterator_methods(root_module, root_module['ns3::ByteTagList::Iterator'])
register_Ns3ByteTagListIteratorItem_methods(root_module, root_module['ns3::ByteTagList::Iterator::Item'])
register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
register_Ns3DataRate_methods(root_module, root_module['ns3::DataRate'])
register_Ns3DefaultDeleter__Ns3AttributeAccessor_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeAccessor >'])
register_Ns3DefaultDeleter__Ns3AttributeChecker_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeChecker >'])
register_Ns3DefaultDeleter__Ns3AttributeValue_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeValue >'])
register_Ns3DefaultDeleter__Ns3CallbackImplBase_methods(root_module, root_module['ns3::DefaultDeleter< ns3::CallbackImplBase >'])
register_Ns3DefaultDeleter__Ns3EventImpl_methods(root_module, root_module['ns3::DefaultDeleter< ns3::EventImpl >'])
register_Ns3DefaultDeleter__Ns3HashImplementation_methods(root_module, root_module['ns3::DefaultDeleter< ns3::Hash::Implementation >'])
register_Ns3DefaultDeleter__Ns3NixVector_methods(root_module, root_module['ns3::DefaultDeleter< ns3::NixVector >'])
register_Ns3DefaultDeleter__Ns3Packet_methods(root_module, root_module['ns3::DefaultDeleter< ns3::Packet >'])
register_Ns3DefaultDeleter__Ns3QueueItem_methods(root_module, root_module['ns3::DefaultDeleter< ns3::QueueItem >'])
register_Ns3DefaultDeleter__Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::DefaultDeleter< ns3::TraceSourceAccessor >'])
register_Ns3EventId_methods(root_module, root_module['ns3::EventId'])
register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher'])
register_Ns3IntToType__0_methods(root_module, root_module['ns3::IntToType< 0 >'])
register_Ns3IntToType__1_methods(root_module, root_module['ns3::IntToType< 1 >'])
register_Ns3IntToType__2_methods(root_module, root_module['ns3::IntToType< 2 >'])
register_Ns3IntToType__3_methods(root_module, root_module['ns3::IntToType< 3 >'])
register_Ns3IntToType__4_methods(root_module, root_module['ns3::IntToType< 4 >'])
register_Ns3IntToType__5_methods(root_module, root_module['ns3::IntToType< 5 >'])
register_Ns3IntToType__6_methods(root_module, root_module['ns3::IntToType< 6 >'])
register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address'])
register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask'])
register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address'])
register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix'])
register_Ns3Mac48Address_methods(root_module, root_module['ns3::Mac48Address'])
register_Ns3NetDeviceContainer_methods(root_module, root_module['ns3::NetDeviceContainer'])
register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
register_Ns3PacketMetadata_methods(root_module, root_module['ns3::PacketMetadata'])
register_Ns3PacketMetadataItem_methods(root_module, root_module['ns3::PacketMetadata::Item'])
register_Ns3PacketMetadataItemIterator_methods(root_module, root_module['ns3::PacketMetadata::ItemIterator'])
register_Ns3PacketTagIterator_methods(root_module, root_module['ns3::PacketTagIterator'])
register_Ns3PacketTagIteratorItem_methods(root_module, root_module['ns3::PacketTagIterator::Item'])
register_Ns3PacketTagList_methods(root_module, root_module['ns3::PacketTagList'])
register_Ns3PacketTagListTagData_methods(root_module, root_module['ns3::PacketTagList::TagData'])
register_Ns3QueueDiscContainer_methods(root_module, root_module['ns3::QueueDiscContainer'])
register_Ns3QueueDiscFactory_methods(root_module, root_module['ns3::QueueDiscFactory'])
register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
register_Ns3Simulator_methods(root_module, root_module['ns3::Simulator'])
register_Ns3Tag_methods(root_module, root_module['ns3::Tag'])
register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
register_Ns3TimeWithUnit_methods(root_module, root_module['ns3::TimeWithUnit'])
register_Ns3Timer_methods(root_module, root_module['ns3::Timer'])
register_Ns3TimerImpl_methods(root_module, root_module['ns3::TimerImpl'])
register_Ns3TracedValue__Bool_methods(root_module, root_module['ns3::TracedValue< bool >'])
register_Ns3TracedValue__Unsigned_int_methods(root_module, root_module['ns3::TracedValue< unsigned int >'])
register_Ns3TrafficControlHelper_methods(root_module, root_module['ns3::TrafficControlHelper'])
register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk'])
register_Ns3Header_methods(root_module, root_module['ns3::Header'])
register_Ns3Object_methods(root_module, root_module['ns3::Object'])
register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
register_Ns3PacketFilter_methods(root_module, root_module['ns3::PacketFilter'])
register_Ns3QueueDisc_methods(root_module, root_module['ns3::QueueDisc'])
register_Ns3QueueDiscStats_methods(root_module, root_module['ns3::QueueDisc::Stats'])
register_Ns3QueueDiscClass_methods(root_module, root_module['ns3::QueueDiscClass'])
register_Ns3RandomVariableStream_methods(root_module, root_module['ns3::RandomVariableStream'])
register_Ns3RedQueueDisc_methods(root_module, root_module['ns3::RedQueueDisc'])
register_Ns3SequentialRandomVariable_methods(root_module, root_module['ns3::SequentialRandomVariable'])
register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
register_Ns3SimpleRefCount__Ns3QueueItem_Ns3Empty_Ns3DefaultDeleter__lt__ns3QueueItem__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> >'])
register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
register_Ns3Time_methods(root_module, root_module['ns3::Time'])
register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
register_Ns3TracedValue__Ns3Time_methods(root_module, root_module['ns3::TracedValue< ns3::Time >'])
register_Ns3TrafficControlLayer_methods(root_module, root_module['ns3::TrafficControlLayer'])
register_Ns3Trailer_methods(root_module, root_module['ns3::Trailer'])
register_Ns3TriangularRandomVariable_methods(root_module, root_module['ns3::TriangularRandomVariable'])
register_Ns3UniformRandomVariable_methods(root_module, root_module['ns3::UniformRandomVariable'])
register_Ns3WeibullRandomVariable_methods(root_module, root_module['ns3::WeibullRandomVariable'])
register_Ns3ZetaRandomVariable_methods(root_module, root_module['ns3::ZetaRandomVariable'])
register_Ns3ZipfRandomVariable_methods(root_module, root_module['ns3::ZipfRandomVariable'])
register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
register_Ns3BooleanChecker_methods(root_module, root_module['ns3::BooleanChecker'])
register_Ns3BooleanValue_methods(root_module, root_module['ns3::BooleanValue'])
register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
register_Ns3CoDelQueueDisc_methods(root_module, root_module['ns3::CoDelQueueDisc'])
register_Ns3ConstantRandomVariable_methods(root_module, root_module['ns3::ConstantRandomVariable'])
register_Ns3DataRateChecker_methods(root_module, root_module['ns3::DataRateChecker'])
register_Ns3DataRateValue_methods(root_module, root_module['ns3::DataRateValue'])
register_Ns3DeterministicRandomVariable_methods(root_module, root_module['ns3::DeterministicRandomVariable'])
register_Ns3DoubleValue_methods(root_module, root_module['ns3::DoubleValue'])
register_Ns3EmpiricalRandomVariable_methods(root_module, root_module['ns3::EmpiricalRandomVariable'])
register_Ns3EmptyAttributeAccessor_methods(root_module, root_module['ns3::EmptyAttributeAccessor'])
register_Ns3EmptyAttributeChecker_methods(root_module, root_module['ns3::EmptyAttributeChecker'])
register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
register_Ns3EnumChecker_methods(root_module, root_module['ns3::EnumChecker'])
register_Ns3EnumValue_methods(root_module, root_module['ns3::EnumValue'])
register_Ns3ErlangRandomVariable_methods(root_module, root_module['ns3::ErlangRandomVariable'])
register_Ns3EventImpl_methods(root_module, root_module['ns3::EventImpl'])
register_Ns3ExponentialRandomVariable_methods(root_module, root_module['ns3::ExponentialRandomVariable'])
register_Ns3FqCoDelFlow_methods(root_module, root_module['ns3::FqCoDelFlow'])
register_Ns3FqCoDelQueueDisc_methods(root_module, root_module['ns3::FqCoDelQueueDisc'])
register_Ns3GammaRandomVariable_methods(root_module, root_module['ns3::GammaRandomVariable'])
register_Ns3IntegerValue_methods(root_module, root_module['ns3::IntegerValue'])
register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker'])
register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue'])
register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker'])
register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue'])
register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker'])
register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue'])
register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker'])
register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue'])
register_Ns3LogNormalRandomVariable_methods(root_module, root_module['ns3::LogNormalRandomVariable'])
register_Ns3Mac48AddressChecker_methods(root_module, root_module['ns3::Mac48AddressChecker'])
register_Ns3Mac48AddressValue_methods(root_module, root_module['ns3::Mac48AddressValue'])
register_Ns3MqQueueDisc_methods(root_module, root_module['ns3::MqQueueDisc'])
register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice'])
register_Ns3NixVector_methods(root_module, root_module['ns3::NixVector'])
register_Ns3Node_methods(root_module, root_module['ns3::Node'])
register_Ns3NormalRandomVariable_methods(root_module, root_module['ns3::NormalRandomVariable'])
register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
register_Ns3Packet_methods(root_module, root_module['ns3::Packet'])
register_Ns3ParetoRandomVariable_methods(root_module, root_module['ns3::ParetoRandomVariable'])
register_Ns3PfifoFastQueueDisc_methods(root_module, root_module['ns3::PfifoFastQueueDisc'])
register_Ns3PieQueueDisc_methods(root_module, root_module['ns3::PieQueueDisc'])
register_Ns3QueueItem_methods(root_module, root_module['ns3::QueueItem'])
register_Ns3StringChecker_methods(root_module, root_module['ns3::StringChecker'])
register_Ns3StringValue_methods(root_module, root_module['ns3::StringValue'])
register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
register_Ns3UintegerValue_methods(root_module, root_module['ns3::UintegerValue'])
register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker'])
register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue'])
register_Ns3CallbackImpl__Ns3ObjectBase___star___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
register_Ns3CallbackImpl__Void_Bool_Bool_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, bool, bool, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
register_Ns3CallbackImpl__Void_Ns3Ptr__lt__const_ns3QueueDiscItem__gt___Const_char___star___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<const ns3::QueueDiscItem>, const char *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
register_Ns3CallbackImpl__Void_Ns3Ptr__lt__const_ns3QueueDiscItem__gt___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<const ns3::QueueDiscItem>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
register_Ns3CallbackImpl__Void_Ns3Ptr__lt__ns3NetDevice__gt___Ns3Ptr__lt__const_ns3Packet__gt___Unsigned_short_Const_ns3Address___amp___Const_ns3Address___amp___Ns3NetDevicePacketType_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<const ns3::Packet>, unsigned short, const ns3::Address &, const ns3::Address &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >'])
register_Ns3CallbackImpl__Void_Ns3Ptr__lt__ns3NetDevice__gt___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<ns3::NetDevice>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
register_Ns3CallbackImpl__Void_Ns3Time_Ns3Time_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Time, ns3::Time, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
register_Ns3CallbackImpl__Void_Unsigned_int_Unsigned_int_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, unsigned int, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
register_Ns3QueueDiscItem_methods(root_module, root_module['ns3::QueueDiscItem'])
register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation'])
register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a'])
register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32'])
register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64'])
register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3'])
return |
def forgiving_state_restore(net, loaded_dict):
loaded_dict = {k.replace('module.', ''): v for (k, v) in loaded_dict.items()}
net_state_dict = net.state_dict()
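    # Copy only those checkpoint tensors whose key and shape match the current
    # network; everything else is skipped instead of raising.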
new_loaded_dict = {}
for k in net_state_dict:
new_k = k
if ((new_k in loaded_dict) and (net_state_dict[k].size() == loaded_dict[new_k].size())):
new_loaded_dict[k] = loaded_dict[new_k]
else:
print('Skipped loading parameter {}'.format(k))
net_state_dict.update(new_loaded_dict)
net.load_state_dict(net_state_dict)
return net |
def extract_clips_with_consecutive_frames(path, num_clips=8, num_frames_per_clip=16):
valid = True
clips = list()
try:
video_data = skvideo.io.vread(path)
    except Exception:
print('file {} error'.format(path))
valid = False
return (list(np.zeros(shape=(num_clips, num_frames_per_clip, 3, 224, 224))), valid)
total_frames = video_data.shape[0]
img_size = (224, 224)
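    # Center a window of num_frames_per_clip frames at each of num_clips evenly
    # spaced positions (video endpoints excluded), replicating the boundary
    # frame whenever the window runs off either end.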
for i in np.linspace(0, total_frames, (num_clips + 2), dtype=np.int32)[1:(num_clips + 1)]:
clip_start = (int(i) - int((num_frames_per_clip / 2)))
clip_end = (int(i) + int((num_frames_per_clip / 2)))
if (clip_start < 0):
clip_start = 0
if (clip_end > total_frames):
clip_end = (total_frames - 1)
clip = video_data[clip_start:clip_end]
if (clip_start == 0):
shortage = (num_frames_per_clip - (clip_end - clip_start))
added_frames = []
for _ in range(shortage):
added_frames.append(np.expand_dims(video_data[clip_start], axis=0))
if (len(added_frames) > 0):
added_frames = np.concatenate(added_frames, axis=0)
clip = np.concatenate((added_frames, clip), axis=0)
if (clip_end == (total_frames - 1)):
shortage = (num_frames_per_clip - (clip_end - clip_start))
added_frames = []
for _ in range(shortage):
added_frames.append(np.expand_dims(video_data[clip_end], axis=0))
if (len(added_frames) > 0):
added_frames = np.concatenate(added_frames, axis=0)
clip = np.concatenate((clip, added_frames), axis=0)
new_clip = []
for j in range(num_frames_per_clip):
frame_data = clip[j]
img = Image.fromarray(frame_data).resize(size=img_size)
frame_data = np.asarray(img)
frame_data = np.transpose(frame_data, (2, 0, 1))
new_clip.append(frame_data)
new_clip = np.asarray(new_clip)
clips.append(new_clip)
return (clips, valid) |
class MegaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[(str, Mapping[(int, str)])]:
if (self.task == 'multiple-choice'):
dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)]) |
@_registry.register('google_qa_answer_satisfaction')
class GoogleQuestQAAnswerSatisfaction(GoogleQuestQALabel):
def label_columns(self):
return ['answer_satisfaction']
def label_types(self):
return [_NUMERICAL] |
class DepsTableUpdateCommand(Command):
description = 'build runtime dependency table'
user_options = [('dep-table-update', None, 'updates src/transformers/dependency_versions_table.py')]
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
entries = '\n'.join([f' "{k}": "{v}",' for (k, v) in deps.items()])
        content = ['# THIS FILE HAS BEEN AUTOGENERATED. To update:', '# 1. modify the `_deps` dict in setup.py', '# 2. run `make deps_table_update`', 'deps = {', entries, '}', '']
target = 'src/transformers/dependency_versions_table.py'
print(f'updating {target}')
with open(target, 'w', encoding='utf-8', newline='\n') as f:
f.write('\n'.join(content)) |
def get_prior_BO_BN_instance(prior, ax, sample):
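    # Push a prior sample through the Gaussian measurement channel
    # bx = ax*x + sqrt(ax)*noise, then record the posterior overlap (mx),
    # self-overlap (qx), variance (vx), MSE, and the log-partition A.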
x_true = prior.sample()
noise = np.random.standard_normal(prior.size)
bx = ((ax * x_true) + (np.sqrt(ax) * noise))
(rx, vx) = prior.compute_forward_posterior(ax, bx)
vx = np.mean(vx)
mx = np.mean((x_true * rx))
qx = np.mean((rx ** 2))
mse_x = np.mean(((x_true - rx) ** 2))
A = prior.compute_log_partition(ax, bx)
return dict(vx=vx, mx=mx, qx=qx, mse_x=mse_x, A=A) |
def valuestr(data: Any, limit_rows: int, limit_cols: int, formatter: (Formatter | None)=None) -> str:
if (formatter is None):
formatter = Formatter()
if (isinstance(data, (ak.highlevel.Array, ak.highlevel.Record)) and (not data.layout.backend.nplike.known_data)):
data.layout._touch_data(recursive=True)
if isinstance(data, ak.highlevel.Array):
return '[...]'
if (limit_rows <= 1):
(_, strs) = valuestr_horiz(data, limit_cols, formatter)
return ''.join(strs)
elif isinstance(data, ak.highlevel.Array):
(front, back) = ([], [])
which = 0
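        # Collect rows alternately from the front and back of the array until
        # limit_rows is reached; an elided middle is rendered as '...'.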
for (forward, index) in alternate(len(data)):
(_, strs) = valuestr_horiz(get_at(data, index), (limit_cols - 2), formatter)
if forward:
front.append(''.join(strs))
else:
back.insert(0, ''.join(strs))
which += 1
if (which >= limit_rows):
break
if ((len(data) != 0) and (which != len(data))):
back[0] = '...'
out = (front + back)
for (i, val) in enumerate(out):
if (i > 0):
val = out[i] = (' ' + val)
else:
val = out[i] = ('[' + val)
if (i < (len(out) - 1)):
out[i] = (val + ',')
else:
out[i] = (val + ']')
return '\n'.join(out)
elif isinstance(data, ak.highlevel.Record):
is_tuple = data.layout.is_tuple
front = []
which = 0
fields = data.fields
for key in fields:
if is_tuple:
key_str = ''
elif (is_identifier.match(key) is None):
key_str = (repr(key) + ': ')
if key_str.startswith('u'):
key_str = key_str[1:]
else:
key_str = (key + ': ')
(_, strs) = valuestr_horiz(get_field(data, key), ((limit_cols - 2) - len(key_str)), formatter)
front.append((key_str + ''.join(strs)))
which += 1
if (which >= limit_rows):
break
if ((len(fields) != 0) and (which != len(fields))):
front[(- 1)] = '...'
out = front
for (i, val) in enumerate(out):
if (i > 0):
val = out[i] = (' ' + val)
elif data.is_tuple:
val = out[i] = ('(' + val)
else:
val = out[i] = ('{' + val)
if (i < (len(out) - 1)):
out[i] = (val + ',')
elif data.is_tuple:
out[i] = (val + ')')
else:
out[i] = (val + '}')
return '\n'.join(out)
else:
raise AssertionError(type(data)) |
def DM_28_6_1():
z = 2
M = [[(0, 0), ((z + 1), 6), (1, 1), (1, 1), (1, 3), (1, 4), (0, 0), (1, 4), (z, 5)], [(z, 2), (0, 0), (1, 5), (z, 1), (z, 2), (z, 6), ((z + 1), 3), (0, 0), (z, 1)], [(z, 3), ((z + 1), 4), (0, 0), ((z + 1), 5), ((z + 1), 2), ((z + 1), 4), ((z + 1), 2), (1, 6), (0, 0)], [(0, 5), (z, 6), (0, 5), (0, 6), (z, 3), (0, 0), (0, 4), (1, 5), ((z + 1), 4)], [(0, 3), (0, 3), ((z + 1), 5), (0, 0), (0, 5), ((z + 1), 6), (1, 1), (0, 1), (z, 3)], [(1, 3), (0, 6), (0, 6), (1, 5), (0, 0), (0, 3), ((z + 1), 6), (z, 2), (0, 2)]]
from sage.groups.additive_abelian.additive_abelian_group import AdditiveAbelianGroup
from sage.modules.free_module_element import free_module_element as vector
G = AdditiveAbelianGroup([2, 2, 7])
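    # Lift each pair (x, y) into Z2 x Z2 x Z7: the two bits of x give the Z2
    # coordinates and y gives the Z7 coordinate.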
M = [[G(vector([(x // 2), (x % 2), y])) for (x, y) in L] for L in M]
Mb = [[0, 0, 0, 0, 0, 0]]
for R in zip(*M):
(a, b, c, d, e, f) = R
Mb.append([a, b, c, d, e, f])
Mb.append([b, c, a, f, d, e])
Mb.append([c, a, b, e, f, d])
return (G, Mb) |
def activation(fn_name):
fn = None
if (fn_name == 'relu'):
fn = tf.nn.relu
elif (fn_name == 'elu'):
fn = tf.nn.elu
elif (fn_name == 'leaky_relu'):
fn = tf.nn.leaky_relu
return fn |
def capitalize(text, language, resources):
tokens = tokenize_light(text, language)
stop_words = get_stop_words(resources)
return get_default_sep(language).join(((t.title() if (t.lower() not in stop_words) else t.lower()) for t in tokens)) |
def rgb_to_yiq(r, g, b):
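    # NTSC luma weights for Y; I and Q are scaled combinations of the
    # color-difference terms (R - Y) and (B - Y).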
y = (((0.3 * r) + (0.59 * g)) + (0.11 * b))
i = ((0.74 * (r - y)) - (0.27 * (b - y)))
q = ((0.48 * (r - y)) + (0.41 * (b - y)))
return (y, i, q) |
@pytest.mark.parametrize('n_unique_action, is_factorizable, evaluation_policy_type, epsilon, description', valid_input_of_generate_evaluation_policy_pscore)
def test_generate_evaluation_policy_pscore_using_valid_input_data(n_unique_action, is_factorizable, evaluation_policy_type, epsilon, description) -> None:
len_list = 3
dim_context = 2
reward_type = 'binary'
random_state = 12345
n_rounds = 100
dataset = SyntheticSlateBanditDataset(n_unique_action=n_unique_action, len_list=len_list, dim_context=dim_context, reward_type=reward_type, random_state=random_state, is_factorizable=is_factorizable, base_reward_function=logistic_reward_function)
bandit_feedback = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds, return_pscore_item_position=True)
(pscore, pscore_item_position, pscore_cascade) = dataset.generate_evaluation_policy_pscore(evaluation_policy_type=evaluation_policy_type, context=bandit_feedback['context'], epsilon=epsilon, action=bandit_feedback['action'])
if ((evaluation_policy_type == 'random') or (epsilon == 1.0)):
assert np.allclose(pscore, bandit_feedback['pscore'])
assert np.allclose(pscore_item_position, bandit_feedback['pscore_item_position'])
assert np.allclose(pscore_cascade, bandit_feedback['pscore_cascade'])
if (epsilon == 0.0):
assert (len((set(np.unique(pscore)) - set([0.0, 1.0]))) == 0)
assert (len((set(np.unique(pscore_item_position)) - set([0.0, 1.0]))) == 0)
assert (len((set(np.unique(pscore_cascade)) - set([0.0, 1.0]))) == 0)
assert ((pscore_cascade < pscore).sum() == 0), 'pscore must be smaller than or equal to pscore_cascade'
assert ((pscore_item_position < pscore).sum() == 0), 'pscore must be smaller than or equal to pscore_item_position'
assert ((pscore_item_position < pscore_cascade).sum() == 0), 'pscore_cascade must be smaller than or equal to pscore_item_position'
check_slate_bandit_feedback(bandit_feedback=bandit_feedback, is_factorizable=is_factorizable)
bandit_feedback_df = pd.DataFrame()
for column in ['slate_id', 'position', 'action']:
bandit_feedback_df[column] = bandit_feedback[column]
bandit_feedback_df['pscore'] = pscore
bandit_feedback_df['pscore_cascade'] = pscore_cascade
bandit_feedback_df['pscore_item_position'] = pscore_item_position
previous_minimum_pscore_cascade = bandit_feedback_df.groupby('slate_id')['pscore_cascade'].expanding().min().values
    assert ((previous_minimum_pscore_cascade < pscore_cascade).sum() == 0), 'pscore_cascade must be a non-increasing sequence in each slate'
count_pscore_in_expression = bandit_feedback_df.groupby('slate_id').apply((lambda x: x['pscore'].unique().shape[0]))
assert ((count_pscore_in_expression != 1).sum() == 0), '`pscore` must be unique in each slate'
last_slot_feedback_df = bandit_feedback_df.drop_duplicates('slate_id', keep='last')
assert np.allclose(last_slot_feedback_df['pscore'], last_slot_feedback_df['pscore_cascade']), 'pscore must be the same as pscore_cascade in the last slot' |
def _good_shape(x, shape, axes):
if ((shape is not None) and (axes is None)):
shape = _helper._iterable_of_int(shape, 'shape')
if (len(shape) != np.ndim(x)):
raise ValueError('when given, axes and shape arguments have to be of the same length')
return shape |
def ref_top_n_error(x, l, axis, n):
orig_x = x.copy()
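    # Move `axis` to the last dimension and flatten the rest: each row of x now
    # holds the class scores of one sample, and ll holds the matching labels.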
x = np.rollaxis(x, axis, x.ndim).reshape((- 1), x.shape[axis])
ll = np.rollaxis(l, axis, x.ndim).flatten()
y = []
for (x_, ll_) in zip(x, ll):
threshold = x_[ll_]
count = 0
for x__ in x_:
if (x__ >= threshold):
count += 1
y.append((1 if (count > n) else 0))
return np.array(y).reshape(l.shape) |
def conv1x1(in_planes, out_planes, stride=1, bias=False):
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, padding=0, bias=bias) |
class RandomExtremeCartPole(ModifiableCartPoleEnv):
def __init__(self):
super(RandomExtremeCartPole, self).__init__()
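        # Sample each physical parameter from the EXTREME range while excluding
        # the inner RANDOM sub-range, so extreme environments never overlap the
        # plain random variants.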
self.force_mag = uniform_exclude_inner(self.np_random.uniform, self.EXTREME_LOWER_FORCE_MAG, self.EXTREME_UPPER_FORCE_MAG, self.RANDOM_LOWER_FORCE_MAG, self.RANDOM_UPPER_FORCE_MAG)
self.length = uniform_exclude_inner(self.np_random.uniform, self.EXTREME_LOWER_LENGTH, self.EXTREME_UPPER_LENGTH, self.RANDOM_LOWER_LENGTH, self.RANDOM_UPPER_LENGTH)
self.masspole = uniform_exclude_inner(self.np_random.uniform, self.EXTREME_LOWER_MASSPOLE, self.EXTREME_UPPER_MASSPOLE, self.RANDOM_LOWER_MASSPOLE, self.RANDOM_UPPER_MASSPOLE)
self._followup()
def reset(self, new=True):
self.nsteps = 0
self.state = self.np_random.uniform(low=(- 0.05), high=0.05, size=(4,))
self.steps_beyond_done = None
if new:
self.force_mag = uniform_exclude_inner(self.np_random.uniform, self.EXTREME_LOWER_FORCE_MAG, self.EXTREME_UPPER_FORCE_MAG, self.RANDOM_LOWER_FORCE_MAG, self.RANDOM_UPPER_FORCE_MAG)
self.length = uniform_exclude_inner(self.np_random.uniform, self.EXTREME_LOWER_LENGTH, self.EXTREME_UPPER_LENGTH, self.RANDOM_LOWER_LENGTH, self.RANDOM_UPPER_LENGTH)
self.masspole = uniform_exclude_inner(self.np_random.uniform, self.EXTREME_LOWER_MASSPOLE, self.EXTREME_UPPER_MASSPOLE, self.RANDOM_LOWER_MASSPOLE, self.RANDOM_UPPER_MASSPOLE)
self._followup()
return np.array(self.state)
def parameters(self):
parameters = super(RandomExtremeCartPole, self).parameters
parameters.update({'force_mag': self.force_mag, 'length': self.length, 'masspole': self.masspole, 'total_mass': self.total_mass, 'polemass_length': self.polemass_length})
return parameters |
def sentence_distance(anaphor, antecedent):
return ('sentence_distance', __compute_sentence_distance(anaphor, antecedent)) |
def process_test(query_path, gallery_path):
query_img_paths = glob.glob(os.path.join(query_path, '*.jpg'))
gallery_img_paths = glob.glob(os.path.join(gallery_path, '*.jpg'))
query_paths = []
pattern = re.compile('([-\\d]+)_(\\d*)')
for img_path in query_img_paths:
(pid, camid) = map(int, pattern.search(img_path).groups())
query_paths.append([img_path, pid, camid])
gallery_paths = []
for img_path in gallery_img_paths:
(pid, camid) = map(int, pattern.search(img_path).groups())
gallery_paths.append([img_path, pid, camid])
return (query_paths, gallery_paths) |
class ModelWrapper(object):
def __init__(self, name=None, display=False):
self.visuals = [('universe', pin.SE3.Identity(), pin.SE3.Identity().translation)]
self.name = (self.__class__.__name__ if (name is None) else name)
self.model = pin.Model()
self.display = display
self.add_joints()
def add_joints(self):
for joint in self.joints:
self.add_joint(**joint)
def add_joint(self, joint_name, joint_model=None, joint_placement=None, lever=None, shape='box', dimensions=1, mass=None, body_color=1, parent=0):
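        # Normalize the joint model, placement, and lever specs, build an inertia
        # for the requested shape (box / cylinder / sphere), then register the
        # joint and body with the pinocchio model and, optionally, the viewer.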
if (joint_model is None):
joint_model = pin.JointModelFreeFlyer()
elif isinstance(joint_model, str):
joint_model = pin.__dict__[('JointModel' + joint_model)]()
if (joint_placement is None):
joint_placement = pin.SE3.Identity()
elif isinstance(joint_placement, dict):
joint_placement = placement(**joint_placement)
if (lever is None):
lever = pin.SE3.Identity()
elif isinstance(lever, dict):
lever = placement(**lever)
(joint_name, body_name) = (('world/%s_%s_%s' % (self.name, joint_name, i)) for i in ('joint', 'body'))
body_inertia = pin.Inertia.Random()
if (shape == 'box'):
(w, h, d) = ((float(i) for i in dimensions) if isinstance(dimensions, tuple) else ([float(dimensions)] * 3))
if (mass is None):
mass = (((w * h) * d) * DENSITY)
body_inertia = pin.Inertia.FromBox(mass, w, h, d)
if self.display:
self.display.viewer.gui.addBox(body_name, w, h, d, color(body_color))
elif (shape == 'cylinder'):
(r, h) = dimensions
if (mass is None):
mass = (((pi * (r ** 2)) * h) * DENSITY)
body_inertia = pin.Inertia.FromCylinder(mass, r, h)
if self.display:
self.display.viewer.gui.addCylinder(body_name, r, h, color(body_color))
elif (shape == 'sphere'):
(w, h, d) = ((float(i) for i in dimensions) if isinstance(dimensions, tuple) else ([float(dimensions)] * 3))
if (mass is None):
mass = ((((((4.0 / 3.0) * pi) * w) * h) * d) * DENSITY)
body_inertia = pin.Inertia.FromEllipsoid(mass, w, h, d)
if self.display:
self.display.viewer.gui.addSphere(body_name, dimensions, color(body_color))
body_inertia.lever = lever.translation
joint_id = self.model.addJoint(parent, joint_model, joint_placement, joint_name)
self.model.appendBodyToJoint(joint_id, body_inertia, pin.SE3.Identity())
self.model.addJointFrame(joint_id, (- 1))
self.model.addBodyFrame(body_name, joint_id, pin.SE3.Identity(), (- 1))
self.visuals.append((body_name, joint_placement, lever))
self.data = self.model.createData()
if self.display:
self.place()
def place(self):
for (i, (name, placement, lever)) in enumerate(self.visuals):
if (i == 0):
continue
self.display.place(name, ((self.data.oMi[i] * placement) * lever))
self.display.viewer.gui.refresh() |