code stringlengths 101 5.91M |
|---|
def main():
    """Entry point: dump segment-name -> sequence-length pairs as a dict literal.

    Reads one or more Bliss corpus files, converts each segment's duration to
    centisecond frames, and writes the mapping to ``--output`` (written through
    gzip when the target filename ends in ``.gz``).
    """
    parser = ArgumentParser()
    parser.add_argument('bliss_filename', nargs='+')
    parser.add_argument('--output', default='/dev/stdout')
    args = parser.parse_args()
    # Pick the sink: transparently gzip-compress when asked for a .gz file.
    if args.output.endswith('.gz'):
        out = gzip.GzipFile(args.output, mode='wb')
    else:
        out = open(args.output, 'wb')
    out.write(b'{\n')
    for bliss_item in itertools.chain(*[iter_bliss(fn) for fn in args.bliss_filename]):
        assert isinstance(bliss_item, BlissItem)
        # delta_time is in seconds; store the length in 10 ms frames.
        seq_len = round(bliss_item.delta_time * 100.0)
        out.write(b'%r: %i,\n' % (bliss_item.segment_name, seq_len))
    out.write(b'}\n')
    out.close()
def runNonMotifTICC(inputName, outputName, clusters, beta, oldAssignmentsName):
    # Convenience wrapper around runTest() for the non-motif TICC variant:
    # first positional argument pinned to 0 and the two flags after `beta`
    # pinned to 1.
    # NOTE(review): the meaning of the literal arguments is inferred from this
    # call site only -- confirm against runTest's signature.
    runTest(0, inputName, outputName, clusters, beta, 1, 1, oldAssignmentsName)
class TecoGANDiscriminator(nn.Module):
    """Patch discriminator used for TecoGAN-style training.

    Repeatedly halves the spatial resolution until it reaches 32, then applies
    a fixed stack of strided convolutions followed by a single linear score.
    """

    def __init__(self, resolution, input_channels):
        super(TecoGANDiscriminator, self).__init__()
        self.resolution = resolution
        self.input_channels = input_channels
        assert ((resolution & (resolution - 1)) == 0), ('resolution is not a power of two: %d' % resolution)
        layers = []
        channels = input_channels
        # One extra downsampling stage per halving step above 32.
        while resolution > 32:
            resolution //= 2
            layers.extend([nn.Conv2d(channels, 64, 4, 2, 1, bias=False), nn.LeakyReLU(0.2)])
            channels = 64
        # Fixed tail: 32 -> 16 -> 8 -> 4 spatial, ending at 256 channels.
        layers.extend([
            nn.Conv2d(64, 128, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2),
            nn.Conv2d(128, 256, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2),
            nn.Conv2d(256, 256, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2),
        ])
        self.features = nn.Sequential(*layers)
        # 256 channels * 4 * 4 spatial positions = 4096 classifier inputs.
        self.classifier = nn.Sequential(nn.Linear(4096, 1, True))
        self._initialize_weights()

    def _initialize_weights(self):
        """He-style init for convs, small normal for linear, identity for BN."""
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                fan_out = (module.kernel_size[0] * module.kernel_size[1]) * module.out_channels
                module.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
                if module.bias is not None:
                    module.bias.data.zero_()
            elif isinstance(module, nn.Linear):
                module.weight.data.normal_(0, 0.01)
                module.bias.data.zero_()
            elif isinstance(module, nn.BatchNorm2d):
                module.weight.data.fill_(1)
                module.bias.data.zero_()

    def forward(self, x):
        """Score a batch of square images of shape (B, C, resolution, resolution)."""
        batch, channels, height, width = x.shape
        assert channels == self.input_channels
        assert height == self.resolution
        assert width == self.resolution
        feats = self.features(x)
        flat = feats.view(feats.size(0), -1)
        return self.classifier(flat)
class FacebookManagerSendMessage(VirtualFunctionTool):
    # Declarative schema for a virtual "send direct message" tool; actual
    # execution behaviour is provided by the VirtualFunctionTool base class.
    name = 'FacebookManagerSendMessage'
    summary = 'Send a message to another user.'
    # Both parameters are required strings: whom to message and what to say.
    parameters: List[ArgParameter] = [{'name': 'recipient_id', 'type': 'string', 'description': 'The unique identifier of the recipient.', 'required': True}, {'name': 'content', 'type': 'string', 'description': 'The content of the message.', 'required': True}]
    # Single boolean success flag.
    returns: List[ArgReturn] = [{'name': 'success', 'type': 'boolean', 'description': 'Whether the message was successfully sent.'}]
    # Declared failure modes: unknown recipient, or empty message body.
    exceptions: List[ArgException] = [{'name': 'NotFoundException', 'description': "The 'recipient_id' parameter is not found."}, {'name': 'InvalidRequestException', 'description': "The 'content' parameter is empty."}]
class FairseqDataset(torch.utils.data.Dataset):
    """Abstract base dataset with the interface fairseq-style loaders expect.

    Subclasses must implement item access, sizing, and batch collation;
    prefetching is opt-in via :meth:`supports_prefetch`.
    """

    def __getitem__(self, index):
        raise NotImplementedError

    def __len__(self):
        raise NotImplementedError

    def collater(self, samples):
        """Merge a list of samples into a mini-batch."""
        raise NotImplementedError

    def num_tokens(self, index):
        """Number of tokens in the example at ``index`` (used for batching)."""
        raise NotImplementedError

    def size(self, index):
        """Example size used when filtering by maximum positions."""
        raise NotImplementedError

    def ordered_indices(self):
        """Return example indices in batching order (default: natural order)."""
        # BUG FIX: removed a stray debug print('zzz') left in this method.
        return np.arange(len(self))

    def supports_prefetch(self):
        """Whether :meth:`prefetch` can load ``indices`` ahead of time."""
        return False

    def prefetch(self, indices):
        raise NotImplementedError

    def set_epoch(self, epoch):
        """Hook invoked at the start of each epoch; no-op by default."""
        pass
def test_get_collaborators_for_task(assigner):
    """The 'train' task in round 2 is assigned to collaborators one and two."""
    assert assigner.get_collaborators_for_task('train', 2) == ['one', 'two']
class UniformReplayBuffer(ReplayBuffer):
    """Replay buffer that samples stored episodes uniformly with replacement."""

    def __init__(self):
        self._episodes = []

    def __len__(self):
        return len(self._episodes)

    def sample(self, num_episodes):
        """Draw ``num_episodes`` episodes uniformly, with replacement.

        Returns ``(episodes, probs, trace)`` where every probability is 1.0
        and ``trace`` is always ``None``.
        """
        picks = np.random.choice(len(self._episodes), size=num_episodes, replace=True)
        chosen = [self._episodes[idx] for idx in picks]
        weights = [1.0 for _ in chosen]
        return (chosen, weights, None)

    def extend(self, episodes):
        """Append a batch of episodes to the buffer."""
        self._episodes.extend(episodes)

    def status(self):
        """Human-readable summary of the current buffer size."""
        return 'size: {}'.format(len(self))
def context_gate_factory(type, embeddings_size, decoder_size, attention_size, output_size):
    """Instantiate the ContextGate implementation named by ``type``.

    Valid names are 'source', 'target' and 'both'.
    """
    gate_types = {'source': SourceContextGate, 'target': TargetContextGate, 'both': BothContextGate}
    assert (type in gate_types), 'Not valid ContextGate type: {0}'.format(type)
    gate_cls = gate_types[type]
    return gate_cls(embeddings_size, decoder_size, attention_size, output_size)
def get_F0(wav):
    """Estimate the fundamental-frequency contour of ``wav`` with librosa's pyin.

    The pitch search range spans C2 to C7.
    """
    fmin = librosa.note_to_hz('C2')
    fmax = librosa.note_to_hz('C7')
    f0, _, _ = librosa.pyin(wav, fmin=fmin, fmax=fmax)
    return f0
def check_supported(metrics):
    """Raise ValueError if any name in ``metrics`` is not a supported metric."""
    for metric_name in metrics:
        if metric_name not in supported_metrics:
            raise ValueError('metric name not supported %s, supported metrics: %s' % (metric_name, supported_metrics))
class OnPolicyVectorizedSampler(BatchSampler):
    """BatchSampler that steps several copies of the environment in lockstep.

    Deprecated: kept for backwards compatibility only; prefer the samplers
    implementing ``garage.sampler.Sampler`` (e.g. RaySampler) with VecWorker.
    """

    def __init__(self, algo, env, n_envs=None):
        if n_envs is None:
            # Default to four environment copies per available worker.
            n_envs = singleton_pool.n_parallel * 4
        super().__init__(algo, env)
        self._n_envs = n_envs
        self._vec_env = None
        self._env_spec = self.env.spec
        # BUG FIX: the deprecation message previously misspelled the class
        # name as 'OnPolicyVectoriizedSampler'.
        warnings.warn(DeprecationWarning('OnPolicyVectorizedSampler is deprecated, and will be removed in the next release. Please use VecWorker and one of the new samplers which implement garage.sampler.Sampler, such as RaySampler.'))

    def start_worker(self):
        """Create the vectorized executor from deep copies of the environment."""
        n_envs = self._n_envs
        # cloudpickle round-trip is a portable deep copy of the env.
        envs = [cloudpickle.loads(cloudpickle.dumps(self.env)) for _ in range(n_envs)]
        seed0 = deterministic.get_seed()
        if seed0 is not None:
            # Give each copy a distinct but reproducible seed.
            for (i, e) in enumerate(envs):
                e.seed(seed0 + i)
        self._vec_env = VecEnvExecutor(envs=envs, max_path_length=self.algo.max_path_length)

    def shutdown_worker(self):
        """Close the vectorized environment executor."""
        self._vec_env.close()

    def obtain_samples(self, itr, batch_size=None, whole_paths=True):
        """Collect at least ``batch_size`` transitions for iteration ``itr``.

        Returns a list of path dicts; when ``whole_paths`` is False the result
        is truncated to exactly ``batch_size`` samples.
        """
        logger.log('Obtaining samples for iteration %d...' % itr)
        if not batch_size:
            batch_size = self.algo.max_path_length * self._n_envs
        paths = []
        n_samples = 0
        obses = self._vec_env.reset()
        completes = np.asarray([True] * self._vec_env.num_envs)
        running_paths = [None] * self._vec_env.num_envs
        policy_time = 0
        env_time = 0
        process_time = 0
        policy = self.algo.policy
        with click.progressbar(length=batch_size, label='Sampling') as pbar:
            while n_samples < batch_size:
                t = time.time()
                policy.reset(completes)
                (actions, agent_infos) = policy.get_actions(obses)
                policy_time += time.time() - t
                t = time.time()
                (next_obses, rewards, dones, env_infos, completes) = self._vec_env.step(actions)
                env_time += time.time() - t
                t = time.time()
                agent_infos = tensor_utils.split_tensor_dict_list(agent_infos)
                env_infos = tensor_utils.split_tensor_dict_list(env_infos)
                if env_infos is None:
                    env_infos = [dict() for _ in range(self._vec_env.num_envs)]
                if agent_infos is None:
                    agent_infos = [dict() for _ in range(self._vec_env.num_envs)]
                for (idx, observation, action, reward, env_info, agent_info, done, complete) in zip(itertools.count(), obses, actions, rewards, env_infos, agent_infos, dones, completes):
                    if running_paths[idx] is None:
                        running_paths[idx] = dict(observations=[], actions=[], rewards=[], env_infos=[], agent_infos=[], dones=[])
                    running_paths[idx]['observations'].append(observation)
                    running_paths[idx]['actions'].append(action)
                    running_paths[idx]['rewards'].append(reward)
                    running_paths[idx]['env_infos'].append(env_info)
                    running_paths[idx]['agent_infos'].append(agent_info)
                    running_paths[idx]['dones'].append(done)
                    if complete:
                        # Episode finished: freeze the accumulated per-step
                        # lists into arrays and count the collected samples.
                        obs = np.asarray(running_paths[idx]['observations'])
                        actions = np.asarray(running_paths[idx]['actions'])
                        paths.append(dict(observations=obs, actions=actions, rewards=np.asarray(running_paths[idx]['rewards']), env_infos=tensor_utils.stack_tensor_dict_list(running_paths[idx]['env_infos']), agent_infos=tensor_utils.stack_tensor_dict_list(running_paths[idx]['agent_infos']), dones=np.asarray(running_paths[idx]['dones'])))
                        n_samples += len(running_paths[idx]['rewards'])
                        running_paths[idx] = None
                process_time += time.time() - t
                pbar.update(len(obses))
                obses = next_obses
        tabular.record('PolicyExecTime', policy_time)
        tabular.record('EnvExecTime', env_time)
        tabular.record('ProcessExecTime', process_time)
        return paths if whole_paths else truncate_paths(paths, batch_size)
# NOTE(review): stray module-level self-test invocation -- this runs at import
# time. `_utils` is defined elsewhere; confirm the side effect is intentional.
_utils.test()
def test_return_type_mismatch_2():
    # Expect the Taichi frontend to reject returning a vec3 where the
    # annotation promises a vec4.
    # NOTE(review): `foo` carries no @ti.kernel/@ti.func decorator here, so a
    # plain Python call would not raise TaichiCompilationError -- the
    # decorator was likely lost in extraction; confirm against the original.
    with pytest.raises(ti.TaichiCompilationError):
        def foo() -> ti.math.vec4:
            return ti.math.vec3([1, 2, 3])
        foo()
def test_multiannotator_events():
    """Events from three annotators should convert into a valid JAMS object."""
    first = annotations.Events(np.array([[0.2, 0.3], [0.3, 0.4]]), 'seconds', ['event A', 'event B'], 'open', np.array([1.0, 1.0]))
    second = annotations.Events(np.array([[0.2, 0.3], [0.3, 0.4]]), 'seconds', ['', 'a great label'], 'open', np.array([0.0, 1.0]))
    third = annotations.Events(np.array([[0.2, 0.3], [0.3, 20.0]]), 'seconds', ['', 'a great label'], 'open', np.array([0.0, 1.0]))
    multi = annotations.MultiAnnotator(['01', '02', '03'], [first, second, third])
    jam = jams_utils.jams_converter(events=multi, metadata={'duration': 10.0})
    assert jam.validate()
# NOTE(review): the bare '.node' below looks like a decorator truncated during
# extraction (most likely '@dace.library.node'); restore it before use.
.node
class Dot(dace.sdfg.nodes.LibraryNode):
    # Library node computing the dot product of two 1-D containers.
    # Maps implementation names to their expansion transformations.
    implementations = {'pure': ExpandDotPure, 'OpenBLAS': ExpandDotOpenBLAS, 'MKL': ExpandDotMKL, 'cuBLAS': ExpandDotCuBLAS, 'FPGA_PartialSums': ExpandDotFpgaPartialSums, 'FPGA_Accumulate': ExpandDotFpgaAccumulate}
    default_implementation = None
    # Symbolic vector length (optional).
    n = dace.properties.SymbolicProperty(allow_none=True, default=None)
    accumulator_type = dace.properties.TypeClassProperty(default=None, choices=dtypes.Typeclasses, allow_none=True, desc='Accumulator or intermediate storage type')
    def __init__(self, name, n=None, accumulator_type=None, **kwargs):
        """Create a Dot node with input connectors '_x', '_y' and output '_result'."""
        super().__init__(name, inputs={'_x', '_y'}, outputs={'_result'}, **kwargs)
        self.n = n
        self.accumulator_type = accumulator_type
    def validate(self, sdfg, state):
        """Check connectivity, dtypes and shapes of the dot-product node.

        Returns ``((desc_x, stride_x), (desc_y, stride_y), desc_res, n)`` for
        use by the expansions; raises ValueError/TypeError on invalid graphs.
        """
        in_edges = state.in_edges(self)
        if (len(in_edges) != 2):
            raise ValueError('Expected exactly two inputs to dot product')
        out_edges = state.out_edges(self)
        if (len(out_edges) != 1):
            raise ValueError('Expected exactly one output from dot product')
        out_memlet = out_edges[0].data
        (desc_x, desc_y, desc_res) = (None, None, None)
        in_memlets = [None, None]
        # Resolve the array descriptor and memlet feeding each connector.
        for e in state.in_edges(self):
            if (e.dst_conn == '_x'):
                desc_x = sdfg.arrays[e.data.data]
                in_memlets[0] = e.data
            elif (e.dst_conn == '_y'):
                desc_y = sdfg.arrays[e.data.data]
                in_memlets[1] = e.data
        for e in state.out_edges(self):
            if (e.src_conn == '_result'):
                desc_res = sdfg.arrays[e.data.data]
        if (desc_x.dtype != desc_y.dtype):
            raise TypeError(f'Data types of input operands must be equal: {desc_x.dtype}, {desc_y.dtype}')
        if (desc_x.dtype.base_type != desc_res.dtype.base_type):
            raise TypeError(f'Data types of input and output must be equal: {desc_x.dtype}, {desc_res.dtype}')
        # Squeeze away size-1 dimensions; exactly one true dimension may remain.
        squeezed1 = copy.deepcopy(in_memlets[0].subset)
        squeezed2 = copy.deepcopy(in_memlets[1].subset)
        sqdims1 = squeezed1.squeeze()
        sqdims2 = squeezed2.squeeze()
        if ((len(squeezed1.size()) != 1) or (len(squeezed2.size()) != 1)):
            raise ValueError('dot product only supported on 1-dimensional arrays')
        if (out_memlet.subset.num_elements() != 1):
            raise ValueError('Output of dot product must be a single element')
        # Stride along the single remaining (non-squeezed) dimension.
        stride_x = desc_x.strides[sqdims1[0]]
        stride_y = desc_y.strides[sqdims2[0]]
        n = squeezed1.num_elements()
        if (squeezed1.num_elements() != squeezed2.num_elements()):
            raise ValueError('Size mismatch in inputs')
        return ((desc_x, stride_x), (desc_y, stride_y), desc_res, n)
class DeclarationWriter(TreeVisitor):
    """Serialize a Cython declaration tree back into `.pxd`-style source text.

    Walks the parse tree and writes declarations (cdef/ctypedef, structs,
    classes, enums, imports, simple expressions) into a LinesResult, tracking
    the current indentation level.
    """
    indent_string = u'    '

    def __init__(self, result=None):
        super(DeclarationWriter, self).__init__()
        if (result is None):
            result = LinesResult()
        self.result = result
        self.numindents = 0
        self.tempnames = {}
        self.tempblockindex = 0

    def write(self, tree):
        """Serialize ``tree`` and return the accumulated LinesResult."""
        self.visit(tree)
        return self.result

    # --- low-level emission helpers ---------------------------------------

    def indent(self):
        self.numindents += 1

    def dedent(self):
        self.numindents -= 1

    def startline(self, s=u''):
        # Begin a line at the current indentation without terminating it.
        self.result.put((self.indent_string * self.numindents) + s)

    def put(self, s):
        self.result.put(s)

    def putline(self, s):
        # Emit a complete line at the current indentation.
        self.result.putline((self.indent_string * self.numindents) + s)

    def endline(self, s=u''):
        self.result.putline(s)

    def line(self, s):
        self.startline(s)
        self.endline()

    def comma_separated_list(self, items, output_rhs=False):
        # Emit `a, b, c`, optionally with ` = default` after each item.
        # NOTE(review): the default of the *last* item is never emitted --
        # looks suspicious, but preserved as-is; confirm upstream behavior.
        if (len(items) > 0):
            for item in items[:(- 1)]:
                self.visit(item)
                if (output_rhs and (item.default is not None)):
                    self.put(u' = ')
                    self.visit(item.default)
                self.put(u', ')
            self.visit(items[(- 1)])

    # --- node visitors ------------------------------------------------------

    def visit_Node(self, node):
        # Fallback: any node without an explicit handler is a serializer bug.
        raise AssertionError('Node not handled by serializer: %r' % node)

    def visit_ModuleNode(self, node):
        self.visitchildren(node)

    def visit_StatListNode(self, node):
        self.visitchildren(node)

    def visit_CDefExternNode(self, node):
        if (node.include_file is None):
            file = u'*'
        else:
            file = (u'"%s"' % node.include_file)
        self.putline(u'cdef extern from %s:' % file)
        self.indent()
        self.visit(node.body)
        self.dedent()

    def visit_CPtrDeclaratorNode(self, node):
        self.put('*')
        self.visit(node.base)

    def visit_CReferenceDeclaratorNode(self, node):
        self.put('&')
        self.visit(node.base)

    # BUG FIX: this method was defined twice with identical bodies; the
    # second definition silently shadowed the first and has been removed.
    def visit_CArrayDeclaratorNode(self, node):
        self.visit(node.base)
        self.put(u'[')
        if (node.dimension is not None):
            self.visit(node.dimension)
        self.put(u']')

    def visit_CFuncDeclaratorNode(self, node):
        self.visit(node.base)
        self.put(u'(')
        self.comma_separated_list(node.args)
        self.endline(u')')

    def visit_CNameDeclaratorNode(self, node):
        self.put(node.name)

    def visit_CSimpleBaseTypeNode(self, node):
        # `node.signed` indexes: 0 -> 'unsigned ', 1 -> '', 2 -> 'signed '.
        if node.is_basic_c_type:
            self.put(('unsigned ', '', 'signed ')[node.signed])
            if (node.longness < 0):
                self.put('short ' * (- node.longness))
            elif (node.longness > 0):
                self.put('long ' * node.longness)
        self.put(node.name)

    def visit_CComplexBaseTypeNode(self, node):
        self.put(u'(')
        self.visit(node.base_type)
        self.visit(node.declarator)
        self.put(u')')

    def visit_CNestedBaseTypeNode(self, node):
        self.visit(node.base_type)
        self.put(u'.')
        self.put(node.name)

    def visit_TemplatedTypeNode(self, node):
        self.visit(node.base_type_node)
        self.put(u'[')
        self.comma_separated_list(node.positional_args + node.keyword_args.key_value_pairs)
        self.put(u']')

    def visit_CVarDefNode(self, node):
        self.startline(u'cdef ')
        self.visit(node.base_type)
        self.put(u' ')
        self.comma_separated_list(node.declarators, output_rhs=True)
        self.endline()

    def visit_container_node(self, node, decl, extras, attributes):
        # Shared emitter for struct/union/cppclass/enum containers.
        self.startline(decl)
        if node.name:
            self.put(u' ')
            self.put(node.name)
            if (node.cname is not None):
                self.put(u' "%s"' % node.cname)
        if extras:
            self.put(extras)
        self.endline(':')
        self.indent()
        if (not attributes):
            # An empty body still needs a statement.
            self.putline('pass')
        else:
            for attribute in attributes:
                self.visit(attribute)
        self.dedent()

    def visit_CStructOrUnionDefNode(self, node):
        if node.typedef_flag:
            decl = u'ctypedef '
        else:
            decl = u'cdef '
        if (node.visibility == 'public'):
            decl += u'public '
        if node.packed:
            decl += u'packed '
        decl += node.kind
        self.visit_container_node(node, decl, None, node.attributes)

    def visit_CppClassNode(self, node):
        extras = ''
        if node.templates:
            extras = (u'[%s]' % ', '.join(node.templates))
        if node.base_classes:
            extras += ('(%s)' % ', '.join(node.base_classes))
        self.visit_container_node(node, u'cdef cppclass', extras, node.attributes)

    def visit_CEnumDefNode(self, node):
        self.visit_container_node(node, u'cdef enum', None, node.items)

    def visit_CEnumDefItemNode(self, node):
        self.startline(node.name)
        if node.cname:
            self.put(u' "%s"' % node.cname)
        if node.value:
            self.put(u' = ')
            self.visit(node.value)
        self.endline()

    def visit_CClassDefNode(self, node):
        assert (not node.module_name)
        if node.decorators:
            for decorator in node.decorators:
                self.visit(decorator)
        self.startline(u'cdef class ')
        self.put(node.class_name)
        if node.base_class_name:
            self.put(u'(')
            if node.base_class_module:
                self.put(node.base_class_module)
                self.put(u'.')
            self.put(node.base_class_name)
            self.put(u')')
        self.endline(u':')
        self.indent()
        self.visit(node.body)
        self.dedent()

    def visit_CTypeDefNode(self, node):
        self.startline(u'ctypedef ')
        self.visit(node.base_type)
        self.put(u' ')
        self.visit(node.declarator)
        self.endline()

    def visit_FuncDefNode(self, node):
        self.startline(u'def %s(' % node.name)
        self.comma_separated_list(node.args)
        self.endline(u'):')
        self.indent()
        self.visit(node.body)
        self.dedent()

    def visit_CArgDeclNode(self, node):
        if (node.base_type.name is not None):
            self.visit(node.base_type)
            self.put(u' ')
        self.visit(node.declarator)
        if (node.default is not None):
            self.put(u' = ')
            self.visit(node.default)

    def visit_CImportStatNode(self, node):
        self.startline(u'cimport ')
        self.put(node.module_name)
        if node.as_name:
            self.put(u' as ')
            self.put(node.as_name)
        self.endline()

    def visit_FromCImportStatNode(self, node):
        self.startline(u'from ')
        self.put(node.module_name)
        self.put(u' cimport ')
        first = True
        for (pos, name, as_name, kind) in node.imported_names:
            assert (kind is None)
            if first:
                first = False
            else:
                self.put(u', ')
            self.put(name)
            if as_name:
                self.put(u' as ')
                self.put(as_name)
        self.endline()

    def visit_NameNode(self, node):
        self.put(node.name)

    def visit_IntNode(self, node):
        self.put(node.value)

    def visit_NoneNode(self, node):
        self.put(u'None')

    def visit_NotNode(self, node):
        self.put(u'(not ')
        self.visit(node.operand)
        self.put(u')')

    def visit_DecoratorNode(self, node):
        self.startline('@')
        self.visit(node.decorator)
        self.endline()

    def visit_BinopNode(self, node):
        self.visit(node.operand1)
        self.put(u' %s ' % node.operator)
        self.visit(node.operand2)

    def visit_AttributeNode(self, node):
        self.visit(node.obj)
        self.put(u'.%s' % node.attribute)

    def visit_BoolNode(self, node):
        self.put(str(node.value))

    def visit_StringNode(self, node):
        value = node.value
        if (value.encoding is not None):
            value = value.encode(value.encoding)
        self.put(repr(value))

    def visit_PassStatNode(self, node):
        self.startline(u'pass')
        self.endline()
class PandasPredictionCallback(BasePredictionCallback[PandasDataFrame]):
    """Prediction callback that materializes results as a pandas DataFrame."""

    def _ids_to_result(self, query_ids: torch.Tensor, item_ids: torch.Tensor, item_scores: torch.Tensor) -> PandasDataFrame:
        frame = PandasDataFrame({
            self.query_column: query_ids.flatten().cpu().numpy(),
            self.item_column: list(item_ids.cpu().numpy()),
            self.rating_column: list(item_scores.cpu().numpy()),
        })
        # Expand the per-query lists so each row is one (query, item, score).
        return frame.explode([self.item_column, self.rating_column])
def define(approx_order=1):
    """Sfepy problem description: linear elasticity on a 3-D block mesh.

    Returns the local namespace as the problem-definition dictionary, as
    sfepy's declarative API expects.
    """
    from sfepy import data_dir
    filename_mesh = (data_dir + '/meshes/3d/block.mesh')
    options = {'nls': 'newton', 'ls': 'ls', 'post_process_hook': 'verify_tractions'}
    functions = {'linear_tension': (linear_tension,)}
    fields = {'displacement': ('real', 3, 'Omega', approx_order)}
    # Isotropic solid plus a traction load defined by the function above.
    materials = {'solid': ({'D': stiffness_from_lame(3, lam=5.769, mu=3.846)},), 'load': (None, 'linear_tension')}
    variables = {'u': ('unknown field', 'displacement', 0), 'v': ('test field', 'displacement', 'u')}
    regions = {'Omega': 'all', 'Left': ('vertices in (x < -4.99)', 'facet'), 'Middle': ('vertices in (x > -1e-10) & (x < 1e-10)', 'facet', 'Rhalf'), 'Rhalf': 'vertices in x > -1e-10', 'Right': ('vertices in (x > 4.99)', 'facet')}
    # Fix the left face fully; fix only components 1,2 on the right face.
    ebcs = {'fixb': ('Left', {'u.all': 0.0}), 'fixt': ('Right', {'u.[1,2]': 0.0})}
    # Quadrature order scales with the approximation order.
    integrals = {'i': (2 * approx_order)}
    equations = {'elasticity': 'dw_lin_elastic.i.Omega( solid.D, v, u )\n = - dw_surface_ltr.i.Right( load.val, v )'}
    solvers = {'ls': ('ls.auto_direct', {}), 'newton': ('nls.newton', {'i_max': 1, 'eps_a': 1e-10, 'eps_r': 1.0, 'macheps': 1e-16, 'lin_red': 0.01, 'ls_red': 0.1, 'ls_red_warp': 0.001, 'ls_on': 1.1, 'ls_min': 1e-05, 'check': 0, 'delta': 1e-06})}
    return locals()
def apply_activation(W, funcs, n_double=0):
    """Apply per-output activation functions to the symbolic matrix ``W``.

    ``funcs`` holds one callable per output column.  The last ``n_double``
    callables are binary: each consumes two adjacent input columns, so the
    result has ``n_double`` fewer columns than ``W``.
    """
    W = sym.Matrix(W)
    if n_double == 0:
        # Simple case: every activation is unary, applied element-wise.
        for r in range(W.shape[0]):
            for c in range(W.shape[1]):
                W[r, c] = funcs[c](W[r, c])
        return W
    W_new = W.copy()
    out_size = len(funcs)
    n_single = out_size - n_double
    for r in range(W.shape[0]):
        # Unary activations: one input column per output column.
        for out_c in range(n_single):
            W_new[r, out_c] = funcs[out_c](W[r, out_c])
        # Binary activations: two adjacent input columns per output column.
        in_c = n_single
        for out_c in range(n_single, out_size):
            W_new[r, out_c] = funcs[out_c](W[r, in_c], W[r, in_c + 1])
            in_c += 2
    # The binary activations consumed extra input columns; trim the surplus.
    for _ in range(n_double):
        W_new.col_del(-1)
    return W_new
def _compute_variance(N, cur_data, expected_max_cond_n, pdfs):
variance_of_max_cond_n = []
for n in range(N):
cur_var = 0
for i in range(N):
cur_var += (((cur_data[i] - expected_max_cond_n[n]) ** 2) * pdfs[n][i])
cur_var = np.sqrt(cur_var)
variance_of_max_cond_n.append(cur_var)
return variance_of_max_cond_n |
class Data():
    """Lazily-constructed masked-CelebA data source.

    ``args.mask`` selects the masking scheme ('indep' or 'block'); the
    dataset and DataLoader are built on first access via :meth:`get_data`.
    """

    def __init__(self, args, batch_size):
        self.args = args
        self.batch_size = batch_size
        self.data_loader = None

    def gen_data(self):
        """Build the masked dataset and its DataLoader from ``self.args``."""
        args = self.args
        if args.mask == 'indep':
            data = IndepMaskedCelebA(obs_prob=args.obs_prob)
        elif args.mask == 'block':
            data = BlockMaskedCelebA(block_len=args.block_len)
        else:
            # BUG FIX: an unrecognized mask previously fell through and
            # crashed later with an obscure NameError on `data`; fail fast
            # with an explicit message instead.
            raise ValueError('unknown mask type: {!r} (expected indep or block)'.format(args.mask))
        self.data_size = len(data)
        self.data_loader = DataLoader(data, batch_size=self.batch_size)

    def get_data(self):
        """Return ``(data_loader, data_size)``, generating them on first use."""
        if self.data_loader is None:
            self.gen_data()
        return (self.data_loader, self.data_size)
def inception_v4_base(input):
    """Inception-v4 feature extractor: stem, 4xA blocks, reduction-A,
    7xB blocks, reduction-B, 3xC blocks.

    Written against the Keras 1.x API (merge, border_mode, subsample).
    """
    # Channel axis depends on the backend's image dim ordering.
    if (K.image_dim_ordering() == 'th'):
        channel_axis = 1
    else:
        channel_axis = (- 1)
    # Stem: initial strided/valid convolutions.
    net = conv2d_bn(input, 32, 3, 3, subsample=(2, 2), border_mode='valid')
    net = conv2d_bn(net, 32, 3, 3, border_mode='valid')
    net = conv2d_bn(net, 64, 3, 3)
    branch_0 = MaxPooling2D((3, 3), strides=(2, 2), border_mode='valid')(net)
    branch_1 = conv2d_bn(net, 96, 3, 3, subsample=(2, 2), border_mode='valid')
    net = merge([branch_0, branch_1], mode='concat', concat_axis=channel_axis)
    branch_0 = conv2d_bn(net, 64, 1, 1)
    branch_0 = conv2d_bn(branch_0, 96, 3, 3, border_mode='valid')
    branch_1 = conv2d_bn(net, 64, 1, 1)
    # Factorized 1x7 then 7x1 convolutions.
    branch_1 = conv2d_bn(branch_1, 64, 1, 7)
    branch_1 = conv2d_bn(branch_1, 64, 7, 1)
    branch_1 = conv2d_bn(branch_1, 96, 3, 3, border_mode='valid')
    net = merge([branch_0, branch_1], mode='concat', concat_axis=channel_axis)
    branch_0 = conv2d_bn(net, 192, 3, 3, subsample=(2, 2), border_mode='valid')
    branch_1 = MaxPooling2D((3, 3), strides=(2, 2), border_mode='valid')(net)
    net = merge([branch_0, branch_1], mode='concat', concat_axis=channel_axis)
    # BUG FIX: `xrange` is Python 2-only; use `range` so this runs on Python 3.
    for idx in range(4):
        net = block_inception_a(net)
    net = block_reduction_a(net)
    for idx in range(7):
        net = block_inception_b(net)
    net = block_reduction_b(net)
    for idx in range(3):
        net = block_inception_c(net)
    return net
def savefig(fname, dpi=None):
    """Save the current matplotlib figure to ``fname`` (defaults to 150 dpi)."""
    # BUG FIX: compare against None with `is`, not `==` -- identity is the
    # correct idiom and avoids surprises from objects overriding __eq__.
    dpi = 150 if dpi is None else dpi
    plt.savefig(fname, dpi=dpi)
# NOTE(review): the leading '.parametrize(...)' is a decorator whose
# '@pytest.mark' prefix appears to have been lost in extraction; restore it
# before running.
.parametrize('generator,expected_result', [(ag.AssertionGenerator, "str_0 = 'foo bar'\nfloat_0 = 39.82\nhuman_0 = module_0.Human(str_0, float_0)\nassert f'{type(human_0).__module__}.{type(human_0).__qualname__}' == 'tests.fixtures.examples.assertions.Human'\nassert module_0.static_state == 0\nstr_1 = human_0.get_name()\nassert str_1 == 'foo bar'"), (ag.MutationAnalysisAssertionGenerator, "str_0 = 'foo bar'\nfloat_0 = 39.82\nhuman_0 = module_0.Human(str_0, float_0)\nassert module_0.static_state == 0\nstr_1 = human_0.get_name()")])
def test_generate_mutation_assertions(generator, expected_result):
    # End-to-end: import the fixture module under tracing, rebuild a test
    # case from source, run the given assertion generator over it, and
    # compare the regenerated source text against the expectation.
    config.configuration.module_name = 'tests.fixtures.examples.assertions'
    module_name = config.configuration.module_name
    tracer = ExecutionTracer()
    tracer.current_thread_identifier = threading.current_thread().ident
    with install_import_hook(module_name, tracer):
        importlib.reload(importlib.import_module(module_name))
        cluster = generate_test_cluster(module_name)
        transformer = AstToTestCaseTransformer(cluster, False, EmptyConstantProvider())
        transformer.visit(ast.parse("def test_case_0():\n str_0 = 'foo bar'\n float_0 = 39.82\n human_0 = module_0.Human(str_0, float_0)\n str_1 = human_0.get_name()\n "))
        test_case = transformer.testcases[0]
        chromosome = tcc.TestCaseChromosome(test_case)
        suite = tsc.TestSuiteChromosome()
        suite.add_test_case_chromosome(chromosome)
        gen = generator(TestCaseExecutor(tracer))
        suite.accept(gen)
        visitor = tc_to_ast.TestCaseToAstVisitor(ns.NamingScope(prefix='module'), set())
        test_case.accept(visitor)
        source = ast.unparse(ast.fix_missing_locations(ast.Module(body=visitor.test_case_ast, type_ignores=[])))
        assert (source == expected_result)
def got4(all_potential_countries) -> operations.GraphOfOperations:
    """Build the Graph-of-Thoughts pipeline for the keyword-counting task.

    Splits the text into four paragraphs, counts and scores each
    independently, then pairwise-aggregates the best candidates until a
    single result remains, finishing with a ground-truth check.
    """
    operations_graph = operations.GraphOfOperations()
    sub_texts = operations.Generate(1, 1)
    operations_graph.append_operation(sub_texts)
    sub_paragraphs = []
    for i in range(1, 5):
        paragraph_id = f'Paragraph {i}'
        # `list_id=paragraph_id` binds the current loop value eagerly,
        # avoiding Python's late-binding-closure pitfall.
        sub_text = operations.Selector((lambda thoughts, list_id=paragraph_id: [thought for thought in thoughts if (thought.state['part'] == list_id)]))
        sub_text.add_predecessor(sub_texts)
        operations_graph.add_operation(sub_text)
        count_sub_text = operations.Generate(1, 10)
        count_sub_text.add_predecessor(sub_text)
        operations_graph.add_operation(count_sub_text)
        score_sub_text = operations.Score(1, False, partial(num_errors, all_potential_countries))
        score_sub_text.add_predecessor(count_sub_text)
        operations_graph.add_operation(score_sub_text)
        keep_best_sub_text = operations.KeepBestN(1, False)
        keep_best_sub_text.add_predecessor(score_sub_text)
        operations_graph.add_operation(keep_best_sub_text)
        sub_paragraphs.append(keep_best_sub_text)
    # Tournament-style reduction: merge results two at a time until one is
    # left.  NOTE(review): assumes len(sub_paragraphs) stays even (it starts
    # at 4); an odd count would raise IndexError at sub_paragraphs[i + 1].
    while (len(sub_paragraphs) > 1):
        new_sub_paragraphs = []
        for i in range(0, len(sub_paragraphs), 2):
            aggregate = operations.Aggregate(3)
            aggregate.add_predecessor(sub_paragraphs[i])
            aggregate.add_predecessor(sub_paragraphs[(i + 1)])
            operations_graph.add_operation(aggregate)
            val_im_aggregate = operations.ValidateAndImprove(1, True, 3, valid_aggregation)
            val_im_aggregate.add_predecessor(aggregate)
            operations_graph.add_operation(val_im_aggregate)
            score_aggregate = operations.Score(1, False, partial(num_errors, all_potential_countries))
            score_aggregate.add_predecessor(val_im_aggregate)
            operations_graph.add_operation(score_aggregate)
            keep_best_aggregate = operations.KeepBestN(1, False)
            keep_best_aggregate.add_predecessor(score_aggregate)
            operations_graph.add_operation(keep_best_aggregate)
            new_sub_paragraphs.append(keep_best_aggregate)
        sub_paragraphs = new_sub_paragraphs
    operations_graph.append_operation(operations.GroundTruth(test_keyword_counting))
    return operations_graph
# NOTE(review): the leading '.parametrize(...)' is a decorator missing its
# '@pytest.mark' prefix (likely lost in extraction); restore before running.
.parametrize('version, details', (('3.0.2', "The provided definition doesn't match any of the expected formats or types."), ('3.1.0', "'type' is a required property")))
def test_invalid_schema_with_disabled_validation(testdir, cli, openapi_3_schema_with_invalid_security, version, details, snapshot_cli):
    # Bump the schema's declared OpenAPI version, write it to disk, and check
    # the CLI dry run against the stored snapshot.
    openapi_3_schema_with_invalid_security['openapi'] = version
    schema_file = testdir.make_openapi_schema_file(openapi_3_schema_with_invalid_security)
    assert (cli.run(str(schema_file), '--dry-run', '--experimental=openapi-3.1') == snapshot_cli)
class BasicBlock(nn.Module):
    """ResNet-style basic block: two 3x3 convs with non-affine BatchNorm and
    an identity (or 1x1 projection) shortcut."""
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes, affine=False)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes, affine=False)
        # Identity shortcut unless the shape changes; then a 1x1 projection.
        self.shortcut = nn.Sequential()
        out_planes = self.expansion * planes
        if stride != 1 or in_planes != out_planes:
            self.shortcut = nn.Sequential(nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(out_planes, affine=False))

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # Residual connection before the final activation.
        out = out + self.shortcut(x)
        return F.relu(out)
def __loc_alias(anaphor_cleaned_tokens, antecedent_cleaned_tokens):
    """Truthy when the anaphor starts the antecedent or abbreviates it."""
    prefix_match = __starts_with(anaphor_cleaned_tokens, antecedent_cleaned_tokens)
    if prefix_match:
        # Return the truthy value itself, exactly as `a or b` would.
        return prefix_match
    return __is_abbreviation(anaphor_cleaned_tokens, antecedent_cleaned_tokens)
class Settings():
    """Reader for version-2 renderer settings files (JSON).

    Provides typed access to the dataset, camera and transfer-function
    sections consumed by the pyrenderer tooling.
    """

    def __init__(self, file):
        self._filepath = os.path.split(file)[0]
        with open(file) as fb:
            self._data = json.load(fb)
        if (self._data['version'] != 2):
            # BUG FIX: the version field is typically an int (it is compared
            # to 2 above); concatenating it to a str raised TypeError and
            # masked the real error.  Convert explicitly.
            raise Exception('incorrect file version, expected 2 but got ' + str(self._data['version']))

    def is_synthetic_dataset(self):
        """True when the settings reference a built-in implicit dataset."""
        return (self._data['dataset']['syntheticDataset'] >= 0)

    def get_synthetic_dataset_type(self) -> pyrenderer.ImplicitEquation:
        """The implicit-equation enum value of the synthetic dataset."""
        assert self.is_synthetic_dataset()
        return pyrenderer.ImplicitEquation(int(self._data['dataset']['syntheticDataset']))

    def get_dataset_name(self):
        """Display name: the equation name, or the volume file's basename."""
        if self.is_synthetic_dataset():
            t = self.get_synthetic_dataset_type()
            return t.name
        else:
            path = self._data['dataset']['file']
            return os.path.split(path)[(- 1)]

    def load_dataset(self, resolution: Optional[int]=None) -> pyrenderer.Volume:
        """Load the configured volume, either implicit or from file."""
        if self.is_synthetic_dataset():
            if (resolution is None):
                resolution = (1 << int(self._data['dataset']['syntheticDatasetResolutionPower']))
            t = self.get_synthetic_dataset_type()
            return pyrenderer.Volume.create_implicit(t, resolution)
        else:
            path = self._data['dataset']['file'].replace('\\', '/')
            if (not os.path.isabs(path)):
                # Relative paths are resolved against the settings file.
                path = os.path.abspath(os.path.join(self._filepath, path))
                print('convert relative path to absolute path, load', path)
            return pyrenderer.Volume(path)

    class CameraConfig(NamedTuple):
        # Camera pose parameters; all angles in radians.
        pitch_radians: float
        yaw_radians: float
        fov_y_radians: float
        center: np.ndarray
        distance: float
        orientation: pyrenderer.Orientation

    def get_camera(self) -> CameraConfig:
        """Assemble a CameraConfig from the 'camera' section."""
        c = self._data['camera']
        # distance = zoomSpeed ** zoomValue (exponential zoom model).
        return Settings.CameraConfig(np.deg2rad(c['currentPitch']), np.deg2rad(c['currentYaw']), np.deg2rad(c['fov']), np.array(c['lookAt']), (1.0 * (c['zoomSpeed'] ** c['zoomValue'])), pyrenderer.Orientation(int(c['orientation'])))

    def get_tf_points(self, min_density: Optional[float]=None, max_density: Optional[float]=None, opacity_scaling: Optional[float]=None, purge_zero_regions: bool=True):
        """Build linear-editor TF control points.

        Any of the density/opacity arguments left as None falls back to the
        value stored in the settings file.
        """
        if (min_density is None):
            min_density = self._data['tfEditor']['minDensity']
        if (max_density is None):
            max_density = self._data['tfEditor']['maxDensity']
        if (opacity_scaling is None):
            opacity_scaling = self._data['tfEditor']['opacityScaling']
        # The section may be stored under 'editor' or 'editorLinear'.
        key = ('editor' if ('editor' in self._data['tfEditor']) else 'editorLinear')
        g = self._data['tfEditor'][key]
        return pyrenderer.TFUtils.assemble_from_settings([pyrenderer.real3(v[0], v[1], v[2]) for v in g['colorAxis']], g['densityAxisColor'], g['opacityAxis'], g['densityAxisOpacity'], min_density, max_density, opacity_scaling, purge_zero_regions)

    def get_tf_points_texture(self, min_density: Optional[float]=None, max_density: Optional[float]=None, opacity_scaling: Optional[float]=None):
        """Build texture-editor TF points; opacities are spread evenly over [0, 1]."""
        if (min_density is None):
            min_density = self._data['tfEditor']['minDensity']
        if (max_density is None):
            max_density = self._data['tfEditor']['maxDensity']
        if (opacity_scaling is None):
            opacity_scaling = self._data['tfEditor']['opacityScaling']
        g = self._data['tfEditor']['editorTexture']
        colorAxis = [pyrenderer.real3(v[0], v[1], v[2]) for v in g['colorAxis']]
        densityAxisColor = g['densityAxisColor']
        opacityAxis = g['opacities']
        # The texture editor stores no density axis; place opacity samples
        # uniformly across the [0, 1] range.
        densityAxisOpacity = list(np.linspace(0, 1, len(opacityAxis), endpoint=True))
        return pyrenderer.TFUtils.assemble_from_settings(colorAxis, densityAxisColor, opacityAxis, densityAxisOpacity, min_density, max_density, opacity_scaling, False)

    def get_gaussian_tensor(self, min_density: Optional[float]=None, max_density: Optional[float]=None, opacity_scaling: Optional[float]=None):
        """Return the Gaussian-editor TF as a (1, R, 6) torch tensor.

        Per row: r, g, b, opacity*opacity_scaling, mean and sigma rescaled
        from [0, 1] into the [min_density, max_density] range.
        """
        if (min_density is None):
            min_density = self._data['tfEditor']['minDensity']
        if (max_density is None):
            max_density = self._data['tfEditor']['maxDensity']
        if (opacity_scaling is None):
            opacity_scaling = self._data['tfEditor']['opacityScaling']
        g = self._data['tfEditor']['editorGaussian']
        R = len(g)
        tf = np.empty((1, R, 6), dtype=renderer_dtype_np)
        for r in range(R):
            tf[0][r][0] = g[r][0]
            tf[0][r][1] = g[r][1]
            tf[0][r][2] = g[r][2]
            tf[0][r][3] = (g[r][3] * opacity_scaling)
            tf[0][r][4] = (min_density + (g[r][4] * (max_density - min_density)))
            tf[0][r][5] = (g[r][5] * (max_density - min_density))
        return torch.from_numpy(tf)

    def get_stepsize(self):
        """Raymarching step size from the 'renderer' section."""
        return self._data['renderer']['stepsize']
class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    """Speech2Text feature extractor.

    Extracts Kaldi-compatible log-mel filter-bank features via torchaudio and
    optionally applies per-utterance cepstral mean/variance normalization.
    """

    model_input_names = ['input_features', 'attention_mask']

    def __init__(self, feature_size=80, sampling_rate=16000, num_mel_bins=80, padding_value=0.0, do_ceptral_normalize=True, normalize_means=True, normalize_vars=True, **kwargs):
        if not is_torchaudio_available():
            raise ImportError('`Speech2TextFeatureExtractor` requires torchaudio: `pip install torchaudio`.')
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        # Padded frames must be maskable downstream.
        self.return_attention_mask = True

    def _extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        """Compute log-mel filter-bank features for a single waveform."""
        # Kaldi fbank expects 16-bit signed-integer scale, not [-1, 1] floats.
        waveform = waveform * (2 ** 15)
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def utterance_cmvn(x: np.ndarray, normalize_means: Optional[bool]=True, normalize_vars: Optional[bool]=True) -> np.ndarray:
        """Per-utterance cepstral mean/variance normalization (CMVN).

        Fix: declared ``@staticmethod`` — the function takes no ``self`` but is
        invoked as ``self.utterance_cmvn(...)`` in ``normalize``; without the
        decorator the instance is bound as ``x`` and the call raises TypeError.
        """
        mean = x.mean(axis=0)
        square_sums = (x ** 2).sum(axis=0)
        if normalize_means:
            x = np.subtract(x, mean)
        if normalize_vars:
            # Variance from pre-centering sums; floor avoids division by zero.
            var = (square_sums / x.shape[0]) - (mean ** 2)
            std = np.sqrt(np.maximum(var, 1e-10))
            x = np.divide(x, std)
        return x

    def normalize(self, input_values: List[np.ndarray]) -> List[np.ndarray]:
        """Apply utterance-level CMVN to every feature matrix."""
        return [self.utterance_cmvn(x, self.normalize_means, self.normalize_vars) for x in input_values]

    def __call__(self, raw_speech: Union[(np.ndarray, List[float], List[np.ndarray], List[List[float]])], padding: Union[(bool, str, PaddingStrategy)]=False, max_length: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[(str, TensorType)]]=None, sampling_rate: Optional[int]=None, return_attention_mask: Optional[bool]=None, **kwargs) -> BatchFeature:
        """Featurize (and pad) one waveform or a batch of waveforms."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                # Fix: added the missing space between the two sentences.
                raise ValueError(f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with {self.sampling_rate} and not {sampling_rate}.')
        else:
            # Fix: added the missing space between the two sentences.
            logger.warning('It is strongly recommended to pass the `sampling_rate` argument to this function. Failing to do so can result in silent errors that might be hard to debug.')
        is_batched = bool(isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], np.ndarray) or isinstance(raw_speech[0], (tuple, list))))
        # Normalize the input to a list of numpy arrays.
        if is_batched and (not isinstance(raw_speech[0], np.ndarray)):
            raw_speech = [np.asarray(speech) for speech in raw_speech]
        elif (not is_batched) and (not isinstance(raw_speech, np.ndarray)):
            raw_speech = np.asarray(raw_speech)
        if not is_batched:
            raw_speech = [raw_speech]
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]
        if self.do_ceptral_normalize:
            features = self.normalize(features)
        encoded_inputs = BatchFeature({'input_features': features})
        padded_inputs = self.pad(encoded_inputs, padding=padding, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_tensors=return_tensors, **kwargs)
        return padded_inputs
def richardson_lucy(image, psf, num_iter=50, clip=True, filter_epsilon=None):
    """Richardson-Lucy deconvolution of `image` with point-spread function `psf`.

    Starts from a flat 0.5 estimate and iterates the multiplicative update
    `num_iter` times; when `clip` is true the result is clipped to [-1, 1].
    `filter_epsilon` (if set) zeroes the ratio wherever the blurred estimate
    falls below it, suppressing noise amplification.
    """
    dtype = _supported_float_type(image.dtype)
    image = image.astype(dtype, copy=False)
    psf = psf.astype(dtype, copy=False)
    estimate = np.full(image.shape, 0.5, dtype=dtype)
    psf_flipped = np.flip(psf)
    regularization = 1e-12  # keeps the division below well-defined
    for _ in range(num_iter):
        blurred = convolve(estimate, psf, mode='same') + regularization
        ratio = image / blurred
        if filter_epsilon:
            ratio = np.where(blurred < filter_epsilon, 0, ratio)
        estimate *= convolve(ratio, psf_flipped, mode='same')
    if clip:
        estimate[estimate > 1] = 1
        estimate[estimate < -1] = -1
    return estimate
# NOTE(review): this bare call looks like a registration decorator that lost
# its '@' prefix during extraction (e.g. `@NECKS.register_module()`) —
# confirm against the upstream file.
_module()
class MultiLevelNeck(nn.Module):
    """Multi-level neck.

    Projects each input feature map to `out_channels` with a 1x1 conv, then
    produces `len(scales)` outputs by bilinear resampling followed by a 3x3
    conv. With a single input, that input is reused for every scale.
    """

    def __init__(self, in_channels, out_channels, scales=[0.5, 1, 2, 4], norm_cfg=None, act_cfg=None):
        super(MultiLevelNeck, self).__init__()
        assert isinstance(in_channels, list)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.scales = scales
        self.num_outs = len(scales)
        self.lateral_convs = nn.ModuleList()
        self.convs = nn.ModuleList()
        # One 1x1 lateral projection per input level.
        for in_channel in in_channels:
            self.lateral_convs.append(ConvModule(in_channel, out_channels, kernel_size=1, norm_cfg=norm_cfg, act_cfg=act_cfg))
        # One 3x3 smoothing conv per output scale.
        for _ in range(self.num_outs):
            self.convs.append(ConvModule(out_channels, out_channels, kernel_size=3, padding=1, stride=1, norm_cfg=norm_cfg, act_cfg=act_cfg))

    def forward(self, inputs):
        assert len(inputs) == len(self.in_channels)
        # Fix: removed stray debug `print(inputs[0].shape)` left in the forward pass.
        inputs = [lateral_conv(inputs[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
        if len(inputs) == 1:
            inputs = [inputs[0] for _ in range(self.num_outs)]
        outs = []
        for i in range(self.num_outs):
            x_resize = F.interpolate(inputs[i], scale_factor=self.scales[i], mode='bilinear')
            outs.append(self.convs[i](x_resize))
        return tuple(outs)
class BinarySoftF1Loss(nn.Module):
    """Binary soft-F1 loss over flattened logits/targets.

    Entries whose target equals `ignore_index` are dropped before scoring;
    predictions are squashed with sigmoid and clamped to (eps, 1 - eps).
    """

    def __init__(self, ignore_index: Optional[int]=None, eps=1e-06):
        super().__init__()
        self.ignore_index = ignore_index
        self.eps = eps

    def forward(self, preds: Tensor, targets: Tensor) -> Tensor:
        targets = targets.view(-1)
        preds = preds.view(-1)
        if self.ignore_index is not None:
            keep = targets != self.ignore_index
            preds = preds[keep]
            targets = targets[keep]
            if targets.numel() == 0:
                # Everything was ignored; contribute a zero loss.
                return torch.tensor(0, dtype=preds.dtype, device=preds.device)
        preds = preds.sigmoid().clamp(self.eps, 1 - self.eps)
        return soft_micro_f1(preds.view(-1, 1), targets.view(-1, 1))
class DebertaV2ForSequenceClassification(metaclass=DummyObject):
    """Import-time placeholder used when torch is not installed.

    Any attempt to instantiate it raises a helpful "requires torch" error
    via `requires_backends`.
    """
    # Backends that must be installed for the real class to be usable.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def _get_default_scheme():
if (os.name == 'posix'):
return 'posix_prefix'
return os.name |
class Wander():
    """Steering behaviour that adds a small random lateral drift to a boid."""

    def calculate(self, boid):
        # Random-walk the wander parameter by +/-0.5, clamped to [-2, 2].
        drift = getattr(boid, 'wander_value', 0.0) + uniform(-0.5, 0.5)
        if drift < -2:
            drift = -2
        elif drift > 2:
            drift = 2
        boid.wander_value = drift
        # Desired direction: mostly forward (local y) with a sideways component
        # proportional to the wander parameter, at the boid's desired speed.
        ahead = boid.localy.scale(8)
        sideways = boid.localx.scale(drift)
        desired_velocity = (ahead + sideways).normalize() * boid.desired_speed
        return desired_velocity - boid.velocity
def exp_t(u, t):
    """Tempered exponential exp_t(u).

    Standard exponential at t == 1; otherwise the generalized form
    [1 + (1 - t) * u]_+ ^ (1 / (1 - t)).
    """
    if t == 1:
        return u.exp()
    one_minus_t = 1.0 - t
    return (1.0 + one_minus_t * u).relu().pow(1.0 / one_minus_t)
# NOTE(review): this bare call looks like a class decorator that lost its '@'
# prefix during extraction (something like `@..._representation(...)` mapping
# Property names to ONNX FormalParameter attributes) — confirm against upstream.
_representation(onnx.defs.OpSchema.FormalParameter, type_str='typeStr', param_type='option', homogeneous='isHomogeneous')
class ONNXParameter():
    """Declarative description of a single ONNX operator parameter."""
    # Parameter name as it appears in the op schema.
    name = Property(dtype=str, desc='The parameter name')
    # Human-readable description from the schema.
    description = Property(dtype=str, desc='A description of the parameter')
    # ONNX type string (e.g. a type-constraint symbol).
    type_str = Property(dtype=str, desc='The type string of this parameter')
    # Single / Optional / Variadic, per ONNXParameterType.
    param_type = Property(choices=ONNXParameterType, desc='The type of the this parameter', default=ONNXParameterType.Single)
    # Whether variadic inputs must all share one type.
    homogeneous = Property(dtype=bool, desc='Whether this parameter is homogeneous')

    def __repr__(self):
        return '{} ({})'.format(self.name, str(self.param_type))
def get_gdm():
    """Select 'gdm' rows, expose MSE as `test_metric`, and reduce them with
    cross-validated model selection on `valid_error` (lower is better).

    NOTE(review): relies on module-level `data`, `X`, `cv_group` and
    `VirtualValidation`; semantics assumed from names — verify at call site.
    """
    _a = data.ply_where((X.method == 'gdm')).ply_select('*', test_metric=X.MSE)
    _a = VirtualValidation(_a).fit((cv_group + ['method']), [('valid_error', {'larger_is_better': False})])
    # Return only the grouping keys plus the columns downstream code needs.
    return _a[(cv_group + ['target_c', 'method', 'test_metric'])]
def greedy_search_comma(input_string, predefined_list):
    """Greedily re-join comma-split chunks into entries of `predefined_list`.

    Chunks are accumulated (commas restored) until the buffer matches a known
    entry; matched entries are appended stripped. Leftover text that never
    matches is silently dropped.
    """
    results = []
    pending = ''
    for chunk in input_string.split(','):
        pending = pending + chunk if pending else chunk
        stripped = pending.strip()
        if stripped in predefined_list or pending in predefined_list:
            results.append(stripped)
            pending = ''
        else:
            # Not a known entry yet: restore the comma and keep accumulating.
            pending += ','
    if pending and pending.strip() in predefined_list:
        results.append(pending.strip())
    return results
class RESetParallelIterator(RESetMapReduce):
    """Map-reduce variant that streams each generated element back to the
    master process and exposes the stream through `__iter__`."""

    def map_function(self, z):
        # Wrap each element in a 1-tuple so partial results concatenate cleanly.
        return (z,)

    # Reduction starts from the empty tuple (results are concatenated tuples).
    reduce_init = tuple

    def __iter__(self):
        """Start the workers and yield results as they arrive."""
        # Results are forwarded to the master instead of being reduced locally.
        self.setup_workers(reduce_locally=False)
        self.start_workers()
        active_proc = self._nprocess
        while True:
            newres = self._results.get()
            if (newres is not None):
                logger.debug('Got some results')
                (yield from newres)
            else:
                # Each worker signals completion by sending None; stop once
                # every worker has finished.
                active_proc -= 1
                if (active_proc == 0):
                    break
        self.finish()
def test_junction_error_massages():
    """Passing an unknown unit ('F') to Junction must raise ValueError with
    the documented message.

    NOTE(review): function name typo ('massages') kept — renaming would change
    which tests the suite collects under this name.
    """
    # pytest treats `match` as a regex searched within the exception text.
    error = 'The input unit for the Josephson Junction is not correct. Look at the documentation for the correct input format.'
    with pytest.raises(ValueError, match=error):
        Junction(10, 'F')
class TunableMeta(type):
    """Metaclass enabling `Tunable[...]` subscription syntax.

    Subscribing produces a fresh `Tunable` subclass whose `__args__` holds
    the supplied values as a tuple.
    """

    def __getitem__(cls, values):
        # Normalize a single value to a 1-tuple so __args__ is always a tuple.
        args = values if isinstance(values, tuple) else (values,)
        return type('Tunable_', (Tunable,), {'__args__': args})
def convert_all_pt_checkpoints_to_tf(args_model_type, tf_dump_path, model_shortcut_names_or_path=None, config_shortcut_names_or_path=None, compare_with_pt_model=False, use_cached_models=False, only_convert_finetuned_models=False):
    """Convert one model family (or all of them) from PyTorch checkpoints to
    TF2 `.h5` files under `tf_dump_path`.

    Downloads configs/weights as needed and deletes the cached files after
    each conversion to save disk space.
    """
    # Fix: was `os.path.isdir(args.tf_dump_path)` — `args` is not defined in
    # this function; validate the actual parameter instead.
    assert os.path.isdir(tf_dump_path), '--tf_dump_path should be a directory'
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]
    for j, model_type in enumerate(model_types, start=1):
        print('=' * 100)
        print(' Converting model type {}/{}: {}'.format(j, len(model_types), model_type))
        print('=' * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError('Unrecognized model type {}, should be one of {}.'.format(model_type, list(MODEL_CLASSES.keys())))
        (config_class, model_class, loading_fct, pt_model_class, aws_model_maps, aws_config_map) = MODEL_CLASSES[model_type]
        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path
        for i, (model_shortcut_name, config_shortcut_name) in enumerate(zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1):
            print('-' * 100)
            # Fine-tuned checkpoints are identified by task suffixes in the name.
            if ('-squad' in model_shortcut_name) or ('-mrpc' in model_shortcut_name) or ('-mnli' in model_shortcut_name):
                if not only_convert_finetuned_models:
                    print(' Skipping finetuned checkpoint {}'.format(model_shortcut_name))
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(' Skipping not finetuned checkpoint {}'.format(model_shortcut_name))
                continue
            print(' Converting checkpoint {}/{}: {} - model_type {}'.format(i, len(aws_config_map), model_shortcut_name, model_type))
            print('-' * 100)
            # Resolve config and weights either from the AWS maps or directly.
            if config_shortcut_name in aws_config_map:
                config_file = cached_path(aws_config_map[config_shortcut_name], force_download=(not use_cached_models))
            else:
                config_file = cached_path(config_shortcut_name, force_download=(not use_cached_models))
            if model_shortcut_name in aws_model_maps:
                model_file = cached_path(aws_model_maps[model_shortcut_name], force_download=(not use_cached_models))
            else:
                model_file = cached_path(model_shortcut_name, force_download=(not use_cached_models))
            convert_pt_checkpoint_to_tf(model_type, model_file, config_file, os.path.join(tf_dump_path, (model_shortcut_name + '-tf_model.h5')), compare_with_pt_model=compare_with_pt_model)
            # Drop the cached files once converted to free disk space.
            os.remove(config_file)
            os.remove(model_file)
class SSIM(torch.nn.Module):
    """Module wrapper around the functional `ssim`, caching the Gaussian
    window and rebuilding it whenever the channel count or dtype changes."""

    def __init__(self, window_size=11, size_average=True, val_range=None):
        super(SSIM, self).__init__()
        self.window_size = window_size
        self.size_average = size_average
        self.val_range = val_range
        # Cache starts with a single-channel window; forward() refreshes it.
        self.channel = 1
        self.window = create_window(window_size)

    def forward(self, img1, img2):
        (_, channel, _, _) = img1.size()
        if channel != self.channel or self.window.dtype != img1.dtype:
            # Cached window no longer matches the input; rebuild and re-cache.
            window = create_window(self.window_size, channel).to(img1.device).type(img1.dtype)
            self.window = window
            self.channel = channel
        else:
            window = self.window
        return ssim(img1, img2, window=window, window_size=self.window_size, size_average=self.size_average)
def SetEnvVar(env_var, value):
    """Set an environment variable, or remove it when `value` is None.

    The variable name is upper-cased before use.
    """
    key = env_var.upper()
    if value is None:
        # Removal is a no-op when the variable is not currently set.
        os.environ.pop(key, None)
    else:
        os.environ[key] = value
class FFNLayer(nn.Module):
    """Transformer feed-forward block (linear -> activation -> dropout ->
    linear) with a residual connection and pre- or post-LayerNorm."""

    def __init__(self, d_model, dim_feedforward=2048, dropout=0.0, activation='relu', normalize_before=False):
        super().__init__()
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        self.norm = nn.LayerNorm(d_model)
        self.activation = _get_activation_fn(activation)
        self.normalize_before = normalize_before
        self._reset_parameters()

    def _reset_parameters(self):
        # Xavier-initialize every weight matrix (biases keep their defaults).
        for param in self.parameters():
            if param.dim() > 1:
                nn.init.xavier_uniform_(param)

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        """Add a positional embedding when one is provided."""
        return tensor if pos is None else tensor + pos

    def _ffn(self, x):
        # Core feed-forward transform shared by both normalization orders.
        return self.linear2(self.dropout(self.activation(self.linear1(x))))

    def forward_post(self, tgt):
        # Residual first, then normalize (post-norm).
        return self.norm(tgt + self.dropout(self._ffn(tgt)))

    def forward_pre(self, tgt):
        # Normalize first, then residual (pre-norm).
        return tgt + self.dropout(self._ffn(self.norm(tgt)))

    def forward(self, tgt):
        return self.forward_pre(tgt) if self.normalize_before else self.forward_post(tgt)
# NOTE(review): the bare `(0.2)` below looks like the argument of a decorator
# whose '@'-line was lost during extraction — confirm against the original file.
(0.2)
def verify_security_question(entities, *argv, **kargs):
    """Check the user's security-question answer (case-insensitive match)."""
    answer_is_correct = entities['security_question'].lower() == 'toyota camry'
    if answer_is_correct:
        return resp(True, msg='OK')
    return resp(False, msg='Sorry, the answer of the security question is wrong.')
def read_xyz(file_path):
    """Parse an XYZ file that may contain several concatenated geometries.

    Each frame is `n_atoms + 2` lines (atom count, comment, then one line per
    atom). Returns (R, z): R has one row per frame with 3*n_atoms coordinates;
    z holds atomic numbers taken from the first frame only.
    """
    n_atoms = None
    R, z = [], []
    with open(file_path, 'r') as f:
        for i, line in enumerate(f):
            line = line.strip()
            # Fix: was `if not n_atoms`, which would re-parse every line when
            # the header count is 0; test identity against None instead.
            if n_atoms is None:
                # The very first line fixes the frame size for the whole file.
                n_atoms = int(line)
            cols = line.split()
            file_i, line_i = divmod(i, n_atoms + 2)
            if line_i >= 2:
                R.append(list(map(float, cols[1:4])))
                if file_i == 0:
                    z.append(_z_str_to_z_dict[cols[0]])
    # One row per frame, three coordinates per atom.
    R = np.array(R).reshape((-1), (3 * n_atoms))
    z = np.array(z)
    # Fix: dropped the redundant f.close() — the with-block already closes f.
    return (R, z)
def get_summary_writer(cfg: DictConfig, job_type: str):
    """Create a TensorFlow summary writer under `<outdir>/<job_type>/summaries`.

    `outdir` defaults to the current working directory when not configured.
    """
    base = Path(cfg.get('outdir', os.getcwd()))
    summary_dir = base / job_type / 'summaries'
    summary_dir.mkdir(exist_ok=True, parents=True)
    return tf.summary.create_file_writer(summary_dir.as_posix())
def get_device_properties(device):
    """Return the CUDA device properties for `device`, initializing the CUDA
    state lazily on first use.

    Raises AssertionError when `device` is outside [0, device_count()).
    """
    if not _initialized:
        init()
    if not (0 <= device < device_count()):
        raise AssertionError('Invalid device id')
    return _get_device_properties(device)
class BallQuery(Function):
    # Autograd wrapper around the compiled ball-query kernel.

    def forward(ctx, radius, nsample, xyz, new_xyz):
        """Return up to `nsample` neighbor indices within `radius` of each
        query point in `new_xyz`; the index output is non-differentiable."""
        output = _ext.ball_query(new_xyz, xyz, radius, nsample)
        ctx.mark_non_differentiable(output)
        return output

    def backward(ctx, grad_out):
        # No gradients flow through index outputs.
        # NOTE(review): autograd conventionally expects one gradient (or None)
        # per forward input; returning an empty tuple relies on the output
        # being marked non-differentiable — confirm against the torch version.
        return ()
def positive_tagging(tagging_scheme, slot_name, slot_size):
    """Build the tag sequence of length `slot_size` for `slot_name` under the
    given tagging scheme (IO / BIO / BILOU).

    `OUTSIDE` slots produce plain outside tags; raises ValueError for an
    unknown scheme.
    """
    if slot_name == OUTSIDE:
        return [OUTSIDE] * slot_size
    if tagging_scheme == TaggingScheme.IO:
        return [INSIDE_PREFIX + slot_name] * slot_size
    if tagging_scheme == TaggingScheme.BIO:
        if slot_size <= 0:
            return []
        # One beginning tag, then inside tags for the remainder.
        return [BEGINNING_PREFIX + slot_name] + [INSIDE_PREFIX + slot_name] * (slot_size - 1)
    if tagging_scheme == TaggingScheme.BILOU:
        if slot_size == 0:
            return []
        if slot_size == 1:
            return [UNIT_PREFIX + slot_name]
        # Beginning, inside..., last.
        middle = [INSIDE_PREFIX + slot_name] * (slot_size - 2)
        return [BEGINNING_PREFIX + slot_name] + middle + [LAST_PREFIX + slot_name]
    raise ValueError('Invalid tagging scheme %s' % tagging_scheme)
class InputProjectionA(nn.Module):
    """Downsample a 5-D input by average pooling `samplingTimes` times.

    Each stage is a 3x3x3 average pool with stride 2 and padding 1, halving
    every spatial dimension.
    """

    def __init__(self, samplingTimes):
        super().__init__()
        # One stride-2 average pool per requested downsampling step.
        self.pool = nn.ModuleList(
            nn.AvgPool3d(3, stride=2, padding=1) for _ in range(samplingTimes)
        )

    def forward(self, input):
        out = input
        for stage in self.pool:
            out = stage(out)
        return out
def get_effecitve_match_source(s, start, end):
    """Snap the raw span [start, end) of `s` to effective match boundaries.

    Scans at most two characters leftward and three rightward for a span
    separator (or the string edge), then trims separators inward. Returns a
    Match(start, length), or None when no boundary is found within range.

    NOTE(review): name typo 'effecitve' kept — callers reference this name.
    """
    _start = (- 1)
    # Walk left from `start` (at most 2 steps) to find a separator or the
    # beginning of the string.
    for i in range(start, (start - 2), (- 1)):
        if (i < 0):
            _start = (i + 1)
            break
        if is_span_separator(s[i]):
            _start = i
            break
    if (_start < 0):
        return None
    _end = (- 1)
    # Walk right from `end - 1` (at most 4 steps) to find a separator or the
    # end of the string.
    for i in range((end - 1), (end + 3)):
        if (i >= len(s)):
            _end = (i - 1)
            break
        if is_span_separator(s[i]):
            _end = i
            break
    if (_end < 0):
        return None
    # Trim separators so the span covers only effective characters.
    while ((_start < len(s)) and is_span_separator(s[_start])):
        _start += 1
    while ((_end >= 0) and is_span_separator(s[_end])):
        _end -= 1
    return Match(_start, ((_end - _start) + 1))
def get_simple_regression(device: torch.device) -> GetterReturnType:
    """Build a Bayesian logistic-regression benchmark problem.

    Returns (forward, (beta,)): `forward` maps coefficient values to the
    Bernoulli log-likelihood of random data (X, Y) plus the N(0, 1) prior
    log-probability of the coefficients.
    """
    N, K = 10, 10
    beta_prior = dist.Normal(0.0, 1.0)
    X = torch.rand(N, K + 1, device=device)
    Y = torch.rand(N, 1, device=device)
    beta_value = beta_prior.sample((K + 1, 1))
    beta_value.requires_grad_(True)

    def forward(beta_value: Tensor) -> Tensor:
        logits = X.mm(beta_value)
        # Data likelihood plus the prior over the coefficients.
        return dist.Bernoulli(logits=logits).log_prob(Y).sum() + beta_prior.log_prob(beta_value).sum()

    return (forward, (beta_value.to(device),))
def _maybe_create_densepose_keep_instance_predicate(cfg: CfgNode) -> Optional[InstancePredicate]:
    """Build a predicate that keeps only dataset instances carrying usable
    DensePose annotations; returns None when DensePose is disabled in `cfg`."""
    if (not cfg.MODEL.DENSEPOSE_ON):
        return None
    use_masks = cfg.MODEL.ROI_DENSEPOSE_HEAD.COARSE_SEGM_TRAINED_BY_MASKS
    def has_densepose_annotations(instance: Instance) -> bool:
        # Keep when any annotation has the full DensePose key set plus a mask
        # (or segmentation), or — when training coarse segmentation from
        # masks — just a segmentation.
        for ann in instance['annotations']:
            if (all(((key in ann) for key in DENSEPOSE_COCO_KEYS_WITHOUT_MASK)) and ((DENSEPOSE_COCO_MASK_KEY in ann) or ('segmentation' in ann))):
                return True
            if (use_masks and ('segmentation' in ann)):
                return True
        return False
    return has_densepose_annotations
class UnivariateStatistic(BaseStatistic):
    """Abstract interface for an incrementally-updatable univariate statistic."""

    def update(self, num):
        """Incorporate a new observation. No-op in this base class."""
        pass

    def get(self):
        """Return the current value of the statistic. No-op in this base class."""
        pass

    def remove(self, num):
        """Remove a previously-added observation. No-op in this base class."""
        pass
def count_message_tokens(messages: List[Message], model: str='gpt-3.5-turbo-0301') -> int:
    """Count the tokens a chat `messages` list consumes for `model`, including
    the per-message and per-name framing overhead.

    Raises NotImplementedError for unsupported model families.
    """
    if model.startswith('gpt-3.5-turbo'):
        tokens_per_message = 4
        tokens_per_name = (- 1)  # the `name` field replaces one role token
        encoding_model = 'gpt-3.5-turbo'
    elif model.startswith('gpt-4') or (model == 'openai/gpt-4-0314'):
        tokens_per_message = 3
        tokens_per_name = 1
        encoding_model = 'gpt-4'
    elif model.startswith('claude-v1'):
        tokens_per_message = 1
        tokens_per_name = 1
        encoding_model = 'claude-v1'
    else:
        # NOTE(review): the "See for information" sentence lost its URL during
        # extraction — restore the link from the original source.
        raise NotImplementedError(f'''count_message_tokens() is not implemented for model {model}.
See for information on how messages are converted to tokens.''')
    try:
        encoding = tiktoken.encoding_for_model(encoding_model)
    except KeyError:
        # Fix: logger.warn is a deprecated alias of logger.warning.
        logger.warning('Warning: model not found. Using cl100k_base encoding.')
        encoding = tiktoken.get_encoding('cl100k_base')
    num_tokens = 0
    for message in messages:
        num_tokens += tokens_per_message
        for key, value in message.raw().items():
            num_tokens += len(encoding.encode(value))
            if key == 'name':
                num_tokens += tokens_per_name
    num_tokens += 3  # every reply is primed with an assistant header
    return num_tokens
def run_webapp(pickle_path):
    """Launch the Streamlit app, forwarding `pickle_path` via `-- --path`.

    Blocks until the Streamlit process exits and returns the CompletedProcess.
    """
    command = ['streamlit', 'run', APP_PATH, '--', '--path', pickle_path]
    return subprocess.run(command)
class TFMPNetForQuestionAnswering():
    """Import-time placeholder used when TensorFlow is not installed.

    Any use raises a helpful "requires TensorFlow" error via `requires_tf`.
    """

    def __init__(self, *args, **kwargs):
        requires_tf(self)

    def from_pretrained(self, *args, **kwargs):
        # NOTE(review): upstream dummy classes usually declare this a
        # classmethod; here it is an instance method — confirm intended usage.
        requires_tf(self)
def check_hexapod_dart_simu(conf):
    """waf configure step: locate the hexapod_dart_simu headers.

    Search order: $RESIBOTS_DIR (when set) before the system prefixes; an
    explicit `--simu` option overrides everything. On success, records the
    include paths in the environment and returns 1; otherwise returns None.
    """
    includes_check = ['/usr/local/include', '/usr/include']
    libs_check = ['/usr/local/lib', '/usr/lib']
    if 'RESIBOTS_DIR' in os.environ:
        includes_check = [os.environ['RESIBOTS_DIR'] + '/include'] + includes_check
        libs_check = [os.environ['RESIBOTS_DIR'] + '/lib'] + libs_check
    if conf.options.simu:
        # An explicit path replaces (not extends) the search lists.
        includes_check = [conf.options.simu + '/include']
        libs_check = [conf.options.simu + '/lib']
    try:
        conf.start_msg('Checking for hexapod_dart_simu includes')
        # Fix: the result was bound to an unused local; call for the side
        # effect (raises when the header is not found).
        conf.find_file('hexapod_dart/hexapod_dart_simu.hpp', includes_check)
        conf.end_msg('ok')
        conf.env.INCLUDES_HEXAPOD_DART_SIMU = includes_check
    except Exception:
        # Fix: was a bare `except:`, which also swallows SystemExit and
        # KeyboardInterrupt; narrowed to Exception.
        conf.end_msg('Not found', 'RED')
        return
    return 1
def make_transients_persistent(sdfg: SDFG, device: dtypes.DeviceType, toplevel_only: bool=True) -> Dict[(int, Set[str])]:
    """Mark eligible transient arrays/scalars in `sdfg` (recursively) with
    Persistent allocation lifetime, so they are allocated once per SDFG run.

    Returns a mapping from each nested SDFG's id to the set of data names made
    persistent. On GPU targets, additionally marks all WCR edges atomic.
    """
    result: Dict[(int, Set[str])] = {}
    for nsdfg in sdfg.all_sdfgs_recursive():
        fsyms: Set[str] = nsdfg.free_symbols
        persistent: Set[str] = set()
        not_persistent: Set[str] = set()
        for state in nsdfg.nodes():
            for dnode in state.data_nodes():
                if (dnode.data in not_persistent):
                    continue
                # Compile-time constants are never made persistent.
                if (dnode.data in nsdfg.constants_prop):
                    not_persistent.add(dnode.data)
                    continue
                desc = dnode.desc(nsdfg)
                # Only transient plain arrays/scalars qualify (views etc. excluded).
                if ((not desc.transient) or (type(desc) not in {dt.Array, dt.Scalar})):
                    not_persistent.add(dnode.data)
                    continue
                if (desc.storage == dtypes.StorageType.Register):
                    not_persistent.add(dnode.data)
                    continue
                try:
                    # A size depending on symbols defined only inside the SDFG
                    # cannot be allocated up front.
                    if (set(map(str, desc.total_size.free_symbols)) - fsyms):
                        not_persistent.add(dnode.data)
                        continue
                except AttributeError:
                    # total_size has no free_symbols (e.g. a plain integer).
                    pass
                # Data inside map scopes is excluded: always when toplevel_only,
                # otherwise only when its lifetime is tied to the scope.
                if (xfh.get_parent_map(state, dnode) is not None):
                    if toplevel_only:
                        not_persistent.add(dnode.data)
                        continue
                    elif (desc.lifetime == dtypes.AllocationLifetime.Scope):
                        not_persistent.add(dnode.data)
                        continue
                # Externally-managed allocations keep their declared lifetime.
                if (desc.lifetime == dtypes.AllocationLifetime.External):
                    not_persistent.add(dnode.data)
                    continue
                persistent.add(dnode.data)
        for aname in (persistent - not_persistent):
            nsdfg.arrays[aname].lifetime = dtypes.AllocationLifetime.Persistent
        result[nsdfg.sdfg_id] = (persistent - not_persistent)
    if (device == dtypes.DeviceType.GPU):
        # NOTE(review): forcing wcr_nonatomic = False makes every
        # write-conflict-resolution edge use atomics on GPU — presumably for
        # correctness with persistent allocation; confirm intent upstream.
        for (n, _) in sdfg.all_nodes_recursive():
            if isinstance(n, SDFGState):
                for edge in n.edges():
                    edge.data.wcr_nonatomic = False
    return result
def _open_out_file(filename):
if (filename in ['NUL:', '/dev/null']):
return dev_null
else:
return open(filename, 'wb') |
def _batch_mahalanobis(bL, bx):
n = bx.size((- 1))
bx_batch_shape = bx.shape[:(- 1)]
bx_batch_dims = len(bx_batch_shape)
bL_batch_dims = (bL.dim() - 2)
outer_batch_dims = (bx_batch_dims - bL_batch_dims)
old_batch_dims = (outer_batch_dims + bL_batch_dims)
new_batch_dims = (outer_batch_dims + (2 * bL_batch_dims))
bx_new_shape = bx.shape[:outer_batch_dims]
for (sL, sx) in zip(bL.shape[:(- 2)], bx.shape[outer_batch_dims:(- 1)]):
bx_new_shape += ((sx // sL), sL)
bx_new_shape += (n,)
bx = bx.reshape(bx_new_shape)
permute_dims = (((list(range(outer_batch_dims)) + list(range(outer_batch_dims, new_batch_dims, 2))) + list(range((outer_batch_dims + 1), new_batch_dims, 2))) + [new_batch_dims])
bx = bx.permute(permute_dims)
flat_L = bL.reshape((- 1), n, n)
flat_x = bx.reshape((- 1), flat_L.size(0), n)
flat_x_swap = flat_x.permute(1, 2, 0)
M_swap = torch.triangular_solve(flat_x_swap, flat_L, upper=False)[0].pow(2).sum((- 2))
M = M_swap.t()
permuted_M = M.reshape(bx.shape[:(- 1)])
permute_inv_dims = list(range(outer_batch_dims))
for i in range(bL_batch_dims):
permute_inv_dims += [(outer_batch_dims + i), (old_batch_dims + i)]
reshaped_M = permuted_M.permute(permute_inv_dims)
return reshaped_M.reshape(bx_batch_shape) |
class ConditionalBatchNorm2d_for_skip_and_shared(nn.Module):
    """Conditional BatchNorm: the affine gain/bias are predicted from the
    conditioning vector `y` by (optionally spectral-normalized) linear layers."""

    def __init__(self, num_features, z_dims_after_concat, spectral_norm):
        super().__init__()
        self.num_features = num_features
        # Affine-free BN; the conditional gain/bias below supply the affine part.
        self.bn = batchnorm_2d(num_features, eps=0.0001, momentum=0.1, affine=False)
        linear_cls = snlinear if spectral_norm else linear
        self.gain = linear_cls(z_dims_after_concat, num_features, bias=False)
        self.bias = linear_cls(z_dims_after_concat, num_features, bias=False)

    def forward(self, x, y):
        batch = y.size(0)
        # Per-channel scale (centered at 1) and shift, broadcast over H and W.
        gain = (1 + self.gain(y)).view(batch, -1, 1, 1)
        bias = self.bias(y).view(batch, -1, 1, 1)
        return self.bn(x) * gain + bias
def visualize_attention(writer, attention_map, iteration, threshold=0):
    """Log each attention map as an RGB image grid to TensorBoard (validation)."""
    stage = 'valid'
    for i in range(len(attention_map)):
        C = attention_map[i].shape[1]
        # Resize along the time axis to C so each map becomes square.
        attention_map_sb = F.interpolate(attention_map[i], C, mode='nearest')
        attention_map_sb = attention_map_sb[0].transpose(0, 1).unsqueeze(0)
        # Build a 3-channel image: red fixed at 1, green/blue encode |attn - 1|.
        # NOTE(review): hard-codes .cuda(); this fails on CPU-only runs.
        attention_map_sb = torch.cat((torch.ones(1, C, C).cuda(), torch.abs((attention_map_sb - 1.0)), torch.abs((attention_map_sb - 1.0))), 0)
        # NOTE(review): `range=` was removed from torchvision's make_grid in
        # newer releases (replaced by `value_range=`) — confirm the pinned
        # torchvision version.
        attention_map_sb = vutils.make_grid(attention_map_sb, padding=5, normalize=False, range=(threshold, 1))
        writer.add_image(((stage + '/Attention/Row-wise-') + str(i)), attention_map_sb, iteration)
class PSPHead(BaseSegHead):
    """Pyramid Scene Parsing head: pools the input at multiple bin sizes
    (PPM), concatenates with the input, fuses with a 3x3 conv, and classifies."""

    def __init__(self, bins=(1, 2, 3, 6), **kwargs):
        super(PSPHead, self).__init__(**kwargs)
        self.bins = bins
        # Pyramid pooling module: one pooled feature map per bin scale.
        self.psp = PPM(self.bins, self.in_channels, self.channels, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)
        # Fuses the original features with all pooled branches.
        self.bottleneck = ConvModule((self.in_channels + (len(self.bins) * self.channels)), self.channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)

    def forward(self, x):
        # Concatenate input with PPM outputs along channels, fuse, classify.
        output = [x]
        output.extend(self.psp(x))
        output = torch.cat(output, dim=1)
        output = self.bottleneck(output)
        return self.classify(output)
class RoFormerTokenizerFast(metaclass=DummyObject):
    """Import-time placeholder used when the `tokenizers` library is absent.

    Instantiation raises a helpful error via `requires_backends`.
    """
    # Backends required for the real class to be usable.
    _backends = ['tokenizers']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tokenizers'])
def construct_rfv_to_ev(rfv_dictionary, q, d, verbose=False):
    """Invert a map {exponent vector: residue-field vector of length d} into
    {residue-field vector: [exponent vectors]}.

    Built up one coordinate at a time; after each extension, prefixes with no
    matches — or whose complement (j -> q+1-j) has no matches — are pruned.
    """
    # Bucket entries by the first coordinate of their residue-field vector,
    # keeping the unprocessed tail alongside each exponent vector.
    P = {(v,): [] for v in range(2, q)}
    for exponent_vector, residue_field_vector in rfv_dictionary.items():
        P[(residue_field_vector[0],)].append([exponent_vector, residue_field_vector[1:]])
    if verbose:
        print('Populated P. Currently it has ', len(P), 'keys.')
    for j in range((d - 1)):
        if verbose:
            print('Constructing ', j, ' th place of the residue field vectors, out of ', (d - 1), ' total.')
        P_new = {}
        garbage = {}
        for prefix in P:
            # Pre-create every possible one-coordinate extension of this prefix.
            for w in range(2, q):
                P_new[tuple(list(prefix) + [w])] = []
            # Route each entry to the extension given by its tail's head.
            for exponent_vector, tail in P[prefix]:
                extended = tuple(list(prefix) + [tail[0]])
                P_new[extended].append([exponent_vector, tail[1:]])
        if verbose:
            print('P_new is populated with ', len(P_new), ' keys.')
        # Prune prefixes where either the prefix or its complement is unmatched.
        for prefix in P_new:
            if prefix[(- 1)] < ((q + 3) / 2):
                complement = tuple([((q + 1) - c) for c in prefix])
                if (P_new[prefix] == []) or (P_new[complement] == []):
                    garbage[prefix] = True
                    garbage[complement] = True
        for prefix in garbage:
            P_new.pop(prefix, 0)
        if verbose:
            print('After removing incompatible entries, P_new is down to ', len(P_new), ' keys.')
        P = P_new.copy()
    # All d coordinates are placed: keep only the exponent vectors.
    for residue_field_vector in P:
        P[residue_field_vector] = [entry[0] for entry in P[residue_field_vector]]
    if verbose:
        print('Returning dictionary P with ', len(P), ' keys.')
    return P.copy()
def FwFMEstimator(linear_feature_columns, dnn_feature_columns, dnn_hidden_units=(256, 128, 64), l2_reg_linear=1e-05, l2_reg_embedding=1e-05, l2_reg_field_strength=1e-05, l2_reg_dnn=0, seed=1024, dnn_dropout=0, dnn_activation='relu', dnn_use_bn=False, task='binary', model_dir=None, config=None, linear_optimizer='Ftrl', dnn_optimizer='Adagrad', training_chief_hooks=None):
    """Build a tf.estimator for the Field-weighted FM (FwFM) model.

    The final logit combines a linear part, a field-weighted pairwise
    interaction part over sparse embeddings, and an optional DNN part.
    """
    def _model_fn(features, labels, mode, config):
        train_flag = (mode == tf.estimator.ModeKeys.TRAIN)
        linear_logits = get_linear_logit(features, linear_feature_columns, l2_reg_linear=l2_reg_linear)
        final_logit_components = [linear_logits]
        with variable_scope(DNN_SCOPE_NAME):
            (sparse_embedding_list, dense_value_list) = input_from_feature_columns(features, dnn_feature_columns, l2_reg_embedding=l2_reg_embedding)
            # Field-weighted pairwise interactions over the sparse embeddings.
            fwfm_logit = FwFMLayer(num_fields=len(sparse_embedding_list), regularizer=l2_reg_field_strength)(concat_func(sparse_embedding_list, axis=1))
            final_logit_components.append(fwfm_logit)
            if dnn_hidden_units:
                # Deep part: MLP over embeddings + dense features, linear head.
                dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)
                dnn_output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, dnn_use_bn, seed=seed)(dnn_input, training=train_flag)
                dnn_logit = tf.keras.layers.Dense(1, use_bias=False, kernel_initializer=tf.keras.initializers.glorot_normal(seed))(dnn_output)
                final_logit_components.append(dnn_logit)
        logits = add_func(final_logit_components)
        return deepctr_model_fn(features, mode, logits, labels, task, linear_optimizer, dnn_optimizer, training_chief_hooks=training_chief_hooks)
    return tf.estimator.Estimator(_model_fn, model_dir=model_dir, config=config)
def minimize_noise(show_warnings, ui, for_profiling):
    """Invoke `sudo rebench-denoise` to reduce system noise before benchmarking.

    Parses its JSON output to learn which measures (process niceness, core
    shielding) took effect, optionally warns the user through `ui`, and
    returns a DenoiseResult summarizing the outcome.
    """
    result = {}
    # -n: make sudo fail instead of prompting when a password is needed.
    cmd = ['sudo', '-n', 'rebench-denoise']
    if for_profiling:
        cmd += ['--for-profiling']
    cmd += ['--json', 'minimize']
    try:
        output = output_as_str(subprocess.check_output(cmd, stderr=subprocess.STDOUT))
        try:
            result = json.loads(output)
            got_json = True
        except ValueError:
            got_json = False
    except subprocess.CalledProcessError as e:
        output = output_as_str(e.output)
        got_json = False
    except FileNotFoundError as e:
        # `sudo` itself is missing from the system.
        output = str(e)
        got_json = False
    # `{ind}` placeholders are expanded to indentation by the UI layer.
    msg = 'Minimizing noise with rebench-denoise failed\n'
    msg += '{ind}possibly causing benchmark results to vary more.\n\n'
    success = False
    use_nice = False
    use_shielding = False
    if got_json:
        failed = ''
        for (k, value) in result.items():
            if (value == 'failed'):
                failed += (('{ind}{ind} - ' + k) + '\n')
        if failed:
            msg += (('{ind}Failed to set:\n' + failed) + '\n')
        use_nice = result.get('can_set_nice', False)
        use_shielding = result.get('shielding', False)
        if ((not use_nice) and show_warnings):
            msg += ((('{ind}Process niceness could not be set.\n' + '{ind}{ind}`nice` is used to elevate the priority of the benchmark,\n') + '{ind}{ind}without it, other processes my interfere with it') + ' nondeterministically.\n')
        if ((not use_shielding) and show_warnings):
            msg += ((('{ind}Core shielding could not be set up.\n' + '{ind}{ind}Shielding is used to restrict the use of cores to') + ' benchmarking.\n') + '{ind}{ind}Without it, there my be more nondeterministic interference.\n')
        if (use_nice and use_shielding and (not failed)):
            success = True
    elif ('password is required' in output):
        # sudo rejected the passwordless call: explain how to configure sudoers.
        try:
            denoise_cmd = output_as_str(subprocess.check_output('which rebench-denoise', shell=True))
        except subprocess.CalledProcessError:
            denoise_cmd = '$PATH_TO/rebench-denoise'
        msg += ('{ind}Please make sure `sudo rebench-denoise`' + ' can be used without password.\n')
        msg += '{ind}To be able to run rebench-denoise without password,\n'
        msg += '{ind}add the following to the end of your sudoers file (using visudo):\n'
        msg += (((('{ind}{ind}' + getpass.getuser()) + ' ALL = (root) NOPASSWD:SETENV: ') + denoise_cmd) + '\n')
    elif ('command not found' in output):
        msg += '{ind}Please make sure `rebench-denoise` is on the PATH\n'
    elif ("No such file or directory: 'sudo'" in output):
        msg += "{ind}sudo is not available. Can't use rebench-denoise to manage the system.\n"
    else:
        msg += ('{ind}Error: ' + escape_braces(output))
    if ((not success) and show_warnings):
        ui.warning(msg)
    return DenoiseResult(success, msg, use_nice, use_shielding, result)
class WindowedSlopeBanditTeacher():
    """Curriculum-learning teacher: chooses tasks via a bandit `policy` driven
    by the recent (windowed) slope of each task's score curve."""

    def __init__(self, env, policy, window_size=10, abs=False, writer=None):
        self.env = env
        self.policy = policy
        self.window_size = window_size
        # NOTE: `abs` shadows the builtin; kept for interface compatibility.
        self.abs = abs
        # Per-action rolling windows of scores and the timesteps they occurred at.
        self.scores = [deque(maxlen=window_size) for _ in range(env.num_actions)]
        self.timesteps = [deque(maxlen=window_size) for _ in range(env.num_actions)]
        self.writer = writer

    def teach(self, num_timesteps=2000):
        """Run the teaching loop; returns the number of model epochs trained."""
        for t in range(num_timesteps):
            # A task's slope defaults to 1 until it has at least two observations.
            slopes = [(estimate_slope(timesteps, scores) if (len(scores) > 1) else 1) for (timesteps, scores) in zip(self.timesteps, self.scores)]
            p = self.policy((np.abs(slopes) if self.abs else slopes))
            (r, train_done, val_done) = self.env.step(p)
            if val_done:
                return self.env.model.epochs
            # Record only tasks that produced a valid (non-NaN) score this step.
            for (a, s) in enumerate(r):
                if (not np.isnan(s)):
                    self.scores[a].append(s)
                    self.timesteps[a].append(t)
            if self.writer:
                for i in range(self.env.num_actions):
                    add_summary(self.writer, ('slopes/task_%d' % (i + 1)), slopes[i], self.env.model.epochs)
                    add_summary(self.writer, ('probabilities/task_%d' % (i + 1)), p[i], self.env.model.epochs)
        return self.env.model.epochs
def count_all_paths_with_label_in_frame_inefficient(fsa: Fsa, num_frames: int, frame_idx: int, label: str) -> int:
    """Brute-force count of complete paths whose arc at `frame_idx` carries
    `label` (enumerates every path — exponential; for testing/reference only)."""
    total = 0
    for path in iterate_all_paths(fsa=fsa, num_frames=num_frames):
        if path[frame_idx].label == label:
            total += 1
    return total
@_HEADS_REGISTRY.register()  # BUG FIX: '@' was missing — the bare call never registered the class
class DensePoseROIHeads(StandardROIHeads):
    """StandardROIHeads extended with a DensePose branch (pooler, head,
    predictor and losses built from the ROI_DENSEPOSE_HEAD config)."""

    def __init__(self, cfg, input_shape):
        super().__init__(cfg, input_shape)
        self._init_densepose_head(cfg, input_shape)

    def _init_densepose_head(self, cfg, input_shape):
        """Build the DensePose pooler/head/predictor/losses; no-op if DENSEPOSE_ON is False."""
        self.densepose_on = cfg.MODEL.DENSEPOSE_ON
        if (not self.densepose_on):
            return
        self.densepose_data_filter = build_densepose_data_filter(cfg)
        dp_pooler_resolution = cfg.MODEL.ROI_DENSEPOSE_HEAD.POOLER_RESOLUTION
        dp_pooler_sampling_ratio = cfg.MODEL.ROI_DENSEPOSE_HEAD.POOLER_SAMPLING_RATIO
        dp_pooler_type = cfg.MODEL.ROI_DENSEPOSE_HEAD.POOLER_TYPE
        self.use_decoder = cfg.MODEL.ROI_DENSEPOSE_HEAD.DECODER_ON
        # With a decoder, pooling runs on a single decoded map (first in_feature's scale).
        if self.use_decoder:
            dp_pooler_scales = ((1.0 / input_shape[self.in_features[0]].stride),)
        else:
            dp_pooler_scales = tuple(((1.0 / input_shape[k].stride) for k in self.in_features))
        in_channels = [input_shape[f].channels for f in self.in_features][0]
        if self.use_decoder:
            self.decoder = Decoder(cfg, input_shape, self.in_features)
        self.densepose_pooler = ROIPooler(output_size=dp_pooler_resolution, scales=dp_pooler_scales, sampling_ratio=dp_pooler_sampling_ratio, pooler_type=dp_pooler_type)
        self.densepose_head = build_densepose_head(cfg, in_channels)
        self.densepose_predictor = build_densepose_predictor(cfg, self.densepose_head.n_out_channels)
        self.densepose_losses = build_densepose_losses(cfg)
        self.embedder = build_densepose_embedder(cfg)

    def _forward_densepose(self, features: Dict[(str, torch.Tensor)], instances: List[Instances]):
        """Run the DensePose branch.

        Training: returns the DensePose loss dict. Inference: returns
        `instances` augmented in place with DensePose predictions.
        """
        if (not self.densepose_on):
            return ({} if self.training else instances)
        features_list = [features[f] for f in self.in_features]
        if self.training:
            (proposals, _) = select_foreground_proposals(instances, self.num_classes)
            (features_list, proposals) = self.densepose_data_filter(features_list, proposals)
            # NOTE(review): if the filter leaves no proposals this falls through and
            # returns None, which forward() then passes to losses.update() — verify
            # the data filter guarantees non-empty proposals during training.
            if (len(proposals) > 0):
                proposal_boxes = [x.proposal_boxes for x in proposals]
                if self.use_decoder:
                    features_list = [self.decoder(features_list)]
                features_dp = self.densepose_pooler(features_list, proposal_boxes)
                densepose_head_outputs = self.densepose_head(features_dp)
                densepose_predictor_outputs = self.densepose_predictor(densepose_head_outputs)
                densepose_loss_dict = self.densepose_losses(proposals, densepose_predictor_outputs, embedder=self.embedder)
                return densepose_loss_dict
        else:
            pred_boxes = [x.pred_boxes for x in instances]
            if self.use_decoder:
                features_list = [self.decoder(features_list)]
            features_dp = self.densepose_pooler(features_list, pred_boxes)
            if (len(features_dp) > 0):
                densepose_head_outputs = self.densepose_head(features_dp)
                densepose_predictor_outputs = self.densepose_predictor(densepose_head_outputs)
            else:
                # No boxes at all: predictor outputs are absent.
                densepose_predictor_outputs = None
            densepose_inference(densepose_predictor_outputs, instances)
            return instances

    def forward(self, images: ImageList, features: Dict[(str, torch.Tensor)], proposals: List[Instances], targets: Optional[List[Instances]]=None):
        """Standard ROI heads forward plus DensePose losses during training."""
        (instances, losses) = super().forward(images, features, proposals, targets)
        del targets, images
        if self.training:
            losses.update(self._forward_densepose(features, instances))
        return (instances, losses)

    def forward_with_given_boxes(self, features: Dict[(str, torch.Tensor)], instances: List[Instances]):
        """Inference-time path: run parent heads, then attach DensePose predictions."""
        instances = super().forward_with_given_boxes(features, instances)
        instances = self._forward_densepose(features, instances)
        return instances
class DateMatcher(RegexMatchEach):
    """RegexMatchEach specialization that matches spans whose NER tags are 'DATE'."""

    def __init__(self, *children, **kwargs):
        # Pin the matched attribute and pattern regardless of caller kwargs.
        kwargs['rgx'] = 'DATE'
        kwargs['attrib'] = 'ner_tags'
        super(DateMatcher, self).__init__(*children, **kwargs)
def repetitive_adjacent_analysis(history: List[List[Set[Node]]], L, P):
    """Print, per merge step, which found-sets split evenly into groups of size P.

    NOTE(review): parameter `L` is unused here; kept for caller compatibility.
    """
    for merge_idx, found_sets in enumerate(history):
        sizes = [len(found) for found in found_sets]
        print(f'-I- merge {merge_idx} Found set lengths {sizes}')
        for size in sizes:
            if size % P == 0:
                groups = size // P
                print(f'    Found set of size {size} splitting it to {groups}*{P} groups')
            elif size > P:
                print(f'    Found set of size {size}, currently ignoring')
def ngrams(sen, n):
    """Return the word n-grams of `sen` (split on single spaces) as a list of tuples."""
    words = sen.split(' ')
    return [tuple(words[start:start + n]) for start in range(len(words) - n + 1)]
def init_logs(opt):
    """Create the experiment log tree and return (img_logs, weight_logs, tensor_writer).

    During training it also snapshots every *.py file (up to two directory
    levels deep) under the scripts/ log directory.
    """
    log_dir = safe_path(os.path.join(opt.data_root, 'explog{}'.format(opt.exp_id)))
    subdir = 'train' if opt.istrain else 'eval'
    img_logs = safe_path(os.path.join(log_dir, subdir))
    weight_logs = safe_path(os.path.join(log_dir, 'weights'))
    script_logs = safe_path(os.path.join(log_dir, 'scripts'))
    if opt.istrain:
        # Mirror the top-level repository directories under scripts/.
        for entry in os.listdir('./'):
            if os.path.isdir(os.path.join('./', entry)):
                safe_path(os.path.join(script_logs, entry))
        for fold in ['data', 'model', 'visual']:
            safe_path(os.path.join(script_logs, 'cycle/{}'.format(fold)))
        # Snapshot all python sources into the scripts log dir.
        for src in glob('*.py') + glob('*/*.py') + glob('*/*/*.py'):
            shutil.copy(src, os.path.join(script_logs, src))
    tensor_writer = SummaryWriter(safe_path(os.path.join(log_dir, 'tensorlogs')))
    return (img_logs, weight_logs, tensor_writer)
class TestGather2D(object):
    """Tests for gather_2d(x, i, j): elementwise x[i, j] with 2-D index grids.

    BUG FIX: the decorators had been stripped to bare '.usefixtures(...)'
    expressions (syntax errors) and `x` had lost its fixture decorator;
    '@pytest.mark.usefixtures' / '@pytest.fixture' are restored.
    """

    @pytest.fixture  # NOTE(review): reconstructed — tests receive `x` as a fixture argument
    def x(self):
        # (4, 3, 2) int32 tensor with distinguishable per-(row, col) entries.
        x = tf.constant([[[1, 2], [2, 2], [3, 3]], [[4, 5], [5, 4], [6, 6]], [[7, 7], [8, 7], [9, 9]], [[0, 8], [1, 1], [2, 2]]], dtype=tf.int32)
        return x

    @pytest.mark.usefixtures('clean_test_session')
    def test(self, x):
        i = tf.constant([[0, 2], [3, 0]], dtype=tf.int32)
        j = tf.constant([[1, 1], [0, 2]], dtype=tf.int32)
        vals = gather_2d(x, i, j)
        correct = np.array([[[2, 2], [8, 7]], [[0, 8], [3, 3]]], dtype=np.int32)
        assert_array_almost_equal(correct, vals.eval())
        assert (vals.get_shape().as_list() == [2, 2, 2])

    @pytest.mark.usefixtures('clean_test_session')
    def test_broadcast(self, x):
        # j has a single row and must broadcast across i's rows.
        i = tf.constant([[0, 2], [3, 0]], dtype=tf.int32)
        j = tf.constant([[1, 2]], dtype=tf.int32)
        vals = gather_2d(x, i, j)
        correct = np.array([[[2, 2], [9, 9]], [[1, 1], [3, 3]]], dtype=np.int32)
        assert_array_almost_equal(correct, vals.eval())
class PAL_TD3(object):
    """TD3 agent with a Prioritized Approximation Loss (PAL) critic objective.

    Standard TD3 machinery (twin critics, target policy smoothing, delayed
    policy updates, Polyak-averaged targets) with the critic loss reweighting
    TD errors by |td|^alpha above `min_priority` instead of prioritized
    replay sampling. Relies on module-level Actor, Critic and device.
    """

    def __init__(self, state_dim, action_dim, max_action, discount=0.99, tau=0.005, policy_noise=0.2, noise_clip=0.5, policy_freq=2, alpha=0.4, min_priority=1):
        # Actor network and its Polyak-averaged target copy.
        self.actor = Actor(state_dim, action_dim, max_action).to(device)
        self.actor_target = copy.deepcopy(self.actor)
        self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=0.0003)
        # Twin critic (Q1/Q2) and its target copy.
        self.critic = Critic(state_dim, action_dim).to(device)
        self.critic_target = copy.deepcopy(self.critic)
        self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=0.0003)
        self.max_action = max_action
        self.discount = discount  # reward discount factor
        self.tau = tau  # Polyak coefficient for target-network updates
        self.policy_noise = policy_noise  # std of target-policy smoothing noise
        self.noise_clip = noise_clip  # clip range for that noise
        self.policy_freq = policy_freq  # actor/target update once per N train() calls
        self.alpha = alpha  # PAL priority exponent
        self.min_priority = min_priority  # below this |td|, loss is quadratic
        self.total_it = 0  # train() call counter (drives delayed updates)

    def select_action(self, state, test=False):
        # NOTE(review): `test` is unused here; kept for caller compatibility.
        state = torch.FloatTensor(state.reshape(1, (- 1))).to(device)
        return self.actor(state).cpu().data.numpy().flatten()

    def train(self, replay_buffer, batch_size=256):
        """One critic update; every `policy_freq` calls also one actor + target update."""
        self.total_it += 1
        (state, action, next_state, reward, not_done) = replay_buffer.sample(batch_size)
        with torch.no_grad():
            # Target policy smoothing: clipped Gaussian noise on the target action.
            noise = (torch.randn_like(action) * self.policy_noise).clamp((- self.noise_clip), self.noise_clip)
            next_action = (self.actor_target(next_state) + noise).clamp((- self.max_action), self.max_action)
            # Clipped double-Q bootstrap target.
            (target_Q1, target_Q2) = self.critic_target(next_state, next_action)
            target_Q = torch.min(target_Q1, target_Q2)
            target_Q = (reward + ((not_done * self.discount) * target_Q))
        (current_Q1, current_Q2) = self.critic(state, action)
        td_loss1 = (current_Q1 - target_Q)
        td_loss2 = (current_Q2 - target_Q)
        # PAL loss on both critics, normalized by the detached mean priority weight.
        critic_loss = (self.PAL(td_loss1) + self.PAL(td_loss2))
        critic_loss /= torch.max(td_loss1.abs(), td_loss2.abs()).clamp(min=self.min_priority).pow(self.alpha).mean().detach()
        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        self.critic_optimizer.step()
        if ((self.total_it % self.policy_freq) == 0):
            # Delayed deterministic policy-gradient step (maximize Q1).
            actor_loss = (- self.critic.Q1(state, self.actor(state)).mean())
            self.actor_optimizer.zero_grad()
            actor_loss.backward()
            self.actor_optimizer.step()
            # Polyak-average both target networks toward the live networks.
            for (param, target_param) in zip(self.critic.parameters(), self.critic_target.parameters()):
                target_param.data.copy_(((self.tau * param.data) + ((1 - self.tau) * target_param.data)))
            for (param, target_param) in zip(self.actor.parameters(), self.actor_target.parameters()):
                target_param.data.copy_(((self.tau * param.data) + ((1 - self.tau) * target_param.data)))

    def PAL(self, x):
        # Huber-like: 0.5*min_priority^alpha*x^2 below min_priority,
        # min_priority*|x|^(1+alpha)/(1+alpha) above; mean over the batch.
        return torch.where((x.abs() < self.min_priority), (((self.min_priority ** self.alpha) * 0.5) * x.pow(2)), ((self.min_priority * x.abs().pow((1.0 + self.alpha))) / (1.0 + self.alpha))).mean()

    def save(self, filename):
        # Persist critic/actor weights and optimizer states under filename + suffix.
        torch.save(self.critic.state_dict(), (filename + '_critic'))
        torch.save(self.critic_optimizer.state_dict(), (filename + '_critic_optimizer'))
        torch.save(self.actor.state_dict(), (filename + '_actor'))
        torch.save(self.actor_optimizer.state_dict(), (filename + '_actor_optimizer'))

    def load(self, filename):
        # Restore everything written by save().
        self.critic.load_state_dict(torch.load((filename + '_critic')))
        self.critic_optimizer.load_state_dict(torch.load((filename + '_critic_optimizer')))
        self.actor.load_state_dict(torch.load((filename + '_actor')))
        self.actor_optimizer.load_state_dict(torch.load((filename + '_actor_optimizer')))
def _maybe_create_mask_keep_instance_predicate(cfg: CfgNode) -> Optional[InstancePredicate]:
if (not cfg.MODEL.MASK_ON):
return None
def has_mask_annotations(instance: Instance) -> bool:
return any((('segmentation' in ann) for ann in instance['annotations']))
return has_mask_annotations |
class GCN(nn.Layer):
    """Single graph-convolution layer: forward computes adj @ (inputs @ W).

    NOTE(review): a bias parameter is registered when bias=True but never
    applied in forward() — presumably consumed by the caller; confirm.
    """

    def __init__(self, in_features, out_features, bias=True):
        super(GCN, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        # Uniform init bound 1/sqrt(out_features), as in Kipf & Welling reference code.
        bound = 1.0 / math.sqrt(self.out_features)
        self.weight = self.create_parameter(shape=[self.in_features, self.out_features], dtype='float64', default_initializer=nn.initializer.Uniform(low=-bound, high=bound))
        self.add_parameter('weight', self.weight)
        if bias:
            self.bias = paddle.create_parameter(shape=[self.out_features], dtype='float64', default_initializer=nn.initializer.Uniform(low=-bound, high=bound))
            self.add_parameter('bias', self.bias)
        else:
            self.add_parameter('bias', None)

    def forward(self, adj, inputs, identity=False):
        if identity:
            # Treat inputs as identity: propagate the weight matrix directly.
            return paddle.matmul(adj, self.weight)
        support = paddle.matmul(inputs, self.weight)
        return paddle.matmul(adj, support)
def test_dot_batched_outer_product():
    """rf.matmul with reduce=[] yields a batched outer product (B, Da, Db)."""
    raw_a = torch.tensor([[1.0, 2.0, 3.0], [-1.0, -2.0, -3.0]])
    raw_b = torch.tensor([[4.0, 5.0, 6.0], [4.0, 5.0, 6.0]])
    batch_dim = Dim(dimension=2)
    a_feature_dim = Dim(dimension=3)
    b_feature_dim = Dim(dimension=3)
    a = Tensor(name='a', dims=[batch_dim, a_feature_dim], dtype='float32', raw_tensor=raw_a)
    b = Tensor(name='b', dims=[batch_dim, b_feature_dim], dtype='float32', raw_tensor=raw_b)
    result = rf.matmul(a, b, reduce=[])
    # No reduction: both feature dims survive alongside the shared batch dim.
    assert result.dims == (batch_dim, a_feature_dim, b_feature_dim)
    assert result.raw_tensor.shape == (2, 3, 3)
def make_input_pipeline_from_def(def_dict, mode, **kwargs):
    """Instantiate the input-pipeline class named by def_dict['class'].

    Constructor params come from def_dict['params'] (if present) overridden
    by **kwargs. Raises ValueError on a missing or unknown class name.
    """
    if 'class' not in def_dict:
        raise ValueError('Input Pipeline definition must have a class property.')
    class_ = def_dict['class']
    this_module = sys.modules[__name__]
    if not hasattr(this_module, class_):
        raise ValueError('Invalid Input Pipeline class: {}'.format(class_))
    pipeline_class = getattr(this_module, class_)
    # kwargs take precedence over the definition's params.
    params = {}
    params.update(def_dict.get('params', {}))
    params.update(kwargs)
    return pipeline_class(params=params, mode=mode)
def get_ssa(net, blob_versions=None):
    """Compute a static-single-assignment view of a Caffe2 net.

    Returns (ssa, blob_versions), where ssa holds one
    ([(input_name, version)], [(output_name, version)]) pair per op and
    blob_versions maps blob name -> latest version. Accepts a Net, a
    caffe2_pb2.NetDef, or a list of nets sharing one blob_versions dict.
    """
    proto = (net.Proto() if isinstance(net, Net) else net)
    assert isinstance(proto, caffe2_pb2.NetDef)
    if (blob_versions is None):
        blob_versions = {}
    if isinstance(net, list):
        # A list of nets is processed against the same shared version map.
        return ([get_ssa(n, blob_versions) for n in net], blob_versions)
    # Declared external inputs start at version 0.
    for i in proto.external_input:
        if (i not in blob_versions):
            blob_versions[str(i)] = 0
    ssa = []
    for op in proto.op:
        # NOTE(review): this guard re-tests the net-level external_input list
        # inside the op loop — op inputs are only seeded at version 0 when the
        # net declares NO external inputs at all. Looks intended for legacy
        # nets without declared inputs, but worth confirming.
        if (not proto.external_input):
            for i in op.input:
                if (i not in blob_versions):
                    blob_versions[i] = 0
        inputs = [(str(i), blob_versions.get(str(i), 0)) for i in op.input]
        # Each write to a blob bumps its version number.
        for o in op.output:
            blob_versions[str(o)] = (blob_versions.get(str(o), 0) + 1)
        outputs = [(str(o), blob_versions[str(o)]) for o in op.output]
        ssa.append((inputs, outputs))
    return (ssa, blob_versions)
def rect_2_cxy_wh(rect):
    """Convert [x, y, w, h] (top-left anchored) to (center_xy, wh) numpy arrays."""
    x, y, w, h = rect[0], rect[1], rect[2], rect[3]
    center = np.array([x + w / 2, y + h / 2])
    size = np.array([w, h])
    return (center, size)
@pytest.mark.parametrize('likelihood_variance', [(- 1), 0.0])  # BUG FIX: '@pytest.mark' restored — line was a bare '.parametrize' expression (syntax error)
def test_build_svgp_raises_for_invalid_likelihood_variance(likelihood_variance: float) -> None:
    """build_svgp must reject non-positive likelihood variance."""
    (qp, obs) = mock_data()
    data = mk_dataset(qp, obs)
    search_space = (Box([0.0], [1.0]) ** qp.shape[(- 1)])
    with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
        build_svgp(data, search_space, likelihood_variance=likelihood_variance)
def ncr(n, r):
    """Binomial coefficient C(n, r); returns 0 when r > n."""
    if r > n:
        return 0
    # Use the symmetric, smaller r to shorten the products.
    r = min(r, n - r)
    numerator = 1
    for value in range(n - r + 1, n + 1):
        numerator *= value
    denominator = 1
    for value in range(2, r + 1):
        denominator *= value
    return numerator // denominator
def convert(name, in_dir, out_dir, resolution, skip_existing):
    """Convert one MusPy-readable file to JSON at the target resolution.

    Returns the relative output name, or None when skipped (already exists)
    or filtered out by length.
    """
    out_name = f'{name[0]}/{name}'
    out_filename = out_dir / f'{out_name}.json'
    if skip_existing and out_filename.is_file():
        return
    music = muspy.read(in_dir / name[0] / name)
    adjust_resolution(music, resolution)
    # Length filter in ticks; resolution*4 presumably is one 4-beat bar — keep
    # pieces between 10 and 2000 of those units (TODO confirm).
    end_time = music.get_end_time()
    if end_time > resolution * 4 * 2000 or end_time < resolution * 4 * 10:
        return
    out_filename.parent.mkdir(exist_ok=True, parents=True)
    music.save(out_filename)
    return out_name
def parallel_execution_analysis(node, part_idx, cache):
    """Memoized longest-path (forward, backward) execution times ending at
    `node`, counting only nodes whose stage_id equals `part_idx`."""
    if node.scope in cache:
        return cache[node.scope]
    if node.stage_id != part_idx:
        # Foreign-stage nodes contribute nothing to this partition's path.
        cache[node.scope] = (0, 0)
        return (0, 0)
    best_f = 0
    best_b = 0
    for predecessor in node.in_edges:
        f, b = parallel_execution_analysis(predecessor, part_idx, cache)
        best_f = max(best_f, f)
        best_b = max(best_b, b)
    result = (best_f + extract_time(node.weight, forward=True), best_b + extract_time(node.weight, forward=False))
    cache[node.scope] = result
    return result
class CythonFunction(CythonVariable):
    """Debug-info record for a Cython function (locals, arguments, and the
    names of functions the debugger may step into)."""

    def __init__(self, module, name, cname, pf_cname, qualified_name, lineno, type=CObject, is_initmodule_function='False'):
        super(CythonFunction, self).__init__(name, cname, qualified_name, type, lineno)
        self.module = module
        self.pf_cname = pf_cname
        # The flag arrives as the string 'True'/'False' (parsed from debug XML).
        self.is_initmodule_function = (is_initmodule_function == 'True')
        self.step_into_functions = set()
        self.arguments = []
        self.locals = {}
def main(args):
    """Print every line of an IndexedDataset decoded through the dictionary."""
    # Local renamed from `dict` to avoid shadowing the builtin.
    vocab = dictionary.Dictionary.load(args.dict)
    dataset = IndexedDataset(args.input, fix_lua_indexing=True)
    for tensor_line in dataset:
        print(vocab.string(tensor_line))
def test_coerce_to_string_io_with_path():
    """A path given to a @_to_string_io-wrapped function arrives as TextIOWrapper.

    BUG FIX: the '@' on the decorator was missing — `_to_string_io` was a
    bare no-op expression, so `func` received the raw path and the test's
    isinstance assertion could never hold.
    """
    with tempfile.NamedTemporaryFile(delete=False) as f:
        @_to_string_io
        def func(fh):
            assert isinstance(fh, TextIOWrapper)
        func(f.name)
def get_arguments():
    """Parse command-line arguments for DeepLab-ResNet evaluation.

    Defaults come from module-level constants (MODEL, DATA_DIRECTORY, ...).
    """
    cli = argparse.ArgumentParser(description='DeepLab-ResNet Network')
    cli.add_argument('--model', type=str, default=MODEL, help='Model Choice (DeeplabMulti/DeeplabVGG/Oracle).')
    cli.add_argument('--data-dir', type=str, default=DATA_DIRECTORY, help='Path to the directory containing the Cityscapes dataset.')
    cli.add_argument('--data-list', type=str, default=DATA_LIST_PATH, help='Path to the file listing the images in the dataset.')
    cli.add_argument('--ignore-label', type=int, default=IGNORE_LABEL, help='The index of the label to ignore during the training.')
    cli.add_argument('--num-classes', type=int, default=NUM_CLASSES, help='Number of classes to predict (including background).')
    cli.add_argument('--restore-from', type=str, default=RESTORE_FROM, help='Where restore model parameters from.')
    cli.add_argument('--set', type=str, default=SET, help='choose evaluation set.')
    cli.add_argument('--save', type=str, default=SAVE_PATH, help='Path to save result.')
    cli.add_argument('--cpu', action='store_true', help='choose to use cpu device.')
    return cli.parse_args()
def check_foreign_word(word: str) -> int:
    """Return 1 if `word` looks foreign (fails Malayalam validation or hits a
    sure-foreign pattern), else 0. Zero-width joiners/spaces are stripped first."""
    cleaned = re.sub('[\\u200B-\\u200D]', '', word.strip())
    if not is_valid_malayalam_word(cleaned):
        return 1
    return 1 if has_sure_patterns(cleaned) else 0
def upload_onnx_model(model_name, zoo_dir, backup=False, only_local=False):
    """Tar-gzip an ONNX model directory and upload it to the s3 model zoo.

    backup=True adds a '-backup' suffix to the archive name; only_local=True
    skips the upload entirely.
    """
    if only_local:
        print('No uploading in local only mode.')
        return
    model_dir = os.path.join(zoo_dir, model_name)
    suffix = ('-backup' if backup else '')
    if backup:
        print('Backing up the previous version of ONNX model {}...'.format(model_name))
    rel_file_name = '{}{}.tar.gz'.format(model_name, suffix)
    abs_file_name = os.path.join(zoo_dir, rel_file_name)
    print('Compressing {} model to {}'.format(model_name, abs_file_name))
    with tarfile.open(abs_file_name, 'w:gz') as archive:
        archive.add(model_dir, arcname=model_name)
    size_mb = float(os.stat(abs_file_name).st_size) / 1024 / 1024
    print('Uploading {} ({} MB) to s3 cloud...'.format(abs_file_name, size_mb))
    client = boto3.client('s3', 'us-east-1')
    transfer = boto3.s3.transfer.S3Transfer(client)
    transfer.upload_file(abs_file_name, 'download.onnx', 'models/latest/{}'.format(rel_file_name), extra_args={'ACL': 'public-read'})
    print('Successfully uploaded {} to s3!'.format(rel_file_name))
class Mish_SENet(nn.Module):
    """SENet-style backbone for 32x32 RGB input with a Mish-activated stem
    and four residual stages built from `block`."""

    def __init__(self, block, num_blocks, num_classes=10):
        super(Mish_SENet, self).__init__()
        self.in_planes = 64
        # Stem: 3x3 conv + BN; activated with Mish in forward().
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        # Stages double the channel count while (except stage 1) halving resolution.
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512, num_classes)
        self.mish = Mish()

    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block of a stage may downsample; the rest use stride 1.
        layers = []
        for s in [stride] + [1] * (num_blocks - 1):
            layers.append(block(self.in_planes, planes, s))
            self.in_planes = planes
        return nn.Sequential(*layers)

    def forward(self, x):
        h = self.mish(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            h = stage(h)
        h = F.avg_pool2d(h, 4)
        h = h.view(h.size(0), -1)
        return self.linear(h)
def string_to_list(string: str) -> List[int]:
    """Parse a bracketed comma-separated string, e.g. '[1,2,3]', into ints.

    Raises AssertionError when the string is not wrapped in brackets
    (unchanged behavior). BUG FIX: '[]' previously crashed on int('');
    it now returns an empty list.
    """
    assert ((string[0] == '[') and (string[(- 1)] == ']')), 'String is not a list.'
    inner = string[1:(- 1)].strip()
    if not inner:
        return []
    return [int(num) for num in inner.split(',')]
def render_with_template(text=None, filename=None, preprocessor=None, template_kwargs=None):
    """Render a Mako template from `text` or `filename` with `template_kwargs`.

    On a render error, print Mako's annotated error report to stderr and
    re-raise. BUG FIX: the default for template_kwargs was a shared mutable
    dict ({}); it is now None (same behavior for all callers).
    """
    from mako.template import Template
    from mako import exceptions
    if template_kwargs is None:
        template_kwargs = {}
    tmpl = Template(text=text, filename=filename, preprocessor=preprocessor)
    try:
        return tmpl.render(**template_kwargs)
    except Exception:
        import sys
        print(('-' * 78), file=sys.stderr)
        print('Template exceptions', file=sys.stderr)
        print(('-' * 78), file=sys.stderr)
        print(exceptions.text_error_template().render(), file=sys.stderr)
        print(('-' * 78), file=sys.stderr)
        # Bare raise keeps the original traceback intact (was `raise e`).
        raise
class SplAtConv2d(nn.Module):
    """Split-Attention Conv2d (ResNeSt-style): one grouped conv emits `radix`
    channel splits, and per-split attention weights are computed from the
    globally pooled sum of the splits."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, radix=2, reduction_factor=4, conv_op=nn.Conv2d, norm_op=nn.BatchNorm2d, dropblock_prob=0.0):
        super(SplAtConv2d, self).__init__()
        # Bottleneck width of the attention MLP, floored at 32 channels.
        inter_channels = max(((in_channels * radix) // reduction_factor), 32)
        self.radix = radix
        self.cardinality = groups
        self.out_channels = out_channels
        self.dropblock_prob = dropblock_prob
        # Single conv produces all radix splits at once (groups * radix groups).
        self.conv = conv_op(in_channels, (out_channels * radix), kernel_size, stride, padding, dilation, groups=(groups * radix), bias=bias)
        self.use_bn = (norm_op is not None)
        # NOTE(review): bn0/bn1 are built unconditionally, so norm_op=None would
        # raise here even though use_bn guards their use in forward() — confirm.
        self.bn0 = norm_op((out_channels * radix))
        self.relu = nn.ReLU(inplace=True)
        self.fc1 = nn.Conv2d(out_channels, inter_channels, 1, groups=self.cardinality)
        self.bn1 = norm_op(inter_channels)
        self.fc2 = nn.Conv2d(inter_channels, (out_channels * radix), 1, groups=self.cardinality)
        if (dropblock_prob > 0.0):
            # Attribute exists only when enabled; forward() checks the prob first.
            self.dropblock = DropBlock2D(dropblock_prob, 3)
        self.rsoftmax = rSoftMax(radix, groups)

    def forward(self, x):
        x = self.conv(x)
        if self.use_bn:
            x = self.bn0(x)
        if (self.dropblock_prob > 0.0):
            x = self.dropblock(x)
        x = self.relu(x)
        (batch, channel) = x.shape[:2]
        if (self.radix > 1):
            # Split the channels into radix groups and sum them for the gate input.
            splited = torch.split(x, (channel // self.radix), dim=1)
            gap = sum(splited)
        else:
            gap = x
        # Squeeze: global average pool, then two 1x1 convs yield attention logits.
        gap = F.adaptive_avg_pool2d(gap, 1)
        gap = self.fc1(gap)
        if self.use_bn:
            gap = self.bn1(gap)
        gap = self.relu(gap)
        atten = self.fc2(gap)
        atten = self.rsoftmax(atten).view(batch, (- 1), 1, 1)
        if (self.radix > 1):
            # Weight each split by its attention map and sum. `splited` is defined
            # because radix > 1 took the matching branch above.
            atten = torch.split(atten, (channel // self.radix), dim=1)
            out = sum([(att * split) for (att, split) in zip(atten, splited)])
        else:
            out = (atten * x)
        return out.contiguous()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.