code stringlengths 101 5.91M |
|---|
def parse_args():
    """Parse command-line options for VoxCeleb1 training.

    Positional arguments give the dataset root and checkpoint directory;
    the step options control logging/eval/checkpoint cadence.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('voxceleb1', help='The root directory of VoxCeleb1')
    parser.add_argument('save_to', help='The directory to save checkpoint')
    parser.add_argument('--total_steps', type=int, default=200000)
    parser.add_argument('--log_step', type=int, default=100)
    parser.add_argument('--eval_step', type=int, default=5000)
    parser.add_argument('--save_step', type=int, default=100)
    parser.add_argument('--resume', action='store_true')
    return parser.parse_args()
def test_construct_schema_1positional():
    """A schema built from a single positional int parameter accepts ints
    and rejects non-numeric strings with a DataError."""
    def fun(x: int):
        pass

    model_type = schema.construct_schema('FunSchema', fun, skip_first_arg=False)
    valid = model_type({'x': 5})
    assert valid.to_native() == {'x': 5}
    with pytest.raises(models.DataError):
        model_type({'x': 'hi'}).to_native() == {'x': 'hi'}
_cache()
# NOTE(review): the bare `_cache()` call above looks like a mangled decorator
# (presumably something like `@functools.lru_cache()`); confirm against upstream.
def get_gpu_runtime() -> gpu_runtime.GPURuntime:
    """Locate the GPU runtime shared library for the active backend.

    Returns:
        gpu_runtime.GPURuntime wrapping the discovered library.

    Raises:
        RuntimeError: if the backend is neither 'cuda' nor 'hip', or if no
            runtime library can be found on the loader path.
    """
    backend = get_gpu_backend()
    if (backend == 'cuda'):
        libpath = ctypes.util.find_library('cudart')
        # On Windows the CUDA runtime DLL is versioned (e.g. cudart64_110.dll),
        # so probe the known major versions newest-first.
        if ((os.name == 'nt') and (not libpath)):
            for version in (12, 11, 10, 9):
                libpath = ctypes.util.find_library(f'cudart64_{version}0')
                if libpath:
                    break
    elif (backend == 'hip'):
        libpath = ctypes.util.find_library('amdhip64')
    else:
        raise RuntimeError(f'Cannot obtain GPU runtime library for backend {backend}')
    if (not libpath):
        # Tell the user which environment variable controls library lookup
        # on their platform.
        envname = ('PATH' if (os.name == 'nt') else 'LD_LIBRARY_PATH')
        raise RuntimeError(f'GPU runtime library for {backend} not found. Please set the {envname} environment variable to point to the libraries.')
    return gpu_runtime.GPURuntime(backend, libpath)
def generate_y(num_nodes, batch_size):
    """Build a 2 x (num_nodes * batch_size) index tensor: the first row runs
    over all nodes in the batch, the second tiles the first graph's node
    indices once per batch element."""
    total = num_nodes * batch_size
    row = torch.arange(total, device=device)
    # 1-D repeat tiles the sequence, matching view/repeat/view on the original.
    col = row[:num_nodes].repeat(batch_size)
    return torch.stack([row, col], dim=0)
class HardDiskBackend(BaseStorageBackend):
    """Storage backend that reads files from the local filesystem."""

    def get(self, filepath):
        """Return the raw bytes stored at ``filepath``."""
        with open(str(filepath), 'rb') as f:
            return f.read()

    def get_text(self, filepath):
        """Return the text content stored at ``filepath``."""
        with open(str(filepath), 'r') as f:
            return f.read()
class Logger(object):
    """Tab-separated metric logger.

    Writes a header row via :meth:`set_names` and one formatted row per
    :meth:`append` call.  With ``resume=True`` the existing log file is
    re-read so ``self.numbers`` reflects prior history, then the file is
    reopened in append mode.

    Fixes over the original: ``== None`` identity-style comparisons replaced
    with ``is None``; the resume-time read uses a ``with`` block so the
    handle is closed even if parsing fails; dead ``if self.resume: pass``
    removed; ``enumerate`` no longer used where only the item is needed.
    """

    def __init__(self, fpath, title=None, resume=False):
        self.file = None
        self.resume = resume
        self.title = '' if title is None else title
        if fpath is not None:
            if resume:
                # Re-read the existing log to rebuild metric history, then
                # reopen for appending.
                with open(fpath, 'r') as f:
                    header = f.readline()
                    self.names = header.rstrip().split('\t')
                    self.numbers = {name: [] for name in self.names}
                    for line in f:
                        values = line.rstrip().split('\t')
                        for i, value in enumerate(values):
                            # Values are kept as strings, as in the file.
                            self.numbers[self.names[i]].append(value)
                self.file = open(fpath, 'a')
            else:
                self.file = open(fpath, 'w')

    def set_names(self, names):
        """Write the tab-separated header row and reset per-metric storage."""
        self.names = names
        self.numbers = {name: [] for name in names}
        for name in self.names:
            self.file.write(name)
            self.file.write('\t')
        self.file.write('\n')
        self.file.flush()

    def append(self, numbers):
        """Write one row of metric values (formatted to 6 decimal places)."""
        assert len(self.names) == len(numbers), 'Numbers do not match names'
        for index, num in enumerate(numbers):
            self.file.write('{0:.6f}'.format(num))
            self.file.write('\t')
            self.numbers[self.names[index]].append(num)
        self.file.write('\n')
        self.file.flush()

    def plot(self, names=None):
        """Plot the history of the given metrics (all metrics by default)."""
        names = self.names if names is None else names
        numbers = self.numbers
        for name in names:
            x = np.arange(len(numbers[name]))
            plt.plot(x, np.asarray(numbers[name]))
        plt.legend([self.title + '(' + name + ')' for name in names])
        plt.grid(True)

    def close(self):
        """Close the underlying file, if one is open."""
        if self.file is not None:
            self.file.close()
def sort_by_padding_modified(instances: List[Instance], sorting_keys: List[Tuple[(str, str)]], vocab: Vocabulary, padding_noise: float=0.0) -> List[Instance]:
    """Sort instances by their padding lengths along ``sorting_keys``.

    Indexes each instance, records a synthetic 'sentences' length (number of
    token fields), optionally jitters the lengths with multiplicative noise,
    and returns the instances ordered by the requested keys.
    """
    annotated = []
    for instance in instances:
        instance.index_fields(vocab)
        padding_lengths = instance.get_padding_lengths()
        padding_lengths['sentences'] = {'num_sentences': len(instance.fields['tokens'].field_list)}
        padding_lengths = cast(Dict[(str, Dict[(str, float)])], padding_lengths)
        if padding_noise > 0.0:
            # Jitter lengths so equal-length instances don't always sort
            # identically.
            padding_lengths = {
                field_name: add_noise_to_dict_values(field_lengths, padding_noise)
                for field_name, field_lengths in padding_lengths.items()
            }
        sort_key = [padding_lengths[field_name][padding_key]
                    for field_name, padding_key in sorting_keys]
        annotated.append((sort_key, instance))
    annotated.sort(key=lambda entry: entry[0])
    return [entry[-1] for entry in annotated]
def greedySearch(photo):
    """Greedy caption decoding: repeatedly pick the argmax next word until
    'endseq' or max_length, then return the caption without the start/end
    tokens."""
    caption = 'startseq'
    for _ in range(max_length):
        encoded = [wordtoix[w] for w in caption.split() if w in wordtoix]
        encoded = pad_sequences([encoded], maxlen=max_length)
        probs = model.predict([photo, encoded], verbose=0)
        word = ixtoword[np.argmax(probs)]
        caption += ' ' + word
        if word == 'endseq':
            break
    # Strip the leading 'startseq' and trailing token before joining.
    return ' '.join(caption.split()[1:-1])
def elapsed(function):
    """Run ``function`` and attach its wall-clock duration to the result.

    Returns the elapsed time alone when the function returns None, the
    original tuple extended with the time when it returns a tuple, and a
    (result, time) pair otherwise.
    """
    assert callable(function)
    timer = Timer()
    results = function()
    time = timer.elapsed()
    if results is None:
        return time
    if isinstance(results, tuple):
        return tuple(list(results) + [time])
    return (results, time)
class BaseStorageBackend(metaclass=ABCMeta):
    """Abstract interface for storage backends.

    Subclasses override :meth:`get` (bytes) and :meth:`get_text` (str).
    """

    # Whether this backend permits symlinks.
    _allow_symlink = False

    def name(self):
        """Return the concrete backend's class name."""
        return type(self).__name__

    def allow_symlink(self):
        """Return whether symlinks are allowed by this backend."""
        return self._allow_symlink

    def get(self, filepath):
        """Read raw content from ``filepath``; overridden by subclasses."""
        pass

    def get_text(self, filepath):
        """Read text content from ``filepath``; overridden by subclasses."""
        pass
class FaceProblem(Problem):
    """PCGRL problem whose goal is to reproduce a 32x32 RGB target image
    (Lena) tile-by-tile; the 'face_1' stat is the mean absolute pixel
    distance between the generated map and the target."""

    def __init__(self):
        super().__init__()
        self._width = 32
        self._height = 32
        # Load the target image, downscale it to the map size, and save a
        # reference copy to disk.
        with Image.open('gym_pcgrl/envs/probs/face/lena.jpeg') as im:
            im = im.resize((self._width, self._height))
            self.face_np = np.array(im)
            im.save('face_trg.png')
        self._border_tile = 'solid'
        self._target_path = 20
        self._random_probs = True
        self._reward_weights = {'face_1': 1}
        self.static_trgs = {'face_1': 0}
        self.cond_bounds = {'face_1': (0, 1)}
        # NOTE(review): duplicate assignment of _reward_weights (kept as-is).
        self._reward_weights = {'face_1': 1}
    # The bare string literals below appear to be docstrings displaced above
    # their methods (likely a decompilation artifact); kept byte-identical.
    '\n    Get a list of all the different tile names\n\n    Returns:`\n        string[]: that contains all the tile names\n    '
    def get_tile_types(self):
        return ['r', 'g', 'b']

    def is_continuous(self):
        # Tile values are continuous channel intensities, not discrete ids.
        return True
    '\n    Adjust the parameters for the current problem\n\n    Parameters:\n        width (int): change the width of the problem level\n        height (int): change the height of the problem level\n        probs (dict(string, float)): change the probability of each tile\n        intiialization, the names are "empty", "solid"\n        target_path (int): the current path length that the episode turn when it reaches\n        rewards (dict(string,float)): the weights of each reward change between the new_stats and old_stats\n    '
    def adjust_param(self, **kwargs):
        super().adjust_param(**kwargs)
        self._target_path = kwargs.get('target_path', self._target_path)
        self._random_probs = kwargs.get('random_probs', self._random_probs)
        rewards = kwargs.get('rewards')
        if (rewards is not None):
            # Only update weights for rewards this problem actually tracks.
            for t in rewards:
                if (t in self._reward_weights):
                    self._reward_weights[t] = rewards[t]
    '\n    Resets the problem to the initial state and save the start_stats from the starting map.\n    Also, it can be used to change values between different environment resets\n\n    Parameters:\n        start_stats (dict(string,any)): the first stats of the map\n    '
    def reset(self, start_stats):
        super().reset(start_stats)
    '\n    Get the current stats of the map\n\n    Returns:\n        dict(string,any): stats of the current map to be used in the reward, episode_over, debug_info calculations.\n        The used status are "reigons": number of connected empty tiles, "path-length": the longest path across the map\n    '
    def get_stats(self, map, lenient_paths=False):
        # Mean absolute difference between the normalized target image
        # (channels-first) and the map, averaged over all entries.
        stats = {'face_1': (np.sum(np.abs(((self.face_np.transpose(2, 0, 1) / 255) - map))) / reduce(mul, map.shape))}
        return stats
    '\n    Get the current game reward between two stats\n\n    Parameters:\n        new_stats (dict(string,any)): the new stats after taking an action\n        old_stats (dict(string,any)): the old stats before taking an action\n\n    Returns:\n        float: the current reward due to the change between the old map stats and the new map stats\n    '
    def get_reward(self, new_stats, old_stats):
        rewards = {'face_1': get_range_reward(new_stats['face_1'], old_stats['face_1'], 1, 1)}
        return (rewards['face_1'] * self._reward_weights['face_1'])
    '\n    Uses the stats to check if the problem ended (episode_over) which means reached\n    a satisfying quality based on the stats\n\n    Parameters:\n        new_stats (dict(string,any)): the new stats after taking an action\n        old_stats (dict(string,any)): the old stats before taking an action\n\n    Returns:\n        boolean: True if the level reached satisfying quality based on the stats and False otherwise\n    '
    def get_episode_over(self, new_stats, old_stats):
        # NOTE(review): ends when the distance stat equals exactly 1 — confirm
        # this is the intended termination condition.
        return (new_stats['face_1'] == 1)
    '\n    Get any debug information need to be printed\n\n    Parameters:\n        new_stats (dict(string,any)): the new stats after taking an action\n        old_stats (dict(string,any)): the old stats before taking an action\n\n    Returns:\n        dict(any,any): is a debug information that can be used to debug what is\n        happening in the problem\n    '
    def get_debug_info(self, new_stats, old_stats):
        return {'face_1': (new_stats['face_1'] - self._start_stats['face_1'])}
    '\n    Get an image on how the map will look like for a specific map\n\n    Parameters:\n        map (string[][]): the current game map\n\n    Returns:\n        Image: a pillow image on how the map will look like using the binary graphics\n    '
    def render(self, map):
        # Map is channels-first in [0, 1]; convert to HWC uint8 for PIL.
        map = map.transpose(1, 2, 0)
        return Image.fromarray((map * 255).astype(np.uint8), 'RGB')
class Macaulay2Element(ExtraTabCompletion, ExpectElement, sage.interfaces.abc.Macaulay2Element):
    """Wrapper for a value living in a Macaulay2 interpreter session.

    Most methods build Macaulay2 source strings from ``self.name()`` and
    evaluate them through the pexpect interface (``self.parent()``);
    ``_sage_`` converts the remote value into a native Sage object.
    """

    def _latex_(self):
        # Macaulay2's `tex` wraps output in quotes/dollar signs and doubles
        # backslashes; strip that decoration down to plain LaTeX.
        s = self.tex().external_string().strip('"').strip('$').replace('\\\\', '\\')
        s = s.replace('\\bgroup', '').replace('\\egroup', '')
        return s

    def __iter__(self):
        for i in range(len(self)):
            (yield self[i])

    def __str__(self):
        P = self._check_valid()
        return P.get(self._name)

    def _repr_(self):
        from sage.typeset.ascii_art import empty_ascii_art
        P = self.parent()
        if P.options.after_print:
            # Let Macaulay2 do its own after-print formatting at a width
            # derived from the terminal.
            width = (14 + empty_ascii_art._terminal_width())
            return P.eval(('printWidth=%d;%s' % (width, self._name)))
        return P.eval(('print(wrap(%d,"-",net %s))' % (empty_ascii_art._terminal_width(), self._name)), strip=False)

    def external_string(self):
        """Return Macaulay2's `toExternalString` form of this element."""
        P = self._check_valid()
        code = ('toExternalString(%s)' % self.name())
        X = P.eval(code, strip=True)
        if ('stdio:' in X):
            if ('to external string' in X):
                # Element has no external string form; fall back to printing it.
                return P.eval(('%s' % self.name()))
            raise RuntimeError(('Error evaluating Macaulay2 code.\nIN:%s\nOUT:%s' % (code, X)))
        s = multiple_replace({'\r': '', '\n': ' '}, X)
        return s

    def name(self, new_name=None):
        """Return this element's variable name, or bind it to ``new_name``."""
        if (new_name is None):
            return self._name
        if (not isinstance(new_name, str)):
            raise TypeError('new_name must be a string')
        P = self.parent()
        cmd = '(() -> (\n m := lookup(GlobalReleaseHook, class {0});\n if m =!= null then m(symbol {0}, {0});\n {1} = {0};\n ))()'.format(self._name, new_name)
        ans = P.eval(cmd)
        if (ans.find('stdio:') != (- 1)):
            raise RuntimeError(('Error evaluating Macaulay2 code.\nIN:%s\nOUT:%s' % (cmd, ans)))
        return P._object_class()(P, new_name, is_name=True)

    def __len__(self):
        self._check_valid()
        # `#x` is Macaulay2's length operator.
        return int(str(self.parent()(('#%s' % self.name()))))

    def __getitem__(self, n):
        self._check_valid()
        n = self.parent()(n)
        return self.parent().new(('%s # %s' % (self.name(), n.name())))

    def __setitem__(self, index, value):
        P = self.parent()
        index = P(index)
        value = P(value)
        res = P.eval(('%s # %s = %s' % (self.name(), index.name(), value.name())))
        if ('assignment attempted to element of immutable list' in res):
            raise TypeError('item assignment not supported')

    def __call__(self, x):
        self._check_valid()
        P = self.parent()
        r = P(x)
        return P(('%s %s' % (self.name(), r.name())))

    def __floordiv__(self, x):
        # Quotient in Macaulay2 is `//`; lists/tuples are wrapped in a matrix.
        if isinstance(x, (list, tuple)):
            y = self.parent(x)
            z = self.parent().new(('%s // matrix{%s}' % (self.name(), y.name())))
            return list(z.entries().flatten())
        else:
            return self.parent().new(('%s // %s' % (self.name(), x.name())))

    def __mod__(self, x):
        # Remainder in Macaulay2 is `%` (escaped as %% in the format string).
        if isinstance(x, (list, tuple)):
            y = self.parent(x)
            return self.parent().new(('%s %% matrix{%s}' % (self.name(), y.name())))
        if (not isinstance(x, Macaulay2Element)):
            x = self.parent(x)
        return self.parent().new(('%s %% %s' % (self.name(), x.name())))

    def __bool__(self):
        """Truth value: false only for Macaulay2's `false` or zero."""
        P = self.parent()
        return (P.eval('{0}===false or {0}==0'.format(self._name)) != 'true')

    def sage_polystring(self):
        # Sage/Python exponent syntax is ** rather than ^.
        return self.external_string().replace('^', '**')

    def structure_sheaf(self):
        from sage.misc.superseded import deprecation
        deprecation(27848, 'The function `structure_sheaf` is deprecated. Use `self.sheaf()` instead.')
        return self.parent()(('OO_%s' % self.name()))

    def substitute(self, *args, **kwds):
        # Dispatch to the remote Macaulay2 `substitute` method.
        return self.__getattr__('substitute')(*args, **kwds)
    subs = substitute

    def _tab_completion(self):
        # Ask Macaulay2 for all alphanumeric method names applicable to this
        # element's class (walking up the class hierarchy).
        r = self.parent().eval(('(() -> (\n currentClass := class %s;\n total := {};\n while true do (\n -- Select methods with first argument of the given class\n r := select(methods currentClass, s -> s_1 === currentClass);\n -- Get their names as strings\n r = apply(r, s -> toString s_0);\n -- Keep only alpha-numeric ones\n r = select(r, s -> match("^[[:alnum:]]+$", s));\n -- Add to existing ones\n total = total | select(r, s -> not any(total, e -> e == s));\n if parent currentClass === currentClass then break;\n currentClass = parent currentClass;\n );\n print toString total\n ))()' % self.name()))
        r = sorted(r[1:(- 1)].split(', '))
        return r

    def cls(self):
        """Return this element's Macaulay2 class as an element."""
        return self.parent()(('class %s' % self.name()))

    def after_print_text(self):
        return self.parent().eval(('(lookup({topLevelMode,AfterPrint},' + 'class {0}))({0})'.format(self._name)))

    def dot(self, x):
        """Macaulay2 member access: ``self.x``."""
        parent = self.parent()
        x = parent(x)
        return parent(('%s.%s' % (self.name(), x)))

    def _operator(self, opstr, x):
        # Generic binary-operator application on the Macaulay2 side.
        parent = self.parent()
        x = parent(x)
        return parent(('%s%s%s' % (self.name(), opstr, x.name())))

    def sharp(self, x):
        return self._operator('#', x)

    def starstar(self, x):
        return self._operator('**', x)

    def underscore(self, x):
        return self._operator('_', x)

    def _sage_(self):
        """Convert this Macaulay2 value to a native Sage object.

        Dispatches on the element's class (and class-of-class) string;
        falls back to `sage_eval` on the printed representation.
        """
        repr_str = str(self)
        cls_str = str(self.cls())
        cls_cls_str = str(self.cls().cls())
        if (repr_str == 'ZZ'):
            from sage.rings.integer_ring import ZZ
            return ZZ
        elif (repr_str == 'QQ'):
            from sage.rings.rational_field import QQ
            return QQ
        if (cls_cls_str == 'Type'):
            # The element is itself a parent structure (ring, module, ...).
            if (cls_str == 'List'):
                return [entry._sage_() for entry in self]
            elif (cls_str == 'Matrix'):
                base_ring = self.ring()._sage_()
                return self._matrix_(base_ring)
            elif (cls_str == 'HashTable'):
                return {x._sage_(): y._sage_() for (x, y) in self.pairs()}
            elif (cls_str == 'Ideal'):
                parent = self.ring()._sage_()
                gens = self.gens().entries().flatten()._sage_()
                return parent.ideal(*gens)
            elif (cls_str == 'QuotientRing'):
                ambient = self.ambient()
                if (ambient.external_string() == 'ZZ'):
                    # ZZ/n becomes a finite field GF(n).
                    from sage.rings.integer_ring import ZZ
                    from sage.rings.finite_rings.finite_field_constructor import GF
                    external_string = self.external_string()
                    (zz, n) = external_string.split('/')
                    return GF(ZZ(n))
                else:
                    ambient_ring = ambient._sage_()
                    ideal = self.ideal()._sage_()
                    return ambient_ring.quotient(ideal, names=ambient_ring.variable_names())
            elif (cls_str == 'PolynomialRing'):
                from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
                from sage.rings.polynomial.term_order import inv_macaulay2_name_mapping
                base_ring = self.coefficientRing()._sage_()
                gens = str(self.gens().toString())[1:(- 1)]
                if self.options().sharp('Degrees').any('x -> x != {1}')._sage_():
                    raise ValueError('cannot convert Macaulay2 polynomial ring with non-default degrees to Sage')
                external_string = self.external_string()
                order = None
                if ('MonomialOrder' not in external_string):
                    order = 'degrevlex'
                else:
                    for order_name in inv_macaulay2_name_mapping:
                        if (order_name in external_string):
                            order = inv_macaulay2_name_mapping[order_name]
                if ((len(gens) > 1) and (order is None)):
                    raise ValueError("cannot convert Macaulay2's term order to a Sage term order")
                return PolynomialRing(base_ring, order=order, names=gens)
            elif (cls_str == 'GaloisField'):
                from sage.rings.integer_ring import ZZ
                from sage.rings.finite_rings.finite_field_constructor import GF
                (gf, n) = repr_str.split(' ')
                n = ZZ(n)
                if n.is_prime():
                    return GF(n)
                else:
                    gen = str(self.gens())[1:(- 1)]
                    return GF(n, gen)
            elif (cls_str == 'Boolean'):
                if (repr_str == 'true'):
                    return True
                elif (repr_str == 'false'):
                    return False
            elif (cls_str == 'String'):
                return str(repr_str)
            elif (cls_str == 'Module'):
                from sage.modules.free_module import FreeModule
                if self.isFreeModule()._sage_():
                    ring = self.ring()._sage_()
                    rank = self.rank()._sage_()
                    return FreeModule(ring, rank)
            elif (cls_str in ('Graph', 'Digraph')):
                if (cls_str == 'Graph'):
                    from sage.graphs.graph import Graph
                    graph_cls = Graph
                else:
                    from sage.graphs.digraph import DiGraph
                    graph_cls = DiGraph
                adj_mat = self.adjacencyMatrix().sage()
                g = graph_cls(adj_mat, format='adjacency_matrix')
                g.relabel(self.vertices())
                return g
            elif (cls_str == 'ChainComplex'):
                from sage.homology.chain_complex import ChainComplex
                ring = self.ring()._sage_()
                dd = self.dot('dd')
                degree = dd.degree()._sage_()
                a = self.min()._sage_()
                b = self.max()._sage_()
                matrices = {i: dd.underscore(i)._matrix_(ring) for i in range(a, (b + 1))}
                return ChainComplex(matrices, degree=degree)
            elif (cls_str == 'ChainComplexMap'):
                from sage.homology.chain_complex_morphism import ChainComplexMorphism
                ring = self.ring()._sage_()
                source = self.source()
                a = source.min()._sage_()
                b = source.max()._sage_()
                degree = self.degree()._sage_()
                matrices = {i: self.underscore(i)._matrix_(ring) for i in range(a, (b + 1))}
                C = source._sage_()
                # The target is shifted by the map's degree.
                D = self.target()._operator(' ', ('[%s]' % degree))._sage_()
                return ChainComplexMorphism(matrices, C, D)
        else:
            # The element is an element of some parent structure.
            if (cls_str == 'ZZ'):
                from sage.rings.integer_ring import ZZ
                return ZZ(repr_str)
            elif (cls_str == 'QQ'):
                from sage.rings.rational_field import QQ
                repr_str = self.external_string()
                if ('/' not in repr_str):
                    repr_str = (repr_str + '/1')
                return QQ(repr_str)
            m2_parent = self.cls()
            parent = m2_parent._sage_()
            if (cls_cls_str in ('PolynomialRing', 'QuotientRing')):
                return parent(self.external_string())
            elif (cls_cls_str == 'Module'):
                entries = self.entries()._sage_()
                return parent._element_constructor_(entries)
        # Last resort: evaluate the printed form as Sage source.
        from sage.misc.sage_eval import sage_eval
        try:
            return sage_eval(repr_str)
        except Exception:
            raise NotImplementedError(('cannot convert %s to a Sage object' % repr_str))
    to_sage = deprecated_function_alias(27848, ExpectElement.sage)

    def _matrix_(self, R):
        """Return this element as a Sage matrix over ``R``."""
        from sage.matrix.constructor import matrix
        m = matrix(R, self.entries()._sage_())
        if (not m.nrows()):
            # Preserve the column count for 0-row matrices.
            return matrix(R, 0, self.numcols()._sage_())
        return m
class GCN(nn.Module):
    """Single graph-convolution layer computing ``adj @ (inputs @ weight)``."""

    def __init__(self, in_features, out_features, bias=True):
        super(GCN, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.empty([in_features, out_features], dtype=torch.float), requires_grad=True)
        if bias:
            self.bias = Parameter(torch.empty([out_features], dtype=torch.float))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        """Uniform init in [-1/sqrt(out_features), 1/sqrt(out_features)]."""
        bound = 1.0 / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-bound, bound)
        if self.bias is not None:
            self.bias.data.uniform_(-bound, bound)

    def forward(self, adj, inputs, identity=False):
        """Propagate features over the graph.

        When ``identity`` is set, the input features are skipped and the
        weight matrix itself is propagated.
        """
        support = self.weight if identity else torch.matmul(inputs, self.weight)
        return torch.matmul(adj, support)
class RL2NPO(NPO):
    """NPO variant for RL^2-style training: the baseline is fitted on the
    freshly collected batch *before* baseline predictions are computed."""

    def optimize_policy(self, samples_data):
        """Run one policy-optimization step and log diagnostics.

        Parameters:
            samples_data (dict): batch of rollouts; usage here requires
                'paths', 'returns' and 'valids' entries (full schema assumed
                from the sampler — confirm against callers).
        """
        # Fit first so the baseline reflects the current batch.
        self._fit_baseline_with_data(samples_data)
        samples_data['baselines'] = self._get_baseline_prediction(samples_data)
        policy_opt_input_values = self._policy_opt_input_values(samples_data)
        logger.log('Computing loss before')
        loss_before = self._optimizer.loss(policy_opt_input_values)
        logger.log('Computing KL before')
        policy_kl_before = self._f_policy_kl(*policy_opt_input_values)
        logger.log('Optimizing')
        self._optimizer.optimize(policy_opt_input_values)
        logger.log('Computing KL after')
        policy_kl = self._f_policy_kl(*policy_opt_input_values)
        logger.log('Computing loss after')
        loss_after = self._optimizer.loss(policy_opt_input_values)
        tabular.record('{}/LossBefore'.format(self.policy.name), loss_before)
        tabular.record('{}/LossAfter'.format(self.policy.name), loss_after)
        tabular.record('{}/dLoss'.format(self.policy.name), (loss_before - loss_after))
        tabular.record('{}/KLBefore'.format(self.policy.name), policy_kl_before)
        tabular.record('{}/KL'.format(self.policy.name), policy_kl)
        pol_ent = self._f_policy_entropy(*policy_opt_input_values)
        tabular.record('{}/Entropy'.format(self.policy.name), np.mean(pol_ent))
        ev = np_tensor_utils.explained_variance_1d(samples_data['baselines'], samples_data['returns'], samples_data['valids'])
        tabular.record('{}/ExplainedVariance'.format(self._baseline.name), ev)
        # Sync the "old" policy (KL reference) with the freshly updated one.
        self._old_policy.model.parameters = self.policy.model.parameters

    def _get_baseline_prediction(self, samples_data):
        """Predict baselines per path and pad them to max_path_length."""
        paths = samples_data['paths']
        baselines = [self._baseline.predict(path) for path in paths]
        return np_tensor_utils.pad_tensor_n(baselines, self.max_path_length)
class VenmoCheckBalance(VirtualFunctionTool):
    """Virtual tool that reports the user's current Venmo balance."""
    name = 'VenmoCheckBalance'
    summary = "Check the User's Venmo balance."
    # No input arguments are required.
    parameters: List[ArgParameter] = []
    # Single numeric return value.
    returns: List[ArgReturn] = [{'name': 'balance', 'type': 'number', 'description': "The User's current Venmo balance."}]
    # No declared failure modes.
    exceptions: List[ArgException] = []
def pre_hook_factory(fn) -> PreHook:
    """Wrap a plain callable ``fn`` in a PreHook instance that forwards
    all call arguments to it."""
    class _WrappedPreHook(PreHook):
        def __call__(self, *args, **kwargs):
            return fn(*args, **kwargs)

    return _WrappedPreHook()
def list_files_test():
    """Smoke-test utils.list_files by printing the .zip files found under a
    fixed test directory."""
    target_dir = '~/test/test_module'
    matches = utils.list_files(target_dir, '.zip')
    print(matches)
def Radial_Basis_Function(student, teacher, ts, n):
    """RBF-kernel distillation loss between student and teacher layer pairs.

    For each adjacent layer pair, aligns the vectors via Align_rsv
    (presumably singular-vector alignment — confirm), builds pairwise
    Gaussian-kernel matrices over the aligned vectors, and penalises the L2
    difference between the student kernel and the gradient-stopped teacher
    kernel.

    Args:
        student, teacher: per-layer tensor lists, index-aligned.
        ts: per-layer alignment tensors consumed by Align_rsv.
        n: upper bound on the number of components kept per pair.

    Returns:
        Scalar TF tensor: the sum of all per-pair RBF losses.
    """
    loss = []
    for l in range((len(student) - 1)):
        with tf.variable_scope(('RBF_node%d' % l)):
            with tf.variable_scope('weighted_V'):
                (svt, svb) = student[l:(l + 2)]
                (tvt, tvb) = teacher[l:(l + 2)]
                t_sz = svt.get_shape().as_list()
                b_sz = svb.get_shape().as_list()
                tb_sz = tvb.get_shape().as_list()
                # Clamp n to what both networks actually provide.
                n = min(n, b_sz[2], tb_sz[2])
                (tst, tsb) = ts[l:(l + 2)]
                (svt, tvt) = Align_rsv(svt, tvt, tst, n)
                (svb, tvb) = Align_rsv(svb, tvb, tsb, n)
                (svt, tvt, svb, tvb) = removenan_pair([svt, tvt, svb, tvb])
            with tf.variable_scope('RBF'):
                # Reshape for broadcasting over all (top, bottom) pairs, then
                # apply a Gaussian kernel with fixed bandwidth (denominator 8).
                svt = tf.reshape(svt, [t_sz[0], (- 1), 1, n])
                svb = tf.reshape(svb, [b_sz[0], 1, (- 1), n])
                tvt = tf.reshape(tvt, [t_sz[0], (- 1), 1, n])
                tvb = tf.reshape(tvb, [b_sz[0], 1, (- 1), n])
                s_rbf = tf.exp(((- tf.square((svt - svb))) / 8))
                t_rbf = tf.exp(((- tf.square((tvt - tvb))) / 8))
                # Teacher kernel is a fixed target (stop_gradient).
                rbf_loss = (tf.nn.l2_loss((s_rbf - tf.stop_gradient(t_rbf))) * np.sqrt(n))
                loss.append(rbf_loss)
    return tf.add_n(loss)
def calculate_i3d_activations(video1, video2, i3d_model, device):
    """Compute flattened I3D activation vectors for two videos (used for
    FID-style video comparison)."""
    def _activations(video):
        batch = to_tensors()(video).unsqueeze(0).to(device)
        return get_i3d_activations(batch, i3d_model).cpu().numpy().flatten()

    return (_activations(video1), _activations(video2))
class NotNode(UnopNode):
    """'not' operator node: coerces its operand to boolean and yields a
    C bint result."""
    operator = '!'
    type = PyrexTypes.c_bint_type

    def calculate_constant_result(self):
        # Constant-fold `not <const>` when the operand is itself constant.
        self.constant_result = (not self.operand.constant_result)

    def compile_time_value(self, denv):
        operand = self.operand.compile_time_value(denv)
        try:
            return (not operand)
        except Exception as e:
            # Report evaluation failures through the standard error channel.
            self.compile_time_value_error(e)

    def infer_unop_type(self, env, operand_type):
        # `not` always produces a C boolean regardless of operand type.
        return PyrexTypes.c_bint_type

    def analyse_types(self, env):
        self.operand = self.operand.analyse_types(env)
        operand_type = self.operand.type
        if operand_type.is_cpp_class:
            # C++ classes may overload operator!; analyse as a C++ operation.
            self.analyse_cpp_operation(env)
        else:
            self.operand = self.operand.coerce_to_boolean(env)
        return self

    def calculate_result_code(self):
        # Emit the C negation of the operand's result expression.
        return ('(!%s)' % self.operand.result())
def enter_shell():
    """Drop the user into an interactive shell with the build environment.

    On Windows, prefers PowerShell (with a custom profile) or cmd.exe.
    On POSIX, resolves the user's login shell and exec()s into it, loading
    a custom rc file for bash/zsh.

    Bug fix: the original took the basename of ``pw_shell`` *twice* and then
    passed that basename as the shell's executable path, so non-standard
    login shells could never be exec'd by absolute path.
    """
    cmake_args.writeback()
    misc.info('Entering shell...')
    if (platform.system() == 'Windows'):
        shell = (_find_shell() or Shell('cmd.exe', 'cmd.exe'))
        if (shell.name in ('pwsh.exe', 'powershell.exe')):
            pwsh = Command(shell.exe)
            path = _write_ti_pwshrc()
            pwsh('-ExecutionPolicy', 'Bypass', '-NoExit', '-File', str(path))
        elif (shell.name == 'cmd.exe'):
            cmd = Command(shell.exe)
            cmd('/k', 'set', 'PROMPT=TaichiBuild $P$G')
        else:
            os.execl(shell.exe, shell.exe)
    else:
        shell = (_find_shell() or Shell('bash', '/bin/bash'))
        if (shell.name not in ('sh', 'bash', 'zsh')):
            import pwd
            # Use the full login-shell path from the passwd entry; only the
            # display name is its basename.
            path = pwd.getpwuid(os.getuid()).pw_shell
            name = path.split('/')[(- 1)]
            shell = Shell(name, path)
        if (shell.name == 'bash'):
            path = _write_ti_bashrc()
            os.execl(shell.exe, shell.exe, '--rcfile', str(path))
        elif (shell.name == 'zsh'):
            path = _write_ti_zshrc()
            env = os.environ.copy()
            # zsh reads its rc files from $ZDOTDIR.
            env['ZDOTDIR'] = str(path)
            os.execle(shell.exe, shell.exe, env)
        else:
            os.execl(shell.exe, shell.exe)
# Fix: the four leading `.parametrize(...)` lines were bare expressions
# starting with a dot — a syntax error.  They are restored as the standard
# `@pytest.mark.parametrize` decorators this pattern corresponds to.
@pytest.mark.parametrize('ctx, func_name', ctxs_rand)
@pytest.mark.parametrize('low, high', [(0, 1), ((- 2.5), 100), (0.1, 0.11)])
@pytest.mark.parametrize('shape', [[], [5], [100, 100]])
@pytest.mark.parametrize('seed', [(- 1), 313])
def test_rand_forward(seed, ctx, func_name, low, high, shape):
    """F.rand must produce the requested shape, name the right backend
    function, and emit values inside [low, high]."""
    with nn.context_scope(ctx):
        o = F.rand(low, high, shape, seed=seed)
    assert (o.shape == tuple(shape))
    assert (o.parent.name == func_name)
    o.forward()
    assert np.all((o.d <= high))
    assert np.all((o.d >= low))
    func_args = [low, high, shape, seed]
    recomputation_test(rng=None, func=F.rand, vinputs=[], func_args=func_args, func_kwargs={}, ctx=ctx)
def rightAttach(node):
    """Detach the first child as node's left child and wrap the remaining
    children into a new right-hand 'Nucleus' span node.

    Returns the newly created right child.
    """
    node.lnode = node.nodelist.pop(0)
    span = data.SpanNode('Nucleus')
    span.nodelist += node.nodelist
    # The new span covers from the first remaining child's start EDU to the
    # last one's end EDU.
    first_edu = span.nodelist[0].eduspan[0]
    last_edu = span.nodelist[-1].eduspan[1]
    span.eduspan = (first_edu, last_edu)
    node.rnode = span
    span._id = node._id + 100
    node.lnode.pnode = node
    node.rnode.pnode = node
    return span
class ResidualBlock(nn.Module):
    """Conv-IN-ReLU-Conv-IN block with an identity skip connection."""

    def __init__(self, dim_in, dim_out):
        super(ResidualBlock, self).__init__()
        layers = [
            nn.Conv2d(dim_in, dim_out, kernel_size=3, stride=1, padding=1, bias=False),
            nn.InstanceNorm2d(dim_out, affine=True, track_running_stats=True),
            nn.ReLU(inplace=True),
            nn.Conv2d(dim_out, dim_out, kernel_size=3, stride=1, padding=1, bias=False),
            nn.InstanceNorm2d(dim_out, affine=True, track_running_stats=True),
        ]
        self.main = nn.Sequential(*layers)

    def forward(self, x):
        """Return the input plus the transformed features (elementwise add)."""
        return x + self.main(x)
def read_in_para_lengths(corpus_dir: str, output_dir: str):
    """Read every document under ``corpus_dir``, clean its lines, split it
    into paragraphs, and pickle the per-document paragraph statistics.

    Args:
        corpus_dir: directory tree containing the raw documents.
        output_dir: directory receiving the three output pickles.

    Returns:
        (lengths, dict_paragraphs, failed_files): per-document length stats,
        the paragraph dicts, and the files that could not be parsed.

    Fixes over the original: string comparisons used ``is not`` (identity,
    not equality — wrong and a SyntaxWarning on CPython); files were opened
    relative to ``corpus_dir`` instead of the ``root`` yielded by
    ``os.walk``, which breaks on subdirectories.
    """
    lengths = {}
    dict_paragraphs = {}
    failed_files = []
    for (root, dirs, files) in os.walk(corpus_dir):
        for file in files:
            # Open relative to the directory currently being walked.
            with open(os.path.join(root, file), 'r') as f:
                lines = f.readlines()
            # Drop blank / whitespace-only lines.
            lines = [line.strip() for line in lines if ((line.strip('\n') != ' ') and (line.strip() != ''))]
            # Drop suppressed-fragment markers and lines left empty by them.
            lines = [line.replace('<FRAGMENT_SUPPRESSED>', '').strip() for line in lines if (line.replace('<FRAGMENT_SUPPRESSED>', '').strip() != '')]
            # Remove non-breaking spaces.
            lines = [line.replace('\xa0', '').strip() for line in lines if (line.replace('\xa0', '').strip() != '')]
            # Drop lines that contain only punctuation.
            lines = [line for line in lines if (re.sub('[^\\w\\s]', '', line) != '')]
            paragraphs = lines_to_paragraphs(lines)
            if paragraphs:
                paragraphs = only_english(paragraphs)
                paragraphs = only_string_in_dict(paragraphs)
                (no_intro, no_summ, lengths_para) = count_doc(paragraphs)
                lengths.update({file.split('.')[0]: {'intro': no_intro, 'summary': no_summ, 'lengths_paragraphs': lengths_para}})
                dict_paragraphs.update({file.split('.')[0]: paragraphs})
            else:
                print('reading in of file {} doesnt work'.format(file))
                failed_files.append(file)
    with open(os.path.join(output_dir, 'corpus_lengths.pickle'), 'wb') as f:
        pickle.dump(lengths, f)
    with open(os.path.join(output_dir, 'corpus_paragraphs.pickle'), 'wb') as f:
        pickle.dump(dict_paragraphs, f)
    with open(os.path.join(output_dir, 'corpus_failed_files.pickle'), 'wb') as f:
        pickle.dump(failed_files, f)
    return (lengths, dict_paragraphs, failed_files)
def outputids2words(id_list, vocab, article_oovs):
    """Map model output ids back to words, resolving OOV ids via the
    article's OOV list (pointer-generator mode).

    Args:
        id_list: list of integer word ids produced by the model.
        vocab: vocabulary object exposing ``id2word(i)`` (raising ValueError
            for unknown ids) and ``size()``.
        article_oovs: list of in-article OOV words, or None in baseline
            (no pointer-generator) mode.

    Returns:
        List of word strings.

    Raises:
        ValueError: if an id is outside both the vocabulary and the
            article's OOV range.
    """
    words = []
    for i in id_list:
        try:
            w = vocab.id2word(i)
        except ValueError:
            assert article_oovs is not None, "Error: model produced a word ID that isn't in the vocabulary. This should not happen in baseline (no pointer-generator) mode"
            article_oov_idx = i - vocab.size()
            try:
                w = article_oovs[article_oov_idx]
            except IndexError:
                # Bug fix: list indexing raises IndexError, not ValueError
                # (which the original caught and therefore never handled);
                # re-raise as a descriptive ValueError.
                raise ValueError('Error: model produced word ID %i which corresponds to article OOV %i but this example only has %i article OOVs' % (i, article_oov_idx, len(article_oovs)))
        words.append(w)
    return words
# Fix: the leading `(deprecated_in=..., ...)` line was a bare parenthesized
# expression with keyword arguments — a syntax error.  It is restored as the
# `@deprecated(...)` decorator (from the `deprecation` package) whose
# signature these keywords match; confirm the exact decorator name upstream.
@deprecated(deprecated_in='0.19.7', removed_in='0.21.0', current_version=__version__, details='Loading resources in the client code is no longer required')
def load_resources(name, required_resources=None):
    """Deprecated shim: forward to snips_nlu.resources.load_resources."""
    from snips_nlu.resources import load_resources as _load_resources
    return _load_resources(name, required_resources)
def validate_network(val_loader, model, linear_classifier):
    """Evaluate a linear classifier on top of frozen backbone features.

    Updates the module-level ``best_acc`` and, on rank 0, logs a summary.

    Returns:
        (mean loss, top-1 accuracy, top-5 accuracy) over the loader.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    global best_acc
    model.eval()
    linear_classifier.eval()
    criterion = nn.CrossEntropyLoss().cuda()
    with torch.no_grad():
        end = time.perf_counter()
        for (i, (inp, target)) in enumerate(val_loader):
            inp = inp.cuda(non_blocking=True)
            target = target.cuda(non_blocking=True)
            # Backbone features feed directly into the linear head.
            output = linear_classifier(model(inp))
            loss = criterion(output, target)
            (acc1, acc5) = accuracy(output, target, topk=(1, 5))
            losses.update(loss.item(), inp.size(0))
            top1.update(acc1[0], inp.size(0))
            top5.update(acc5[0], inp.size(0))
            batch_time.update((time.perf_counter() - end))
            end = time.perf_counter()
    if (top1.avg.item() > best_acc):
        best_acc = top1.avg.item()
    if (args.rank == 0):
        # NOTE(review): the `\ ` in this format string looks like a mangled
        # separator (a field label may have been lost) — confirm upstream.
        logger.info('Test:\tTime {batch_time.avg:.3f}\tLoss {loss.avg:.4f}\ {top1.avg:.3f}\tBest so far {acc:.1f}'.format(batch_time=batch_time, loss=losses, top1=top1, acc=best_acc))
    return (losses.avg, top1.avg.item(), top5.avg.item())
def boolean_string(s):
    """Parse the exact strings 'True'/'False' into booleans.

    Raises:
        ValueError: for any other input.
    """
    if s == 'True':
        return True
    if s == 'False':
        return False
    raise ValueError('Not a valid boolean string')
def test_validator_combine_objectives_no_problem():
    """combine_objectives must honour alphas and normalization independently,
    and default to a plain sum when neither is given."""
    validator = Validator(model, dataloader, metrics, objectives)
    assert validator.combine_objectives(obj_results, alphas, max_normalization) == 0.505
    assert validator.combine_objectives(obj_results, None, max_normalization) == 1.75
    assert validator.combine_objectives(obj_results, alphas, None) == 0.51
    assert validator.combine_objectives(obj_results) == sum(obj_results)
def LinIntIndicesAndWeights(ij, NiNj):
    """Bilinear-interpolation stencil for the fractional grid point (i, j).

    Args:
        ij: (i, j) fractional coordinates.
        NiNj: (Ni, Nj) grid dimensions.

    Returns:
        List of [row, col, weight] triples for the up-to-four neighbouring
        grid nodes that fall inside the grid (weights sum to 1 when all
        four are in bounds).
    """
    i, j = ij
    Ni, Nj = NiNj
    i1, j1 = int(m.floor(i)), int(m.floor(j))
    i2, j2 = i1 + 1, j1 + 1
    # Fractional offsets within the cell.
    ti, tj = i - i1, j - j1
    candidates = [
        (i1, j1, (1 - ti) * (1 - tj)),
        (i1, j2, (1 - ti) * tj),
        (i2, j1, ti * (1 - tj)),
        (i2, j2, ti * tj),
    ]
    # Keep only nodes inside the grid, preserving the w11/w12/w21/w22 order.
    return [[a, b, w] for (a, b, w) in candidates if (0 <= a < Ni) and (0 <= b < Nj)]
def get_overload_name_mapping(overload_info):
    """Map each original function's name to the names of its overloads.

    Args:
        overload_info: mapping from original function objects to iterables
            of (overload_name, overload) pairs.

    Returns:
        Dict mapping each original function's ``__name__`` to the list of
        its overload names, in iteration order.
    """
    overload_name_mappings: Dict[str, List[str]] = {}
    for orig_fn, overloads in overload_info.items():
        # setdefault replaces the manual membership check + initialization.
        names = overload_name_mappings.setdefault(orig_fn.__name__, [])
        names.extend(overload_name for overload_name, _ in overloads)
    return overload_name_mappings
class PinholeCamera():
    """Container for pinhole-camera intrinsics (image size, focal lengths,
    principal point).

    Fixes over the original: ``from_intrinsic`` calls ``cls(...)`` but was
    missing ``@classmethod`` (so ``cls`` received the width argument);
    ``size`` returned the bound method object ``self.shape`` instead of
    calling it.
    """

    def __init__(self, width, height, fx, fy, cx, cy):
        self.width = int(width)
        self.height = int(height)
        self.fx = fx
        self.fy = fy
        self.cx = cx
        self.cy = cy

    def __str__(self):
        string = f'width: {self.width}, height: {self.height}, fx: {self.fx}, fy: {self.fy}, cx: {self.cx}, cy: {self.cy}'
        return string

    @classmethod
    def from_intrinsic(cls, width, height, mat):
        """Alternate constructor from a 3x3 intrinsic matrix."""
        fx = mat[(0, 0)]
        fy = mat[(1, 1)]
        cx = mat[(0, 2)]
        cy = mat[(1, 2)]
        return cls(width, height, fx, fy, cx, cy)

    def shape(self):
        """Return (height, width)."""
        return (self.height, self.width)

    def size(self):
        """Alias for :meth:`shape`."""
        return self.shape()

    def intrinsic_matrix(self):
        """Return the 3x3 intrinsic matrix K."""
        mat = np.array([[self.fx, 0.0, self.cx], [0.0, self.fy, self.cy], [0.0, 0.0, 1.0]])
        return mat
_module()
# NOTE(review): the bare `_module()` call above looks like a mangled registry
# decorator (e.g. `@PIPELINES.register_module()`) — confirm against upstream.
class LoadImageFromNdarray(LoadImageFromFile):
    """Pipeline step that takes the image directly from ``results['img']``
    instead of reading it from disk."""

    def __call__(self, results):
        # The incoming image must already be uint8.
        assert (results['img'].dtype == 'uint8')
        img = results['img']
        # Convert channel layout to match the requested color type.
        if ((self.color_type == 'grayscale') and (img.shape[2] == 3)):
            img = mmcv.bgr2gray(img, keepdim=True)
        if ((self.color_type == 'color') and (img.shape[2] == 1)):
            img = mmcv.gray2bgr(img)
        if self.to_float32:
            img = img.astype(np.float32)
        # No file involved, so the filename fields are cleared.
        results['filename'] = None
        results['ori_filename'] = None
        results['img'] = img
        results['img_shape'] = img.shape
        results['ori_shape'] = img.shape
        results['img_fields'] = ['img']
        return results
def train_and_evaluate_pos(train_data, train_labels, val_data, val_labels, test_data=None, test_labels=None, parser_output_path=None, perl_script_path=None):
    """Train a structured-perceptron POS tagger and report accuracies.

    Always evaluates on the validation split; additionally evaluates on the
    test split when both ``test_data`` and ``test_labels`` are given.
    ``parser_output_path`` / ``perl_script_path`` are accepted for interface
    compatibility but unused here.

    Returns:
        (val_accuracy, test_accuracy), with test_accuracy None when no test
        split was supplied.
    """
    print(('Training the tagger on %d examples...' % len(train_data)))
    tagger = StructuredPerceptron()
    paired_training_data = list(zip(train_data, train_labels))
    iterations, learning_rate = 5, 0.2
    tagger.fit(paired_training_data, iterations=iterations, learning_rate=learning_rate)
    val_accuracy = pos_accuracy_score(val_labels, tagger.predict(val_data))
    print(('Val acc: %.5f' % val_accuracy))
    test_accuracy = None
    if (test_data is not None) and (test_labels is not None):
        test_accuracy = pos_accuracy_score(test_labels, tagger.predict(test_data))
        print(('Test acc: %.5f' % test_accuracy))
    return (val_accuracy, test_accuracy)
class OptimizationProblem():
    """Bundle an objective with optional equality/inequality constraints.

    Evaluation is delegated to the wrapped OptimizationFunction objects;
    constraints are evaluated in a single parallel batch and split back
    into their (equality, inequality) parts.
    """

    def __init__(self, obj: OptimizationFunction, cons_eq: List[OptimizationFunction]=None, cons_ineq: List[OptimizationFunction]=None):
        self.obj = obj
        # Normalize missing constraint lists to fresh empty lists (never
        # share a mutable default between instances).
        self.cons_eq = [] if cons_eq is None else cons_eq
        self.cons_ineq = [] if cons_ineq is None else cons_ineq

    def calculate_objective_function(self, param):
        """Objective value at ``param``."""
        return self.obj.calculate_objective_function(param)

    def calculate_gradient(self, param):
        """Objective gradient at ``param``."""
        return self.obj.calculate_gradient(param)

    def calculate_constraints(self, param):
        """Evaluate all constraints at once; equality constraints come first
        in the batch, so the result splits at ``len(self.cons_eq)``."""
        values = calculate_objective_parallel(self.cons_eq + self.cons_ineq, param)
        n_eq = len(self.cons_eq)
        return (np.array(values[:n_eq]), np.array(values[n_eq:]))

    def calculate_constraint_gradients(self, param):
        """Batched constraint gradients, split into (equality, inequality)."""
        grads = calculate_gradient_parallel(self.cons_eq + self.cons_ineq, param)
        n_eq = len(self.cons_eq)
        return (grads[:n_eq], grads[n_eq:])

    def get_objective(self):
        return self.obj

    def get_equality_constraints(self):
        return self.cons_eq

    def get_inequality_constraints(self):
        return self.cons_ineq
def vad_collector(sample_rate, frame_duration_ms, padding_duration_ms, vad, frames):
    """Yield voiced audio segments detected by ``vad`` from a frame stream.

    A ring buffer holding ``padding_duration_ms`` worth of frames implements
    hysteresis: collection is TRIGGERED once more than 90% of the buffered
    frames are voiced, and UNTRIGGERED (flushing one segment) once more than
    90% are unvoiced.

    Yields:
        [joined_pcm_bytes, start_timestamp, end_timestamp] per segment;
        a final partial segment is flushed at end of stream.
    """
    num_padding_frames = int((padding_duration_ms / frame_duration_ms))
    # Sliding window of (frame, is_speech) over the most recent frames.
    ring_buffer = collections.deque(maxlen=num_padding_frames)
    # True while inside a voiced segment.
    triggered = False
    voiced_frames = []
    for frame in frames:
        is_speech = vad.is_speech(frame.bytes, sample_rate)
        if (not triggered):
            ring_buffer.append((frame, is_speech))
            num_voiced = len([f for (f, speech) in ring_buffer if speech])
            # Enter the voiced state once the window is dominated by speech,
            # emitting the buffered padding frames as part of the segment.
            if (num_voiced > (0.9 * ring_buffer.maxlen)):
                triggered = True
                for (f, _) in ring_buffer:
                    voiced_frames.append(f)
                ring_buffer.clear()
        else:
            voiced_frames.append(frame)
            ring_buffer.append((frame, is_speech))
            num_unvoiced = len([f for (f, speech) in ring_buffer if (not speech)])
            # Leave the voiced state once the window is dominated by silence,
            # and flush the collected segment.
            if (num_unvoiced > (0.9 * ring_buffer.maxlen)):
                triggered = False
                (yield [b''.join([f.bytes for f in voiced_frames]), voiced_frames[0].timestamp, voiced_frames[(- 1)].timestamp])
                ring_buffer.clear()
                voiced_frames = []
    # Flush any trailing, still-open segment.
    if voiced_frames:
        (yield [b''.join([f.bytes for f in voiced_frames]), voiced_frames[0].timestamp, voiced_frames[(- 1)].timestamp])
# NOTE(review): the bare tuples below look like stripped command-line option
# decorator arguments (e.g. @click.option(...)), and the bare "_experiment"
# like a stripped @wrap_experiment decorator — extraction artifacts; confirm
# against the original garage example script.
()
('--num_epochs', default=1000)
('--num_train_tasks', default=45)
('--num_test_tasks', default=5)
('--encoder_hidden_size', default=200)
('--net_size', default=300)
('--num_steps_per_epoch', default=4000)
('--num_initial_steps', default=4000)
('--num_steps_prior', default=750)
('--num_extra_rl_steps_posterior', default=750)
('--batch_size', default=256)
('--embedding_batch_size', default=64)
('--embedding_mini_batch_size', default=64)
('--max_path_length', default=150)
_experiment
def pearl_metaworld_ml45(ctxt=None, seed=1, num_epochs=1000, num_train_tasks=45, num_test_tasks=5, latent_size=7, encoder_hidden_size=200, net_size=300, meta_batch_size=16, num_steps_per_epoch=4000, num_initial_steps=4000, num_tasks_sample=15, num_steps_prior=750, num_extra_rl_steps_posterior=750, batch_size=256, embedding_batch_size=64, embedding_mini_batch_size=64, max_path_length=150, reward_scale=10.0, use_gpu=False):
    """Train PEARL meta-RL on the MetaWorld ML45 benchmark.

    Builds train/test environment samplers from the ML45 task sets, wires up
    the Q-function, value function, policy and PEARL algorithm, and runs
    ``num_epochs`` of training via a LocalRunner.
    """
    set_seed(seed)
    # Three equal-width hidden layers for the context encoder.
    encoder_hidden_sizes = (encoder_hidden_size, encoder_hidden_size, encoder_hidden_size)
    # One normalized environment per ML45 train/test task.
    ml45_train_envs = [GarageEnv(normalize(mwb.ML45.from_task(task_name))) for task_name in mwb.ML45.get_train_tasks().all_task_names]
    ml45_test_envs = [GarageEnv(normalize(mwb.ML45.from_task(task_name))) for task_name in mwb.ML45.get_test_tasks().all_task_names]
    env_sampler = EnvPoolSampler(ml45_train_envs)
    env_sampler.grow_pool(num_train_tasks)
    env = env_sampler.sample(num_train_tasks)
    test_env_sampler = EnvPoolSampler(ml45_test_envs)
    test_env_sampler.grow_pool(num_test_tasks)
    runner = LocalRunner(ctxt)
    # Q-function and policy see the env spec augmented with the latent code.
    augmented_env = PEARL.augment_env_spec(env[0](), latent_size)
    qf = ContinuousMLPQFunction(env_spec=augmented_env, hidden_sizes=[net_size, net_size, net_size])
    vf_env = PEARL.get_env_spec(env[0](), latent_size, 'vf')
    vf = ContinuousMLPQFunction(env_spec=vf_env, hidden_sizes=[net_size, net_size, net_size])
    inner_policy = TanhGaussianMLPPolicy(env_spec=augmented_env, hidden_sizes=[net_size, net_size, net_size])
    pearl = PEARL(env=env, policy_class=ContextConditionedPolicy, encoder_class=MLPEncoder, inner_policy=inner_policy, qf=qf, vf=vf, num_train_tasks=num_train_tasks, num_test_tasks=num_test_tasks, latent_dim=latent_size, encoder_hidden_sizes=encoder_hidden_sizes, test_env_sampler=test_env_sampler, meta_batch_size=meta_batch_size, num_steps_per_epoch=num_steps_per_epoch, num_initial_steps=num_initial_steps, num_tasks_sample=num_tasks_sample, num_steps_prior=num_steps_prior, num_extra_rl_steps_posterior=num_extra_rl_steps_posterior, batch_size=batch_size, embedding_batch_size=embedding_batch_size, embedding_mini_batch_size=embedding_mini_batch_size, max_path_length=max_path_length, reward_scale=reward_scale)
    set_gpu_mode(use_gpu, gpu_id=0)
    if use_gpu:
        pearl.to()
    runner.setup(algo=pearl, env=env[0](), sampler_cls=LocalSampler, sampler_args=dict(max_path_length=max_path_length), n_workers=1, worker_class=PEARLWorker)
    runner.train(n_epochs=num_epochs, batch_size=batch_size)
class DataModuleFromConfig(pl.LightningDataModule):
    """LightningDataModule assembled from per-split instantiation configs.

    Only the splits whose config is passed (train/validation/test/predict)
    get a corresponding ``*_dataloader`` attribute assigned — this is how
    Lightning discovers which loaders exist, so the conditional assignment
    pattern below is load-bearing.
    """

    def __init__(self, batch_size, train=None, validation=None, test=None, predict=None, wrap=False, num_workers=None, shuffle_test_loader=False, use_worker_init_fn=False, shuffle_val_dataloader=False):
        super().__init__()
        self.batch_size = batch_size
        self.dataset_configs = dict()
        # Heuristic default: two workers per sample in a batch.
        self.num_workers = (num_workers if (num_workers is not None) else (batch_size * 2))
        self.use_worker_init_fn = use_worker_init_fn
        if (train is not None):
            self.dataset_configs['train'] = train
            self.train_dataloader = self._train_dataloader
        if (validation is not None):
            self.dataset_configs['validation'] = validation
            # Bind the shuffle flag now; Lightning calls this without args.
            self.val_dataloader = partial(self._val_dataloader, shuffle=shuffle_val_dataloader)
        if (test is not None):
            self.dataset_configs['test'] = test
            self.test_dataloader = partial(self._test_dataloader, shuffle=shuffle_test_loader)
        if (predict is not None):
            self.dataset_configs['predict'] = predict
            self.predict_dataloader = self._predict_dataloader
        self.wrap = wrap

    def prepare_data(self):
        # Instantiate each dataset once for its side effects (e.g. download);
        # the instances are discarded here and rebuilt in setup().
        for data_cfg in self.dataset_configs.values():
            instantiate_from_config(data_cfg)

    def setup(self, stage=None):
        self.datasets = dict(((k, instantiate_from_config(self.dataset_configs[k])) for k in self.dataset_configs))
        if self.wrap:
            for k in self.datasets:
                self.datasets[k] = WrappedDataset(self.datasets[k])

    def _train_dataloader(self):
        init_fn = (worker_init_fn if self.use_worker_init_fn else None)
        return DataLoader(self.datasets['train'], batch_size=self.batch_size, num_workers=self.num_workers, shuffle=True, worker_init_fn=init_fn)

    def _val_dataloader(self, shuffle=False):
        init_fn = (worker_init_fn if self.use_worker_init_fn else None)
        return DataLoader(self.datasets['validation'], batch_size=self.batch_size, num_workers=self.num_workers, shuffle=shuffle, worker_init_fn=init_fn)

    def _test_dataloader(self, shuffle=False):
        init_fn = (worker_init_fn if self.use_worker_init_fn else None)
        return DataLoader(self.datasets['test'], batch_size=self.batch_size, num_workers=self.num_workers, shuffle=shuffle, worker_init_fn=init_fn)

    def _predict_dataloader(self, shuffle=False):
        # NOTE(review): ``shuffle`` is accepted but not forwarded to the
        # DataLoader — confirm whether that is intentional.
        init_fn = (worker_init_fn if self.use_worker_init_fn else None)
        return DataLoader(self.datasets['predict'], batch_size=self.batch_size, num_workers=self.num_workers, worker_init_fn=init_fn)
class Config(object):
    """Hyperparameter container for a 1-D convolutional model and its
    training loop; all values are fixed at construction time."""

    def __init__(self):
        # Model architecture.
        self.input_channels = 1
        self.kernel_size = 8
        self.stride = 1
        self.final_out_channels = 32
        self.num_classes = 2
        self.dropout = 0.35
        self.features_len = 4
        # Windowing of the input time series.
        self.window_size = 18
        self.time_step = 18
        # Training / optimizer settings (beta1/beta2 are presumably Adam
        # betas — confirm against the optimizer construction site).
        self.num_epoch = 40
        self.beta1 = 0.9
        self.beta2 = 0.99
        self.lr = 0.0003
        # Data loading.
        self.drop_last = True
        self.batch_size = 64
        # Nested sub-configurations defined elsewhere in this module.
        self.Context_Cont = Context_Cont_configs()
        self.TC = TC()
        self.augmentation = augmentations()
class ResNetEncoder(nn.Module):
    """Sequentially applies a stack of ProteinResNetBlock layers.

    ``forward`` returns ``(final_hidden_states,)``; when
    ``config.output_hidden_states`` is true, a tuple of every hidden state
    (the input, all intermediates, and the final output) is appended.
    """

    def __init__(self, config):
        super().__init__()
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([ProteinResNetBlock(config) for _ in range(config.num_hidden_layers)])

    def forward(self, hidden_states, input_mask=None):
        collected = ()
        for block in self.layer:
            # Record the state *entering* each block when requested.
            if self.output_hidden_states:
                collected = collected + (hidden_states,)
            hidden_states = block(hidden_states, input_mask)
        if not self.output_hidden_states:
            return (hidden_states,)
        # Also record the final output, then append the full trace.
        collected = collected + (hidden_states,)
        return (hidden_states, collected)
def pretty_print_dialogs(dialogs):
    """Print each dialog's caption followed by one line per round with the
    question, answer, template id and coreference dependence."""
    for scene_id, dialog_datum in enumerate(dialogs):
        for dialog in dialog_datum['dialogs']:
            print(dialog['caption'])
            for round_id, turn in enumerate(dialog['dialog']):
                # History is offset by one: entry 0 is the caption round.
                coref_id = dialog['graph']['history'][round_id + 1]['dependence']
                fields = (round_id, turn['question'], str(turn['answer']), turn['template'], str(coref_id))
                print(('\t[Q-%d: %s] [A: %s] [%s] [%s]' % fields))
def test_serialize_BoostedRDNClassifier(tmpdir):
    """Round-trip a fitted BoostedRDNClassifier through JSON and verify the
    restored model keeps its ensemble size and predictions."""
    json_path = tmpdir.join('ToyCancerRDN.json')
    train, test = load_toy_cancer()
    background = Background(modes=train.modes)
    model = BoostedRDNClassifier(background=background, target='cancer', n_estimators=5)
    model.fit(train)
    model.to_json(json_path)
    restored = BoostedRDNClassifier()
    restored.from_json(json_path)
    predictions = restored.predict(test)
    assert (len(restored.estimators_) == 5)
    assert_array_equal(predictions, np.array([1.0, 1.0, 1.0, 0.0, 0.0]))
# NOTE(review): "('/direct')" looks like a stripped @app.route('/direct')
# decorator — extraction artifact; confirm against the original app.
('/direct')
def direct():
    """Report whether query-string `pattern` regex-matches `target`.

    SECURITY NOTE(review): both `pattern` and `target` come straight from
    the request query string, so the caller fully controls the regex —
    catastrophic-backtracking patterns can DoS this endpoint (ReDoS), and a
    malformed pattern raises `re.error` (a 500). Missing parameters arrive
    as None and raise TypeError. Presumably acceptable if this is a
    vulnerable-by-design demo app; confirm before hardening.
    """
    pattern = request.args.get('pattern')
    target = request.args.get('target')
    if re.search(pattern, target):
        return 'true'
    else:
        return 'false'
class DataHolder():
    """Parallel container for sequences ``X`` and labels ``y``.

    The names listed in ``self.attributes`` drive ``get_stats``, ``mock``
    and ``filter``, keeping both fields aligned under every operation.
    """

    def __init__(self, X, y):
        self.X = X
        self.y = y
        self.attributes = ['X', 'y']

    def get_stats(self, field):
        """Length statistics for one tracked field; each raw length is
        reduced by 2 (presumably excluding start/end sentinel tokens —
        TODO confirm)."""
        assert (field in self.attributes)
        lengths = [len(item) - 2 for item in getattr(self, field)]
        return {
            'min_length': min(lengths),
            'max_length': max(lengths),
            'mean_length': np.mean(lengths),
            'std_length': np.std(lengths),
        }

    def mock(self, n=200):
        """Return a new DataHolder holding only the first ``n`` entries."""
        truncated = {name: getattr(self, name)[:n] for name in self.attributes}
        return DataHolder(**truncated)

    def filter(self, idxs):
        """Return a new DataHolder restricted to the given indices."""
        selected = {name: [getattr(self, name)[i] for i in idxs] for name in self.attributes}
        return DataHolder(**selected)
def topic_to_mention_list(topic, is_gold):
    """Collect every event and entity mention in a topic.

    Args:
        topic: object whose ``docs`` maps doc ids to documents, each with a
            ``sentences`` mapping.
        is_gold: when True gather the gold mentions, otherwise the
            predicted ones.

    Returns:
        (event_mentions, entity_mentions) lists, in doc/sentence iteration
        order.
    """
    events = []
    entities = []
    for doc in topic.docs.values():
        for sent in doc.sentences.values():
            if is_gold:
                events.extend(sent.gold_event_mentions)
                entities.extend(sent.gold_entity_mentions)
            else:
                events.extend(sent.pred_event_mentions)
                entities.extend(sent.pred_entity_mentions)
    return (events, entities)
class Scale(object):
    """Rescale a PIL image so its shorter side equals ``size`` (int), or to
    an exact ``(w, h)`` pair, using the given interpolation mode.

    ``inv`` and ``flow`` are accepted for pipeline compatibility but unused.
    """

    def __init__(self, size, interpolation=Image.BILINEAR):
        # Bug fix: ``collections.Iterable`` was removed in Python 3.10; the
        # ABC lives in collections.abc.
        import collections.abc
        assert (isinstance(size, int) or (isinstance(size, collections.abc.Iterable) and (len(size) == 2)))
        self.size = size
        self.interpolation = interpolation

    def __call__(self, img, inv, flow):
        if isinstance(self.size, int):
            (w, h) = img.size
            # Shorter side already matches: nothing to do.
            if (((w <= h) and (w == self.size)) or ((h <= w) and (h == self.size))):
                return img
            if (w < h):
                ow = self.size
                oh = int(((self.size * h) / w))
                return img.resize((ow, oh), self.interpolation)
            else:
                oh = self.size
                ow = int(((self.size * w) / h))
                return img.resize((ow, oh), self.interpolation)
        else:
            # Explicit (w, h) target.
            return img.resize(self.size, self.interpolation)

    def randomize_parameters(self):
        # This transform holds no random state.
        pass
def same_plane(plane1, plane2):
    """Return True when the two planes' 'pt' transforms agree on x, y and z
    for every key of ``plane1['pt']`` (each key must also exist in
    ``plane2['pt']``)."""
    pts1 = plane1['pt']
    pts2 = plane2['pt']
    return all(
        pts1[key][axis] == pts2[key][axis]
        for key in pts1
        for axis in ('x', 'y', 'z')
    )
def estimator_html_repr(estimator):
    """Build the scikit-learn style HTML diagram for ``estimator``.

    The fitted/not-fitted status is probed with ``check_is_fitted``;
    objects without a ``fit`` method are displayed as not fitted.  The
    output embeds per-container CSS plus a plain-text fallback for
    renderers that strip the interactive markup.
    """
    from sklearn.exceptions import NotFittedError
    from sklearn.utils.validation import check_is_fitted
    if not hasattr(estimator, 'fit'):
        status_label = '<span>Not fitted</span>'
        is_fitted_css_class = ''
    else:
        try:
            check_is_fitted(estimator)
        except NotFittedError:
            status_label = '<span>Not fitted</span>'
            is_fitted_css_class = ''
        else:
            status_label = '<span>Fitted</span>'
            is_fitted_css_class = 'fitted'
    is_fitted_icon = f'<span class="sk-estimator-doc-link {is_fitted_css_class}">i{status_label}</span>'
    with closing(StringIO()) as out:
        # Each rendered repr gets a unique container id so multiple reprs on
        # one page keep independent CSS scopes.
        container_id = _CONTAINER_ID_COUNTER.get_id()
        style_with_id = Template(_CSS_STYLE).substitute(id=container_id)
        estimator_str = str(estimator)
        fallback_msg = 'In a Jupyter environment, please rerun this cell to show the HTML representation or trust the notebook. <br />On GitHub, the HTML representation is unable to render, please try loading this page with nbviewer.org.'
        out.write(f'<style>{style_with_id}</style><div id="{container_id}" class="sk-top-container"><div class="sk-text-repr-fallback"><pre>{html.escape(estimator_str)}</pre><b>{fallback_msg}</b></div><div class="sk-container" hidden>')
        _write_estimator_html(out, estimator, estimator.__class__.__name__, estimator_str, first_call=True, is_fitted_css_class=is_fitted_css_class, is_fitted_icon=is_fitted_icon)
        out.write('</div></div>')
        return out.getvalue()
def erdos_reyni(n, p=None, seed=None):
    """G(n, p) random graph; when ``p`` is omitted it defaults to
    1/n + 0.1, slightly above the connectivity threshold."""
    edge_prob = ((1.0 / n) + 0.1) if p is None else p
    return nx.generators.erdos_renyi_graph(n=n, p=edge_prob, seed=seed)
class FiveCrop(object):
    """Crop the four corners and the center of an image/mask pair.

    Args:
        size: target crop size; a single number yields a square
            ``(int(size), int(size))``, otherwise a (h, w) pair.
    """

    def __init__(self, size):
        # Cleanup: the original assigned self.size up to three times
        # (a dead store before the branch, and twice inside it).
        if isinstance(size, numbers.Number):
            self.size = (int(size), int(size))
        else:
            assert (len(size) == 2), 'Please provide only two dimensions (h, w) for size.'
            self.size = size

    def __call__(self, img, mask):
        """Return the five crops of ``img`` and of ``mask`` as a pair."""
        return (F.five_crop(img, self.size), F.five_crop(mask, self.size))
class PyObjectType(PyrexType):
    """Cython compiler type node for a generic Python object (PyObject *)."""
    name = 'object'
    is_pyobject = 1
    default_value = '0'
    declaration_value = '0'
    buffer_defaults = None
    is_extern = False
    is_subclassed = False
    is_gc_simple = False

    def __str__(self):
        return 'Python object'

    def __repr__(self):
        return '<PyObjectType>'

    def can_coerce_to_pyobject(self, env):
        # Trivially true: it already is a Python object.
        return True

    def can_coerce_from_pyobject(self, env):
        return True

    def default_coerced_ctype(self):
        # No preferred C type to coerce into.
        return None

    def assignable_from(self, src_type):
        # Any non-pointer source is assignable, plus string-like pointers.
        return ((not src_type.is_ptr) or src_type.is_string or src_type.is_pyunicode_ptr)

    def declaration_code(self, entity_code, for_display=0, dll_linkage=None, pyrex=0):
        if (pyrex or for_display):
            base_code = 'object'
        else:
            # C-level declarations are pointers to PyObject.
            base_code = public_decl('PyObject', dll_linkage)
            entity_code = ('*%s' % entity_code)
        return self.base_declaration_code(base_code, entity_code)

    def as_pyobject(self, cname):
        # Incomplete and extension types need an explicit cast to PyObject*.
        if ((not self.is_complete()) or self.is_extension_type):
            return ('(PyObject *)' + cname)
        else:
            return cname

    def py_type_name(self):
        return 'object'

    def __lt__(self, other):
        # Never orders below another type.
        return False

    def global_init_code(self, entry, code):
        # Module-level object variables start out as None (no refnanny here).
        code.put_init_var_to_py_none(entry, nanny=False)

    def check_for_null_code(self, cname):
        # A PyObject* variable can be null-tested directly by name.
        return cname
def mk_dist_dir(x64):
    """Create the Windows distribution folder for the requested architecture
    and populate it from the matching build directory."""
    if x64:
        platform, build_path = 'x64', BUILD_X64_DIR
    else:
        platform, build_path = 'x86', BUILD_X86_DIR
    dist_path = os.path.join(DIST_DIR, get_z3_name(x64))
    mk_dir(dist_path)
    mk_win_dist(build_path, dist_path)
    if is_verbose():
        print(f"Generated {platform} distribution folder at '{dist_path}'")
# NOTE(review): the bare "_dispatch" below looks like a stripped @_dispatch
# decorator (uarray multimethod registration) — extraction artifact.
_dispatch
def irfft(x, n=None, axis=(- 1), norm=None, overwrite_x=False, workers=None, *, plan=None):
    """Multimethod stub for the inverse real FFT.

    The body only declares which argument is dispatchable (``x`` as an
    ndarray); the actual computation is supplied by the active backend.
    """
    return (Dispatchable(x, np.ndarray),)
def ensure_no_unnecessary_tuple_sends(graph: Graph, assert_same_stages=True):
    """Co-locate tuple ``__getitem__`` ops with their tuple/index producers.

    For every OP node whose scope contains ``tuple::__getitem__`` with a
    constant index, the getitem and index nodes are moved onto the tuple
    node's stage and GPU so the tuple itself never has to cross a stage
    boundary.  With ``assert_same_stages`` the total number of partitions
    must be unchanged afterwards.
    """
    if assert_same_stages:
        # Snapshot partition count and stage-id set for the post-hoc check.
        n2 = graph.num_partitions
        b4allstages = {n.stage_id for n in graph.nodes}
    for n in graph.nodes:
        if ((n.type != NodeTypes.OP) or ('tuple::__getitem__' not in n.scope)):
            continue
        getitem_node = n
        # NOTE(review): assumes in_edges[0] is the tuple and in_edges[1] the
        # index — confirm against the graph builder.
        tuple_node = n.in_edges[0]
        index_node = n.in_edges[1]
        if (index_node.type is NodeTypes.CONSTANT):
            b4_ids = {getitem_node.stage_id, index_node.stage_id, tuple_node.stage_id}
            b4_stage_ids = [getitem_node.stage_id, index_node.stage_id, tuple_node.stage_id]
            b4_gpu_ids = [getitem_node.gpu_id, index_node.gpu_id, tuple_node.gpu_id]
            # Chained assignment: all three nodes end up on the tuple node's
            # stage/GPU.
            getitem_node.stage_id = index_node.stage_id = tuple_node.stage_id
            getitem_node.gpu_id = index_node.gpu_id = tuple_node.gpu_id
            after = {getitem_node.stage_id}
            change = (b4_ids - after)
            if change:
                # Report every node whose placement actually moved.
                for (x, b4_stage_id, b4_gpu_id) in zip([getitem_node, index_node, tuple_node], b4_stage_ids, b4_gpu_ids):
                    if (b4_stage_id != getitem_node.stage_id):
                        warnings.warn(f'changed {x.id}: stage:{b4_stage_id}->{getitem_node.stage_id} gpu:{b4_gpu_id}->{getitem_node.gpu_id}')
    if assert_same_stages:
        after = {n.stage_id for n in graph.nodes}
        n3 = graph.num_partitions
        assert (n2 == n3), f'Accidentally killed a stage {(n2, n3)}, {(b4allstages - after)}'
# NOTE(review): "_cache(maxsize=32)" looks like a stripped
# @lru_cache(maxsize=32) decorator — extraction artifact; confirm.
_cache(maxsize=32)
def _setup_so3mm_cuda_kernel(nl, ni, nj, nk, conj_x=False, conj_y=False, trans_x_spec=False, trans_x_feature=False, trans_y_spec=False, trans_y_feature=False, trans_out_feature=False, device=0):
    """Compile and return a CUDA launcher for blocked SO(3) spectral
    matrix multiplication.

    The INDEX_X / INDEX_Y / INDEX_OUT macros baked into the kernel source
    select the memory layout for x, y and the output according to the
    ``trans_*`` flags, and the CONJ_X / CONJ_Y macros optionally conjugate
    the complex inputs.  The returned ``fun(x, y, output)`` launches the
    compiled kernel on the current CUDA stream.
    """
    kernel = '\n#define NI {}\n#define NJ {}\n#define NK {}\n'.format(ni, nj, nk)
    # Pick exactly one INDEX_X layout from the four (spec, feature)
    # transpose combinations.
    if ((not trans_x_spec) and (not trans_x_feature)):
        kernel += '#define INDEX_X (((L0 + m * L + p) * NI + i) * NK + k)\n'
    if ((not trans_x_spec) and trans_x_feature):
        kernel += '#define INDEX_X (((L0 + m * L + p) * NK + k) * NI + i)\n'
    if (trans_x_spec and (not trans_x_feature)):
        kernel += '#define INDEX_X (((L0 + p * L + m) * NI + i) * NK + k)\n'
    if (trans_x_spec and trans_x_feature):
        kernel += '#define INDEX_X (((L0 + p * L + m) * NK + k) * NI + i)\n'
    # Same for INDEX_Y.
    if ((not trans_y_spec) and (not trans_y_feature)):
        kernel += '#define INDEX_Y (((L0 + p * L + n) * NK + k) * NJ + j)\n'
    if ((not trans_y_spec) and trans_y_feature):
        kernel += '#define INDEX_Y (((L0 + p * L + n) * NJ + j) * NK + k)\n'
    if (trans_y_spec and (not trans_y_feature)):
        kernel += '#define INDEX_Y (((L0 + n * L + p) * NK + k) * NJ + j)\n'
    if (trans_y_spec and trans_y_feature):
        kernel += '#define INDEX_Y (((L0 + n * L + p) * NJ + j) * NK + k)\n'
    # And the output layout.
    if (not trans_out_feature):
        kernel += '#define INDEX_OUT (((L0 + m * L + n) * NI + i) * NJ + j)\n'
    if trans_out_feature:
        kernel += '#define INDEX_OUT (((L0 + m * L + n) * NJ + j) * NI + i)\n'
    kernel += '\n#define CONJ_X {}\n#define CONJ_Y {}\n'.format(('x_im = -x_im;' if conj_x else ';'), ('y_im = -y_im;' if conj_y else ';'))
    kernel += '\n#define CEIL_DIV(x, y) (((x) + (y) - 1) / (y))\n\nextern "C"\n__global__ void main_(const float* in_x, const float* in_y, float* out)\n{\n    // start of thread independant code\n    int l = blockIdx.z;\n    int L = 2 * l + 1;\n    int L0 = (4 * l*l - 1) * l / 3;\n\n    if (blockIdx.y * 32 >= L * NI || blockIdx.x * 32 >= L * NJ) {\n        return;\n    }\n\n    int ntile = CEIL_DIV(L * NK, 32);\n    // end of thread independant code\n\n    int mi = blockIdx.y * 32 + threadIdx.y;\n    int m = mi / NI;\n    int i = mi % NI;\n    int nj = blockIdx.x * 32 + threadIdx.x;\n    int n = nj / NJ;\n    int j = nj % NJ;\n\n    float sum_re = 0.0;\n    float sum_im = 0.0;\n\n    for (int tile = 0; tile < ntile; ++tile) {\n        __shared__ float tileX[2][32][32];\n        __shared__ float tileY[2][32][32];\n\n        int pk = tile * 32 + threadIdx.x;\n        int p = pk / NK;\n        int k = pk % NK;\n        int index = INDEX_X * 2;\n        tileX[0][threadIdx.y][threadIdx.x] = m < L && p < L ? in_x[index + 0] : 0.0;\n        tileX[1][threadIdx.y][threadIdx.x] = m < L && p < L ? in_x[index + 1] : 0.0;\n\n        pk = tile * 32 + threadIdx.y;\n        p = pk / NK;\n        k = pk % NK;\n        index = INDEX_Y * 2;\n        tileY[0][threadIdx.y][threadIdx.x] = p < L && n < L ? in_y[index + 0] : 0.0;\n        tileY[1][threadIdx.y][threadIdx.x] = p < L && n < L ? in_y[index + 1] : 0.0;\n\n        __syncthreads();\n\n        for (int any = 0; any < 32; ++any) {\n            float x_re = tileX[0][threadIdx.y][any];\n            float x_im = tileX[1][threadIdx.y][any];\n            float y_re = tileY[0][any][threadIdx.x];\n            float y_im = tileY[1][any][threadIdx.x];\n\n            CONJ_X\n            CONJ_Y\n\n            sum_re += x_re * y_re - x_im * y_im;\n            sum_im += x_re * y_im + x_im * y_re;\n        }\n\n        __syncthreads();\n    }\n\n    if (m < L && n < L) {\n        int index = INDEX_OUT * 2;\n        out[index + 0] = sum_re;\n        out[index + 1] = sum_im;\n    }\n}\n'
    import s2cnn.utils.cuda as cuda_utils
    kernel = cuda_utils.compile_kernel(kernel, 'so3_mm.cu', 'main_')
    stream = cuda_utils.Stream(ptr=torch.cuda.current_stream().cuda_stream)

    def fun(x, y, output):
        assert output.is_contiguous()
        kernel(block=(32, 32, 1), grid=(math.ceil(((((2 * nl) - 1) * nj) / 32)), math.ceil(((((2 * nl) - 1) * ni) / 32)), nl), args=[x.contiguous().data_ptr(), y.contiguous().data_ptr(), output.data_ptr()], stream=stream)
    return fun
def IntegralLatticeGluing(Lattices, glue, return_embeddings=False):
    """Glue integral lattices along elements of their discriminant groups.

    Each row ``row`` of ``glue`` defines one glue vector, ``row[i]`` being
    an element (or preimage) of the i-th lattice's discriminant group.  The
    glued lattice is the overlattice of the direct sum generated by the
    lifted glue vectors; with ``return_embeddings`` the embeddings of the
    summands into the glued lattice are returned as well.
    """
    [direct_sum, phi] = IntegralLatticeDirectSum(Lattices, return_embeddings=True)
    N = len(Lattices)
    # Validate: every glue row must carry one entry per lattice...
    for row in glue:
        if len(row) != N:
            raise ValueError('the lengths of the lists do not match')
    # ...and each entry must coerce into that lattice's discriminant group.
    for i in range(N):
        disc_group = Lattices[i].discriminant_group()
        for row in glue:
            disc_group(row[i])
    generators = [
        sum((phi[i](row[i].lift() * row[i].order()) / row[i].order()) for i in range(N))
        for row in glue
    ]
    glued_lattice = direct_sum.overlattice(generators)
    if not return_embeddings:
        return glued_lattice
    # Express the direct-sum basis in the glued lattice's basis and push the
    # embeddings through that change of basis.
    change_of_basis = glued_lattice.basis_matrix().solve_left(direct_sum.basis_matrix())
    embeddings = [Lattices[i].Hom(glued_lattice)(phi[i].matrix() * change_of_basis) for i in range(N)]
    return [glued_lattice, embeddings]
def create_trainer(name, config):
    """Build a PyTorch Lightning Trainer wired to W&B logging and periodic
    checkpointing, based on the experiment ``config``.

    Side effects: mutates ``config`` (suffix, experiment name, seed,
    val_check_interval), seeds all RNGs, and constructs filesystem/W&B
    loggers.
    """
    # Non-main runs without an explicit suffix get tagged as dev runs.
    if ((not config.wandb_main) and (config.suffix == '')):
        config.suffix = '-dev'
    config.experiment = generate_experiment_name(name, config)
    # An interval > 1 means "every N epochs" (must be an int); <= 1 is a
    # fraction of an epoch, handled by val_check_interval below.
    if (config.val_check_interval > 1):
        config.val_check_interval = int(config.val_check_interval)
    if (config.seed is None):
        config.seed = randint(0, 999)
    seed_everything(config.seed)
    # NOTE(review): constructed but not used further here — presumably
    # registers itself via side effects; confirm.
    filesystem_logger = FilesystemLogger(config)
    logger = WandbLogger(project=f'{name}{config.suffix}', name=config.experiment, id=config.experiment, settings=wandb.Settings(start_method='thread'))
    # save_top_k=-1 keeps every periodic checkpoint.
    checkpoint_callback = ModelCheckpoint(dirpath=(((config.base_dir / Path('runs')) / config.experiment) / 'checkpoints'), filename='_{epoch}', save_last=True, save_top_k=(- 1), verbose=False, every_n_epochs=config.save_epoch)
    gpu_count = torch.cuda.device_count()
    if (gpu_count > 1):
        # Multi-GPU: DDP across all visible devices.
        trainer = Trainer(devices=(- 1), strategy='ddp', accelerator='gpu', log_every_n_steps=config.log_every_n_steps, num_sanity_val_steps=config.sanity_steps, max_epochs=config.max_epoch, limit_val_batches=config.val_check_percent, callbacks=[checkpoint_callback], val_check_interval=float(min(config.val_check_interval, 1)), check_val_every_n_epoch=max(1, config.val_check_interval), resume_from_checkpoint=config.resume, logger=logger, benchmark=True)
    else:
        # Single-GPU fallback.
        trainer = Trainer(devices=[0], accelerator='gpu', num_sanity_val_steps=config.sanity_steps, log_every_n_steps=config.log_every_n_steps, max_epochs=config.max_epoch, limit_val_batches=config.val_check_percent, callbacks=[checkpoint_callback], val_check_interval=float(min(config.val_check_interval, 1)), check_val_every_n_epoch=max(1, config.val_check_interval), resume_from_checkpoint=config.resume, logger=logger, benchmark=True)
    return trainer
def filter_duplicate(orig_answers):
    """Drop answers whose text duplicates one already kept, as judged by
    ``is_filtered`` against the texts collected so far."""
    kept = []
    for candidate in orig_answers:
        seen_texts = [a['text'] for a in kept]
        if not is_filtered(seen_texts, candidate['text']):
            kept.append(candidate)
    return kept
def crt(v):
    """Combine a list of residues via pairwise Chinese remaindering
    (``.crt``); an empty list yields 1 in the trivial ring Z/1Z."""
    if not v:
        return IntegerModRing(1)(1)
    combined = v[0]
    for residue in v[1:]:
        combined = combined.crt(residue)
    return combined
def test_dowhile():
    """Build and run an SDFG implementing a do-while loop.

    state1 (the loop body, incrementing A) always executes before the guard
    checks ``cond``; cond takes values 1, 2, 3, 4, 5, so the body runs four
    times and A[0] ends at 4.
    """
    sdfg = dace.SDFG('dowhiletest')
    sdfg.add_array('A', [1], dace.int32)
    init = sdfg.add_state()
    state1 = sdfg.add_state()
    # Loop counter is initialized on entry into the body.
    sdfg.add_edge(init, state1, dace.InterstateEdge(assignments={'cond': '1'}))
    state2 = sdfg.add_state()
    # Counter increments after each body execution, before the guard.
    sdfg.add_edge(state1, state2, dace.InterstateEdge(assignments={'cond': 'cond + 1'}))
    guard = sdfg.add_state_after(state2)
    after = sdfg.add_state()
    # Guard at the loop bottom -> do-while semantics.
    sdfg.add_edge(guard, state1, dace.InterstateEdge('cond < 5'))
    sdfg.add_edge(guard, after, dace.InterstateEdge('cond >= 5'))
    # Loop body: A = A + 1.
    t = state1.add_tasklet('something', {'a'}, {'o'}, 'o = a + 1')
    r = state1.add_read('A')
    w = state1.add_write('A')
    state1.add_edge(r, None, t, 'a', dace.Memlet('A'))
    state1.add_edge(t, 'o', w, None, dace.Memlet('A'))
    A = np.zeros([1], dtype=np.int32)
    sdfg(A=A)
    assert (A[0] == 4)
def _to_numeral(num, ctx=None):
    """Coerce ``num`` to a Numeral, passing existing Numerals through."""
    return num if isinstance(num, Numeral) else Numeral(num, ctx)
class FormsSpace_abstract(FormsRing_abstract):
    """Abstract base for spaces of (quasi-)modular forms of fixed weight
    ``k`` and multiplier ``ep`` for a Hecke triangle group."""
    from .element import FormsElement
    Element = FormsElement

    def __init__(self, group, base_ring, k, ep, n):
        # red_hom=True: treat the space as a reduced homogeneous component
        # of the graded forms ring.
        super().__init__(group=group, base_ring=base_ring, red_hom=True, n=n)
        self._weight = k
        self._ep = ep
        # (l1, l2) encode the weight/multiplier as lattice-style parameters.
        (self._l1, self._l2) = self.weight_parameters()
        # Free module of coordinates; set lazily elsewhere.
        self._module = None
        # By default, a space is its own ambient space.
        self._ambient_space = self
    def _repr_(self):
        # E.g. "ModularForms(n=3, k=4, ep=1) over Integer Ring".
        return '{}Forms(n={}, k={}, ep={}) over {}'.format(self._analytic_type.analytic_space_name(), self._group.n(), self._weight, self._ep, self._base_ring)
    def _latex_(self):
        """LaTeX representation of the space (analytic type, n, weight, ep,
        base ring)."""
        from sage.misc.latex import latex
        return '{}_{{ n={} }}({},\\ {})({})'.format(self._analytic_type.latex_space_name(), self._group.n(), self._weight, self._ep, latex(self._base_ring))
    def _element_constructor_(self, el):
        """Coerce/convert ``el`` into an element of this forms space.

        Accepted inputs, tried in order: FormsRingElement (possibly from a
        different group, with an n=3 -> n=infinity generator substitution),
        Laurent/power series (reconstructed from their q-expansion),
        free-module vectors (coordinates), and plain lists/tuples of
        coordinates; everything else falls through to the generic element
        constructor.
        """
        from .graded_ring_element import FormsRingElement
        if isinstance(el, FormsRingElement):
            if ((self.hecke_n() == infinity) and (el.hecke_n() == ZZ(3))):
                # Transfer from the n=3 group to n=infinity: rewrite the
                # defining rational function via the generator substitution.
                el_f = el._reduce_d()._rat
                (x, y, z, d) = self.pol_ring().gens()
                num_sub = el_f.numerator().subs(x=(((y ** 2) + (3 * x)) / ZZ(4)), y=((((9 * x) * y) - (y ** 3)) / ZZ(8)), z=(((3 * z) - y) / ZZ(2)))
                denom_sub = el_f.denominator().subs(x=(((y ** 2) + (3 * x)) / ZZ(4)), y=((((9 * x) * y) - (y ** 3)) / ZZ(8)), z=(((3 * z) - y) / ZZ(2)))
                new_num = (num_sub.numerator() * denom_sub.denominator())
                new_denom = (denom_sub.numerator() * num_sub.denominator())
                el = (self._rat_field(new_num) / self._rat_field(new_denom))
            elif (self.group() == el.group()):
                el = el._rat
            else:
                raise ValueError('{} has group {} != {}'.format(el, el.group(), self.group()))
            return self.element_class(self, el)
        P = parent(el)
        # q-series input: rebuild the form from its expansion.
        if (is_LaurentSeriesRing(P) or is_PowerSeriesRing(P)):
            if self.is_modular():
                return self.construct_form(el)
            else:
                return self.construct_quasi_form(el)
        if (is_FreeModuleElement(el) and ((self.module() is P) or (self.ambient_module() is P))):
            return self.element_from_ambient_coordinates(el)
        # Coordinates relative to this (sub)space's basis.
        if ((not self.is_ambient()) and (isinstance(el, list) or isinstance(el, tuple) or is_FreeModuleElement(el)) and (len(el) == self.rank())):
            try:
                return self.element_from_coordinates(el)
            except (ArithmeticError, TypeError):
                pass
        if (self.ambient_module() and self.ambient_module().has_coerce_map_from(P)):
            return self.element_from_ambient_coordinates(self.ambient_module()(el))
        # Coordinates relative to the ambient space.
        if ((isinstance(el, list) or isinstance(el, tuple)) and (len(el) == self.degree())):
            try:
                return self.element_from_ambient_coordinates(el)
            except (ArithmeticError, TypeError):
                pass
        # Fall back to the generic element constructor.
        return self.element_class(self, el)
    def _coerce_map_from_(self, S):
        """Coercion rules: the zero space always coerces in; subspaces coerce
        via their modules when the ambient spaces coerce; forms spaces with
        matching graded ring, weight and multiplier coerce; otherwise fall
        back to the coefficient ring when this space contains it."""
        from .space import ZeroForm
        from .subspace import SubSpaceForms
        if isinstance(S, ZeroForm):
            return True
        if (isinstance(S, SubSpaceForms) and isinstance(self, SubSpaceForms)):
            if self.ambient_space().has_coerce_map_from(S.ambient_space()):
                # Compare the underlying modules inside a common ambient space.
                S2 = S.change_ambient_space(self.ambient_space())
                return self.module().has_coerce_map_from(S2.module())
            else:
                return False
        elif (isinstance(S, FormsSpace_abstract) and self.graded_ring().has_coerce_map_from(S.graded_ring()) and (S.weight() == self._weight) and (S.ep() == self._ep) and (not isinstance(self, SubSpaceForms))):
            return True
        else:
            return (self.contains_coeff_ring() and self.coeff_ring().has_coerce_map_from(S))
    # NOTE(review): the bare "_method" looks like a stripped @cached_method
    # decorator — extraction artifact; confirm against upstream.
    _method
    def one(self):
        """Return the multiplicative unit: the constant form 1, reduced."""
        return self.extend_type('holo', ring=True)(1).reduce()
    def is_ambient(self):
        """True when this space is its own ambient space (not a subspace)."""
        return (self._ambient_space == self)
    def ambient_space(self):
        """Return the ambient forms space of this space."""
        return self._ambient_space
    def module(self):
        """Return the associated free coordinate module (or None if unset)."""
        return self._module
    def ambient_module(self):
        """Return the coordinate module of the ambient space."""
        return self._ambient_space._module
    def subspace(self, basis):
        """Return the subspace of this space spanned by ``basis``."""
        from .subspace import SubSpaceForms
        return SubSpaceForms(self, basis)
    def change_ring(self, new_base_ring):
        """Return the same kind of space over ``new_base_ring``."""
        return self.__class__.__base__(self.group(), new_base_ring, self.weight(), self.ep())
    def construction(self):
        """Return the functorial construction used by the coercion framework;
        subspaces additionally record their basis in the functor."""
        from .functors import FormsSubSpaceFunctor, FormsSpaceFunctor, BaseFacade
        ambient_space_functor = FormsSpaceFunctor(self._analytic_type, self._group, self._weight, self._ep)
        if self.is_ambient():
            return (ambient_space_functor, BaseFacade(self._base_ring))
        else:
            return (FormsSubSpaceFunctor(ambient_space_functor, self._basis), BaseFacade(self._base_ring))
    # NOTE(review): "_method" appears to be a stripped decorator.
    _method
    def weight(self):
        """Return the weight ``k`` of this forms space."""
        return self._weight
    # NOTE(review): "_method" appears to be a stripped decorator.
    _method
    def ep(self):
        """Return the multiplier ``ep`` of this forms space."""
        return self._ep
    # NOTE(review): "_method" appears to be a stripped decorator.
    _method
    def contains_coeff_ring(self):
        """True when the coefficient ring embeds into this space, i.e. the
        space contains constants: holomorphic type, weight 0, ep 1."""
        return ((self.AT('holo') <= self._analytic_type) and (self.weight() == QQ(0)) and (self.ep() == ZZ(1)))
    def element_from_coordinates(self, vec):
        """Return the element with coordinates ``vec`` with respect to
        ``self.gens()``; requires the free module to be defined."""
        if (not self.module()):
            raise ValueError(f'no free module defined for {self}')
        basis = self.gens()
        assert (len(basis) == len(vec))
        # Linear combination of the basis with the given coordinates.
        return self(sum([(vec[k] * basis[k]) for k in range(len(vec))]))
    def element_from_ambient_coordinates(self, vec):
        """Return the element whose ambient-space coordinates are ``vec``."""
        return self(self.ambient_space().element_from_coordinates(vec))
    def homogeneous_part(self, k, ep):
        """Return the homogeneous part of degree (k, ep) — which must be
        this space's own (weight, ep), since the space is already
        homogeneous."""
        if ((k == self._weight) and (ep == self._ep)):
            return self
        else:
            raise ValueError('{} already is homogeneous with degree ({}, {}) != ({}, {})!'.format(self, self._weight, self._ep, k, ep))
    def weight_parameters(self):
        """Return the parameters ``(l1, l2)`` encoding the weight ``k`` and
        multiplier ``ep`` for the group's ``n``; raises ValueError when
        (k, ep) cannot occur for this group."""
        n = self._group.n()
        k = self._weight
        ep = self._ep
        if (n == infinity):
            num = ((k - (1 - ep)) / ZZ(4))
        else:
            num = (((k - (((1 - ep) * ZZ(n)) / ZZ((n - 2)))) * ZZ((n - 2))) / ZZ(4))
        # A non-integral num means the weight/multiplier combination does
        # not occur; otherwise split num as l1*n + l2 (degenerate for
        # n = infinity).
        if num.is_integral():
            num = ZZ(num)
            if (n == infinity):
                l2 = ZZ(0)
                l1 = num
            else:
                l2 = (num % n)
                l1 = ((num - l2) / n).numerator()
        else:
            raise ValueError('Invalid or non-occurring weight k={}, ep={}!'.format(k, ep))
        return (l1, l2)
    def aut_factor(self, gamma, t):
        """Automorphy factor of ``gamma`` at ``t`` for this weight and
        multiplier.

        Translations contribute 1; the reflection contributes
        ep * (t/i)^k; general elements are decomposed into their S/T word
        and the per-letter factors multiplied up along the orbit of ``t``.
        """
        if gamma.is_translation():
            return ZZ(1)
        elif gamma.is_reflection():
            return (self._ep * ((t / QQbar(I)) ** self._weight))
        else:
            L = list(gamma.word_S_T()[0])
            aut_f = ZZ(1)
            # Consume the word from the right, acting on t as we go.
            while (len(L) > 0):
                M = L.pop((- 1))
                aut_f *= self.aut_factor(M, t)
                t = M.acton(t)
            return aut_f
    # NOTE(review): "_method" appears to be a stripped decorator
    # (presumably @cached_method) — extraction artifact.
    _method
    def F_simple(self, order_1=ZZ(0)):
        """Return a 'simple' form of this space's weight/multiplier, built
        as a product of powers of the generators; the analytic type of the
        returned parent extends to cusp/holo/weak depending on the signs of
        the resulting orders."""
        (x, y, z, d) = self.rat_field().gens()
        n = self.hecke_n()
        if (n == infinity):
            order_1 = ZZ(order_1)
            order_inf = (self._l1 - order_1)
            finf_pol = (d * (x - (y ** 2)))
            rat = (((finf_pol ** order_inf) * (x ** order_1)) * (y ** (ZZ((1 - self._ep)) / ZZ(2))))
        else:
            order_inf = self._l1
            order_1 = order_inf
            finf_pol = (d * ((x ** n) - (y ** 2)))
            rat = (((finf_pol ** self._l1) * (x ** self._l2)) * (y ** (ZZ((1 - self._ep)) / ZZ(2))))
        # Strictly positive orders -> cuspidal; non-negative -> holomorphic;
        # otherwise only weakly holomorphic.
        if ((order_inf > 0) and (order_1 > 0)):
            new_space = self.extend_type('cusp')
        elif ((order_inf >= 0) and (order_1 >= 0)):
            new_space = self.extend_type('holo')
        else:
            new_space = self.extend_type('weak')
        return new_space(rat)
    def Faber_pol(self, m, order_1=ZZ(0), fix_d=False, d_num_prec=None):
        """Return the m-th Faber polynomial in ``J_inv/d``.

        Repeatedly strips the leading term of ``q**m / F_simple`` using
        powers of ``J_inv/d`` until the remainder has no terms left.  See
        also ``faber_pol``, the variant normalized via ``j_inv`` without
        the extra 1/d scaling.
        """
        m = ZZ(m)
        if (self.hecke_n() == infinity):
            order_1 = ZZ(order_1)
            order_inf = (self._l1 - order_1)
        else:
            order_inf = self._l1
            order_1 = order_inf
        if (m > order_inf):
            raise ValueError('Invalid basis index: m = {} > {} = order_inf!'.format(m, order_inf))
        # Enough q-expansion precision for all eliminations below.
        prec = (((2 * order_inf) - m) + 1)
        d = self.get_d(fix_d=fix_d, d_num_prec=d_num_prec)
        q = self.get_q(prec=prec, fix_d=fix_d, d_num_prec=d_num_prec)
        simple_qexp = self.F_simple(order_1=order_1).q_expansion(prec=prec, fix_d=fix_d, d_num_prec=d_num_prec)
        J_qexp = self.J_inv().q_expansion(prec=(order_inf - m), fix_d=fix_d, d_num_prec=d_num_prec)
        temp_reminder = ((1 / simple_qexp) / (q ** (- m))).add_bigoh(1)
        fab_pol = q.parent()([])
        # Eliminate the most negative exponent of the remainder one power
        # of (J_inv/d) at a time.
        while (len(temp_reminder.coefficients()) > 0):
            temp_coeff = temp_reminder.coefficients()[0]
            temp_exp = (- temp_reminder.exponents()[0])
            fab_pol += (temp_coeff * ((q / d) ** temp_exp))
            temp_reminder -= (temp_coeff * ((J_qexp / d) ** temp_exp))
            if (not d.parent().is_exact()):
                # Numerically, force the eliminated terms to actually vanish.
                temp_reminder = temp_reminder.truncate_neg(((- temp_exp) + 1))
        return fab_pol.polynomial()
def faber_pol(self, m, order_1=ZZ(0), fix_d=False, d_num_prec=None):
    """Return the ``m``-th Faber polynomial of ``self`` with respect to
    ``j_inv`` (the variant of :meth:`Faber_pol` without the ``d``-scaling:
    it uses ``j_inv`` and plain powers of ``q``).
    """
    m = ZZ(m)
    if (self.hecke_n() == infinity):
        order_1 = ZZ(order_1)
        order_inf = (self._l1 - order_1)
    else:
        # For finite n the order at -1 is tied to the order at infinity.
        order_inf = self._l1
        order_1 = order_inf
    if (m > order_inf):
        raise ValueError('Invalid basis index: m = {} > {} = order_inf!'.format(m, order_inf))
    # Enough q-coefficients to eliminate the whole principal part below.
    prec = (((2 * order_inf) - m) + 1)
    d = self.get_d(fix_d=fix_d, d_num_prec=d_num_prec)
    q = self.get_q(prec=prec, fix_d=fix_d, d_num_prec=d_num_prec)
    simple_qexp = self.F_simple(order_1=order_1).q_expansion(prec=prec, fix_d=fix_d, d_num_prec=d_num_prec)
    j_qexp = self.j_inv().q_expansion(prec=(order_inf - m), fix_d=fix_d, d_num_prec=d_num_prec)
    # Remainder to be cancelled; only the principal part matters.
    temp_reminder = ((1 / simple_qexp) / (q ** (- m))).add_bigoh(1)
    fab_pol = q.parent()([])
    # Greedily subtract multiples of powers of j_inv to kill the lowest
    # remaining exponent in each step.
    while (len(temp_reminder.coefficients()) > 0):
        temp_coeff = temp_reminder.coefficients()[0]
        temp_exp = (- temp_reminder.exponents()[0])
        fab_pol += (temp_coeff * (q ** temp_exp))
        temp_reminder -= (temp_coeff * (j_qexp ** temp_exp))
        if (not d.parent().is_exact()):
            # Truncate numerical noise with inexact coefficients.
            temp_reminder = temp_reminder.truncate_neg(((- temp_exp) + 1))
    return fab_pol.polynomial()
def F_basis_pol(self, m, order_1=ZZ(0)):
    """Return the rational function (in the generators ``x, y, z, d``)
    corresponding to the ``m``-th canonical basis element of ``self``:
    the simple product from :meth:`F_simple` multiplied by the Faber
    polynomial evaluated at the rational expression of the J-invariant.
    """
    (x, y, z, d) = self.rat_field().gens()
    n = self._group.n()
    if (n == infinity):
        order_1 = ZZ(order_1)
        order_inf = (self._l1 - order_1)
        finf_pol = (d * (x - (y ** 2)))
        # Rational expression of (the normalized) J-invariant in x, y.
        jinv_pol = (x / (x - (y ** 2)))
        rat = ((((finf_pol ** order_inf) * (x ** order_1)) * (y ** (ZZ((1 - self._ep)) / ZZ(2)))) * self.Faber_pol(m, order_1)(jinv_pol))
    else:
        order_inf = self._l1
        order_1 = order_inf
        finf_pol = (d * ((x ** n) - (y ** 2)))
        jinv_pol = ((x ** n) / ((x ** n) - (y ** 2)))
        rat = ((((finf_pol ** order_inf) * (x ** self._l2)) * (y ** (ZZ((1 - self._ep)) / ZZ(2)))) * self.Faber_pol(m)(jinv_pol))
    return rat
def F_basis(self, m, order_1=ZZ(0)):
    """Return the ``m``-th canonical basis element of ``self`` as an element
    of the appropriately extended space ('cusp', 'holo' or 'weak'),
    constructed from the rational expression of :meth:`F_basis_pol`.
    """
    basis_pol = self.F_basis_pol(m, order_1=order_1)
    if self.hecke_n() == infinity:
        (x, y, z, d) = self.pol_ring().gens()
        # For n = infinity the analytic type also depends on whether x
        # divides the numerator resp. denominator of the expression.
        if m > 0 and x.divides(basis_pol.numerator()):
            space_type = 'cusp'
        elif m < 0 or x.divides(basis_pol.denominator()):
            space_type = 'weak'
        else:
            space_type = 'holo'
    else:
        # Finite n: the sign of m alone decides the analytic type.
        space_type = 'cusp' if m > 0 else ('holo' if m >= 0 else 'weak')
    return self.extend_type(space_type)(basis_pol)
def _canonical_min_exp(self, min_exp, order_1):
    """Return ``(min_exp, order_1)`` normalized against the analytic type
    of ``self``: holomorphic spaces force non-negative orders, cuspidal
    spaces force strictly positive ones, and for finite ``n`` the order at
    ``-1`` is reset to zero (it is only meaningful for ``n = infinity``).
    """
    exp = ZZ(min_exp)
    ord1 = ZZ(order_1)
    if self.is_holomorphic():
        # Cusp forms vanish at the cusps (order >= 1); plain holomorphic
        # forms merely have non-negative orders.
        lower = 1 if self.is_cuspidal() else 0
        exp = max(exp, lower)
        ord1 = max(ord1, lower)
    if self.hecke_n() != infinity:
        ord1 = ZZ(0)
    return (exp, ord1)
def quasi_part_gens(self, r=None, min_exp=0, max_exp=infinity, order_1=ZZ(0)):
    """Return generators of the quasi part of ``self`` with exact
    ``E2``-power ``r`` (or of all admissible powers if ``r`` is ``None``),
    with orders at infinity between ``min_exp`` and ``max_exp``.

    OUTPUT: a list of elements of ``self`` of the form
    ``F_basis(m) * E2**r``.
    """
    if (not self.is_weakly_holomorphic()):
        from warnings import warn
        warn('This function only determines generators of (quasi) weakly modular forms!')
    (min_exp, order_1) = self._canonical_min_exp(min_exp, order_1)
    if self.is_modular():
        # Modular (non-quasi) spaces only contain the r = 0 part.
        # NOTE(review): if r is None here, ZZ(None) raises; the companion
        # quasi_part_dimension forces r = ZZ(0) instead -- confirm which
        # behavior is intended.
        r = ZZ(r)
        if (r != 0):
            return []
    n = self.hecke_n()
    # Maximal weight the modular (non-E2) factor can carry.
    if (n == infinity):
        max_numerator_weight = (((self._weight - (4 * min_exp)) - (4 * order_1)) + 4)
    else:
        max_numerator_weight = ((self._weight - (((4 * n) / (n - 2)) * min_exp)) + 4)
    if (r is None):
        # Collect the generators of every admissible quasi part.
        gens = []
        for rnew in range(ZZ(0), (QQ((max_numerator_weight / ZZ(2))).floor() + 1)):
            gens += self.quasi_part_gens(r=rnew, min_exp=min_exp, max_exp=max_exp, order_1=order_1)
        return gens
    r = ZZ(r)
    if ((r < 0) or ((2 * r) > max_numerator_weight)):
        return []
    E2 = self.E2()
    # Weak forms of the complementary weight/multiplier: multiplying their
    # basis elements by E2^r lands back in self.
    ambient_weak_space = self.graded_ring().reduce_type('weak', degree=((self._weight - QQ((2 * r))), (self._ep * ((- 1) ** r))))
    order_inf = (ambient_weak_space._l1 - order_1)
    if (max_exp == infinity):
        max_exp = order_inf
    elif (max_exp < min_exp):
        return []
    else:
        max_exp = min(ZZ(max_exp), order_inf)
    gens = []
    for m in range(min_exp, (max_exp + 1)):
        gens += [self((ambient_weak_space.F_basis(m, order_1=order_1) * (E2 ** r)))]
    return gens
def quasi_part_dimension(self, r=None, min_exp=0, max_exp=infinity, order_1=ZZ(0)):
    """Return the dimension of the quasi part of ``self`` with exact
    ``E2``-power ``r`` (summed over all admissible powers if ``r`` is
    ``None``), counting basis elements with orders at infinity between
    ``min_exp`` and ``max_exp``.
    """
    if (not self.is_weakly_holomorphic()):
        from warnings import warn
        warn('This function only determines the dimension of some (quasi) weakly subspace!')
    (min_exp, order_1) = self._canonical_min_exp(min_exp, order_1)
    if self.is_modular():
        # NOTE(review): this unconditionally forces r = 0 for modular spaces
        # (so a caller passing r != 0 gets the r = 0 dimension), whereas
        # quasi_part_gens does r = ZZ(r) and returns [] for r != 0 --
        # confirm the intended behavior of this asymmetry.
        r = ZZ(0)
        if (r != 0):
            return ZZ(0)
    n = self.hecke_n()
    # Maximal weight the modular (non-E2) factor can carry.
    if (n == infinity):
        max_numerator_weight = (((self._weight - (4 * min_exp)) - (4 * order_1)) + 4)
    else:
        max_numerator_weight = ((self._weight - (((4 * n) / (n - 2)) * min_exp)) + 4)
    if (r is None):
        return sum([self.quasi_part_dimension(r=rnew, min_exp=min_exp, max_exp=max_exp, order_1=order_1) for rnew in range(ZZ(0), (QQ((max_numerator_weight / ZZ(2))).floor() + 1))])
    r = ZZ(r)
    if ((r < 0) or ((2 * r) > max_numerator_weight)):
        return ZZ(0)
    # Weight/multiplier of the modular factor after splitting off E2^r.
    k = (self._weight - QQ((2 * r)))
    ep = (self._ep * ((- 1) ** r))
    if (n == infinity):
        num = ((k - (1 - ep)) / ZZ(4))
        l2 = order_1
        order_inf = (ZZ(num) - order_1)
    else:
        num = ZZ((((k - (((1 - ep) * ZZ(n)) / ZZ((n - 2)))) * ZZ((n - 2))) / ZZ(4)))
        l2 = (num % n)
        order_inf = ((num - l2) / n).numerator()
    if (max_exp == infinity):
        max_exp = order_inf
    elif (max_exp < min_exp):
        return ZZ(0)
    else:
        max_exp = min(ZZ(max_exp), order_inf)
    # One basis element per admissible order at infinity.
    return max(ZZ(0), ((max_exp - min_exp) + 1))
def construct_form(self, laurent_series, order_1=ZZ(0), check=True, rationalize=False):
    """Construct the (weakly holomorphic) modular form of ``self`` whose
    q-expansion matches ``laurent_series``.

    The form is built as a linear combination of the canonical basis
    expressions :meth:`F_basis_pol`. If ``check`` is ``True`` the
    q-expansion of the result is compared against the input. If
    ``rationalize`` is ``True``, numerical coefficients are first turned
    into exact ones via :meth:`rationalize_series` (experimental).

    Raises ``ValueError`` if the coefficients are in the wrong ring, the
    precision is too low, or the series does not correspond to a form.
    """
    base_ring = laurent_series.base_ring()
    if is_PolynomialRing(base_ring.base()):
        if (not self.coeff_ring().has_coerce_map_from(base_ring)):
            raise ValueError("The Laurent coefficients don't coerce into the coefficient ring of self!")
    elif rationalize:
        laurent_series = self.rationalize_series(laurent_series)
    else:
        raise ValueError('The Laurent coefficients are not in the proper form yet. Try rationalize_series(laurent_series) beforehand (experimental).')
    order_1 = self._canonical_min_exp(0, order_1)[1]
    order_inf = (self._l1 - order_1)
    # The first order_inf + 1 coefficients uniquely determine the form.
    if (laurent_series.prec() < (order_inf + 1)):
        raise ValueError('Insufficient precision: {} < {} = order_inf!'.format(laurent_series.prec(), (order_inf + 1)))
    new_series = laurent_series.add_bigoh((order_inf + 1))
    coefficients = new_series.coefficients()
    exponents = new_series.exponents()
    if (len(coefficients) == 0):
        return self(0)
    # Linear combination of the canonical basis elements F_basis_pol(m).
    rat = sum([(coefficients[j] * self.F_basis_pol(exponents[j], order_1=order_1)) for j in range(ZZ(len(coefficients)))])
    el = self(rat)
    if check:
        prec = min(laurent_series.prec(), (laurent_series.exponents()[(- 1)] + 1))
        if (el.q_expansion(prec=prec) != laurent_series):
            raise ValueError('The Laurent series {} does not correspond to a form of {}'.format(laurent_series, self.reduce_type(['weak'])))
    return el
_method
def _quasi_form_matrix(self, min_exp=0, order_1=ZZ(0), incr_prec_by=0):
    """Return a coefficient matrix for the quasi-part generators of ``self``.

    Each column holds the q-expansion coefficients (from exponent
    ``min_exp`` on) of one generator from :meth:`quasi_part_gens`. Rows are
    added (via the recursive ``incr_prec_by`` retries) until the matrix has
    full column rank, then superfluous rows are removed again so the result
    has as few rows as possible while keeping maximal rank.
    """
    (min_exp, order_1) = self._canonical_min_exp(min_exp, order_1)
    order_inf = (self._l1 - order_1)
    max_exp = (order_inf + 1)
    basis = self.quasi_part_gens(min_exp=min_exp, max_exp=max_exp, order_1=order_1)
    column_size = len(basis)
    row_size = (column_size + incr_prec_by)
    prec = (row_size + min_exp)
    coeff_ring = self.coeff_ring()
    A = matrix(coeff_ring, row_size, 0)
    for gen in basis:
        A = A.augment(gen.q_expansion_vector(min_exp=min_exp, max_exp=(prec - 1)))
    if (A.rank() < column_size):
        # Not enough rows to separate the generators: retry with roughly
        # 20% more rows (precision).
        if (incr_prec_by == 0):
            from sage.misc.verbose import verbose
            verbose('Encountered a base change matrix with not-yet-maximal rank (rare, please report)!')
        incr_prec_by += ((column_size // ZZ(5)) + 1)
        return self._quasi_form_matrix(min_exp=min_exp, order_1=order_1, incr_prec_by=incr_prec_by)
    elif (incr_prec_by == 0):
        return A
    # Shrink back down: first drop blocks of rows (bisection-style), then
    # remove rows one at a time, always preserving the maximal rank.
    while (A.rank() == column_size):
        row_size = A.dimensions()[0]
        if (row_size == column_size):
            return A
        B = A
        A = A.delete_rows(list(range(((column_size + ((row_size - column_size) // 2)) - 1), row_size)))
    while (B.rank() == column_size):
        A = B
        row_size = B.dimensions()[0]
        B = B.delete_rows([(row_size - 1)])
    return A
def required_laurent_prec(self, min_exp=0, order_1=ZZ(0)):
    """Return the number of Laurent coefficients (counted from ``min_exp``)
    required to uniquely determine an element of ``self``, i.e. the row
    count of the reduced generator matrix shifted by ``min_exp``.
    """
    (min_exp, order_1) = self._canonical_min_exp(min_exp, order_1)
    required_rows = self._quasi_form_matrix(min_exp=min_exp, order_1=order_1).dimensions()[0]
    return required_rows + min_exp
def construct_quasi_form(self, laurent_series, order_1=ZZ(0), check=True, rationalize=False):
    """Construct the (quasi) weakly holomorphic form of ``self`` whose
    q-expansion matches ``laurent_series``.

    Uses the cached ``q_basis`` if available (direct coordinate read-off),
    otherwise solves a linear system against the generator matrix from
    :meth:`_quasi_form_matrix`. ``rationalize`` first converts numerical
    coefficients into exact ones (experimental).

    Raises ``ValueError`` on coefficient-ring mismatch, insufficient
    precision, impossible behavior at infinity, or if no matching form
    exists.
    """
    base_ring = laurent_series.base_ring()
    if is_PolynomialRing(base_ring.base()):
        if (not self.coeff_ring().has_coerce_map_from(base_ring)):
            raise ValueError("The Laurent coefficients don't coerce into the coefficient ring of self!")
    elif rationalize:
        laurent_series = self.rationalize_series(laurent_series)
    else:
        raise ValueError('The Laurent coefficients are not in the proper form yet. Try rationalize_series(laurent_series) beforehand (experimental).')
    prec = min(laurent_series.prec(), (laurent_series.exponents()[(- 1)] + 1))
    min_exp1 = laurent_series.exponents()[0]
    (min_exp, order_1) = self._canonical_min_exp(min_exp1, order_1)
    if (min_exp != min_exp1):
        raise ValueError('Due to the behavior at infinity the given Laurent series cannot possibly be an element of {}'.format(self))
    if self.q_basis.is_in_cache(min_exp=min_exp, order_1=order_1):
        # Fast path: with a cached (echelonized) q_basis the coordinates
        # are just the first coefficients of the series.
        basis = self.q_basis(min_exp=min_exp, order_1=order_1)
        size = len(basis)
        if (prec < (min_exp + size)):
            raise ValueError('Insufficient precision: {} < {}!'.format(laurent_series.prec(), (min_exp + size)))
        b = vector(self.coeff_ring(), [laurent_series[m] for m in range(min_exp, (min_exp + len(basis)))])
        el = self(sum([(b[k] * basis[k]) for k in range(0, len(basis))]))
    else:
        # General path: solve for the coordinates against the coefficient
        # matrix of the quasi-part generators.
        A = self._quasi_form_matrix(min_exp=min_exp, order_1=order_1)
        row_size = A.dimensions()[0]
        if (prec < (min_exp + row_size)):
            raise ValueError('Insufficient precision: {} < {}!'.format(laurent_series.prec(), (min_exp + row_size)))
        b = vector(self.coeff_ring(), [laurent_series[m] for m in range(min_exp, (min_exp + row_size))])
        try:
            coord_vector = A.solve_right(b)
        except ValueError:
            raise ValueError('The Laurent series {} does not correspond to a (quasi) form of {}'.format(laurent_series, self.reduce_type(['quasi', 'weak'])))
        order_inf = (self._l1 - order_1)
        max_exp = (order_inf + 1)
        basis = self.quasi_part_gens(min_exp=min_exp, max_exp=max_exp, order_1=order_1)
        el = self(sum([(coord_vector[k] * basis[k]) for k in range(0, len(coord_vector))]))
    if check:
        if (el.q_expansion(prec=prec) != laurent_series):
            raise ValueError('The Laurent series {} does not correspond to a form of {}'.format(laurent_series, self.reduce_type(['quasi', 'weak'])))
    return el
_method
def q_basis(self, m=None, min_exp=0, order_1=ZZ(0)):
    """Return the ``m``-th element of (or, for ``m=None``, the whole)
    "echelon" q-basis of ``self``: the basis whose ``k``-th element has a
    q-expansion of the form ``q**(min_exp + k) + higher order terms``.

    Raises ``ValueError`` for an index out of range or if such a basis
    (resp. basis element) does not exist.
    """
    if (not self.is_weakly_holomorphic()):
        from warnings import warn
        warn('This function only determines elements / a basis of (quasi) weakly modular forms!')
    (min_exp, order_1) = self._canonical_min_exp(min_exp, order_1)
    order_inf = (self._l1 - order_1)
    if (m is None):
        A = self._quasi_form_matrix(min_exp=min_exp, order_1=order_1)
        if A.is_square():
            # Invert the coefficient matrix to express each unit vector in
            # terms of the quasi-part generators.
            B = A.inverse()
            max_exp = (order_inf + 1)
            basis = self.quasi_part_gens(min_exp=min_exp, max_exp=max_exp, order_1=order_1)
            column_len = A.dimensions()[1]
            q_basis = []
            for k in range(0, column_len):
                el = self(sum([(B[l][k] * basis[l]) for l in range(0, column_len)]))
                q_basis += [el]
            return q_basis
        else:
            raise ValueError("Unfortunately a q_basis doesn't exist in this case (this is rare/interesting, please report)")
    else:
        if (m < min_exp):
            raise ValueError('Index out of range: m={} < {}=min_exp'.format(m, min_exp))
        if self.q_basis.is_in_cache(min_exp=min_exp, order_1=order_1):
            # Fast path: pick the element out of the cached full basis.
            q_basis = self.q_basis(min_exp=min_exp, order_1=order_1)
            column_len = len(q_basis)
            if (m >= (column_len + min_exp)):
                raise ValueError('Index out of range: m={} >= {}=dimension + min_exp'.format(m, (column_len + min_exp)))
            return q_basis[(m - min_exp)]
        else:
            # Solve only for the single unit vector e_(m - min_exp).
            row_len = (self.required_laurent_prec(min_exp=min_exp, order_1=order_1) - min_exp)
            if (m >= (row_len + min_exp)):
                raise ValueError('Index out of range: m={} >= {}=required_precision + min_exp'.format(m, (row_len + min_exp)))
            A = self._quasi_form_matrix(min_exp=min_exp, order_1=order_1)
            b = vector(self.coeff_ring(), row_len)
            b[(m - min_exp)] = 1
            try:
                coord_vector = A.solve_right(b)
            except ValueError:
                raise ValueError("Unfortunately the q_basis vector (m={}, min_exp={}) doesn't exist in this case (this is rare/interesting, please report)".format(m, min_exp))
            max_exp = (order_inf + 1)
            basis = self.quasi_part_gens(min_exp=min_exp, max_exp=max_exp, order_1=order_1)
            column_len = A.dimensions()[1]
            el = self(sum([(coord_vector[l] * basis[l]) for l in range(0, column_len)]))
            return el
def rationalize_series(self, laurent_series, coeff_bound=1e-10, denom_factor=ZZ(1)):
    """Try to return a Laurent series with exact (rational) coefficients
    that agrees with the given (possibly numerical) ``laurent_series``
    (experimental).

    Numerical coefficients smaller than ``coeff_bound`` are treated as
    zero; ``denom_factor`` can supply an extra known factor of the
    coefficient denominators. Emits a warning since the rounding procedure
    may produce wrong results.
    """
    from sage.misc.misc_c import prod
    from sage.rings.fast_arith import prime_range
    from warnings import warn
    denom_factor = ZZ(denom_factor)
    base_ring = laurent_series.base_ring()
    series_prec = laurent_series.prec()
    if is_PolynomialRing(base_ring.base()):
        # Already in the symbolic/exact coefficient form: nothing to do.
        if self.coeff_ring().has_coerce_map_from(base_ring):
            return laurent_series
        else:
            raise ValueError("The Laurent coefficients don't coerce into the coefficient ring of self!")
    elif (base_ring.is_exact() and (not self.group().is_arithmetic())):
        prec = self.default_num_prec()
        dvalue = self.group().dvalue().n(prec)
    elif base_ring.is_exact():
        prec = self.default_num_prec()
        dvalue = self.group().dvalue()
    else:
        prec = laurent_series.base_ring().prec()
        dvalue = self.group().dvalue().n(prec)
    warn('Using an experimental rationalization of coefficients, please check the result for correctness!')
    d = self.get_d()
    q = self.get_q()
    if ((not base_ring.is_exact()) and coeff_bound):
        # Drop numerically-negligible coefficients before rounding.
        coeff_bound = base_ring(coeff_bound)
        num_q = laurent_series.parent().gen()
        laurent_series = sum([(laurent_series[i] * (num_q ** i)) for i in range(laurent_series.exponents()[0], (laurent_series.exponents()[(- 1)] + 1)) if (laurent_series[i].abs() > coeff_bound)]).add_bigoh(series_prec)
    first_exp = laurent_series.exponents()[0]
    first_coeff = laurent_series[first_exp]
    # Guess the power of d hidden in the leading coefficient.
    d_power = (first_coeff.abs().n(prec).log() / dvalue.n(prec).log()).round()
    if (first_coeff < 0):
        # Normalize the sign and recurse.
        return (- self.rationalize_series((- laurent_series), coeff_bound=coeff_bound))
    elif ((first_exp + d_power) != 0):
        # Rescale so the leading term is normalized, then recurse.
        cor_factor = (dvalue ** (- (first_exp + d_power)))
        return ((d ** (first_exp + d_power)) * self.rationalize_series((cor_factor * laurent_series), coeff_bound=coeff_bound))
    else:
        if (base_ring.is_exact() and self.group().is_arithmetic()):
            tolerance = 0
        else:
            tolerance = (10 * ZZ(1).n(prec).ulp())
        if (((first_coeff * (dvalue ** first_exp)) - ZZ(1)) > tolerance):
            raise ValueError('The Laurent series is not normalized correctly!')

    def denominator_estimate(m):
        # Upper bound (estimate) for the denominator of the m-th rational
        # coefficient; exact 1/dvalue powers in the arithmetic case.
        cor_exp = max((- first_exp), 0)
        m += cor_exp
        if self.group().is_arithmetic():
            return (ZZ((1 / dvalue)) ** m)
        hecke_n = self.hecke_n()
        bad_factors = [fac for fac in Integer(m).factorial().factor() if (((fac[0] % hecke_n) not in [1, (hecke_n - 1)]) and (fac[0] > 2))]
        bad_factorial = prod([(fac[0] ** fac[1]) for fac in bad_factors])
        return (ZZ(((((2 ** (6 * m)) * (hecke_n ** (2 * m))) * prod([(p ** m) for p in prime_range((m + 1)) if (((hecke_n % p) == 0) and (p > 2))])) * bad_factorial)) ** (cor_exp + 1))

    def rationalize_coefficient(coeff, m):
        # Round the m-th coefficient to a rational with denominator dividing
        # denominator_estimate(m) * denom_factor.
        if ((not self.group().is_arithmetic()) and (denominator_estimate(m).log(2).n().ceil() > prec)):
            warn('The precision from coefficient m={} on is too low!'.format(m))
        rational_coeff = (coeff * (dvalue ** m))
        if (base_ring.is_exact() and self.group().is_arithmetic() and (rational_coeff in QQ)):
            rational_coeff = QQ(rational_coeff)
        else:
            int_estimate = ((denominator_estimate(m) * denom_factor) * rational_coeff)
            rational_coeff = ((int_estimate.round() / denominator_estimate(m)) / denom_factor)
        return (rational_coeff / (d ** m))
    laurent_series = sum([(rationalize_coefficient(laurent_series[m], m) * (q ** m)) for m in range(first_exp, (laurent_series.exponents()[(- 1)] + 1))]).add_bigoh(series_prec)
    return laurent_series
def _an_element_(self):
    """Return an element of ``self`` (here: the zero element)."""
    zero = ZZ(0)
    return self(zero)
_method
def dimension(self):
    """Return the dimension of ``self`` (infinite for this ambient space)."""
    return infinity
def rank(self):
    """Return the rank of ``self`` (same as :meth:`dimension`)."""
    return self.dimension()
def degree(self):
    """Return the degree of ``self`` (same as :meth:`dimension`)."""
    return self.dimension()
def coordinate_vector(self, v):
    """Return the coordinate vector of ``v`` -- not implemented for this
    (infinite-dimensional) space."""
    raise NotImplementedError('No coordinate vector is implemented yet for {}!'.format(self))
_method
def ambient_coordinate_vector(self, v):
    """Return the coordinate vector of ``v`` with respect to the ambient
    space, converted into the module of ``self``."""
    return self.module()(self.ambient_space().coordinate_vector(v))
def gens(self):
    """Return a basis/generators of ``self`` -- not implemented here."""
    raise NotImplementedError('No generators are implemented yet for {}!'.format(self))
def gen(self, k=0):
    """Return the ``k``-th generator of ``self``.

    INPUT:

    - ``k`` -- a non-negative integer with ``k < self.dimension()``
      (default: ``0``).

    OUTPUT: the ``k``-th element of ``self.gens()``.

    Raises ``ValueError`` if ``k`` is out of range.
    """
    k = ZZ(k)
    if (k >= 0) and (k < self.dimension()):
        return self.gens()[k]
    # The upper bound is exclusive (k < dimension); the previous message
    # incorrectly claimed "0 <= k <= dimension".
    raise ValueError('Invalid index: k={} does not satisfy 0 <= k < {}!'.format(k, self.dimension()))
def make_vecAdd_sdfg(sdfg_name: str, dtype=dace.float32):
    """Build a DaCe SDFG computing the vectorized addition ``z = x + y`` on
    an FPGA device.

    The SDFG has three states: copy host arrays to FPGA global memory,
    a device map performing the element-wise (vector-typed) addition, and
    a copy of the result back to the host. Arrays are reinterpreted as
    ``size / 4`` elements of a 4-wide vector type.

    :param sdfg_name: name for the generated SDFG.
    :param dtype: scalar element type (vectorized with width 4).
    :return: the validated ``dace.SDFG``.
    """
    # Vector width of 4 scalar elements per memory access.
    vecWidth = 4
    n = dace.symbol('size')
    vecAdd_sdfg = dace.SDFG(sdfg_name)
    vecType = dace.vector(dtype, vecWidth)
    x_name = 'x'
    y_name = 'y'
    z_name = 'z'
    # --- State 1: host -> device copies of the inputs.
    copy_in_state = vecAdd_sdfg.add_state('copy_to_device')
    vecAdd_sdfg.add_array(x_name, shape=[(n / vecWidth)], dtype=vecType)
    vecAdd_sdfg.add_array(y_name, shape=[(n / vecWidth)], dtype=vecType)
    in_host_x = copy_in_state.add_read(x_name)
    in_host_y = copy_in_state.add_read(y_name)
    vecAdd_sdfg.add_array('device_x', shape=[(n / vecWidth)], dtype=vecType, storage=dace.dtypes.StorageType.FPGA_Global, transient=True)
    vecAdd_sdfg.add_array('device_y', shape=[(n / vecWidth)], dtype=vecType, storage=dace.dtypes.StorageType.FPGA_Global, transient=True)
    in_device_x = copy_in_state.add_write('device_x')
    in_device_y = copy_in_state.add_write('device_y')
    copy_in_state.add_memlet_path(in_host_x, in_device_x, memlet=Memlet.simple(in_host_x, '0:{}/{}'.format(n, vecWidth)))
    copy_in_state.add_memlet_path(in_host_y, in_device_y, memlet=Memlet.simple(in_host_y, '0:{}/{}'.format(n, vecWidth)))
    # --- State 3 (declared next): device -> host copy of the result.
    vecAdd_sdfg.add_array(z_name, shape=[(n / vecWidth)], dtype=vecType)
    copy_out_state = vecAdd_sdfg.add_state('copy_to_host')
    vecAdd_sdfg.add_array('device_z', shape=[(n / vecWidth)], dtype=vecType, storage=dace.dtypes.StorageType.FPGA_Global, transient=True)
    out_device = copy_out_state.add_read('device_z')
    out_host = copy_out_state.add_write(z_name)
    copy_out_state.add_memlet_path(out_device, out_host, memlet=Memlet.simple(out_host, '0:{}/{}'.format(n, vecWidth)))
    # --- State 2: the actual FPGA computation (map over vector elements).
    fpga_state = vecAdd_sdfg.add_state('fpga_state')
    x = fpga_state.add_read('device_x')
    y = fpga_state.add_read('device_y')
    z = fpga_state.add_write('device_z')
    (vecMap_entry, vecMap_exit) = fpga_state.add_map('vecAdd_map', dict(i='0:{0}/{1}'.format(n, vecWidth)), schedule=dace.dtypes.ScheduleType.FPGA_Device)
    vecAdd_tasklet = fpga_state.add_tasklet('vecAdd_task', ['x_con', 'y_con'], ['z_con'], 'z_con = x_con + y_con')
    fpga_state.add_memlet_path(x, vecMap_entry, vecAdd_tasklet, dst_conn='x_con', memlet=dace.Memlet.simple(x.data, 'i'))
    fpga_state.add_memlet_path(y, vecMap_entry, vecAdd_tasklet, dst_conn='y_con', memlet=dace.Memlet.simple(y.data, 'i'))
    fpga_state.add_memlet_path(vecAdd_tasklet, vecMap_exit, z, src_conn='z_con', memlet=dace.Memlet.simple(z.data, 'i'))
    # Linear state order: copy-in -> compute -> copy-out.
    vecAdd_sdfg.add_edge(copy_in_state, fpga_state, dace.sdfg.sdfg.InterstateEdge())
    vecAdd_sdfg.add_edge(fpga_state, copy_out_state, dace.sdfg.sdfg.InterstateEdge())
    vecAdd_sdfg.fill_scope_connectors()
    vecAdd_sdfg.validate()
    return vecAdd_sdfg
class CIFAR10C(Downloader):
    """CIFAR-10-C (corrupted CIFAR-10 test set) dataset wrapper.

    Downloads and extracts the CIFAR-10-C archive on first use, then exposes
    the 10000 test samples of the requested corruption type and severity.
    With ``severity == 0`` or ``corruption is None`` the clean CIFAR-10 test
    split is used instead.
    """

    def __init__(self, corruption=None, severity=0):
        # NOTE(review): the download URL string literal is truncated here
        # (apparently lost in a data export) -- restore the real archive URL
        # before this code can run.
        url = '
        base_dir = os.path.dirname(os.path.abspath(__file__))
        file_name = os.path.join(base_dir, 'data', 'CIFAR10-C.tar')
        data_dir = os.path.join(base_dir, 'data', 'CIFAR-10-C')
        # Download/extract only once; '_SUCCESS' marks a finished extraction.
        if (not os.path.exists(os.path.join(data_dir, '_SUCCESS'))):
            self.download(url=url, file_name=file_name, expected_md5='56bf5dcef84df0e2308c6dcbcbbd8499')
            self.extract_tar(file_name, extract_dir=data_dir)
        if ((severity == 0) or (corruption is None)):
            # Fall back to the clean test split.
            self.data = CIFAR10(split='test')
        else:
            # Every '<corruption>.npy' file (except the labels file) names a
            # valid corruption type.
            valid_corruptions = [d[:(- 4)] for d in os.listdir(data_dir) if ((d != 'labels.npy') and d.endswith('.npy'))]
            assert (severity in range(1, 6)), f'Got severity={severity}. Expected an int between 1 and 5.'
            assert (corruption in valid_corruptions), f'Got corruption={corruption}. Expected one of {valid_corruptions}'
            data = np.load(os.path.join(data_dir, (corruption + '.npy')))
            labels = np.load(os.path.join(data_dir, 'labels.npy'))
            # Each severity level occupies a contiguous block of 10000 samples.
            self.data = [(data[i], labels[i]) for i in range(((severity - 1) * 10000), (severity * 10000))]
        self.transform = transforms.Compose([transforms.ToTensor(), transforms.Resize(224), CIFAR10.norm()])
        self.split = 'test'

    def n_class(self):
        """Return the number of classes (CIFAR-10 has 10)."""
        return 10
def _get_methods(cls):
import inspect
return inspect.getmembers(cls, predicate=(lambda x: (inspect.isfunction(x) or inspect.ismethod(x)))) |
class Wav2Vec2ForPreTraining(metaclass=DummyObject):
    """Auto-generated placeholder used when PyTorch is not installed.

    Instantiating it calls ``requires_backends``, which raises an informative
    ImportError-style message unless the 'torch' backend is available.
    """
    # Backends that must be installed for the real class to be importable.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def train_opts(parser):
    """Register all training-related command line options on ``parser``.

    Groups covered: data/checkpoint paths, GPU/seed setup, initialization
    and pretrained embeddings, batching, optimization (method, learning
    rate, decay schedule) and logging/experiment reporting. Mutates the
    given ``argparse``-style parser in place; returns ``None``.
    """
    # Data and checkpointing.
    parser.add_argument('-data', required=True, help='Path prefix to the ".train.pt" and\n ".valid.pt" file path from preprocess.py')
    parser.add_argument('-save_model', default='model', help='Model filename (the model will be saved as\n <save_model>_epochN_PPL.pt where PPL is the\n validation perplexity')
    parser.add_argument('-train_from', default='', type=str, help="If training from a checkpoint then this is the\n path to the pretrained model's state_dict.")
    # Hardware / reproducibility.
    parser.add_argument('-gpuid', default=[], nargs='+', type=int, help='Use CUDA on the listed devices.')
    parser.add_argument('-seed', type=int, default=(- 1), help='Random seed used for the experiments\n reproducibility.')
    parser.add_argument('-start_epoch', type=int, default=1, help='The epoch from which to start')
    # Parameter initialization and pretrained embeddings.
    parser.add_argument('-param_init', type=float, default=0.1, help='Parameters are initialized over uniform distribution\n with support (-param_init, param_init).\n Use 0 to not use initialization')
    parser.add_argument('-pre_word_vecs_enc', help='If a valid path is specified, then this will load\n pretrained word embeddings on the encoder side.\n See README for specific formatting instructions.')
    parser.add_argument('-pre_word_vecs_dec', help='If a valid path is specified, then this will load\n pretrained word embeddings on the decoder side.\n See README for specific formatting instructions.')
    parser.add_argument('-fix_word_vecs_enc', action='store_true', help='Fix word embeddings on the encoder side.')
    parser.add_argument('-fix_word_vecs_dec', action='store_true', help='Fix word embeddings on the encoder side.')
    # Batching.
    parser.add_argument('-batch_size', type=int, default=64, help='Maximum batch size')
    parser.add_argument('-max_generator_batches', type=int, default=32, help='Maximum batches of words in a sequence to run\n the generator on in parallel. Higher is faster, but\n uses more memory.')
    parser.add_argument('-epochs', type=int, default=13, help='Number of training epochs')
    # Optimization and learning-rate schedule.
    parser.add_argument('-optim', default='sgd', choices=['sgd', 'adagrad', 'adadelta', 'adam'], help='Optimization method.')
    parser.add_argument('-max_grad_norm', type=float, default=5, help='If the norm of the gradient vector exceeds this,\n renormalize it to have the norm equal to\n max_grad_norm')
    parser.add_argument('-dropout', type=float, default=0.3, help='Dropout probability; applied in LSTM stacks.')
    parser.add_argument('-truncated_decoder', type=int, default=0, help='Truncated bptt.')
    parser.add_argument('-learning_rate', type=float, default=1.0, help='Starting learning rate. If adagrad/adadelta/adam\n is used, then this is the global learning rate.\n Recommended settings: sgd = 1, adagrad = 0.1,\n adadelta = 1, adam = 0.001')
    parser.add_argument('-learning_rate_decay', type=float, default=0.5, help='If update_learning_rate, decay learning rate by\n this much if (i) perplexity does not decrease on the\n validation set or (ii) epoch has gone past\n start_decay_at')
    parser.add_argument('-start_decay_at', type=int, default=8, help='Start decaying every epoch after and including this\n epoch')
    parser.add_argument('-start_checkpoint_at', type=int, default=0, help='Start checkpointing every epoch after and including\n this epoch')
    parser.add_argument('-decay_method', type=str, default='', choices=['noam'], help='Use a custom decay rate.')
    parser.add_argument('-warmup_steps', type=int, default=4000, help='Number of warmup steps for custom decay.')
    # Logging / experiment reporting.
    parser.add_argument('-report_every', type=int, default=50, help='Print stats at this interval.')
    parser.add_argument('-exp_host', type=str, default='', help='Send logs to this crayon server.')
    parser.add_argument('-exp', type=str, default='', help='Name of the experiment for logging.')
class Track_IBTRACKS_full(Track):
    """A :class:`Track` extended with the additional IBTrACS fields
    name, basin, distance-to-land and storm nature."""

    def __init__(self):
        Track.__init__(self)
        self.name = ''
        # Per-timestep lists, filled by import_from_raw_track_IBTRACKS_full.
        self.basin = []
        self.dist2land = []
        self.nature = []

    def import_from_raw_track_IBTRACKS_full(self, rootgrp, id_in_list, time_steps):
        """Populate the track from a raw IBTrACS netCDF group.

        :param rootgrp: open netCDF dataset with IBTrACS variables.
        :param id_in_list: storm index within the dataset.
        :param time_steps: iterable of time-step indices to import.
        """
        # Base-class import fills the common track fields first.
        Track.import_from_raw_track_IBTRACKS(self, rootgrp, id_in_list, time_steps)
        # Storm names are stored as byte arrays in the netCDF file.
        self.name = b''.join(rootgrp['name'][id_in_list]).decode('utf-8')
        for t in time_steps:
            self.basin.append(rootgrp['basin'][id_in_list][t])
            self.dist2land.append(rootgrp['dist2land'][id_in_list][t])
            self.nature.append(rootgrp['nature_wmo'][id_in_list][t])
class Net(nn.Module):
    """Small LeNet-style CNN: two conv+pool stages followed by three fully
    connected layers, mapping (CHANNELS, WIDTH, HEIGHT) images to
    NUM_CLASSES logits. The shape constants are module-level globals.
    """

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(CHANNELS, 6, kernel_size=3)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, NUM_CLASSES)

    def forward(self, x, **kwargs):
        # Extra keyword arguments are accepted but ignored.
        del kwargs
        out = x.view(-1, CHANNELS, WIDTH, HEIGHT)
        out = self.pool(F.relu(self.conv1(out)))
        out = self.pool(F.relu(self.conv2(out)))
        # Flatten the 16x5x5 feature maps for the classifier head.
        out = out.view(-1, 16 * 5 * 5)
        out = F.relu(self.fc1(out))
        out = F.relu(self.fc2(out))
        return self.fc3(out)
def register_ring_hom(ring_hom):
    """Register ``ring_hom`` as a conversion on its codomain.

    If a conversion from the homomorphism's domain is already cached on the
    codomain, the existing map is compared with ``ring_hom`` and a verbose
    message is emitted when they differ (or cannot be compared at all).
    Otherwise the map is registered via ``register_conversion``; a failure
    there is reported through ``verbose`` instead of raising.

    INPUT:

    - ``ring_hom`` -- a ring homomorphism (anything providing ``domain()``
      and ``codomain()``).

    OUTPUT: ``None``.
    """
    domain = ring_hom.domain()
    codomain = ring_hom.codomain()
    if codomain._is_conversion_cached(domain):
        test_map = codomain.convert_map_from(domain)
        try:
            if test_map != ring_hom:
                verbose('\nConversion:\n%s\n already exists and is different from:\n%s\n' % (test_map, ring_hom))
        except TypeError:
            # Some maps cannot be compared at all; report instead of failing.
            verbose('\n Conversion:\n%s\n already exists and is not comparable to:\n%s\n' % (test_map, ring_hom))
    else:
        try:
            codomain.register_conversion(ring_hom)
        except ValueError:
            # Message typo fixed: "registerd" -> "registered".
            verbose('\nthe map:\n%s\ncannot be registered as conversion\n' % ring_hom)
    return
def test_listtype_numpytype_categorical():
    """Round-trip: a categorical ListType survives serialization to its
    datashape string and re-parsing via ``ak.types.from_datashape``."""
    t = ListType(NumpyType('int32'), parameters={'__categorical__': True})
    assert (str(ak.types.from_datashape(str(t), highlevel=False)) == str(t))
def test_multi_objective_set_max_empirical():
    """Check that after setting max empirical losses, the combined descent
    loss equals the alpha-weighted sum of the per-objective maxima.

    NOTE(review): relies on module-level fixtures (``analytical``,
    ``max_empirical_losses``, ``losses``, ``gradient``, ``alpha_norm_base``,
    ``max_empirical_loss_1/2``) defined elsewhere in this file.
    """
    multi_cdv_tmp = MultiObjectiveCDV(analytical, normalized=True)
    multi_cdv_tmp.set_max_empirical_losses(max_empirical_losses)
    (final_loss, alphas) = multi_cdv_tmp.get_descent_vector(losses, gradient)
    assert (final_loss.data == ((alphas[0] * max_empirical_loss_1) + (alphas[1] * max_empirical_loss_2)).data)
    assert (alphas == alpha_norm_base)
def test_case158():
    """NGSI-LD batch upsert with an (intentionally) unresolvable context:
    expects the broker to answer 404.

    NOTE(review): the 'Link' header value below looks mangled (the context
    URL between '<' and '>' appears to have been stripped in an export) --
    verify against the original test before running.
    """
    url = (brokerIp + '/ngsi-ld/v1/entityOperations/upsert')
    headers = {'Content-Type': 'application/json', 'Accept': 'application/ld+json', 'Link': '<{{link}}>; rel=" type="application/ld+json"'}
    r = requests.post(url, data=json.dumps(ld_data.subdata157), headers=headers)
    print(r.content)
    print(r.status_code)
    assert (r.status_code == 404)
def test_Numpy_extend():
    """``lb.Numpy.extend`` appends all values; extending twice concatenates
    (snapshot reflects the accumulated contents)."""
    def f11(builder):
        builder.extend(np.arange(8))
    builder = lb.Numpy(np.float32)
    f11(builder)
    assert (ak.to_list(builder.snapshot()) == list(range(8)))
    # A second extend appends after the existing 8 values.
    f11(builder)
    assert (ak.to_list(builder.snapshot()) == (list(range(8)) + list(range(8))))
def register_Ns3MinstrelHtWifiManager_methods(root_module, cls):
    """Register the Python bindings for ``ns3::MinstrelHtWifiManager``.

    Auto-generated (pybindgen-style) registration: declares constructors,
    the public API (stream assignment, type id, MAC/PHY setup) and the
    private virtual hooks of the rate-control algorithm on ``cls``.
    Returns ``None``.
    """
    # Constructors: copy constructor and default constructor.
    cls.add_constructor([param('ns3::MinstrelHtWifiManager const &', 'arg0')])
    cls.add_constructor([])
    # Public API.
    cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')])
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('SetupMac', 'void', [param('ns3::Ptr< ns3::WifiMac > const', 'mac')], is_virtual=True)
    cls.add_method('SetupPhy', 'void', [param('ns3::Ptr< ns3::WifiPhy > const', 'phy')], is_virtual=True)
    # Private virtual hooks of the remote-station manager interface.
    cls.add_method('DoCreateStation', 'ns3::WifiRemoteStation *', [], is_const=True, visibility='private', is_virtual=True)
    cls.add_method('DoGetDataTxVector', 'ns3::WifiTxVector', [param('ns3::WifiRemoteStation *', 'station')], visibility='private', is_virtual=True)
    cls.add_method('DoGetRtsTxVector', 'ns3::WifiTxVector', [param('ns3::WifiRemoteStation *', 'station')], visibility='private', is_virtual=True)
    cls.add_method('DoInitialize', 'void', [], visibility='private', is_virtual=True)
    cls.add_method('DoNeedRetransmission', 'bool', [param('ns3::WifiRemoteStation *', 'st'), param('ns3::Ptr< ns3::Packet const >', 'packet'), param('bool', 'normally')], visibility='private', is_virtual=True)
    cls.add_method('DoReportAmpduTxStatus', 'void', [param('ns3::WifiRemoteStation *', 'station'), param('uint8_t', 'nSuccessfulMpdus'), param('uint8_t', 'nFailedMpdus'), param('double', 'rxSnr'), param('double', 'dataSnr')], visibility='private', is_virtual=True)
    cls.add_method('DoReportDataFailed', 'void', [param('ns3::WifiRemoteStation *', 'station')], visibility='private', is_virtual=True)
    cls.add_method('DoReportDataOk', 'void', [param('ns3::WifiRemoteStation *', 'station'), param('double', 'ackSnr'), param('ns3::WifiMode', 'ackMode'), param('double', 'dataSnr')], visibility='private', is_virtual=True)
    cls.add_method('DoReportFinalDataFailed', 'void', [param('ns3::WifiRemoteStation *', 'station')], visibility='private', is_virtual=True)
    cls.add_method('DoReportFinalRtsFailed', 'void', [param('ns3::WifiRemoteStation *', 'station')], visibility='private', is_virtual=True)
    cls.add_method('DoReportRtsFailed', 'void', [param('ns3::WifiRemoteStation *', 'station')], visibility='private', is_virtual=True)
    cls.add_method('DoReportRtsOk', 'void', [param('ns3::WifiRemoteStation *', 'station'), param('double', 'ctsSnr'), param('ns3::WifiMode', 'ctsMode'), param('double', 'rtsSnr')], visibility='private', is_virtual=True)
    cls.add_method('DoReportRxOk', 'void', [param('ns3::WifiRemoteStation *', 'station'), param('double', 'rxSnr'), param('ns3::WifiMode', 'txMode')], visibility='private', is_virtual=True)
    cls.add_method('IsLowLatency', 'bool', [], is_const=True, visibility='private', is_virtual=True)
    return
def get_logger(name):
    """Return the logger registered under ``name`` with its level set to
    ``logging.INFO``."""
    log = logging.getLogger(name)
    log.setLevel(logging.INFO)
    return log
def main():
    """Enhance a single image with a pretrained URIE SKUNet model.

    Loads the checkpoint, strips any DataParallel 'module.' prefixes from
    the state dict keys, runs the image given on the command line
    (``sys.argv[1]``) through the network on the GPU and saves the input /
    output pair side by side as './output.jpg'.
    """
    pretrained_weights = './ECCV_MODELS/ECCV_SKUNET_OURS.ckpt.pt'
    image = Image.open(sys.argv[1])
    urie = SKUNet().cuda().eval()
    weights = torch.load(pretrained_weights)
    # Checkpoints saved from nn.DataParallel prefix keys with 'module.';
    # remove it so the keys match a plain (single-GPU) model.
    new_weights = {}
    for (k, v) in weights.items():
        if ('module.' in k):
            new_weights[k.replace('module.', '')] = v
        else:
            new_weights[k] = v
    urie.load_state_dict(new_weights, strict=True)
    # Standard ImageNet preprocessing at the model's 227x227 input size.
    tsfrms = transforms.Compose([transforms.Resize((227, 227)), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
    image = tsfrms(image).unsqueeze(0).cuda()
    (output_image, _) = urie(image)
    # Save input and enhanced output side by side for visual comparison.
    vutils.save_image(torch.cat((image, output_image), dim=0), './output.jpg', normalize=True, scale_each=True)
def main():
    """Build frequency vocabularies from a tab-separated corpus file.

    Reads the file given as ``sys.argv[1]``, where (0-indexed) column 2
    holds words, column 3 labels and column 4 features, each
    whitespace-separated within the column. Writes three
    ``<index>\\t<item>\\t<frequency>`` files sorted by descending frequency
    (ties keep first-seen order):

    - ``sys.argv[2]`` -- word vocabulary (indices start at 0)
    - ``sys.argv[3]`` -- feature vocabulary (indices start at 1)
    - ``sys.argv[4]`` -- label vocabulary (indices start at 0)
    """
    # Counter replaces the previous hand-rolled "if key in dict" counting;
    # insertion order (and hence tie order) is preserved exactly as before.
    from collections import Counter
    word2freq = Counter()
    feature2freq = Counter()
    label2freq = Counter()
    with open(sys.argv[1]) as f:
        for line in f:
            columns = line.strip().split('\t')
            (labels, features, words) = (columns[3], columns[4], columns[2])
            label2freq.update(labels.split())
            word2freq.update(words.split())
            feature2freq.update(features.split())

    def _local(file_path, X2freq, start_idx=0):
        # One output line per item, most frequent first; sorted() is stable,
        # so equal frequencies keep their first-seen order.
        with open(file_path, 'w') as f:
            for (i, (item, freq)) in enumerate(sorted(X2freq.items(), key=(lambda t: (- t[1]))), start_idx):
                f.write(f'{i}\t{item}\t{freq}\n')

    _local(sys.argv[2], word2freq)
    _local(sys.argv[3], feature2freq, start_idx=1)
    _local(sys.argv[4], label2freq)
.parametrize('ctx, func_name', ctxs)
.parametrize('seed', [313])
.parametrize('inshape', [(1,), (2,), (2, 3), (2, 3, 4), (2, 3, 4, 5), (2, 3, 4, 5, 6), (1, 1), (1, 10000), (100000, 1)])
.parametrize('zero_rate', [0.0, 0.5, 1.0])
def test_nonzero_forward(seed, inshape, zero_rate, ctx, func_name):
    """Forward test for ``F.nonzero``: zero out a random fraction of the
    input and check the op against the NumPy reference ``ref_nonzero``.

    Parametrized (via the decorators above) over context, seed, input shape
    and the fraction of zeroed entries.
    """
    rng = np.random.RandomState(seed)
    input = rng.randn(*inshape).astype(dtype=np.float32)
    # Zero approximately `zero_rate` of the entries.
    cond = (rng.rand(*inshape) <= zero_rate)
    input = np.where(cond, np.zeros_like(input), input)
    vinput = nn.Variable.from_numpy_array(input)
    with nn.context_scope(ctx), nn.auto_forward():
        o = F.nonzero(vinput)
    r = ref_nonzero(input)
    assert_allclose(o.d, r)
    # The executed function must be the one requested by the context.
    assert (func_name == o.parent.name)
def explained_variance_1d(ypred, y):
    """Fraction of the variance of ``y`` explained by ``ypred``.

    Computes 1 - Var[y - ypred] / Var[y] for 1-D arrays.  Degenerate
    case: when ``y`` is numerically constant, returns 1 if ``ypred`` is
    constant too, otherwise 0.
    """
    assert y.ndim == 1 and ypred.ndim == 1
    var_y = np.var(y)
    if np.isclose(var_y, 0):
        # Constant targets: only constant predictions count as perfect.
        return 1 if np.var(ypred) == 0 else 0
    return 1 - np.var(y - ypred) / (var_y + 1e-08)
.skip
def test_lambda_call_jit():
    """A wrapper that writes f(B, C) into A must accept a lambda for f."""
    def apply_binary(out, lhs, rhs, op):
        out[:] = op(lhs, rhs)

    a = np.random.rand(20)
    b = np.random.rand(20)
    c = np.random.rand(20)
    apply_binary(a, b, c, lambda x, y: x + y)
    assert np.allclose(a, b + c)
def load_file(oss_model_dir, local_file_name, oss_file_name=None):
    """Download one file from the OSS model directory to a local path.

    When ``oss_file_name`` is omitted the remote object is assumed to
    share the local file's name.
    """
    remote_name = local_file_name if oss_file_name is None else oss_file_name
    remote_path = remove_bucket_prefix(
        '{}/{}'.format(oss_model_dir.rstrip('/'), remote_name))
    get_models_bucket().get_object_to_file(remote_path, local_file_name)
class TreeNode:
    """One node of an in-construction decision tree.

    Nodes are kept on a heap ordered by split gain, hence the reversed
    ``__lt__``: the node with the *largest* gain compares as smallest.
    """

    # Filled in later by the tree grower.
    split_info = None
    left_child = None
    right_child = None
    histograms = None
    # Bounds of this node's slice in the grower's sample partition.
    partition_start = 0
    partition_stop = 0

    def __init__(self, depth, sample_indices, sum_gradients,
                 sum_hessians, value=None):
        self.depth = depth
        self.sample_indices = sample_indices
        self.n_samples = sample_indices.shape[0]
        self.sum_gradients = sum_gradients
        self.sum_hessians = sum_hessians
        self.value = value
        self.is_leaf = False
        self.allowed_features = None
        self.interaction_cst_indices = None
        # Children value bounds start out unconstrained.
        self.set_children_bounds(float('-inf'), float('+inf'))

    def set_children_bounds(self, lower, upper):
        """Record the value bounds that child nodes must respect."""
        self.children_lower_bound = lower
        self.children_upper_bound = upper

    def __lt__(self, other_node):
        # Higher gain means higher priority on the heap.
        return other_node.split_info.gain < self.split_info.gain
_codecs
def get_supported_codecs():
    """Probe the ffmpeg/avconv binary for its supported codecs.

    Runs ``<encoder> -codecs`` and parses the flags table into a
    ``(decoders, encoders)`` pair of codec-name sets.

    NOTE(review): the failure path returns ``[]`` while the success
    path returns a 2-tuple of sets; callers that unpack the result must
    handle the list case.  Preserved as-is for backward compatibility.
    """
    encoder = get_encoder_name()
    res = Popen([encoder, '-codecs'], stdout=PIPE, stderr=PIPE)
    output = res.communicate()[0].decode('utf-8')
    if res.returncode != 0:
        return []
    if sys.platform == 'win32':
        output = output.replace('\r', '')
    # Flags column: first char D = decoding supported, second E = encoding.
    flag_re = re.compile('^([D.][E.][AVS.][I.][L.][S.]) (\\w*) +(.*)')
    decoders = set()
    encoders = set()
    for raw_line in output.split('\n'):
        matched = flag_re.match(raw_line.strip())
        if matched:
            flags, codec, _name = matched.groups()
            if flags[0] == 'D':
                decoders.add(codec)
            if flags[1] == 'E':
                encoders.add(codec)
    return (decoders, encoders)
.skipif((not cpp17), reason='ROOT was compiled without C++17 support')
def test_UnmaskedArray_NumpyArray():
    """Round-trip an UnmaskedArray<NumpyArray> layout through ROOT's cling JIT.

    Generates C++ accessor code for the layout, declares a C++ test
    function via the ROOT interpreter, calls it with the Lookup's raw
    array pointers, and checks the values read back from C++.
    """
    # v2 layout: an UnmaskedArray wrapping a 4-element float64 NumpyArray.
    v2a = ak.contents.unmaskedarray.UnmaskedArray(ak.contents.numpyarray.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3])))
    layout = v2a
    # Generate C++ accessors for this layout form and compile them with cling.
    generator = ak._connect.cling.togenerator(layout.form, flatlist_as_rvec=False)
    lookup = ak._lookup.Lookup(layout, generator)
    generator.generate(compiler)
    # UnmaskedArray elements surface as optional values on the C++ side:
    # has_value() should be true for every element, 999.0 is the sentinel.
    ROOT.gInterpreter.Declare(f'''
void roottest_UnmaskedArray_NumpyArray_v2a(double* out, ssize_t length, ssize_t* ptrs) {{
    auto obj = {generator.dataset()};
    out[0] = obj.size();
    out[1] = obj[1].has_value() ? obj[1].value() : 999.0;
    out[2] = obj[3].has_value() ? obj[3].value() : 999.0;
}}
''')
    # out receives [size, element 1, element 3] computed in C++.
    out = np.zeros(3, dtype=np.float64)
    ROOT.roottest_UnmaskedArray_NumpyArray_v2a(out, len(layout), lookup.arrayptrs)
    assert (out.tolist() == [4.0, 1.1, 3.3])
def mean_squared_logarithmic_error(y_true, y_pred):
    """Mean over the last axis of squared log(1 + x) differences.

    Both inputs are clipped below at ``K.epsilon()`` before the log so
    the result stays finite for non-positive values.
    """
    log_pred = K.log(K.clip(y_pred, K.epsilon(), None) + 1.0)
    log_true = K.log(K.clip(y_true, K.epsilon(), None) + 1.0)
    return K.mean(K.square(log_pred - log_true), axis=-1)
def register_Ns3FfMacSchedSapUserSchedDlConfigIndParameters_methods(root_module, cls):
    """Register constructors and instance attributes for the
    ns3::FfMacSchedSapUser::SchedDlConfigIndParameters binding."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::FfMacSchedSapUser::SchedDlConfigIndParameters const &', 'arg0')])
    # (attribute name, C++ type) pairs exposed as mutable instance attributes.
    attribute_specs = [
        ('m_buildBroadcastList', 'std::vector< ns3::BuildBroadcastListElement_s >'),
        ('m_buildDataList', 'std::vector< ns3::BuildDataListElement_s >'),
        ('m_buildRarList', 'std::vector< ns3::BuildRarListElement_s >'),
        ('m_nrOfPdcchOfdmSymbols', 'uint8_t'),
        ('m_vendorSpecificList', 'std::vector< ns3::VendorSpecificListElement_s >'),
    ]
    for attr_name, cpp_type in attribute_specs:
        cls.add_instance_attribute(attr_name, cpp_type, is_const=False)
    return
def autoidentify():
    """Log a git-style blob SHA-1 of this module's own source code.

    Mirrors ``git hash-object``: hashes ``b"blob <len>\\0<source>"`` so
    the logged version matches the file's git object id.  Logs a notice
    instead when the source is unavailable (e.g. a frozen interpreter).
    """
    module = sys.modules[__name__]
    try:
        source = inspect.getsource(module).encode('utf-8')
    except TypeError:
        logging.info('diagnose_tensorboard.py source unavailable')
        return
    digest = hashlib.sha1(b'blob %d\x00%s' % (len(source), source)).hexdigest()
    logging.info('diagnose_tensorboard.py version %s', digest)
class PhaseShiftLower(PairwiseUnitary):
    """Single-qubit gate shifting the phase of the lower basis state.

    The unitary is diag(1, exp(i * phase_shift)).
    """

    def __init__(self, phase_shift: float, dtype=NP_COMPLEX):
        super().__init__(dtype=dtype)
        self.phase_shift = phase_shift

    def matrix(self) -> np.ndarray:
        """Return the 2x2 matrix representation of this gate."""
        phase = np.exp(1j * self.phase_shift)
        return np.array([[1, 0], [0, phase]], dtype=self.dtype)
def __getattr__(name):
    # Module-level __getattr__ (PEP 562): route unknown attribute access
    # through the deprecation shim so lookups of names from the private
    # `_constants` module keep working while emitting a deprecation
    # warning.  Names in __all__ are the supported public API.
    return _sub_module_deprecation(sub_package='constants', module='constants', private_modules=['_constants'], all=__all__, attribute=name)
class ClassificationEvaluator(BaseEvaluator):
    """Accumulates ground-truth and predicted labels over batches and
    reports per-class and mean accuracies."""

    def __init__(self, dataset):
        self.dataset = dataset
        self.num_class = dataset.num_classes
        # Assumes dataset.dictionary is indexable per class id, each entry
        # mapping label name -> weight.
        self.dictionary = dataset.dictionary
        self.gt_labels = []
        self.pred_labels = []
        self.count = 0

    def Accuracy(self):
        """Per-class accuracy keyed by label name.

        For each class id, the fraction of its ground-truth samples that
        were predicted correctly; epsilon guards the empty-class case.
        """
        all_acc = {}
        correct = np.equal(self.gt_labels, self.pred_labels)
        for idx, entry in enumerate(self.dictionary):
            for _label in entry:
                is_class = np.equal(self.gt_labels, idx)
                all_acc[_label] = (correct & is_class).sum() / (is_class.sum() + 1e-06)
        return all_acc

    def Mean_Accuracy(self):
        """Overall fraction of correctly predicted samples."""
        n_correct = np.equal(self.gt_labels, self.pred_labels).sum()
        return n_correct / (len(self.gt_labels) + 1e-06)

    def evaluate(self):
        """Return the metrics dict, or None if nothing was accumulated."""
        if self.count < 1:
            return None
        performances = self.Accuracy()
        mean_acc = self.Mean_Accuracy()
        performances['mAcc'] = mean_acc
        performances['performance'] = mean_acc
        return performances

    def update(self, gt_label, pred_label):
        """Append one batch of (tensor) labels to the running lists."""
        assert gt_label.shape == pred_label.shape
        self.gt_labels.extend(gt_label.data.cpu().tolist())
        self.pred_labels.extend(pred_label.data.cpu().tolist())
        self.count += 1

    def reset(self):
        """Clear all accumulated state."""
        self.gt_labels = []
        self.pred_labels = []
        self.count = 0
def test_computation_cache_clone(cache):
    """Cloning the cache must carry over registered functions and the
    cached fitness/coverage/covered values."""
    fitness_func = MagicMock()
    fitness_func.is_maximisation_function.return_value = False
    fitness_func.compute_fitness.return_value = 0
    cache.add_fitness_function(fitness_func)
    coverage_func = MagicMock()
    coverage_func.compute_coverage.return_value = 1
    cache.add_coverage_function(coverage_func)
    # An unchanged chromosome lets the computed values be cached.
    cache._chromosome.has_changed.return_value = False
    assert cache.get_coverage() == 1
    assert cache.get_fitness() == 0
    cloned = cache.clone(MagicMock())
    assert cloned.get_fitness_functions() == [fitness_func]
    assert cloned.get_coverage_functions() == [coverage_func]
    assert cloned._is_covered_cache[fitness_func] is True
    assert cloned._fitness_cache[fitness_func] == 0
    assert cloned._coverage_cache[coverage_func] == 1
def find_subclasses_recursively(base_cls, sub_cls):
    """Collect every transitive subclass of ``base_cls`` into ``sub_cls``.

    Mutates the set ``sub_cls`` in place; returns None.
    """
    for child in base_cls.__subclasses__():
        sub_cls.add(child)
        find_subclasses_recursively(child, sub_cls)
def numpy_all_the_way(list_of_arrays):
    """Stack equally-shaped arrays into one array with a new leading axis.

    The result has shape ``(len(list_of_arrays),) + list_of_arrays[0].shape``.
    The original concatenate-then-reshape dance is exactly what
    ``np.stack`` does; use the purpose-built function.
    """
    return np.stack(list_of_arrays, axis=0)
_scopes
def before_generate_query(context: HookContext, strategy: st.SearchStrategy) -> st.SearchStrategy: |
def compute_metrics(task_name, preds, labels):
    """Compute the evaluation metric(s) appropriate for a GLUE task.

    Raises KeyError for an unknown ``task_name``.
    """
    assert len(preds) == len(labels)
    if task_name == 'cola':
        return {'mcc': matthews_corrcoef(labels, preds)}
    if task_name in ('mrpc', 'qqp'):
        return acc_and_f1(preds, labels)
    if task_name == 'sts-b':
        return pearson_and_spearman(preds, labels)
    # All remaining known tasks report plain accuracy.
    if task_name in ('sst-2', 'mnli', 'mnli-mm', 'qnli', 'rte', 'wnli'):
        return {'acc': simple_accuracy(preds, labels)}
    raise KeyError(task_name)
def lengths_to_offsets(t, offset_type=np.int64, use_begin_offset=True):
    """Convert a 1-D array of segment lengths into segment offsets.

    With ``use_begin_offset=True`` returns each segment's start offset,
    otherwise each segment's exclusive end offset.  The result is a
    torch tensor with dtype ``offset_type``.
    """
    # Prepend a zero so the cumulative sum yields begin offsets.
    padded = np.zeros(t.shape[0] + 1, dtype=offset_type)
    padded[1:] = t
    offsets = torch.from_numpy(np.cumsum(padded, dtype=offset_type))
    return offsets[:-1] if use_begin_offset else offsets[1:]
def parse_args():
    """Build the argument parser and parse the command line."""
    arg_parser = argparse.ArgumentParser(description='Train a detector')
    arg_parser.add_argument('config', help='train config file path')
    arg_parser.add_argument('--shape', type=int, nargs='+', default=[1280, 800], help='input image size')
    arg_parser.add_argument('--cfg-options', nargs='+', action=DictAction, help='override some settings in the used config, the key-value pair in xxx=yyy format will be merged into config file. If the value to be overwritten is a list, it should be like key="[a,b]" or key=a,b It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" Note that the quotation marks are necessary and that no white space is allowed.')
    arg_parser.add_argument('--size-divisor', type=int, default=32, help='Pad the input image, the minimum size that is divisible by size_divisor, -1 means do not pad the image.')
    return arg_parser.parse_args()
def test_is_open():
    """_is_open must report open positions correctly on both board orientations."""
    board = make_test_boad()
    # Open positions on the original board.
    for pos in (9, 19, 4):
        assert _is_open(board, pos)
    assert not _is_open(board, 10)
    # After flipping, openness follows the mirrored layout.
    board = _flip_board(board)
    for pos in (9, 8):
        assert _is_open(board, pos)
    for pos in (2, 4):
        assert not _is_open(board, pos)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.