code stringlengths 101 5.91M |
|---|
def copy_vision_model_and_projection(hf_model, pt_weights):
    """Copy CLIP vision-tower weights from an original checkpoint into a HF model.

    hf_model: HuggingFace CLIP-style model exposing `vision_model` and
        `visual_projection`.
    pt_weights: state dict of the original checkpoint; vision weights live
        under 'visual.*' keys.
    """
    # The projection matrix is stored transposed in the original checkpoint.
    hf_model.visual_projection.weight.data = pt_weights['visual.proj'].data.T
    # Pre-/post-transformer LayerNorms.  ('pre_layrnorm' is the actual HF
    # attribute name -- the typo is upstream, do not "fix" it here.)
    copy_linear(hf_model.vision_model.pre_layrnorm, pt_weights, 'visual.ln_pre')
    copy_linear(hf_model.vision_model.post_layernorm, pt_weights, 'visual.ln_post')
    # Patch / class / positional embeddings are copied verbatim.
    hf_model.vision_model.embeddings.patch_embedding.weight.data = pt_weights['visual.conv1.weight'].data
    hf_model.vision_model.embeddings.class_embedding.data = pt_weights['visual.class_embedding'].data
    hf_model.vision_model.embeddings.position_embedding.weight.data = pt_weights['visual.positional_embedding'].data
    # All transformer encoder blocks.
    copy_layers(hf_model.vision_model.encoder.layers, pt_weights, 'visual.transformer.resblocks')
class Generator(Model):
    """SEGAN-style 1-D convolutional encoder/decoder generator.

    The encoder downsamples with strided GConv1DBlocks; the decoder mirrors it
    with GDeconv1DBlocks and optional learnable skip connections (GSkip)
    between matching encoder/decoder levels.  Unless `no_z` is set, a latent
    tensor z is concatenated channel-wise at the bottleneck.
    """

    def __init__(self, ninputs, fmaps, kwidth, poolings, dec_fmaps=None, dec_kwidth=None, dec_poolings=None, z_dim=None, no_z=False, skip=True, bias=False, skip_init='one', skip_dropout=0, skip_type='alpha', norm_type=None, skip_merge='sum', skip_kwidth=11, name='Generator'):
        # ninputs: input channels.  fmaps/poolings/kwidth: per-encoder-layer
        # feature maps, stride, and kernel width.  dec_* default to mirroring
        # the encoder configuration.
        super().__init__(name=name)
        self.skip = skip
        self.bias = bias
        self.no_z = no_z
        self.z_dim = z_dim
        self.enc_blocks = nn.ModuleList()
        assert isinstance(fmaps, list), type(fmaps)
        assert isinstance(poolings, list), type(poolings)
        if isinstance(kwidth, int):
            # Broadcast a single kernel width to every encoder layer.
            kwidth = ([kwidth] * len(fmaps))
        assert isinstance(kwidth, list), type(kwidth)
        skips = {}
        ninp = ninputs
        for (pi, (fmap, pool, kw)) in enumerate(zip(fmaps, poolings, kwidth), start=1):
            if (skip and (pi < len(fmaps))):
                # Every level except the deepest gets a skip connection.
                gskip = GSkip(skip_type, fmap, skip_init, skip_dropout, merge_mode=skip_merge, kwidth=skip_kwidth, bias=bias)
                l_i = (pi - 1)
                skips[l_i] = {'alpha': gskip}
                # Register as an attribute so nn.Module tracks its parameters.
                setattr(self, 'alpha_{}'.format(l_i), skips[l_i]['alpha'])
            enc_block = GConv1DBlock(ninp, fmap, kw, stride=pool, bias=bias, norm_type=norm_type)
            self.enc_blocks.append(enc_block)
            ninp = fmap
        self.skips = skips
        if ((not no_z) and (z_dim is None)):
            # Default latent size: channel count at the bottleneck.
            z_dim = fmaps[(- 1)]
        if (not no_z):
            # z is concatenated channel-wise, widening the decoder input.
            ninp += z_dim
        if (dec_fmaps is None):
            # Mirror the encoder feature maps, ending in one output channel.
            dec_fmaps = (fmaps[::(- 1)][1:] + [1])
        else:
            assert isinstance(dec_fmaps, list), type(dec_fmaps)
        if (dec_poolings is None):
            dec_poolings = poolings[:]
        else:
            assert isinstance(dec_poolings, list), type(dec_poolings)
        self.dec_poolings = dec_poolings
        if (dec_kwidth is None):
            dec_kwidth = kwidth[:]
        elif isinstance(dec_kwidth, int):
            dec_kwidth = ([dec_kwidth] * len(dec_fmaps))
        assert isinstance(dec_kwidth, list), type(dec_kwidth)
        self.dec_blocks = nn.ModuleList()
        for (pi, (fmap, pool, kw)) in enumerate(zip(dec_fmaps, dec_poolings, dec_kwidth), start=1):
            if (skip and (pi > 1) and (pool > 1)):
                if (skip_merge == 'concat'):
                    # Concatenation merge doubles this block's input channels.
                    ninp *= 2
            if (pi >= len(dec_fmaps)):
                # Final layer squashes output to [-1, 1].
                act = 'Tanh'
            else:
                act = None
            if (pool > 1):
                dec_block = GDeconv1DBlock(ninp, fmap, kw, stride=pool, norm_type=norm_type, bias=bias, act=act)
            else:
                # Stride-1 levels use a plain conv block instead of a deconv.
                dec_block = GConv1DBlock(ninp, fmap, kw, stride=1, bias=bias, norm_type=norm_type)
            self.dec_blocks.append(dec_block)
            ninp = fmap

    def forward(self, x, z=None, ret_hid=False):
        # Returns the generated signal; with ret_hid=True also returns a dict
        # of intermediate activations keyed 'enc_i' / 'enc_zc' / 'dec_i'.
        hall = {}
        hi = x
        skips = self.skips
        for (l_i, enc_layer) in enumerate(self.enc_blocks):
            # Second return value is the pre-activation ("linear") output.
            (hi, linear_hi) = enc_layer(hi, True)
            if (self.skip and (l_i < (len(self.enc_blocks) - 1))):
                # Stash the pre-activation for the mirrored decoder level.
                skips[l_i]['tensor'] = linear_hi
            if ret_hid:
                hall['enc_{}'.format(l_i)] = hi
        if (not self.no_z):
            if (z is None):
                # Sample a latent matching the bottleneck's spatial size.
                z = torch.randn(hi.size(0), self.z_dim, *hi.size()[2:])
                if hi.is_cuda:
                    z = z.to('cuda')
            if (len(z.size()) != len(hi.size())):
                raise ValueError('len(z.size) {} != len(hi.size) {}'.format(len(z.size()), len(hi.size())))
            if (not hasattr(self, 'z')):
                # NOTE(review): caches only the first z ever used; looks like a
                # debugging/inspection aid -- confirm before relying on self.z.
                self.z = z
            hi = torch.cat((z, hi), dim=1)
            if ret_hid:
                hall['enc_zc'] = hi
        else:
            z = None
        enc_layer_idx = (len(self.enc_blocks) - 1)
        for (l_i, dec_layer) in enumerate(self.dec_blocks):
            if (self.skip and (enc_layer_idx in self.skips) and (self.dec_poolings[l_i] > 1)):
                # Merge the mirrored encoder activation before upsampling.
                skip_conn = skips[enc_layer_idx]
                hi = skip_conn['alpha'](skip_conn['tensor'], hi)
            hi = dec_layer(hi)
            enc_layer_idx -= 1
            if ret_hid:
                hall['dec_{}'.format(l_i)] = hi
        if ret_hid:
            return (hi, hall)
        else:
            return hi
class IndirectionNode(StatListNode):
    """A statement-list node whose position is borrowed from its first child.

    Note: assumes ``stats`` is non-empty (reads ``stats[0].pos``).
    """

    def __init__(self, stats):
        super().__init__(stats[0].pos, stats=stats)
def test_control_cg_hs_multiple(state_forms, bcs_list, J, states, controls, adjoints, config_ocp):
    """Regression test: nonlinear CG with Hestenes-Stiefel updates converges
    on the multiple-control problem within 30 iterations."""
    config_ocp.set('AlgoCG', 'cg_method', 'HS')
    problem = cashocs.OptimalControlProblem(state_forms, bcs_list, J, states, controls, adjoints, config=config_ocp)
    problem.solve(algorithm='ncg', rtol=0.01, atol=0.0, max_iter=30)
    assert problem.solver.relative_norm <= problem.solver.rtol
def compute_bd_locations(features, stride):
    """Compute the shifted pixel locations for one feature-map level.

    features: tensor whose last two dims are (H, W); its device is used for
        the generated locations.
    stride: stride of this feature level relative to the input image.
    Returns the per-level locations with a leading broadcast dimension.
    """
    h, w = features.size()[-2:]
    # Fixed: removed unused local `num_images` (features.shape[0] was never used).
    locations = compute_locations_per_level(h, w, stride, features.device)
    return locations.unsqueeze(0)
class UpstreamExpert(nn.Module):
    """Wraps a fine-tuned s3prl upstream plus its downstream phone classifier,
    exposing phone posteriors through the s3prl upstream interface.
    """

    def __init__(self, ckpt, **kwargs):
        # ckpt: path to a checkpoint containing 'Args', 'Config' and
        # 'Downstream' entries.
        super(UpstreamExpert, self).__init__()
        ckpt = torch.load(ckpt, map_location='cpu')
        args = ckpt['Args']
        # Instantiate the upstream model named in the checkpoint args.
        self.upstream = getattr(s3prl.hub, args.upstream)()
        self.featurizer = Featurizer(self.upstream, 'last_hidden_state', 'cpu')
        config = ckpt['Config']
        modelrc = config['downstream_expert']['modelrc']
        # SECURITY NOTE(review): eval() on a checkpoint-supplied string will
        # execute arbitrary code from an untrusted checkpoint; a whitelist of
        # model classes would be safer.
        model_cls = eval(modelrc['select'])
        model_conf = modelrc[modelrc['select']]
        self.model = model_cls(self.featurizer.output_dim, output_class_num=TIMIT_PHONE_CLASSES, **model_conf)
        self.model.load_state_dict(UpstreamExpert._fix_state_key(ckpt['Downstream']))

    # NOTE(review): defined without @staticmethod, but it is only ever called
    # through the class (UpstreamExpert._fix_state_key), which works in Py3.
    def _fix_state_key(states):
        # Strip the leading dotted component (e.g. 'model.') from every key,
        # mutating and returning the same dict.
        keys = list(states.keys())
        for key in keys:
            new_key = '.'.join(key.split('.')[1:])
            states[new_key] = states[key]
            states.pop(key)
        return states

    def get_downsample_rates(self, key: str) -> int:
        # Delegate to the wrapped upstream.
        return self.upstream.get_downsample_rates(key)

    def forward(self, wavs):
        # wavs -> upstream features -> featurized frames -> per-frame phone
        # posteriors (softmax over the last dim), re-padded to a batch.
        feats = self.upstream(wavs)
        feats = self.featurizer(wavs, feats)
        feats_length = [len(f) for f in feats]
        feats = pad_sequence(feats, batch_first=True)
        posteriors = self.model(feats)
        # Trim each sequence back to its true length before softmax/padding.
        posteriors = [F.softmax(p[:l], dim=(- 1)) for (p, l) in zip(posteriors, feats_length)]
        posteriors = pad_sequence(posteriors, batch_first=True)
        return {'last_hidden_state': posteriors, 'hidden_states': [posteriors]}
def CntInDegNodes_PUndirNet(Graph, NodeInDeg):
    """Return the number of nodes in *Graph* whose in-degree equals *NodeInDeg*.

    Thin SWIG wrapper delegating to the native _snap implementation.
    """
    return _snap.CntInDegNodes_PUndirNet(Graph, NodeInDeg)
def register_debug_signal_handlers(sig=signal.SIGUSR1, handler=print_traceback_handler):
    """Install *handler* for *sig* (default SIGUSR1) so tracebacks can be
    requested from a running process.

    Note: SIGUSR1 is a POSIX signal; this default is unavailable on Windows.
    """
    LOGGER.warning(f'Setting signal {sig} handler {handler}')
    signal.signal(sig, handler)
def permutation(pi, invertible=True):
    """Return the invertible finite dynamical system induced by the
    permutation *pi* on {1, ..., n}.

    NOTE(review): `invertible` is accepted but unused -- the returned system
    is always invertible; confirm whether the flag is intentional API.
    """
    from sage.combinat.permutation import Permutation
    perm = Permutation(pi)
    domain = range(1, len(perm) + 1)
    return InvertibleFiniteDynamicalSystem(domain, perm, inverse=perm.inverse(), create_tuple=True)
def register_defines():
    """Register the `cy step` / `cy next` gdb command aliases and their help
    text (taken from the CyStep / CyNext docstrings)."""
    template = textwrap.dedent(' define cy step\n cy -step\n end\n\n define cy next\n cy -next\n end\n\n document cy step\n %s\n end\n\n document cy next\n %s\n end\n ')
    script = template % (CyStep.__doc__, CyNext.__doc__)
    libpython.source_gdb_script(script)
def __getattr__(name):
    """Module-level attribute hook (PEP 562): forwards access to deprecated
    `scipy.sparse.sparsetools` names to the private `_sparsetools` module,
    emitting a deprecation warning."""
    return _sub_module_deprecation(sub_package='sparse', module='sparsetools', private_modules=['_sparsetools'], all=__all__, attribute=name)
def ring_to_gfan_format(input_ring):
    """Render *input_ring* as a gfan ring description string.

    The coefficient field becomes 'Q' (rationals), 'Z' (integers) or
    'Z/pZ' (finite characteristic); the generators are rendered as a
    bracketed list.
    """
    generators = str(input_ring.gens()).replace('(', '[').replace(')', ']')
    base = input_ring.base_ring()
    if base is QQ:
        prefix = 'Q'
    elif base is ZZ:
        prefix = 'Z'
    else:
        prefix = 'Z/{}Z'.format(input_ring.characteristic())
    return prefix + generators
class LookupLearner(Learner):
    """Memorization baseline: counts (input, output) co-occurrences during
    training and predicts the most frequent output for each input.

    Colors are optionally discretized through a BucketsVectorizer so that
    nearby colors share a bucket.
    """

    def __init__(self):
        options = config.options()
        # counters[input] is a Counter over the outputs seen with that input.
        self.counters = defaultdict(Counter)
        if options.listener:
            res = options.listener_color_resolution
            hsv = options.listener_hsv
        else:
            res = options.speaker_color_resolution
            hsv = options.speaker_hsv
        self.res = res
        self.hsv = hsv
        self.init_vectorizer()

    def init_vectorizer(self):
        """(Re)build the color bucketing from self.res / self.hsv."""
        if self.res and self.res[0]:
            if len(self.res) == 1:
                # A single resolution applies to all three color channels.
                self.res = self.res * 3
            self.color_vec = BucketsVectorizer(self.res, hsv=self.hsv)
            self.vectorize = lambda c: self.color_vec.vectorize(c, hsv=True)
            self.unvectorize = lambda c: self.color_vec.unvectorize(c, hsv=True)
            # Log of the bucket volume fraction: converts bucket-level
            # probabilities into (approximate) color-level densities.
            self.score_adjustment = -np.log((256.0 ** 3) / self.color_vec.num_types)
        else:
            # No discretization: identity mapping, no density correction.
            self.vectorize = lambda c: c
            self.unvectorize = lambda c: c
            self.score_adjustment = 0.0

    def num_params(self):
        """Total number of (input, output) entries stored."""
        return sum(len(c) for c in self.counters.values())

    def train(self, training_instances, validation_instances='ignored', metrics='ignored'):
        """Tally co-occurrence counts; the vectorized side depends on whether
        this learner acts as listener (outputs) or speaker (inputs)."""
        options = config.options()
        for inst in training_instances:
            inp, out = inst.input, inst.output
            if options.listener:
                out = self.vectorize(out)
            else:
                inp = self.vectorize(inp)
            self.counters[inp][out] += 1

    def predict_and_score(self, eval_instances, random='ignored', verbosity=0):
        """Return (predictions, log-probability scores) for each instance."""
        options = config.options()
        if (options.verbosity + verbosity) >= 2:
            print('Testing')
        predictions = []
        scores = []
        for inst in eval_instances:
            inp, out = inst.input, inst.output
            if options.listener:
                out = self.vectorize(out)
            else:
                inp = self.vectorize(inp)
            counter = self.counters[inp]
            highest = counter.most_common(1)
            if highest:
                if options.listener:
                    prediction = self.unvectorize(highest[0][0])
                else:
                    prediction = highest[0][0]
            elif options.listener:
                # Unseen input: fall back to black / unknown token.
                prediction = (0, 0, 0)
            else:
                prediction = '<unk>'
            total = sum(counter.values())
            if total:
                if (options.verbosity + verbosity) >= 9:
                    print('%s -> %s: %s of %s [%s]' % (repr(inp), repr(out), counter[out], total, inst.input))
                prob = (counter[out] * 1.0) / total
            else:
                if (options.verbosity + verbosity) >= 9:
                    print('%s -> %s: no data [%s]' % (repr(inp), repr(out), inst.input))
                # No data: probability 1 iff the fallback happens to be right.
                prob = 1.0 * (inst.output == prediction)
            score = np.log(prob)
            if options.listener:
                score += self.score_adjustment
            predictions.append(prediction)
            scores.append(score)
        return (predictions, scores)

    def __getstate__(self):
        # Fixed: Counter.iteritems() is Python 2 only; use items().
        return {'counters': {k: dict(v) for k, v in self.counters.items()},
                'res': self.res,
                'hsv': self.hsv}

    def __setstate__(self, state):
        self.res = state['res']
        self.hsv = state['hsv']
        self.init_vectorizer()
        # Fixed: iterate .items() -- iterating the dict directly yields only
        # keys, so the (k, v) unpacking raised ValueError.
        self.counters = defaultdict(Counter, {k: Counter(v) for k, v in state['counters'].items()})
def llredsb_Cudd_style(polys):
    """Fold the linear-lead polynomials in *polys* into a reduced reductor
    set (Cudd-style), processing them in decreasing lead-index order.

    Returns None when *polys* is empty.
    """
    reductors = Polynomial(polys[0].ring().one()).set() if polys else None
    ordered = sorted(polys, key=lead_index, reverse=True)
    # Input sanity checks: pairwise-distinct lex leads, no constant parts,
    # every polynomial linear in its lex lead, distinct top variables.
    assert (len(set((p.lex_lead() for p in ordered))) == len(polys))
    assert (not any((p.constant() for p in polys)))
    assert (len([p for p in polys if (p.lex_lead_deg() == 1)]) == len(polys))
    assert (len(set((p.navigation().value() for p in polys))) == len(polys))
    for poly in ordered:
        reductors = combine(reductors, poly, reduce=ll_red_nf_redsb)
    return reductors
def load_azure_config(config: SkyplaneConfig, force_init: bool=False, non_interactive: bool=False) -> SkyplaneConfig:
    """Configure Azure support in a SkyplaneConfig, interactively or headlessly.

    Walks through: subscription selection, resource-group / managed-identity
    naming, identity creation via the `az` CLI, and role assignments so the
    identity can read/write storage accounts.  Every failure path disables
    Azure support by clearing the relevant config fields.
    """
    def clear_azure_config(config, verbose=True):
        # Reset all Azure fields and mark Azure as disabled.
        if verbose:
            typer.secho(' Disabling Azure support', fg='blue')
        config.azure_subscription_id = None
        config.azure_client_id = None
        config.azure_principal_id = None
        config.azure_enabled = False
        return config

    def make_role_cmds(principal_id, subscription_id):
        # Build one `az role assignment create` argv list per required role.
        roles = ['Contributor', 'Storage Blob Data Contributor', 'Storage Account Contributor']
        return [(((('az role assignment create --role'.split(' ') + [role]) + f'--assignee-object-id {principal_id} --assignee-principal-type ServicePrincipal'.split(' ')) + f'--subscription {subscription_id}'.split(' ')) + f'--scope /subscriptions/{subscription_id}'.split(' ')) for role in roles]

    def run_az_cmd(cmd: List[str], ignore_error=False):
        # Run an az CLI command; returns (ok, stdout_bytes, stderr_bytes).
        # NOTE(review): any stderr output is treated as failure unless
        # ignore_error is set, even if the exit code was 0.
        (out, err) = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
        if (err and (not ignore_error)):
            typer.secho(f' Error running command: {cmd}', fg='red', err=True)
            typer.secho(f" stdout: {out.decode('utf-8')}", fg='red', err=True)
            typer.secho(f" stderr: {err.decode('utf-8')}", fg='red', err=True)
            return (False, out, err)
        return (True, out, err)

    if (non_interactive or typer.confirm(' Do you want to configure Azure support in Skyplane?', default=True)):
        if force_init:
            typer.secho(' Azure credentials will be re-initialized', fg='red', err=True)
            clear_azure_config(config, verbose=False)
        if (config.azure_enabled and config.azure_subscription_id and config.azure_principal_id and config.azure_client_id):
            # Already fully configured; nothing to do.
            typer.secho(' Azure credentials already configured! To reconfigure Azure, run `skyplane init --reinit-azure`.', fg='blue')
            return config
        if (not shutil.which('az')):
            typer.secho(' Azure CLI not found, please install it from \n Then login with `az login`', fg='red', err=True)
            return clear_azure_config(config)
        # Seed defaults from environment variables, then existing config.
        defaults = {'client_id': (os.environ.get('AZURE_CLIENT_ID') or config.azure_client_id), 'subscription_id': (os.environ.get('AZURE_SUBSCRIPTION_ID') or config.azure_subscription_id or compute.AzureAuthentication.infer_subscription_id()), 'resource_group': (os.environ.get('AZURE_RESOURCE_GROUP') or compute.AzureServer.resource_group_name), 'umi_name': 'skyplane_umi'}
        # Sanity-check that the az CLI actually runs.
        (out, err) = subprocess.Popen('az --version'.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
        if (not out.decode('utf-8').startswith('azure-cli')):
            typer.secho(' Azure CLI not found, please install it from \n Then login with `az login`', fg='red', err=True)
            return clear_azure_config(config)
        # Enumerate enabled subscriptions available to the logged-in account.
        (success, out, err) = run_az_cmd('az account list -o json --all'.split(' '))
        if (not success):
            typer.secho(' Error listing Azure subscriptions', fg='red', err=True)
            return clear_azure_config(config)
        subscriptions = {}
        for sub in json.loads(out):
            if (sub['state'] == 'Enabled'):
                subscriptions[sub['name']] = sub['id']
        defaults['subscription_name'] = (next((n for (n, i) in subscriptions.items() if (i == defaults['subscription_id'])), None) if defaults['subscription_id'] else None)
        # Choose the subscription to launch Skyplane VMs in.
        if non_interactive:
            config.azure_subscription_id = defaults['subscription_id']
        else:
            choices = {f'{name} ({id})': id for (name, id) in subscriptions.items()}
            default_choice = (f"{defaults['subscription_name']} ({defaults['subscription_id']})" if defaults['subscription_id'] else None)
            selected_choice = questionary.select('Select Azure subscription to launch Skyplane VMs in:', choices=list(sorted(choices.keys())), default=default_choice, qmark=' ?', pointer=' > ').ask()
            if (selected_choice is None):
                typer.secho(' No subscription selected, disabling Azure support', fg='blue')
                return clear_azure_config(config)
            config.azure_subscription_id = choices[selected_choice]
        if (not config.azure_subscription_id):
            typer.secho(' Invalid Azure subscription ID', fg='red', err=True)
            return clear_azure_config(config)
        # Choose which subscriptions the managed identity may read/write.
        if non_interactive:
            authorize_subscriptions_ids = [config.azure_subscription_id]
        else:
            choices = {f'{name} ({id})': id for (name, id) in subscriptions.items()}
            default_choice = (f"{defaults['subscription_name']} ({defaults['subscription_id']})" if defaults['subscription_id'] else None)
            authorize_subscription_strs = questionary.checkbox('Select which Azure subscriptions that Skyplane should be able to read/write data to', choices=list(sorted(choices.keys())), default=default_choice, qmark=' ?', pointer=' > ').ask()
            if (not authorize_subscription_strs):
                typer.secho(' Note: Skyplane will not be able to read/write data to any Azure subscriptions so you will not be able to use Azure storage.', fg='red')
                authorize_subscriptions_ids = []
            else:
                authorize_subscriptions_ids = [choices[s] for s in authorize_subscription_strs]
        # Resource-group and managed-identity names (prompt unless headless).
        if (not config.azure_resource_group):
            config.azure_resource_group = (typer.prompt(' Enter the Azure resource group to provision Skyplane VMs in (the default should work in most cases)', default=defaults['resource_group']) if (not non_interactive) else defaults['resource_group'])
        if (not config.azure_umi_name):
            config.azure_umi_name = (typer.prompt(' Enter the name for the user managed identity that Skyplane VMs will use to access your Azure Storage Accounts (the default should work in most cases)', default=defaults['umi_name']) if (not non_interactive) else defaults['umi_name'])
        # Create the managed identity via the az CLI.
        enable_quota_provider_cmd = f'az provider register -n Microsoft.Quota'
        change_subscription_cmd = f'az account set --subscription {config.azure_subscription_id}'
        create_rg_cmd = f'az group create -l westus2 -n {config.azure_resource_group}'
        create_umi_cmd = f'az identity create -g {config.azure_resource_group} -n {config.azure_umi_name}'
        typer.secho(f' I will run the following commands to create an Azure managed identity:', fg='blue')
        typer.secho(f' $ {enable_quota_provider_cmd}', fg='yellow')
        typer.secho(f' $ {change_subscription_cmd}', fg='yellow')
        typer.secho(f' $ {create_rg_cmd}', fg='yellow')
        typer.secho(f' $ {create_umi_cmd}', fg='yellow')
        with Progress(TextColumn(' '), SpinnerColumn(), TextColumn('Creating Skyplane managed identity{task.description}'), transient=True) as progress:
            progress.add_task('', total=None)
            # Quota provider registration is best-effort.
            _ = run_az_cmd(enable_quota_provider_cmd.split(), ignore_error=True)
            (cmd_success, out, err) = run_az_cmd(change_subscription_cmd.split())
            if (not cmd_success):
                return clear_azure_config(config)
            (cmd_success, out, err) = run_az_cmd(create_rg_cmd.split())
            if (not cmd_success):
                return clear_azure_config(config)
            (cmd_success, out, err) = run_az_cmd(create_umi_cmd.split())
            if (not cmd_success):
                return clear_azure_config(config)
            else:
                # `az identity create` returns the identity as JSON.
                identity_json = json.loads(out.decode('utf-8'))
                config.azure_client_id = identity_json['clientId']
                config.azure_principal_id = identity_json['principalId']
        if ((not config.azure_client_id) or (not config.azure_principal_id) or (not config.azure_subscription_id) or (not config.azure_umi_name) or (not config.azure_resource_group)):
            typer.secho(' Azure credentials not configured correctly, disabling Azure support.', fg='red', err=True)
            return clear_azure_config(config)
        # Grant the identity the storage roles on each authorized subscription.
        role_cmds = []
        for subscription_id in authorize_subscriptions_ids:
            role_cmds.extend(make_role_cmds(config.azure_principal_id, subscription_id))
        if role_cmds:
            typer.secho(f' I will run the following commands to authorize the newly created Skyplane managed identity to access your storage accounts:', fg='blue')
            for role_cmd in role_cmds:
                typer.secho(f" $ {' '.join(role_cmd)}", fg='yellow')
            with Progress(TextColumn(' '), SpinnerColumn(), TextColumn('Authorizing managed identity to access storage accounts{task.description}'), transient=True) as progress:
                progress.add_task('', total=None)
                for role_cmd in role_cmds:
                    (cmd_success, out, err) = run_az_cmd(role_cmd)
                    if (not cmd_success):
                        return clear_azure_config(config)
        typer.secho(f' Azure managed identity created successfully! To delete it, run `az identity delete -n skyplane_umi -g skyplane`.', fg='green')
        config.azure_enabled = True
        auth = compute.AzureAuthentication(config=config)
        # Cache region/SKU metadata for later provisioning.
        with Progress(TextColumn(' '), SpinnerColumn(), TextColumn('Querying Azure for available regions and VM SKUs{task.description}'), transient=True) as progress:
            progress.add_task('', total=None)
            auth.save_region_config()
        return config
    else:
        return clear_azure_config(config)
def softmax_focal_loss_with_logits(output: torch.Tensor, target: torch.Tensor, gamma: float=2.0, reduction='mean', normalized=False, reduced_threshold: Optional[float]=None, eps: float=1e-06) -> torch.Tensor:
    """Multi-class focal loss on raw logits.

    output: logits of shape (N, C, ...); target: class indices.
    gamma: focusing exponent; reduction: 'mean' | 'sum' | 'batchwise_mean' |
    anything else for no reduction.  When `normalized` is set the loss is
    divided by the sum of focal modulators; `reduced_threshold` switches to
    the "reduced" focal-term variant.
    """
    # Per-sample cross-entropy (negative log-likelihood of the target class).
    ce = F.nll_loss(F.log_softmax(output, dim=1), target, reduction='none')
    # Probability the model assigned to the true class.
    pt = torch.exp(-ce)
    if reduced_threshold is None:
        modulator = (1.0 - pt).pow(gamma)
    else:
        modulator = ((1.0 - pt) / reduced_threshold).pow(gamma)
        # Below the threshold the modulator is clamped to 1 (no down-weighting).
        modulator[pt < reduced_threshold] = 1
    loss = modulator * ce
    if normalized:
        # Normalize by the total focal weight (clamped to avoid divide-by-zero).
        loss = loss / modulator.sum().clamp_min(eps)
    if reduction == 'mean':
        return loss.mean()
    if reduction == 'sum':
        return loss.sum()
    if reduction == 'batchwise_mean':
        return loss.sum(0)
    return loss
class BasicBlock(nn.Module):
    """Residual block: two 3x3 convolutions, each followed by GroupNorm with
    2 groups, with a ReLU in between and an identity shortcut added before
    the final ReLU.  Input and output have the same channel count `dim`.
    """
    expansion = 1

    def __init__(self, dim):
        super(BasicBlock, self).__init__()
        # Attribute names are kept stable for state_dict compatibility.
        self.conv1 = nn.Conv2d(dim, dim, kernel_size=3, padding=1, bias=False)
        self.bn1 = nn.GroupNorm(2, dim, eps=0.0001)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(dim, dim, kernel_size=3, padding=1, bias=False)
        self.bn2 = nn.GroupNorm(2, dim, eps=0.0001)

    def forward(self, x):
        """Apply conv-norm-relu, conv-norm, add the shortcut, final ReLU."""
        shortcut = x
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        y = y + shortcut
        return self.relu(y)
('vectorization', 'semantic', SemanticVectorizerParams)
class Semantic(VectorizationAlgo):
    """Semantic vectorizer: maps loglines to fixed-length sequences of token
    ids backed by a word-embedding matrix (trained with gensim or loaded
    from a gensim pretrained model).

    NOTE(review): the bare tuple preceding this class in the file looks like
    a stripped registration decorator -- confirm against the factory registry.
    """

    def __init__(self, params: SemanticVectorizerParams):
        """Initialize from *params*; reload cached vocab/embedding artifacts
        when `params.model_save_dir` already contains them."""
        self.params = params
        self.model = None
        self.vocab = None
        self.vocab_size = None
        self.embed_matrix = None
        self.vocab_filename = None
        self.embed_mat_filename = None
        if self.params.model_save_dir:
            self.embed_mat_filename = os.path.join(self.params.model_save_dir, 'embedding_matrix.npy')
            self.vocab_filename = os.path.join(self.params.model_save_dir, 'vocab.pkl')
            if os.path.exists(self.vocab_filename) and os.path.exists(self.embed_mat_filename):
                # Reuse previously fitted artifacts (trusted local files).
                self.vocab = pkl.load(open(self.vocab_filename, 'rb'))
                self.embed_matrix = np.load(self.embed_mat_filename)
                self.params.embedding_dim = self.embed_matrix.shape[1]
                self.vocab_size = len(self.vocab)
        # False => use pretrained downloadable embeddings in fit().
        self.train_embedding_model = False

    def _tokenize_logline(self, sentence):
        """Lowercase and word-tokenize a logline, mapping punctuation to spaces."""
        try:
            sentence = sentence.translate(str.maketrans(string.punctuation, (' ' * len(string.punctuation))))
        except Exception:
            logging.info('Cannot process line: {} '.format(sentence))
            sentence = ''
        token_list = word_tokenize(sentence.lower())
        return token_list

    def fit(self, loglines: pd.Series):
        """Build (or reload) the vocabulary and embedding matrix from *loglines*."""
        if (self.params.model_save_dir and os.path.exists(self.vocab_filename) and os.path.exists(self.embed_mat_filename)):
            # Cached artifacts take precedence over refitting.
            self.vocab = pkl.load(open(self.vocab_filename, 'rb'))
            self.embed_matrix = np.load(self.embed_mat_filename)
            self.params.embedding_dim = self.embed_matrix.shape[1]
            self.vocab_size = len(self.vocab)
        else:
            doc = []
            for sentence in loglines:
                doc.extend(self._tokenize_logline(sentence))
            doc_words = set(doc)
            if (self.params.embedding_type.lower() == 'glove'):
                if self.train_embedding_model:
                    self.model = gensim.models.Word2Vec(doc, min_count=self.params.min_token_count, vector_size=self.params.embedding_dim, window=self.params.window)
                elif (self.params.embedding_dim in [50, 100, 200, 300]):
                    # Fixed: was `self.embed_dim`, an attribute that is never
                    # defined (AttributeError); the dimension lives on params.
                    self.model = gensim.downloader.load(('glove-wiki-gigaword-' + str(self.params.embedding_dim)))
                else:
                    raise ValueError('embedding dim supported for glove pretrained model is any of (50, 100, 200, 300)')
            elif (self.params.embedding_type.lower() == 'word2vec'):
                if self.train_embedding_model:
                    self.model = gensim.models.Word2Vec(doc, min_count=self.params.min_token_count, vector_size=self.params.embedding_dim, window=self.params.window)
                else:
                    if (self.params.embedding_dim != 300):
                        raise ValueError('embedding dim supported for word2vec pretrained model is 300')
                    self.model = gensim.downloader.load('word2vec-google-news-300')
            elif (self.params.embedding_type.lower() == 'fasttext'):
                if self.train_embedding_model:
                    self.model = gensim.models.FastText(doc, min_count=self.params.min_token_count, vector_size=self.params.embedding_dim, window=self.params.window)
                else:
                    if (self.params.embedding_dim != 300):
                        raise ValueError('embedding dim supported for fasttext pretrained model is 300')
                    self.model = gensim.downloader.load('fasttext-wiki-news-subwords-300')
            if self.train_embedding_model:
                # Freshly trained model: add special tokens and take its vocab.
                word_vectors = self.model.wv
                zero_vectors = np.zeros((3, self.params.embedding_dim))
                word_vectors.add_vectors(['UNK', 'PAD', self.params.sep_token], zero_vectors)
                self.vocab = {k: i for (i, k) in enumerate(word_vectors.index_to_key)}
                self.embed_matrix = word_vectors.vectors
                self.vocab_size = len(self.vocab)
                if self.vocab_filename:
                    pkl.dump(self.vocab, open(self.vocab_filename, 'wb'))
                if self.embed_mat_filename:
                    np.save(self.embed_mat_filename, self.embed_matrix)
            else:
                # Pretrained model: keep only words seen in the corpus (plus
                # the special tokens) to shrink the embedding matrix.
                zero_vectors = np.zeros((3, self.params.embedding_dim))
                doc_words.update(['UNK', 'PAD', self.params.sep_token])
                self.model.add_vectors(['UNK', 'PAD', self.params.sep_token], zero_vectors)
                doc_words = doc_words.intersection(set(self.model.index_to_key))
                word_vectors_map = {k: i for (i, k) in enumerate(self.model.index_to_key)}
                doc_words_indices = [word_vectors_map[w] for w in doc_words]
                self.embed_matrix = self.model.vectors[doc_words_indices]
                self.vocab = {k: i for (i, k) in enumerate(doc_words)}
                self.vocab_size = len(self.vocab)
                if self.vocab_filename:
                    pkl.dump(self.vocab, open(self.vocab_filename, 'wb'))
                if self.embed_mat_filename:
                    np.save(self.embed_mat_filename, self.embed_matrix)

    def transform(self, loglines: pd.Series) -> pd.Series:
        """Convert each logline to a padded sequence of vocabulary token ids."""
        log_vectors = []
        for ll in loglines:
            token_list = self._tokenize_logline(ll)
            token_ids = [self.vocab.get(t, self.vocab['UNK']) for t in token_list]
            log_vectors.append(pad(np.array(token_ids), self.params.max_token_len, padding_value=self.vocab['PAD']))
        log_vector_series = pd.Series(log_vectors, index=loglines.index)
        logging.info('Finished converting loglines to token ids')
        return log_vector_series

    def summary(self):
        # NOTE(review): gensim models do not expose `summary()`; confirm which
        # model type this is expected to run against.
        return self.model.summary()
def _validate_var_names(adata, source_var_names):
    """Warn when *adata*'s var_names differ from those the model was trained on.

    The check is order-sensitive: vars must match exactly and in the same
    order for valid results.
    """
    user_var_names = adata.var_names.astype(str)
    if np.array_equal(source_var_names, user_var_names):
        return
    warnings.warn('var_names for adata passed in does not match var_names of adata used to train the model. For valid results, the vars need to be the same and in the same order as the adata used to train the model.', UserWarning, stacklevel=settings.warnings_stacklevel)
def unzip(zipped_path, quiet):
    """Decompress a .gz file next to itself (dropping the extension).

    Skips (and optionally reports) when the target already exists; with
    quiet=False, progress is printed to stdout.
    """
    target_path = os.path.splitext(zipped_path)[0]
    if os.path.exists(target_path):
        if not quiet:
            print('{} already exists, skipping ... '.format(target_path))
        return
    with gzip.open(zipped_path, 'rb') as src:
        with open(target_path, 'wb') as dst:
            dst.write(src.read())
    if not quiet:
        print('Unzipped {} ...'.format(zipped_path))
class BlenderbotConfig(PretrainedConfig):
    """Configuration for a Blenderbot encoder-decoder model.

    Stores the architecture hyperparameters (layer counts, hidden sizes,
    dropout rates, special token ids) consumed by the model classes.
    """
    model_type = 'blenderbot'
    keys_to_ignore_at_inference = ['past_key_values']

    def __init__(self, vocab_size=8008, max_position_embeddings=128, encoder_layers=2, encoder_ffn_dim=10240, encoder_attention_heads=32, decoder_layers=24, decoder_ffn_dim=10240, decoder_attention_heads=32, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function='gelu', d_model=2560, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=1, classifier_dropout=0.0, scale_embedding=False, gradient_checkpointing=False, pad_token_id=0, bos_token_id=1, eos_token_id=2, encoder_no_repeat_ngram_size=3, forced_eos_token_id=2, **kwargs):
        # Token ids and generation-related settings are handled by the base
        # PretrainedConfig; everything else is stored as plain attributes.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, encoder_no_repeat_ngram_size=encoder_no_repeat_ngram_size, forced_eos_token_id=forced_eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.gradient_checkpointing = gradient_checkpointing
        self.scale_embedding = scale_embedding

    # Fixed: these were plain methods; the HF PretrainedConfig contract reads
    # them as attributes (e.g. config.num_attention_heads), so without
    # @property callers would receive a bound method instead of an int.
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
class Vectors(object):
    """Word-embedding store loaded from a local file or downloadable archive.

    On first use the raw text vectors are parsed and cached to a `.pt` file;
    subsequent loads read the cache.  Unknown tokens are initialized with
    `unk_init` (zeros by default).
    """

    def __init__(self, name, cache='.vector_cache', url=None, unk_init=torch.Tensor.zero_):
        # name: vector file name (or full path); cache: cache directory;
        # url: optional archive URL used when the file is missing;
        # unk_init: callable(Tensor) -> Tensor for out-of-vocabulary tokens.
        self.unk_init = unk_init
        self.cache(name, cache, url=url)

    def __getitem__(self, token):
        # Known token -> its stored row; unknown -> freshly initialized vector.
        if (token in self.stoi):
            return self.vectors[self.stoi[token]]
        else:
            return self.unk_init(torch.Tensor(1, self.dim))

    def cache(self, name, cache, url=None):
        """Load vectors for `name`, downloading / extracting / parsing as
        needed, and cache the parsed tensors to `<path>.pt`."""
        if os.path.isfile(name):
            # `name` is already a full path to the vector file.
            path = name
            path_pt = (os.path.join(cache, os.path.basename(name)) + '.pt')
        else:
            path = os.path.join(cache, name)
            path_pt = (path + '.pt')
        if (not os.path.isfile(path_pt)):
            if ((not os.path.isfile(path)) and url):
                # Download the archive into the cache dir, then extract it.
                logger.info('Downloading vectors from {}'.format(url))
                if (not os.path.exists(cache)):
                    os.makedirs(cache)
                dest = os.path.join(cache, os.path.basename(url))
                if (not os.path.isfile(dest)):
                    with tqdm(unit='B', unit_scale=True, miniters=1, desc=dest) as t:
                        urlretrieve(url, dest, reporthook=reporthook(t))
                logger.info('Extracting vectors into {}'.format(cache))
                ext = os.path.splitext(dest)[1][1:]
                if (ext == 'zip'):
                    with zipfile.ZipFile(dest, 'r') as zf:
                        zf.extractall(cache)
                elif (ext == 'gz'):
                    # NOTE(review): extractall without a member filter trusts
                    # the archive; fine for known URLs, unsafe for untrusted.
                    with tarfile.open(dest, 'r:gz') as tar:
                        tar.extractall(path=cache)
            if (not os.path.isfile(path)):
                raise RuntimeError('no vectors found at {}'.format(path))
            (itos, vectors, dim) = ([], array.array(str('d')), None)
            binary_lines = False
            try:
                with io.open(path, encoding='utf8') as f:
                    lines = [line for line in f]
            except:
                # Fall back to byte mode for files with malformed UTF-8;
                # bad tokens are skipped per-line below.
                logger.warning('Could not read {} as UTF8 file, reading file as bytes and skipping words with malformed UTF8.'.format(path))
                with open(path, 'rb') as f:
                    lines = [line for line in f]
                binary_lines = True
            logger.info('Loading vectors from {}'.format(path))
            for line in tqdm(lines, total=len(lines)):
                # Each line: <word> <float> <float> ... separated by spaces.
                entries = line.rstrip().split((b' ' if binary_lines else ' '))
                (word, entries) = (entries[0], entries[1:])
                if ((dim is None) and (len(entries) > 1)):
                    # The first full row fixes the dimensionality.
                    dim = len(entries)
                elif (len(entries) == 1):
                    logger.warning('Skipping token {} with 1-dimensional vector {}; likely a header'.format(word, entries))
                    continue
                elif (dim != len(entries)):
                    raise RuntimeError('Vector for token {} has {} dimensions, but previously read vectors have {} dimensions. All vectors must have the same number of dimensions.'.format(word, len(entries), dim))
                if binary_lines:
                    try:
                        if isinstance(word, six.binary_type):
                            word = word.decode('utf-8')
                    except:
                        logger.info('Skipping non-UTF8 token {}'.format(repr(word)))
                        continue
                vectors.extend((float(x) for x in entries))
                itos.append(word)
            self.itos = itos
            self.stoi = {word: i for (i, word) in enumerate(itos)}
            self.vectors = torch.Tensor(vectors).view((- 1), dim)
            self.dim = dim
            logger.info('Saving vectors to {}'.format(path_pt))
            torch.save((self.itos, self.stoi, self.vectors, self.dim), path_pt)
        else:
            # Fast path: the parsed cache already exists.
            logger.info('Loading vectors from {}'.format(path_pt))
            (self.itos, self.stoi, self.vectors, self.dim) = torch.load(path_pt)
def get_coords(conformer, sites, avg_pos, ringatoms_flat):
    """Return 3-D coordinates for each site.

    Ring atoms take their position from the conformer; all other sites use
    the precomputed average position at the same index.
    """
    # Fixed: the debug line was emitted twice.
    logger.debug('Entering get_coords()')
    site_coords = []
    for idx, site in enumerate(sites):
        if site in ringatoms_flat:
            # Hoisted: fetch the atom position once instead of once per axis.
            pos = conformer.GetAtomPosition(int(site))
            site_coords.append(np.array([pos[j] for j in range(3)]))
        else:
            site_coords.append(np.array(avg_pos[idx]))
    return site_coords
class OpenAIRunExpander(RunExpander):
    """Adds the in-context-learning instruction prefix/suffix to generation
    runs; non-generation runs pass through unchanged."""
    name = 'openai'

    def __init__(self):
        pass

    def expand(self, run_spec: RunSpec) -> List[RunSpec]:
        """Return [run_spec] untouched unless it uses ADAPT_GENERATION."""
        if run_spec.adapter_spec.method != ADAPT_GENERATION:
            return [run_spec]
        new_prefix = IN_CONTEXT_LEARNING_INSTRUCTIONS_PREFIX + '\n\n'
        new_suffix = '\n\n' + IN_CONTEXT_LEARNING_INSTRUCTIONS_SUFFIX + '\n' + run_spec.adapter_spec.output_prefix.strip()
        new_adapter_spec = replace(run_spec.adapter_spec, global_prefix=new_prefix, global_suffix=new_suffix)
        return [replace(run_spec, name=run_spec.name, adapter_spec=new_adapter_spec)]
def prune_layer(layer: Union[(nn.Linear, Conv1D)], index: torch.LongTensor, dim: Optional[int]=None) -> Union[(nn.Linear, Conv1D)]:
    """Prune a linear-like layer, keeping only the entries in *index* along *dim*.

    When *dim* is None a sensible default is chosen per layer type:
    nn.Linear stores weights as (out, in) so dim 0 prunes outputs, while the
    GPT-2-style Conv1D is transposed, so dim 1 is its output dimension.
    Raises ValueError for unsupported layer classes.
    """
    if isinstance(layer, nn.Linear):
        return prune_linear_layer(layer, index, dim=dim if dim is not None else 0)
    if isinstance(layer, Conv1D):
        return prune_conv1d_layer(layer, index, dim=dim if dim is not None else 1)
    raise ValueError(f"Can't prune layer of class {layer.__class__}")
def define_flags():
    """Register one command-line flag per entry in _DEFAULT_PARAMS and mark
    each as a key flag.  Raises ValueError for unknown flag types."""
    # Dispatch table from a spec's flag_type string to the DEFINE_* helper.
    definers = {'boolean': flags.DEFINE_boolean, 'float': flags.DEFINE_float, 'integer': flags.DEFINE_integer, 'string': flags.DEFINE_string}
    for name, param_spec in six.iteritems(_DEFAULT_PARAMS):
        if param_spec.flag_type not in definers:
            raise ValueError('Unknown flag_type %s' % param_spec.flag_type)
        definers[param_spec.flag_type](name, param_spec.default_value, param_spec.description)
        flags.declare_key_flag(name)
class TestGraph(TestCase):
    """Unit tests for Graph.shortest_path."""

    def test_shortest_path(self):
        triples = [('1', '2', '3'), ('3', '4', '5'), ('1', '0', '5')]
        # With the direct ('1','0','5') edge present, the two-hop route wins.
        full_graph = Graph(triples)
        self.assertEqual(full_graph.shortest_path('1', '5'), ['1', '0', '5'])
        # Without it, the only route chains through every intermediate node.
        partial_graph = Graph(triples[:2])
        self.assertEqual(partial_graph.shortest_path('1', '5'), ['1', '2', '3', '4', '5'])
def make_image_list(list_file, image_dir, name, offset=1000):
    """Write an image-list file mapping 'wnid/filename' to a class label.

    Labels are the line index of the wnid in `list_file` plus `offset`.
    Output goes to '<data_dir>/list/img-<name>.txt' (relies on the
    module-level `data_dir`).  Warns about wnids with no images.
    """
    with open(list_file) as fp:
        wnid_list = [line.strip() for line in fp]
    save_file = os.path.join(data_dir, 'list', ('img-%s.txt' % name))
    # Context manager guarantees the output file is flushed and closed even
    # if an exception occurs mid-loop (the original leaked the handle).
    with open(save_file, 'w') as wr_fp:
        for i, wnid in enumerate(wnid_list):
            img_list = glob.glob(os.path.join(image_dir, wnid, '*.JPEG'))
            label = i + offset
            for path in img_list:
                index = os.path.join(wnid, os.path.basename(path))
                wr_fp.write('%s %d\n' % (index, label))
            if len(img_list) == 0:
                print(('Warning: does not have class %s. Do you forgot to download the picture??' % wnid))
def local_bad_density_congruence(self, p, m, Zvec=None, NZvec=None):
    """Total bad-type local density: the bad-I part plus the bad-II part."""
    bad_type_I = self.local_badI_density_congruence(p, m, Zvec, NZvec)
    bad_type_II = self.local_badII_density_congruence(p, m, Zvec, NZvec)
    return bad_type_I + bad_type_II
def _drop_option_preserving_form(layout, ensure_empty_mask: bool=False):
    """Strip option-type wrappers from an Awkward layout tree.

    UnmaskedArray nodes are unwrapped and IndexedOptionArray nodes are
    replaced by plain IndexedArray nodes; other option nodes lose their
    wrapper entirely.  When ``ensure_empty_mask`` is True, assert that no
    element is actually masked before erasing the option.
    """
    from awkward._do import recursively_apply
    from awkward.contents import UnmaskedArray, IndexedOptionArray, IndexedArray

    def action(_, continuation, **kwargs):
        # Let children be rewritten first, then handle this node.
        this = continuation()
        if (not this.is_option):
            return this
        elif isinstance(this, UnmaskedArray):
            # An UnmaskedArray carries no mask information; just unwrap it.
            return this.content
        else:
            index_nplike = this.backend.index_nplike
            # Only erase the option when no entry is actually missing
            # (skipped when the backend's data is not known, e.g. typetracer).
            assert (not (ensure_empty_mask and index_nplike.known_data and index_nplike.any(this.mask_as_bool(valid_when=False)))), 'did not expect option type, but arrow returned a non-erasable option'
            if isinstance(this, IndexedOptionArray):
                # Keep the index mapping but drop the option-ness.
                return IndexedArray(this.index, this.content, parameters=this._parameters)
            else:
                return this.content
    return recursively_apply(layout, action, return_simplified=False)
class AdaptiveMaxPool2d(_AdaptiveMaxPoolNd):
    """2D adaptive max pooling with the configured output size."""

    def forward(self, input):
        # Delegate to the functional API; self.return_indices controls
        # whether argmax indices are returned alongside the pooled values.
        pooled = F.adaptive_max_pool2d(input, self.output_size, self.return_indices)
        return pooled
def main():
    """Encode bubble sort over a symbolic array in Z3 and check it.

    First asks Z3 whether the negated postcondition is satisfiable (a
    counterexample would refute validity), then asks for a concrete model
    satisfying the postcondition.
    """
    size = int(input('size of the array: '))
    # SSA-style copies: one loop-counter / array version per transition step.
    i_vars = [Int(f'i_{x}') for x in range(size + 1)]
    j_vars = [Int(f'j_{x}') for x in range(size + 1)]
    arrays = [Array(f'A_{x}', IntSort(), IntSort()) for x in range(size + 1)]
    temps = [Int(f'tmp_{x}') for x in range(size)]
    solver = Solver()
    solver.add(init(i_vars[0], j_vars[0]))
    solver.add(And(*mk_tran_condition(arrays, i_vars, j_vars, temps, size)))
    out_values = [Int(f'n_{x}') for x in range(size)]
    solver.add(And(*check(out_values, arrays[-1], size)))
    post_condition = mk_post_condition(out_values)
    print('Bubble sort')
    print('')
    solver.push()
    # Validity check: the negated postcondition must be unsatisfiable.
    solver.add(Not(post_condition))
    print('Testing the validity of the algorithm; `valid expected`:')
    if solver.check() == sat:
        print(f'''counterexample:
{solver.model()}''')
    else:
        print('valid')
    print('')
    solver.pop()
    # Now ask for a concrete execution satisfying the postcondition.
    solver.add(post_condition)
    print('Getting a model...')
    print('Model:')
    if solver.check() == sat:
        print(solver.model())
def _patch_missing_buffers_for_deser(lev_model, lm_model_cls, Vocab, config, key, axis_mapping):
    """Fill in model buffers that deserialization left as ShapeDtypeStructs.

    Checkpoints may omit non-parameter buffers.  Any leaf that is still a
    jax.ShapeDtypeStruct after loading is re-initialized from a freshly
    constructed model and merged back into the real arrays.

    Returns the model unchanged when nothing is missing.
    """
    dtype_structs, real_arrays = eqx.partition(lev_model, lambda x: isinstance(x, jax.ShapeDtypeStruct))
    buffer_leaves = jax.tree_util.tree_leaves(dtype_structs)
    if len(buffer_leaves) == 0:
        return lev_model

    # BUG FIX: `_jit(axis_resources=axis_mapping)` was a bare statement whose
    # result was discarded; it is a decorator factory and must actually wrap
    # _init_buffers so the init runs jitted with the given sharding.
    @_jit(axis_resources=axis_mapping)
    def _init_buffers():
        new_model = lm_model_cls.init(Vocab, config, key=key)

        def select_if_missing(missing_leaf, new_value):
            # Keep the freshly initialized value only where the loaded model
            # had a placeholder; None elsewhere so eqx.combine ignores it.
            if isinstance(missing_leaf, jax.ShapeDtypeStruct):
                return new_value
            else:
                return None
        return jax.tree_map(select_if_missing, dtype_structs, new_model, is_leaf=(lambda x: (x is None)))

    new_buffers = _init_buffers()
    result = eqx.combine(real_arrays, new_buffers)
    return result
class LRS2Pretrain(Dataset):
    """LRS2 pretrain dataset yielding (input, target, inputLen, targetLen).

    For the 'pretrain' split, __len__ is pinned to stepSize and each index
    maps to a random sample among indices congruent to it mod stepSize, so
    one "epoch" is a fixed-size step over the full file list.
    """

    def __init__(self, dataset, datadir, numWords, charToIx, stepSize, videoParams):
        super(LRS2Pretrain, self).__init__()
        with open(datadir + '/' + dataset + '.txt', 'r') as f:
            lines = f.readlines()
        self.datalist = [datadir + '/pretrain/' + line.strip() for line in lines]
        self.numWords = numWords
        self.charToIx = charToIx
        self.dataset = dataset
        self.stepSize = stepSize
        self.videoParams = videoParams
        return

    def __getitem__(self, index):
        if self.dataset == 'pretrain':
            # Candidates are all dataset indices congruent to `index`
            # modulo stepSize; pick one uniformly at random.
            base = self.stepSize * np.arange(int(len(self.datalist) / self.stepSize) + 1)
            candidates = base + index
            candidates = candidates[candidates < len(self.datalist)]
            index = np.random.choice(candidates)
        visualFeaturesFile = self.datalist[index] + '.npy'
        targetFile = self.datalist[index] + '.txt'
        inp, trgt, inpLen, trgtLen = prepare_pretrain_input(visualFeaturesFile, targetFile, self.numWords, self.charToIx, self.videoParams)
        return (inp, trgt, inpLen, trgtLen)

    def __len__(self):
        # The pretrain split exposes a fixed number of steps per epoch.
        return self.stepSize if self.dataset == 'pretrain' else len(self.datalist)
def split_normalize_with_librosa(audio, top_db=50, frame_length=1024, hop_length=256, skip_idx=0):
    """Split audio on silence and peak-normalize each voiced segment.

    Regions outside the detected intervals are left at zero; the first
    `skip_idx` intervals are ignored entirely.
    """
    intervals = librosa.effects.split(audio, top_db=top_db, frame_length=frame_length, hop_length=hop_length)
    out = np.zeros_like(audio)
    for idx, (start, end) in enumerate(intervals[skip_idx:]):
        if start == end:
            # Degenerate interval: nothing to normalize, skip it.
            print('Warning: splitting in librosa resulted in an empty segment')
            continue
        out[start:end] = librosa.util.normalize(audio[start:end])
    return out
@_utils.test()
def test_python_scope_inplace_operator():
    """Python-scope in-place matrix ops must agree with NumPy."""
    # NOTE(review): the bare `_utils.test()` statement preceding this def
    # looks like a decorator that lost its '@'; restored as a decorator
    # here — confirm against the original test harness.
    for ops in inplace_operation_types:
        (a, b) = test_matrix_arrays[:2]
        (m1, m2) = (ti.Matrix(a), ti.Matrix(b))
        m1 = ops(m1, m2)
        assert np.allclose(m1.to_numpy(), ops(a, b))
def update_cfg(cfg_file):
    """Load the default config, overlay `cfg_file`, and return a clone."""
    merged = get_cfg_defaults()
    merged.merge_from_file(cfg_file)
    return merged.clone()
def single_token_to_obj(token):
    """Translate a 'kind_arg[_arg]' token into a music21 object.

    Handles clef, key-signature and time-signature tokens.  Unrecognized
    tokens return None implicitly, matching the original contract.
    """
    parts = token.split('_')
    kind = parts[0]
    if kind == 'clef':
        if parts[1] == 'treble':
            return clef.TrebleClef()
        if parts[1] == 'bass':
            return clef.BassClef()
    elif kind == 'key':
        if parts[1] == 'sharp':
            return key.KeySignature(int(parts[2]))
        if parts[1] == 'flat':
            return key.KeySignature(-1 * int(parts[2]))
        if parts[1] == 'natural':
            return key.KeySignature(0)
    elif kind == 'time':
        if '/' in parts[1]:
            return meter.TimeSignature(parts[1])
        # Bare numerals: small counts read as quarter-note meters,
        # larger ones as eighth-note meters.
        denominator = '/4' if int(parts[1]) < 6 else '/8'
        return meter.TimeSignature(parts[1] + denominator)
class SModel(fluid.dygraph.Layer):
    """Subject-extraction model (Paddle dygraph).

    Token features pass through a stack of gated dilated residual 1D convs,
    then multi-head self-attention; head/tail classifiers predict subject
    span boundaries over (num_subs + 1) classes.
    """

    def __init__(self, embedding_size: int, hidden_size: int, max_len: int, num_subs: int):
        # embedding_size: width of the incoming sentence vectors.
        # hidden_size: internal feature width after the input projection.
        # max_len: maximum sequence length (size of the position table).
        # num_subs: number of subject classes (one extra for "none").
        super(SModel, self).__init__(None)
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.max_len = max_len
        self.num_subs = num_subs
        self.position_embedding = fluid.dygraph.Embedding(size=[self.max_len, self.hidden_size])
        # Projects input sentence vectors down/up to hidden_size.
        self.linear = fluid.dygraph.Linear(self.embedding_size, self.hidden_size)
        # Dilations 1,2,4 widen the receptive field, then three dilation-1
        # blocks refine locally.
        self.gdr1 = GatedDilatedResidualConv1D(self.hidden_size, 1)
        self.gdr2 = GatedDilatedResidualConv1D(self.hidden_size, 2)
        self.gdr3 = GatedDilatedResidualConv1D(self.hidden_size, 4)
        self.gdr4 = GatedDilatedResidualConv1D(self.hidden_size, 1)
        self.gdr5 = GatedDilatedResidualConv1D(self.hidden_size, 1)
        self.gdr6 = GatedDilatedResidualConv1D(self.hidden_size, 1)
        self.attention = MultiHeadSelfAttention(self.hidden_size, 8, 16)
        # Fuses [conv features ; attention features] back to hidden_size.
        self.conv1d = Conv1D(input_dim=(2 * self.hidden_size), output_dim=self.hidden_size, kernel_size=3, act='relu')
        self.sub_heads = fluid.dygraph.Linear(self.hidden_size, (self.num_subs + 1))
        self.sub_tails = fluid.dygraph.Linear(self.hidden_size, (self.num_subs + 1))

    def forward(self, token_ids, sent_vec):
        """Return (head logits, tail logits, conv features, padding mask)."""
        # Padding mask: 1.0 where token id > 0, else 0.0; shape gains a
        # trailing singleton dim so it broadcasts over features.
        mask = fluid.layers.unsqueeze(token_ids, [2])
        mask = fluid.layers.cast((mask > fluid.dygraph.to_variable(np.array([0]))), 'float32')
        position = pos(token_ids)
        position_embeddings = self.position_embedding(position)
        sent_vec = self.linear(sent_vec)
        features = fluid.layers.elementwise_add(sent_vec, position_embeddings)
        # Dropout then re-mask so padded positions stay zero.
        features = (fluid.layers.dropout(features, dropout_prob=0.25) * mask)
        features = self.gdr1(features, mask)
        features = self.gdr2(features, mask)
        features = self.gdr3(features, mask)
        features = self.gdr4(features, mask)
        features = self.gdr5(features, mask)
        features = self.gdr6(features, mask)
        attention_features = self.attention(features, features, features, mask)
        sub_features = fluid.layers.concat([features, attention_features], axis=(- 1))
        sub_features = self.conv1d(sub_features)
        pred_sub_heads = self.sub_heads(sub_features)
        pred_sub_tails = self.sub_tails(sub_features)
        return (pred_sub_heads, pred_sub_tails, features, mask)
def wide_resnet_cifar_bn_wo_pooling_dropout():
    """Wide-ResNet depth 10, widen factor 4, dropout 0.3 for 10 classes,
    with batch norm enabled and pooling disabled."""
    model = Wide_ResNet(10, 4, 0.3, 10, use_bn=True, use_pooling=False)
    return model
def get_inference_trainer_params():
    """Build the LatentInferenceTrainer config dict.

    Latent training is enabled (every step) only when USE_LATENT is set.
    """
    trainer_params = d(
        train_every_n_steps=(1 if USE_LATENT else 0),
        latent_learning_rate=0.0001,
        log_every_n_steps=100.0,
        save_every_n_steps=0,
        train_min_buffer_size=2,
        obs_to_output_obs_fn=obs_to_output_obs_fn,
    )
    return d(cls=LatentInferenceTrainer, params=trainer_params)
def eulers_method_2x2(f, g, t0, x0, y0, h, t1, algorithm='table'):
    """Euler's method for the 2x2 system x' = f(t,x,y), y' = g(t,x,y).

    With algorithm='table' each step is printed and nothing is returned;
    any other value returns the list of [t, x, y] rows (n + 2 entries for
    n = int((t1 - t0)/h) steps).
    """
    tabulate = (algorithm == 'table')
    if tabulate:
        print('%10s %20s %25s %20s %20s' % ('t', 'x', 'h*f(t,x,y)', 'y', 'h*g(t,x,y)'))
    steps = int((1.0 * (t1 - t0)) / h)
    t, x, y = t0, x0, y0
    trajectory = [[t, x, y]]
    for _ in range(steps + 1):
        # Both increments are computed from the *current* (t, x, y) so the
        # x- and y-updates are simultaneous.
        dx = h * f(t, x, y)
        dy = h * g(t, x, y)
        if tabulate:
            print('%10r %20r %25r %20r %20r' % (t, x, dx, y, dy))
        x, y = x + dx, y + dy
        t = t + h
        trajectory.append([t, x, y])
    if not tabulate:
        return trajectory
class FlaxBlenderbotForConditionalGeneration(metaclass=DummyObject):
    """Auto-generated placeholder used when the Flax backend is unavailable.

    Instantiating it raises an informative error via requires_backends
    telling the user to install flax.
    """
    # Backends this dummy stands in for.
    _backends = ['flax']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax'])
def format_prompt(example: dict, prompt_dict: dict) -> str:
    """Render the prompt template for one example.

    Uses `prompt_noinputs` when the example's 'input' field is None or
    empty, otherwise `prompt_inputs`.  The template is filled via
    format_map with the example itself, so it may reference any key.
    """
    assert 'instruction' in example and 'input' in example, 'Internal error: example missing required keys.'
    has_input = example['input'] is not None and len(example['input']) > 0
    template = prompt_dict['prompt_inputs'] if has_input else prompt_dict['prompt_noinputs']
    return template.format_map(example)
class Sampler():
    """BPR-style sampler yielding (user, item, positive-flag) triples.

    Built from `indexed_ratings` (dict user -> {item: rating}).  Each
    sample picks a random user, then with probability 1/2 one of their
    seen items (flag 1) or an unseen item (flag 0).
    """

    def __init__(self, indexed_ratings):
        self._indexed_ratings = indexed_ratings
        self._users = list(self._indexed_ratings.keys())
        self._nusers = len(self._users)
        self._items = list({k for a in self._indexed_ratings.values() for k in a.keys()})
        self._nitems = len(self._items)
        # Per-user interacted-item lists and their lengths.
        self._ui_dict = {u: list(set(indexed_ratings[u])) for u in indexed_ratings}
        self._lui_dict = {u: len(v) for (u, v) in self._ui_dict.items()}

    def step(self, events: int, batch_size: int):
        """Yield batches of (users, items, flags) numpy arrays covering `events` samples."""
        r_int = np.random.randint
        n_users = self._nusers
        n_items = self._nitems
        ui_dict = self._ui_dict
        lui_dict = self._lui_dict

        def sample():
            u = r_int(n_users)
            ui = ui_dict[u]
            lui = lui_dict[u]
            if lui == n_items:
                # BUG FIX: the original called sample() recursively but
                # discarded the result and fell through, so a user who has
                # seen every item could send the `while i in ui` loop below
                # into an infinite spin; propagate the resampled triple.
                return sample()
            b = random.getrandbits(1)
            if b:
                # Positive sample: one of the user's seen items.
                i = ui[r_int(lui)]
            else:
                # Negative sample: rejection-sample an unseen item.
                i = r_int(n_items)
                while i in ui:
                    i = r_int(n_items)
            return (u, i, b)

        for batch_start in range(0, events, batch_size):
            u, i, b = map(np.array, zip(*[sample() for _ in range(batch_start, min(batch_start + batch_size, events))]))
            yield (u, i, b)
def round_if_needed(x, eps=0.0001):
    """Snap `x` to an adjacent integer when within `eps`, else float(x).

    None passes through unchanged.  The ceiling is tried before the floor,
    matching the original precedence.
    """
    if x is None:
        return x
    for candidate in (float(np.ceil(x)), float(np.floor(x))):
        if abs(candidate - x) < eps:
            return candidate
    return float(x)
@pytest.mark.parametrize('X', [[['1', '2'], ['3', '4']], np.array([['1', '2'], ['3', '4']], dtype='U'), np.array([['1', '2'], ['3', '4']], dtype='S'), [[b'1', b'2'], [b'3', b'4']], np.array([[b'1', b'2'], [b'3', b'4']], dtype='V1')])
def test_check_array_numeric_error(X):
    """check_array(dtype='numeric') must reject byte/string arrays."""
    # NOTE(review): the parametrize line began with a bare '.', i.e. a
    # decorator whose '@pytest.mark' prefix was mangled; restored here —
    # confirm against the original test file.
    expected_msg = "dtype='numeric' is not compatible with arrays of bytes/strings"
    with pytest.raises(ValueError, match=expected_msg):
        check_array(X, dtype='numeric')
@_utils.test(arch=[ti.opengl, ti.vulkan])
def test_opengl_8_ssbo():
    """Several ndarrays bound at once must all be writable from one kernel."""
    # NOTE(review): the bare `_utils.test(...)` statement and the
    # un-decorated `init` function look like decorators whose '@' markers
    # were lost; restored as `@_utils.test(...)` and `@ti.kernel` — without
    # the kernel decorator the plain-Python loop would not fill the
    # ndarrays and the asserts below would fail.  Confirm against the
    # original test file.
    n = 4
    density1 = ti.ndarray(dtype=ti.f32, shape=(4, 4))
    density2 = ti.ndarray(dtype=ti.f32, shape=(4, 4))
    density3 = ti.ndarray(dtype=ti.f32, shape=(4, 4))
    density4 = ti.ndarray(dtype=ti.f32, shape=(4, 4))
    density5 = ti.ndarray(dtype=ti.f32, shape=(4, 4))
    density6 = ti.ndarray(dtype=ti.f32, shape=(4, 4))

    @ti.kernel
    def init(d: ti.i32, density1: ti.types.ndarray(), density2: ti.types.ndarray(), density3: ti.types.ndarray(), density4: ti.types.ndarray(), density5: ti.types.ndarray(), density6: ti.types.ndarray()):
        for (i, j) in density1:
            density1[(i, j)] = (d + 1)
            density2[(i, j)] = (d + 2)
            density3[(i, j)] = (d + 3)
            density4[(i, j)] = (d + 4)
            density5[(i, j)] = (d + 5)
            density6[(i, j)] = (d + 6)

    init(0, density1, density2, density3, density4, density5, density6)
    assert (density1.to_numpy() == (np.zeros(shape=(n, n)) + 1)).all()
    assert (density2.to_numpy() == (np.zeros(shape=(n, n)) + 2)).all()
    assert (density3.to_numpy() == (np.zeros(shape=(n, n)) + 3)).all()
    assert (density4.to_numpy() == (np.zeros(shape=(n, n)) + 4)).all()
    assert (density5.to_numpy() == (np.zeros(shape=(n, n)) + 5)).all()
    assert (density6.to_numpy() == (np.zeros(shape=(n, n)) + 6)).all()
class BPRSlimModel(object):
    """SLIM item-item similarity model trained with BPR-style SGD.

    The learned similarity matrix `_s_dense` is (num_items, num_items);
    scores are sums of similarities between a candidate item and the items
    the user has already seen.
    """

    def __init__(self, data, num_users, num_items, lr, lj_reg, li_reg, sampler, random_seed=42):
        # lr: SGD learning rate; li_reg / lj_reg: regularization applied to
        # the positive-item (i) and negative-item (j) rows respectively.
        self._data = data
        self._num_users = num_users
        self._num_items = num_items
        self._sp_i_train_ratings = self._data.sp_i_train_ratings
        self._lr = lr
        self._lj_reg = lj_reg
        self._li_reg = li_reg
        self._sampler = sampler
        self._random_seed = random_seed
        self._random_state = np.random.RandomState(self._random_seed)
        # CSR structure of the train matrix, used to walk each user's seen items.
        self._mask_indices = np.array(self._sp_i_train_ratings.indices, dtype=np.int32)
        self._mask_indptr = np.array(self._sp_i_train_ratings.indptr, dtype=np.int32)
        # NOTE(review): np.empty leaves _s_dense uninitialized (garbage values)
        # and train_step reads it before any write — presumably weights are
        # expected to be zeroed or loaded elsewhere; confirm.
        self._s_dense = np.empty((self._num_items, self._num_items), np.double)

    def train_step(self, batch):
        """One BPR update for a (user, positive item, negative item) triple; returns the loss."""
        (u, i, j) = batch
        x_uij = 0.0
        index = 0
        seen_items_start_pos = self._mask_indptr[u]
        seen_items_end_pos = self._mask_indptr[(u + 1)]
        # x_uij = score(u, i) - score(u, j) accumulated over u's seen items.
        while (index < (seen_items_end_pos - seen_items_start_pos)):
            seenItem = self._mask_indices[(seen_items_start_pos + index)]
            index += 1
            x_uij += (self._s_dense[(i, seenItem)] - self._s_dense[(j, seenItem)])
        # Sigmoid-derived BPR gradient.
        gradient = (1 / (1 + np.exp(x_uij)))
        loss = (np.sum(x_uij) ** 2)
        index = 0
        while (index < (seen_items_end_pos - seen_items_start_pos)):
            seenItem = self._mask_indices[(seen_items_start_pos + index)]
            index += 1
            # Skip the diagonal-like self-similarity entries (i,i) and (j,j).
            if (seenItem != i):
                self._s_dense[(i, seenItem)] += (self._lr * (gradient - (self._li_reg * self._s_dense[(i, seenItem)])))
            if (seenItem != j):
                self._s_dense[(j, seenItem)] -= (self._lr * (gradient - (self._lj_reg * self._s_dense[(j, seenItem)])))
        return loss

    def predict(self, u, i):
        """Score item `i` for user `u`: sum of similarities to u's seen items."""
        x_ui = 0.0
        index = 0
        seen_items_start_pos = self._mask_indptr[u]
        seen_items_end_pos = self._mask_indptr[(u + 1)]
        while (index < (seen_items_end_pos - seen_items_start_pos)):
            seenItem = self._mask_indices[(seen_items_start_pos + index)]
            index += 1
            x_ui += self._s_dense[(i, seenItem)]
        return x_ui

    def get_user_recs(self, user, mask, k=100):
        """Return the top-k (item, score) pairs for `user` among unmasked items."""
        user_mask = mask[self._data.public_users[user]]
        predictions = {i: self.predict(user, i) for i in self._data.items if user_mask[self._data.public_items[i]]}
        (indices, values) = zip(*predictions.items())
        indices = np.array(indices)
        values = np.array(values)
        local_k = min(k, len(values))
        # argpartition selects the k best without a full sort, then the
        # selected slice is sorted descending.
        partially_ordered_preds_indices = np.argpartition(values, (- local_k))[(- local_k):]
        real_values = values[partially_ordered_preds_indices]
        real_indices = indices[partially_ordered_preds_indices]
        local_top_k = real_values.argsort()[::(- 1)]
        return [(real_indices[item], real_values[item]) for item in local_top_k]

    def get_model_state(self):
        """Return the serializable state (the similarity matrix)."""
        saving_dict = {}
        saving_dict['_s_dense'] = self._s_dense
        return saving_dict

    def set_model_state(self, saving_dict):
        self._s_dense = saving_dict['_s_dense']

    def load_weights(self, path):
        # NOTE: pickle.load on untrusted files is unsafe; only load weights
        # you produced yourself.
        with open(path, 'rb') as f:
            self.set_model_state(pickle.load(f))

    def save_weights(self, path):
        with open(path, 'wb') as f:
            pickle.dump(self.get_model_state(), f)
class LocalPlotSaver(BasePlotSaver):
    """Plot saver that writes figures to a local assets directory."""

    def __init__(self, request, assets_dirpath):
        super(LocalPlotSaver, self).__init__(request, assets_dirpath=assets_dirpath)

    def upload(self, report):
        """Save each collected plot as a PNG and register its HTML thumbnail."""
        for plot, name in self._plots:
            target_path = os.path.join(self.assets_dirpath, f'{name}.png')
            self.save(plot, target_path, report)
            self.plot_html.append(extras.html(thumbnail_html_local.format(name=name)))
class ObjectDetectionEvaluation(object):
    """State and logic for per-class object-detection evaluation.

    Ground truth and detections are accumulated image by image; evaluate()
    then computes per-class average precision, mean AP (optionally weighted
    by ground-truth count) and CorLoc.
    """

    def __init__(self, num_groundtruth_classes, matching_iou_threshold=0.5, nms_iou_threshold=1.0, nms_max_output_boxes=10000, use_weighted_mean_ap=False, label_id_offset=0):
        if num_groundtruth_classes < 1:
            raise ValueError('Need at least 1 groundtruth class for evaluation.')
        self.per_image_eval = per_image_evaluation.PerImageEvaluation(num_groundtruth_classes=num_groundtruth_classes, matching_iou_threshold=matching_iou_threshold)
        self.num_class = num_groundtruth_classes
        self.use_weighted_mean_ap = use_weighted_mean_ap
        self.label_id_offset = label_id_offset
        # Per-image ground-truth state, keyed by image key.
        self.groundtruth_boxes = {}
        self.groundtruth_class_labels = {}
        self.groundtruth_masks = {}
        self.groundtruth_is_difficult_list = {}
        self.groundtruth_is_group_of_list = {}
        self.num_gt_instances_per_class = np.zeros(self.num_class, dtype=int)
        self.num_gt_imgs_per_class = np.zeros(self.num_class, dtype=int)
        self._initialize_detections()

    def _initialize_detections(self):
        """Reset all detection-side accumulators."""
        self.detection_keys = set()
        self.scores_per_class = [[] for _ in range(self.num_class)]
        self.tp_fp_labels_per_class = [[] for _ in range(self.num_class)]
        self.num_images_correctly_detected_per_class = np.zeros(self.num_class)
        self.average_precision_per_class = np.empty(self.num_class, dtype=float)
        self.average_precision_per_class.fill(np.nan)
        self.precisions_per_class = []
        self.recalls_per_class = []
        self.corloc_per_class = np.ones(self.num_class, dtype=float)

    def clear_detections(self):
        self._initialize_detections()

    def add_single_ground_truth_image_info(self, image_key, groundtruth_boxes, groundtruth_class_labels, groundtruth_is_difficult_list=None, groundtruth_is_group_of_list=None, groundtruth_masks=None):
        """Record ground truth for one image; duplicate keys are ignored with a warning."""
        if image_key in self.groundtruth_boxes:
            logging.warning('image %s has already been added to the ground truth database.', image_key)
            return
        self.groundtruth_boxes[image_key] = groundtruth_boxes
        self.groundtruth_class_labels[image_key] = groundtruth_class_labels
        self.groundtruth_masks[image_key] = groundtruth_masks
        # Missing difficult / group-of flags default to all-False.
        if groundtruth_is_difficult_list is None:
            num_boxes = groundtruth_boxes.shape[0]
            groundtruth_is_difficult_list = np.zeros(num_boxes, dtype=bool)
        self.groundtruth_is_difficult_list[image_key] = groundtruth_is_difficult_list.astype(dtype=bool)
        if groundtruth_is_group_of_list is None:
            num_boxes = groundtruth_boxes.shape[0]
            groundtruth_is_group_of_list = np.zeros(num_boxes, dtype=bool)
        self.groundtruth_is_group_of_list[image_key] = groundtruth_is_group_of_list.astype(dtype=bool)
        self._update_ground_truth_statistics(groundtruth_class_labels, groundtruth_is_difficult_list.astype(dtype=bool), groundtruth_is_group_of_list.astype(dtype=bool))

    def add_single_detected_image_info(self, image_key, detected_boxes, detected_scores, detected_class_labels, detected_masks=None):
        """Score one image's detections against its ground truth (empty GT if none)."""
        if (len(detected_boxes) != len(detected_scores)) or (len(detected_boxes) != len(detected_class_labels)):
            # BUG FIX: the original passed the three lengths as extra
            # ValueError arguments instead of %-formatting them, so the
            # format string itself raised "not enough arguments" instead of
            # the intended message.
            raise ValueError('detected_boxes, detected_scores and detected_class_labels should all have same lengths. Got[%d, %d, %d]' % (len(detected_boxes), len(detected_scores), len(detected_class_labels)))
        if image_key in self.detection_keys:
            logging.warning('image %s has already been added to the detection result database', image_key)
            return
        self.detection_keys.add(image_key)
        if image_key in self.groundtruth_boxes:
            groundtruth_boxes = self.groundtruth_boxes[image_key]
            groundtruth_class_labels = self.groundtruth_class_labels[image_key]
            # Masks are popped (not read) to release memory after use.
            groundtruth_masks = self.groundtruth_masks.pop(image_key)
            groundtruth_is_difficult_list = self.groundtruth_is_difficult_list[image_key]
            groundtruth_is_group_of_list = self.groundtruth_is_group_of_list[image_key]
        else:
            # No ground truth for this image: evaluate against empty arrays.
            groundtruth_boxes = np.empty(shape=[0, 4], dtype=float)
            groundtruth_class_labels = np.array([], dtype=int)
            if detected_masks is None:
                groundtruth_masks = None
            else:
                groundtruth_masks = np.empty(shape=[0, 1, 1], dtype=float)
            groundtruth_is_difficult_list = np.array([], dtype=bool)
            groundtruth_is_group_of_list = np.array([], dtype=bool)
        (scores, tp_fp_labels) = self.per_image_eval.compute_object_detection_metrics(detected_boxes=detected_boxes, detected_scores=detected_scores, detected_class_labels=detected_class_labels, groundtruth_boxes=groundtruth_boxes, groundtruth_class_labels=groundtruth_class_labels, groundtruth_is_difficult_list=groundtruth_is_difficult_list, groundtruth_is_group_of_list=groundtruth_is_group_of_list, detected_masks=detected_masks, groundtruth_masks=groundtruth_masks)
        for i in range(self.num_class):
            if scores[i].shape[0] > 0:
                self.scores_per_class[i].append(scores[i])
                self.tp_fp_labels_per_class[i].append(tp_fp_labels[i])

    def _update_ground_truth_statistics(self, groundtruth_class_labels, groundtruth_is_difficult_list, groundtruth_is_group_of_list):
        """Update per-class instance and image counts (difficult/group-of excluded from instances)."""
        for class_index in range(self.num_class):
            num_gt_instances = np.sum((groundtruth_class_labels[((~ groundtruth_is_difficult_list) & (~ groundtruth_is_group_of_list))] == class_index))
            self.num_gt_instances_per_class[class_index] += num_gt_instances
            if np.any((groundtruth_class_labels == class_index)):
                self.num_gt_imgs_per_class[class_index] += 1

    def evaluate(self):
        """Compute per-class AP, mean AP and CorLoc from the accumulated state."""
        if (self.num_gt_instances_per_class == 0).any():
            logging.info('The following classes have no ground truth examples: %s', (np.squeeze(np.argwhere((self.num_gt_instances_per_class == 0))) + self.label_id_offset))
        if self.use_weighted_mean_ap:
            all_scores = np.array([], dtype=float)
            all_tp_fp_labels = np.array([], dtype=bool)
        for class_index in range(self.num_class):
            # Classes with no ground truth keep their NaN AP.
            if self.num_gt_instances_per_class[class_index] == 0:
                continue
            if not self.scores_per_class[class_index]:
                scores = np.array([], dtype=float)
                tp_fp_labels = np.array([], dtype=bool)
            else:
                scores = np.concatenate(self.scores_per_class[class_index])
                tp_fp_labels = np.concatenate(self.tp_fp_labels_per_class[class_index])
            if self.use_weighted_mean_ap:
                all_scores = np.append(all_scores, scores)
                all_tp_fp_labels = np.append(all_tp_fp_labels, tp_fp_labels)
            (precision, recall) = metrics.compute_precision_recall(scores, tp_fp_labels, self.num_gt_instances_per_class[class_index])
            # NOTE: these lists keep growing if evaluate() is called twice
            # without clear_detections(); preserved as-is.
            self.precisions_per_class.append(precision)
            self.recalls_per_class.append(recall)
            average_precision = metrics.compute_average_precision(precision, recall)
            self.average_precision_per_class[class_index] = average_precision
        self.corloc_per_class = metrics.compute_cor_loc(self.num_gt_imgs_per_class, self.num_images_correctly_detected_per_class)
        if self.use_weighted_mean_ap:
            num_gt_instances = np.sum(self.num_gt_instances_per_class)
            (precision, recall) = metrics.compute_precision_recall(all_scores, all_tp_fp_labels, num_gt_instances)
            mean_ap = metrics.compute_average_precision(precision, recall)
        else:
            mean_ap = np.nanmean(self.average_precision_per_class)
        mean_corloc = np.nanmean(self.corloc_per_class)
        return ObjectDetectionEvalMetrics(self.average_precision_per_class, mean_ap, self.precisions_per_class, self.recalls_per_class, self.corloc_per_class, mean_corloc)
def load_data(args):
    """Load a graph dataset selected by args.dataset_str.

    Supported values are 'dblp' and 'example'.

    Returns:
        (adj_list, features, train_data, train_label, test_data,
        test_label, paras) where paras = [node_size, node_embedding,
        class_size, train_size].

    Raises:
        ValueError: for an unrecognized dataset name (the original fell
        through to an UnboundLocalError instead).
    """
    if args.dataset_str == 'dblp':
        (adj_list, features, train_data, train_label, test_data, test_label) = load_data_dblp()
    elif args.dataset_str == 'example':
        (adj_list, features, train_data, train_label, test_data, test_label) = load_example_gem()
    else:
        raise ValueError('Unknown dataset_str: %s' % args.dataset_str)
    node_size = features.shape[0]
    node_embedding = features.shape[1]
    class_size = train_label.shape[1]
    train_size = len(train_data)
    paras = [node_size, node_embedding, class_size, train_size]
    return (adj_list, features, train_data, train_label, test_data, test_label, paras)
def _insert_encoder_sequence_length_node_(gm: GraphModule, lint_and_recompile: bool=True) -> Node:
    """Insert a node computing the encoder sequence length into an FX graph.

    Looks for encoder-side placeholders (dummy inputs whose name does not
    contain 'decoder') and inserts a ``size`` call right after each one; the
    sequence dimension is 1, or 2 when ``gm.num_choices >= 0`` (an extra
    choices dimension precedes it).

    Returns the inserted node.  Raises ValueError when no suitable
    placeholder was found.
    """
    graph = gm.graph
    input_names = set(gm.dummy_inputs.keys())
    encoder_sequence_length_node = None
    # NOTE(review): if several encoder placeholders match, a size node is
    # inserted after each one and only the last is returned — confirm that
    # is intended.
    for node in graph.nodes:
        if ((node.op == 'placeholder') and (node.name in input_names) and ('decoder' not in node.name)):
            with graph.inserting_after(node):
                encoder_sequence_length_node = graph.call_method('size', args=(node, (1 if (gm.num_choices < 0) else 2)))
    if (encoder_sequence_length_node is None):
        raise ValueError('Could not insert the node that computes the encoder sequence length')
    if lint_and_recompile:
        graph.lint()
        gm.recompile()
    # Keep the quantization qconfig map consistent with the new node.
    if hasattr(gm, '_qconfig_map'):
        gm._qconfig_map[encoder_sequence_length_node.name] = None
    return encoder_sequence_length_node
def _validate_pdist_input(X, m, n, metric_info, **kwargs):
    """Coerce X to a dtype the metric supports and validate its kwargs.

    Keeps X's own dtype when the metric supports it, otherwise converts to
    the metric's preferred (first listed) type.
    """
    supported = metric_info.types
    if X.dtype in supported:
        typ = supported[supported.index(X.dtype)]
    else:
        typ = supported[0]
    X = _convert_to_type(X, out_type=typ)
    validator = metric_info.validator
    if validator:
        kwargs = validator(X, m, n, **kwargs)
    return (X, typ, kwargs)
def _get_vcoco_instances_meta():
    """Build V-COCO metadata: object classes, colors, id mapping, actions."""
    things = [k for k in VCOCO_OBJECTS if k['isthing'] == 1]
    thing_ids = [k['id'] for k in things]
    assert len(thing_ids) == 80, len(thing_ids)
    ret = {
        'thing_dataset_id_to_contiguous_id': {k: i for i, k in enumerate(thing_ids)},
        'thing_classes': [k['name'] for k in things],
        'thing_colors': [k['color'] for k in things],
        # Every class is "known"; the open-vocabulary novel set is empty.
        'known_classes': [k['name'] for k in things],
        'novel_classes': [],
        'person_cls_id': [k['id'] for k in VCOCO_OBJECTS if k['name'] == 'person'][0],
        'action_classes': [k['name'] for k in VCOCO_ACTIONS],
    }
    return ret
class DenseResidualBlock(nn.Module):
    """Densely connected residual block (the RRDB building block).

    Each 3x3 conv sees the concatenation of the block input and all
    previous conv outputs; the last conv's output is scaled by res_scale
    and added back to the input.
    """

    def __init__(self, filters, res_scale=0.2):
        super(DenseResidualBlock, self).__init__()
        self.res_scale = res_scale

        def block(in_features, non_linearity=True):
            # 3x3 same-padding conv, optionally followed by LeakyReLU.
            layers = [nn.Conv2d(in_features, filters, 3, 1, 1, bias=True)]
            if non_linearity:
                layers += [nn.LeakyReLU()]
            return nn.Sequential(*layers)

        # Input width grows by `filters` channels per stage (dense links).
        self.b1 = block(in_features=(1 * filters))
        self.b2 = block(in_features=(2 * filters))
        self.b3 = block(in_features=(3 * filters))
        self.b4 = block(in_features=(4 * filters))
        self.b5 = block(in_features=(5 * filters), non_linearity=False)
        self.blocks = [self.b1, self.b2, self.b3, self.b4, self.b5]

    def forward(self, x):
        features = x
        out = x
        for conv in self.blocks:
            out = conv(features)
            features = torch.cat([features, out], 1)
        return out.mul(self.res_scale) + x
class AsymptoticExpansionGenerators(SageObject):
    def Stirling(var, precision=None, skip_constant_factor=False):
        """Asymptotic expansion of the factorial (Stirling's formula).

        Exponentiates ``log_Stirling`` in an asymptotic ring whose growth
        group can express e^(n*log(n)); the constant factor sqrt(2*pi) is
        multiplied in unless ``skip_constant_factor`` is set.

        Raises ValueError when ``precision`` is below 3, since fewer terms
        cannot represent the expansion.
        """
        if (precision is None):
            precision = series_precision()
        if (precision < 3):
            raise ValueError('precision must be at least 3')
        # Work from the log-expansion (without its constant summand) ...
        log_Stirling = AsymptoticExpansionGenerators.log_Stirling(var, precision=precision, skip_constant_summand=True)
        # ... in a parent rich enough to hold exp(n*log(n)) terms.
        P = log_Stirling.parent().change_parameter(growth_group='(e^({n}*log({n})))^QQ * (e^{n})^QQ * {n}^QQ * log({n})^QQ'.format(n=var))
        from sage.functions.log import exp
        result = exp(P(log_Stirling))
        if (not skip_constant_factor):
            from sage.symbolic.ring import SR
            SCR = SR.subring(no_variables=True)
            # The sqrt(2*pi) constant of Stirling's formula.
            result *= (2 * SCR('pi')).sqrt()
        return result
    def log_Stirling(var, precision=None, skip_constant_summand=False):
        """Asymptotic expansion of log(n!).

        Builds n*log(n) - n + log(n)/2 + log(2*pi)/2 + (negative-power
        corrections), truncated with an O-term matching ``precision``.
        With ``skip_constant_summand`` the log(2*pi)/2 term is omitted and
        rational coefficients suffice.
        """
        if (not skip_constant_summand):
            from sage.symbolic.ring import SR
            # Constant term log(2*pi)/2 needs a symbolic coefficient ring.
            coefficient_ring = SR.subring(no_variables=True)
        else:
            from sage.rings.rational_field import QQ
            coefficient_ring = QQ
        from .asymptotic_ring import AsymptoticRing
        A = AsymptoticRing(growth_group='{n}^ZZ * log({n})^ZZ'.format(n=var), coefficient_ring=coefficient_ring)
        n = A.gen()
        if (precision is None):
            precision = series_precision()
        log = A.locals()['log']
        result = A.zero()
        # Leading terms, added only as far as the requested precision.
        if (precision >= 1):
            result += (n * log(n))
        if (precision >= 2):
            result += (- n)
        if (precision >= 3):
            result += (log(n) / 2)
        if ((precision >= 4) and (not skip_constant_summand)):
            result += (log((2 * coefficient_ring('pi'))) / 2)
        # Bernoulli-number corrections in negative powers of n.
        result += AsymptoticExpansionGenerators._log_StirlingNegativePowers_(var, (precision - 4))
        # O-term marking where the expansion was truncated.
        if (precision < 1):
            result += (n * log(n)).O()
        elif (precision == 1):
            result += n.O()
        elif (precision == 2):
            result += log(n).O()
        elif (precision == 3):
            result += A(1).O()
        return result
    def _log_StirlingNegativePowers_(var, precision):
        """Negative-power part of the log-Stirling expansion.

        Returns sum over even k of B_k / (k*(k-1)*n^(k-1)) up to
        ``precision`` terms, plus an O(n^-(2*precision+1)) tail; zero for
        negative precision.
        """
        from .asymptotic_ring import AsymptoticRing
        from sage.rings.rational_field import QQ
        A = AsymptoticRing(growth_group='{n}^ZZ'.format(n=var), coefficient_ring=QQ)
        if (precision < 0):
            return A.zero()
        n = A.gen()
        from sage.arith.misc import bernoulli
        from sage.arith.srange import srange
        # Even Bernoulli numbers only; odd ones (k >= 3) vanish.
        result = sum(((((bernoulli(k) / k) / (k - 1)) / (n ** (k - 1))) for k in srange(2, ((2 * precision) + 2), 2)), A.zero())
        return (result + (1 / (n ** ((2 * precision) + 1))).O())
    def HarmonicNumber(var, precision=None, skip_constant_summand=False):
        """Asymptotic expansion of the harmonic numbers H_n.

        Builds log(n) + euler_gamma + 1/(2n) - (Bernoulli corrections),
        truncated with an O-term matching ``precision``.  With
        ``skip_constant_summand`` the euler_gamma term is omitted and
        rational coefficients suffice.
        """
        if (not skip_constant_summand):
            from sage.symbolic.ring import SR
            # euler_gamma needs a symbolic coefficient ring.
            coefficient_ring = SR.subring(no_variables=True)
        else:
            from sage.rings.rational_field import QQ
            coefficient_ring = QQ
        from .asymptotic_ring import AsymptoticRing
        A = AsymptoticRing(growth_group='{n}^ZZ * log({n})^ZZ'.format(n=var), coefficient_ring=coefficient_ring)
        n = A.gen()
        if (precision is None):
            precision = series_precision()
        log = A.locals()['log']
        result = A.zero()
        if (precision >= 1):
            result += log(n)
        if ((precision >= 2) and (not skip_constant_summand)):
            from sage.symbolic.constants import euler_gamma
            result += coefficient_ring(euler_gamma)
        if (precision >= 3):
            result += (1 / (2 * n))
        from sage.arith.srange import srange
        from sage.arith.misc import bernoulli
        # Even-index Bernoulli corrections -B_k/(k*n^k).
        for k in srange(2, ((2 * precision) - 4), 2):
            result += (((- bernoulli(k)) / k) / (n ** k))
        # O-term marking where the expansion was truncated.
        if (precision < 1):
            result += log(n).O()
        elif (precision == 1):
            result += A(1).O()
        elif (precision == 2):
            result += (1 / n).O()
        else:
            result += (1 / (n ** ((2 * precision) - 4))).O()
        return result
    def Binomial_kn_over_n(var, k, precision=None, skip_constant_factor=False):
        """Asymptotic expansion of the binomial coefficient binomial(k*n, n).

        Derived from the log-Stirling negative-power corrections via
        log binomial(kn, n) = log (kn)! - log ((k-1)n)! - log n!, times the
        exponential growth factor (k^k/(k-1)^(k-1))^n * n^(-1/2) and the
        constant sqrt(k / (2*pi*(k-1))) unless ``skip_constant_factor``.
        """
        from sage.symbolic.ring import SR
        SCR = SR.subring(no_variables=True)
        try:
            SCR.coerce(k)
        except TypeError as e:
            from .misc import combine_exceptions
            raise combine_exceptions(TypeError('Cannot use k={}.'.format(k)), e)
        if (precision is None):
            precision = series_precision()
        # Correction series shared by the three Stirling expansions.
        S = AsymptoticExpansionGenerators._log_StirlingNegativePowers_(var, precision=max((precision - 2), 0))
        n = S.parent().gen()
        # exp(corrections(kn) - corrections((k-1)n) - corrections(n)).
        result = ((S.subs(n=(k * n)) - S.subs(n=((k - 1) * n))) - S).exp()
        from sage.rings.rational_field import QQ
        # Richer parent for the exponential growth factor b^n.
        P = S.parent().change_parameter(growth_group='(QQ_+)^{n} * {n}^QQ'.format(n=var), coefficient_ring=QQ)
        n = P.gen()
        b = ((k ** k) / ((k - 1) ** (k - 1)))
        if (b.parent() is SR):
            # Normalize radicals so the base is canonical.
            b = SCR(b).canonicalize_radical()
        result *= n.rpow(b)
        result *= (n ** ((- QQ(1)) / QQ(2)))
        if (not skip_constant_factor):
            result *= (k / (((k - 1) * 2) * SCR('pi'))).sqrt()
        return result
def SingularityAnalysis(var, zeta=1, alpha=0, beta=0, delta=0, precision=None, normalized=True):
    """Return the asymptotic expansion of the coefficients of
    ``(1 - z/zeta)^(-alpha) * (log(1/(1 - z/zeta)))^beta`` obtained by
    singularity analysis (Flajolet--Odlyzko transfer).

    INPUT:

    - ``var`` -- name of the asymptotic variable.
    - ``zeta`` -- (default: ``1``) location of the singularity.
    - ``alpha``, ``beta``, ``delta`` -- exponents of the singular factor,
      its logarithm, and the iterated logarithm; ``delta != 0`` raises
      ``NotImplementedError``.
    - ``precision`` -- number of summands; defaults to
      ``series_precision()``.
    - ``normalized`` -- if ``False``, ``beta`` and ``delta`` must be
      integers (the un-normalized logarithm is used).
    """
    from itertools import islice, count
    from .asymptotic_ring import AsymptoticRing
    from .growth_group import ExponentialGrowthGroup, MonomialGrowthGroup, GenericNonGrowthGroup
    from sage.arith.misc import falling_factorial
    from sage.categories.cartesian_product import cartesian_product
    from sage.functions.other import binomial
    from sage.functions.gamma import gamma
    from sage.calculus.calculus import limit
    from sage.misc.cachefunc import cached_function
    from sage.arith.srange import srange
    from sage.rings.integer_ring import ZZ
    from sage.symbolic.ring import SR
    SCR = SR.subring(no_variables=True)
    s = SR('s')
    # 1/Gamma(alpha); coerced into the constants subring when possible.
    iga = (1 / gamma(alpha))
    if (iga.parent() is SR):
        try:
            iga = SCR(iga)
        except TypeError:
            pass
    coefficient_ring = iga.parent()
    if (beta != 0):
        coefficient_ring = SCR

    # NOTE(review): the original source contained the bare name "_function"
    # on the next line (a NameError at call time). It is restored to the
    # @cached_function decorator, which is imported above — this matches
    # the upstream SageMath implementation of SingularityAnalysis.
    @cached_function
    def inverse_gamma_derivative(shift, r):
        # r-th derivative of 1/Gamma evaluated at alpha - shift; the r == 0
        # case uses the falling-factorial shortcut instead of a limit.
        if (r == 0):
            result = (iga * falling_factorial((alpha - 1), shift))
        else:
            result = limit((1 / gamma(s)).diff(s, r), s=(alpha - shift))
        try:
            return coefficient_ring(result)
        except TypeError:
            return result
    if isinstance(alpha, int):
        alpha = ZZ(alpha)
    if isinstance(beta, int):
        beta = ZZ(beta)
    if isinstance(delta, int):
        delta = ZZ(delta)
    if (precision is None):
        precision = series_precision()
    if ((not normalized) and (not ((beta in ZZ) and (delta in ZZ)))):
        raise ValueError('beta and delta must be integers')
    if (delta != 0):
        raise NotImplementedError('not implemented for delta!=0')
    # Build the growth group: exponential part for zeta != 1, monomial part
    # for n^(alpha-1), and a logarithmic part when beta != 0.
    groups = []
    non_growth_groups = []
    if (zeta != 1):
        E = ExponentialGrowthGroup.factory((~ zeta).parent(), var, return_factors=True)
        for factor in E:
            if isinstance(factor, GenericNonGrowthGroup):
                non_growth_groups.append(factor)
            else:
                groups.append(factor)
    groups.append(MonomialGrowthGroup(alpha.parent(), var))
    if (beta != 0):
        groups.append(MonomialGrowthGroup(beta.parent(), 'log({})'.format(var)))
    groups.extend(non_growth_groups)
    group = cartesian_product(groups)
    A = AsymptoticRing(growth_group=group, coefficient_ring=coefficient_ring, default_prec=precision)
    n = A.gen()
    if (zeta == 1):
        exponential_factor = 1
    else:
        exponential_factor = A(n.rpow((~ zeta)))
    polynomial_factor = A((n ** (alpha - 1)))
    if (beta != 0):
        log_n = n.log()
        logarithmic_factor = (log_n ** beta)
    else:
        log_n = 1
        logarithmic_factor = 1
    # Enumerate the (k, r) index pairs of the double sum; processed in
    # reverse so the error term comes from the first (smallest) pair.
    if ((beta in ZZ) and (beta >= 0)):
        it = ((k, r) for k in count() for r in srange((beta + 1)))
        k_max = precision
    else:
        it = ((0, r) for r in count())
        k_max = 0
    it = reversed(list(islice(it, (int(precision) + 1))))
    if normalized:
        beta_denominator = beta
    else:
        beta_denominator = 0
    L = _sa_coefficients_lambda_(max(1, k_max), beta=beta_denominator)
    (k, r) = next(it)
    result = ((n ** (- k)) * (log_n ** (- r))).O()
    if ((alpha in ZZ) and (beta == 0)):
        if ((alpha > 0) and (alpha <= precision)):
            # Integer alpha: the expansion terminates exactly.
            result = A(0)
        elif ((alpha <= 0) and (precision > 0)):
            from .misc import NotImplementedOZero
            raise NotImplementedOZero(A, exact_part=A.zero())
    for (k, r) in it:
        result += (((binomial(beta, r) * sum((((L[(k, ell)] * ((- 1) ** ell)) * inverse_gamma_derivative(ell, r)) for ell in srange(k, ((2 * k) + 1)) if ((k, ell) in L)))) * (n ** (- k))) * (log_n ** (- r)))
    result *= ((exponential_factor * polynomial_factor) * logarithmic_factor)
    return result
# NOTE(review): stray bare expression left over from source mangling — in
# upstream SageMath the following function carries the decorator
# "@experimental(20050)" (trac ticket 20050); only its argument survived.
# As written it evaluates to a harmless int constant at import time.
(20050)
def ImplicitExpansion(var, phi, tau=None, precision=None):
    """Return an asymptotic expansion of a function implicitly defined by
    ``y(Z) = Z * phi(y(Z))`` at its dominant singularity, as a series in
    powers of ``Z^(-1/2)`` (square-root singularity ansatz).

    INPUT:

    - ``var`` -- name of the asymptotic variable ``Z``.
    - ``phi`` -- a callable; must satisfy ``phi(0) != 0`` and must not be
      affine-linear.
    - ``tau`` -- the fundamental constant with ``phi(tau) = tau*phi'(tau)``;
      computed when not given.
    - ``precision`` -- number of summands; defaults to the ring's default
      precision.
    """
    from sage.symbolic.ring import SR
    from sage.rings.rational_field import QQ
    from sage.rings.integer_ring import ZZ
    from sage.rings.asymptotic.asymptotic_ring import AsymptoticRing
    from sage.arith.srange import srange
    (y, u) = (SR('y'), SR('u'))
    one_half = (QQ(1) / 2)
    # Reject phi with phi(0) == 0 or affine-linear phi: no square-root
    # singularity arises in those degenerate cases.
    if (phi(QQ(0)).is_zero() or (phi(u) == (phi(0) + (u * phi(u).diff(u)(u=0))))):
        raise ValueError('The function phi does not satisfy the requirements')
    if (tau is None):
        tau = _fundamental_constant_implicit_function_(phi=phi)
    def H(y):
        # H(y) = tau/phi(tau) - y/phi(y); vanishes to second order at tau.
        return ((tau / phi(tau)) - (y / phi(y)))
    A = AsymptoticRing(growth_group='{Z}^QQ'.format(Z=var), coefficient_ring=SR, default_prec=precision)
    if (precision is None):
        precision = ZZ(A.default_prec)
    Z = A.gen()
    def ansatz(prec=precision):
        # Generic expansion in powers of Z^(-1/2): the leading coefficient
        # is known in closed form, the rest are symbolic d2, d3, ... to be
        # solved for below.
        if (prec < 1):
            return A(1).O()
        if (prec == 1):
            return ((1 / Z) ** one_half).O()
        return ((((- (((2 * tau) / phi(tau)) / H(y).diff(y, 2)(y=tau)).sqrt()) * ((1 / Z) ** one_half)) + sum(((SR('d{}'.format(j)) * ((1 / Z) ** (j * one_half))) for j in srange(2, prec)))) + ((1 / Z) ** (prec * one_half)).O())
    z = SR('z')
    # Taylor-expand H around tau and substitute the ansatz, collecting the
    # resulting powers of Z^(-1/2).
    z_expansion = (sum((((H(z).diff(z, k)(z=tau) / k.factorial()) * (ansatz(prec=((precision + 2) - k)) ** k)) for k in srange(2, precision))) + ((1 / Z) ** (precision * one_half)).O())
    solution_dict = dict()
    # Solve for the coefficients d_k successively, lowest power first; each
    # equation is linear in its new unknown once earlier ones are plugged in.
    for k in srange(2, (precision - 1)):
        coef = z_expansion.monomial_coefficient(((1 / Z) ** ((k + 1) * one_half)))
        current_var = SR('d{k}'.format(k=k))
        solution_dict[current_var] = coef.subs(solution_dict).simplify_rational().solve(current_var)[0].rhs()
    return (A(tau) + ansatz(prec=(precision - 1)).map_coefficients((lambda term: term.subs(solution_dict).simplify_rational())))
# NOTE(review): stray bare expression — same mangling as above; upstream
# SageMath decorates the following function with "@experimental(20050)".
# Harmless no-op as written.
(20050)
def ImplicitExpansionPeriodicPart(var, phi, period, tau=None, precision=None):
    """Return the expansion of the periodic part of an implicitly defined
    function ``y = Z*phi(y)`` when ``phi`` has a ``period``-fold symmetry.

    The problem is reduced to the aperiodic case by lifting ``phi`` to
    ``u |-> phi(u^(1/period))^period`` and taking the ``period``-th root
    of the resulting expansion.
    """
    if tau is None:
        tau = _fundamental_constant_implicit_function_(phi=phi)

    def lifted_phi(u):
        # phi transported to the aperiodic setting.
        return phi(u ** (1 / period)) ** period

    base_expansion = asymptotic_expansions.ImplicitExpansion(
        var, phi=lifted_phi, tau=tau ** period, precision=precision)
    rho = tau / phi(tau)
    Z = base_expansion.parent().gen()
    scaled = base_expansion / (1 - (1 / Z))
    return (scaled ** (1 / period)) / rho
def InverseFunctionAnalysis(var, phi, tau=None, period=1, precision=None):
    """Return the coefficient asymptotics of the inverse function defined
    by ``y = Z*phi(y)``, handling a ``period``-fold symmetry of ``phi``.
    """
    if tau is None:
        tau = _fundamental_constant_implicit_function_(phi=phi)
    rho = tau / phi(tau)

    if period == 1:
        # Aperiodic case: transfer the expansion at the singularity rho.
        aperiodic = asymptotic_expansions.ImplicitExpansion(
            var=var, phi=phi, tau=tau, precision=precision)
        return aperiodic._singularity_analysis_(var, zeta=rho, precision=precision)

    # Periodic case: analyze the periodic part at rho^period, then rescale
    # the asymptotic variable back via n -> (n - 1) / period.
    periodic = asymptotic_expansions.ImplicitExpansionPeriodicPart(
        var=var, phi=phi, period=period, tau=tau, precision=precision)
    growth = periodic._singularity_analysis_(var, zeta=rho ** period, precision=precision)
    n = growth.parent().gen()
    return growth.subs({n: (n - 1) / period})
def sum_bn_array(arr, modulus):
    """Return the sum of *arr* modulo *modulus* as a Bn big number.

    Plain integers in *arr* (and an integer *modulus*) are promoted to
    Bn before the modular addition.
    """
    mod = modulus if isinstance(modulus, Bn) else Bn(modulus)
    total = Bn(0)
    for value in arr:
        term = value if isinstance(value, Bn) else Bn(value)
        total = total.mod_add(term, mod)
    return total
class Conv3DBlock(nn.Module):
    """SingleConv3DBlock followed by BatchNorm3d and an in-place ReLU."""

    def __init__(self, in_planes, out_planes, kernel_size=3):
        super().__init__()
        layers = [
            SingleConv3DBlock(in_planes, out_planes, kernel_size),
            nn.BatchNorm3d(out_planes),
            nn.ReLU(True),
        ]
        self.block = nn.Sequential(*layers)

    def forward(self, x):
        return self.block(x)
# NOTE(review): the original source had the bare line "(ignore_result=True)"
# here, which is a SyntaxError on its own — almost certainly the argument
# list of a stripped task decorator (e.g. Celery's
# "@app.task(ignore_result=True)"). Restore the full decorator once the
# owning app object is known.
def crawl_follower_fans(uid):
    """Crawl both followers and fans of user *uid* and store new seed ids.

    Skips users whose relations were already crawled
    (``seed.other_crawled != 0``); marks the seed as crawled afterwards.
    """
    seed = SeedidsOper.get_seed_by_id(uid)
    if seed.other_crawled == 0:
        # NOTE(review): assumed meaning of the 1/2 flags (followers vs fans)
        # — confirm against get_fans_or_followers_ids.
        relation_ids = get_fans_or_followers_ids(uid, 1)
        relation_ids.extend(get_fans_or_followers_ids(uid, 2))
        datas = set(relation_ids)
        if datas:
            SeedidsOper.insert_seeds(datas)
        SeedidsOper.set_seed_other_crawled(uid)
def sage_getargspec(obj):
    """Return a best-effort ``inspect.FullArgSpec`` for ``obj``, covering
    Cython functions, lazy attributes, abstract methods and callable
    instances that plain ``inspect`` cannot handle.

    Strategy chain, first hit wins:

    1. classes -> argspec of their ``__call__``;
    2. lazy attributes / abstract methods -> parse their source;
    3. an explicit ``_sage_argspec_`` hook on the object;
    4. an argspec embedded in the (unformatted) docstring;
    5. the Python code object;
    6. class instances -> ``_sage_src_`` source, a ``functools.partial``'s
       wrapped function, or the class ``__call__``;
    7. Cython source parsing, with a permissive ``(*args, **kwds)``
       fallback when everything else fails.
    """
    from sage.misc.lazy_attribute import lazy_attribute
    from sage.misc.abstract_method import AbstractMethod
    if inspect.isclass(obj):
        # Calling a class goes through __call__; recurse on that.
        return sage_getargspec(obj.__call__)
    if isinstance(obj, (lazy_attribute, AbstractMethod)):
        # These wrappers expose their source; parse it like Cython code.
        source = sage_getsource(obj)
        return inspect.FullArgSpec(*_sage_getargspec_cython(source))
    if (not callable(obj)):
        raise TypeError('obj is not a code object')
    try:
        # Objects may declare their own argspec explicitly.
        return inspect.FullArgSpec(*obj._sage_argspec_())
    except (AttributeError, TypeError):
        pass
    # Try an argspec embedded in the docstring (common for Cython).
    docstring = _sage_getdoc_unformatted(obj)
    try:
        name = obj.__name__
    except AttributeError:
        name = type(obj).__name__
    argspec = _extract_embedded_signature(docstring, name)[1]
    if (argspec is not None):
        return argspec
    if hasattr(obj, '__code__'):
        # Plain Python function/method: read the code object directly.
        try:
            (args, varargs, varkw) = inspect.getargs(obj.__code__)
            return inspect.FullArgSpec(args, varargs, varkw, obj.__defaults__, kwonlyargs=[], kwonlydefaults=None, annotations={})
        except (TypeError, AttributeError):
            pass
    if isclassinstance(obj):
        if hasattr(obj, '_sage_src_'):
            # Parse the first parenthesized group of the instance's source
            # as a dummy def to recover the signature.
            source = sage_getsource(obj)
            try:
                proxy = (('def dummy' + _grep_first_pair_of_parentheses(source)) + ':\n return')
                return _sage_getargspec_from_ast(proxy)
            except SyntaxError:
                pass
        if isinstance(obj, functools.partial):
            base_spec = sage_getargspec(obj.func)
            return base_spec
        return sage_getargspec(obj.__class__.__call__)
    elif (hasattr(obj, '__objclass__') and hasattr(obj, '__name__') and (obj.__name__ == 'next')):
        # NOTE(review): this branch returns a plain 4-tuple rather than a
        # FullArgSpec, unlike every other branch — confirm callers
        # tolerate the bare tuple.
        return (['self'], None, None, None)
    else:
        # Last resort: read the source (may fail for builtins).
        try:
            source = sage_getsource(obj)
        except TypeError:
            source = ''
        if source:
            return inspect.FullArgSpec(*_sage_getargspec_cython(source))
        else:
            func_obj = obj
    # Fallback path for objects without retrievable source: inspect the
    # code object, then Cython parsing, then a permissive (*args, **kwds).
    try:
        (args, varargs, varkw) = inspect.getargs(func_obj.__code__)
    except AttributeError:
        try:
            (args, varargs, varkw) = inspect.getargs(func_obj)
        except TypeError:
            try:
                return inspect.FullArgSpec(*_sage_getargspec_cython(sage_getsource(obj)))
            except TypeError:
                args = []
                varargs = 'args'
                varkw = 'kwds'
    try:
        defaults = func_obj.__defaults__
    except AttributeError:
        defaults = None
    return inspect.FullArgSpec(args, varargs, varkw, defaults, kwonlyargs=[], kwonlydefaults=None, annotations={})
def get_video_names_and_annotations(data, subset):
    """Collect video names (prefixed ``v_``) and annotations for *subset*.

    Parameters
    ----------
    data : dict
        Parsed annotation JSON with a ``'database'`` mapping of
        ``key -> {'subset': ..., 'annotations': ...}``.
    subset : str
        Subset to select (e.g. ``'training'``, ``'validation'``,
        ``'testing'``).

    Returns
    -------
    (list, list)
        Video names, and their annotations. For the ``'testing'`` subset
        annotations are intentionally left empty (labels are unknown), so
        the two lists differ in length there.

    The original duplicated the name-append in both branches of an
    if/else; the branches are merged here.
    """
    video_names = []
    annotations = []
    for key, value in data['database'].items():
        if value['subset'] != subset:
            continue
        video_names.append('v_{}'.format(key))
        if subset != 'testing':
            annotations.append(value['annotations'])
    return (video_names, annotations)
def main(config_path: str):
    """Run a Ray Tune hyper-parameter search configured by a JSON file.

    List-valued config entries become categorical choices; two-element
    ``[low, high]`` entries become log-uniform sampling ranges. Trials
    are scheduled with ASHA (early stopping), and the best trial by final
    validation loss is printed.
    """
    config = read_json(config_path)
    # Categorical search spaces: each config value is a list of options.
    config['dropout'] = tune.choice(config['dropout'])
    config['num_residuals_per_block'] = tune.choice(config['num_residuals_per_block'])
    config['num_blocks'] = tune.choice(config['num_blocks'])
    config['batch_size'] = tune.choice(config['batch_size'])
    # Continuous search spaces, sampled log-uniformly from [low, high].
    config['lr'] = tune.loguniform(*config['lr'])
    config['weight_decay'] = tune.loguniform(*config['weight_decay'])
    config['gamma'] = tune.loguniform(*config['gamma'])
    # ASHA prunes under-performing trials aggressively (grace period 1).
    scheduler = ASHAScheduler(metric='loss', mode='min', max_t=config['max_num_epochs'], grace_period=1, reduction_factor=2)
    reporter = CLIReporter(metric_columns=['loss', 'accuracy', 'training_iteration'])
    result = tune.run(partial(train), resources_per_trial={'cpu': config['cpus'], 'gpu': config['gpus_per_trial']}, config=config, num_samples=config['num_samples'], scheduler=scheduler, progress_reporter=reporter)
    # Report the best trial by its last recorded validation loss.
    best_trial = result.get_best_trial('loss', 'min', 'last')
    print('Best trial config: {}'.format(best_trial.config))
    print('Best trial final validation loss: {}'.format(best_trial.last_result['loss']))
    print('Best trial final validation accuracy: {}'.format(best_trial.last_result['accuracy']))
def test_saga_score():
    """The pure-Python and the compiled SAGA classifiers must reach the
    same accuracy after one identically-parameterized training pass."""
    (X, y) = make_classification(n_samples=1000, random_state=0)
    # Identical hyper-parameters: no regularization, a single epoch, and a
    # fixed random state so both implementations see the same ordering.
    pysaga = PySAGAClassifier(eta=0.001, alpha=0.0, beta=0.0, max_iter=1, penalty=None, random_state=0)
    saga = SAGAClassifier(eta=0.001, alpha=0.0, beta=0.0, max_iter=1, penalty=None, random_state=0)
    pysaga.fit(X, y)
    saga.fit(X, y)
    assert (pysaga.score(X, y) == saga.score(X, y))
class ConvTranspose3d(_ConvTransposeNd):
    """3D transposed convolution (a.k.a. deconvolution) layer.

    Scalar spatial arguments are expanded to 3-tuples and forwarded to the
    ``_ConvTransposeNd`` base; only ``'zeros'`` padding is supported.
    """

    def __init__(self, in_channels: int, out_channels: int, kernel_size: _size_3_t, stride: _size_3_t=1, padding: _size_3_t=0, output_padding: _size_3_t=0, groups: int=1, bias: bool=True, dilation: _size_3_t=1, padding_mode: str='zeros'):
        # Normalize every spatial argument to a (d, h, w) 3-tuple.
        kernel_size = _triple(kernel_size)
        stride = _triple(stride)
        padding = _triple(padding)
        dilation = _triple(dilation)
        output_padding = _triple(output_padding)
        # The positional True marks this as a *transposed* convolution.
        super(ConvTranspose3d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, True, output_padding, groups, bias, padding_mode)

    def forward(self, input: Tensor, output_size: Optional[List[int]]=None) -> Tensor:
        if (self.padding_mode != 'zeros'):
            raise ValueError('Only `zeros` padding mode is supported for ConvTranspose3d')
        # Resolve the per-call output_padding needed to produce output_size
        # (transposed convs map one input size to several output sizes).
        output_padding = self._output_padding(input, output_size, self.stride, self.padding, self.kernel_size, self.dilation)
        return F.conv_transpose3d(input, self.weight, self.bias, self.stride, self.padding, output_padding, self.groups, self.dilation)
def entity_tag(length, tag_fmt='IOB'):
    """Return a list of *length* tags for a single entity span.

    The scheme is matched as a *set* of characters, so the order of the
    letters in *tag_fmt* does not matter:

    - ``'IOB'``   -- ``B`` at the start, ``I`` elsewhere.
    - ``'IOBES'`` / ``'BILOU'`` -- ``S`` for a single token, otherwise
      ``B ... I ... E``.  (Note: BILOU spans are emitted with the IOBES
      alphabet, i.e. ``S``/``E`` rather than ``U``/``L``.)
    - ``'IO'``    -- every token tagged ``I``.

    An unrecognized scheme yields all-``'O'`` tags. A non-positive length
    returns an empty list (the original indexed ``tags[0]`` and raised
    IndexError on empty spans).
    """
    if length <= 0:
        # Guard added: empty spans previously crashed for IOB/IOBES.
        return []
    tags = ['O'] * length
    scheme = set(tag_fmt)
    if scheme == set('IOB'):
        tags[0] = 'B'
        tags[1:] = ['I'] * (length - 1)
    elif scheme == set('IOBES') or scheme == set('BILOU'):
        if length == 1:
            tags[0] = 'S'
        else:
            tags[0] = 'B'
            tags[1:-1] = ['I'] * (length - 2)
            tags[-1] = 'E'
    elif scheme == set('IO'):
        tags = ['I'] * length
    return tags
def get_skinning_decoder(cfg, device, dim=3, c_dim=0):
    """Build the forward and backward skinning decoders from the config.

    Returns ``(decoder_fwd, decoder_bwd)``; both are ``None`` when no
    decoder is configured under ``cfg['model']['skinning_decoder']``.
    Each decoder predicts 24 outputs (one weight per SMPL-style joint —
    presumably; confirm against the decoder definitions).
    """
    name = cfg['model']['skinning_decoder']
    kwargs = cfg['model']['skinning_decoder_kwargs']
    if name is None:
        return (None, None)

    def build():
        # Two independent instances share the same configuration.
        return models.decoder_dict[name](dim=dim, out_dim=24, c_dim=c_dim, **kwargs).to(device)

    return (build(), build())
class Block2(nn.Module):
    """Xception-style residual block of separable convolutions.

    ``forward`` returns both the block output and a pre-downsampling copy
    of the features (a low-level feature tap for the caller).

    NOTE(review): ``self.block2_lastconv`` is only assigned when
    ``stride != 1``, yet ``forward`` calls it unconditionally — an
    instance built with ``stride == 1`` raises AttributeError in forward.
    Confirm all construction sites pass ``stride != 1``.
    """

    def __init__(self, inplanes, planes, reps, stride=1, dilation=1, start_with_relu=True, grow_first=True, is_last=False):
        super(Block2, self).__init__()
        # 1x1 projection shortcut when the shape changes; identity otherwise.
        if ((planes != inplanes) or (stride != 1)):
            self.skip = nn.Conv2d(inplanes, planes, 1, stride=stride, bias=False)
            self.skipbn = SynchronizedBatchNorm2d(planes)
        else:
            self.skip = None
        self.relu = nn.ReLU(inplace=True)
        rep = []
        filters = inplanes
        if grow_first:
            # Widen to `planes` already in the first separable conv.
            rep.append(self.relu)
            rep.append(SeparableConv2d_same(inplanes, planes, 3, stride=1, dilation=dilation))
            filters = planes
        for i in range((reps - 1)):
            rep.append(self.relu)
            rep.append(SeparableConv2d_same(filters, filters, 3, stride=1, dilation=dilation))
        if (not grow_first):
            # Widen only in the last separable conv instead.
            rep.append(self.relu)
            rep.append(SeparableConv2d_same(inplanes, planes, 3, stride=1, dilation=dilation))
        if (not start_with_relu):
            # Drop the leading ReLU from the sequence.
            rep = rep[1:]
        if (stride != 1):
            # Strided downsampling conv, applied after `rep` in forward.
            self.block2_lastconv = nn.Sequential(*[self.relu, SeparableConv2d_same(planes, planes, 3, stride=2, dilation=dilation)])
        if is_last:
            rep.append(SeparableConv2d_same(planes, planes, 3, stride=1))
        self.rep = nn.Sequential(*rep)

    def forward(self, inp):
        x = self.rep(inp)
        # Copy before downsampling: exposed as the low-level feature output.
        low_middle = x.clone()
        x1 = x
        x1 = self.block2_lastconv(x1)
        if (self.skip is not None):
            skip = self.skip(inp)
            skip = self.skipbn(skip)
        else:
            skip = inp
        x1 += skip
        return (x1, low_middle)
def process_checkpoint(in_file):
    """Copy *in_file* to a name carrying the first 8 hex chars of its
    SHA-256 digest (``<stem>-<sha8>.pth``) and print the final filename.

    Fixes over the original:

    - ``subprocess.Popen(['cp', ...])`` returned immediately, so the
      checksum could run before the copy finished (race); the copy and
      move are now synchronous.
    - The external ``cp`` / ``sha256sum`` / ``mv`` tools are replaced
      with portable stdlib calls (``shutil`` + ``hashlib``).
    """
    import hashlib
    import shutil

    tmp_file = in_file + '.tmp'
    shutil.copyfile(in_file, tmp_file)  # synchronous: completes before hashing
    # Stream the file through SHA-256 in 1 MiB chunks.
    sha = hashlib.sha256()
    with open(tmp_file, 'rb') as f:
        for chunk in iter(lambda: f.read(1 << 20), b''):
            sha.update(chunk)
    digest = sha.hexdigest()
    out_file = in_file
    if out_file.endswith('.pth'):
        out_file = out_file[:-4]
    final_file = out_file + f'-{digest[:8]}.pth'
    assert final_file != in_file, 'The output filename is the same as the input file.'
    print('Output file: {}'.format(final_file))
    shutil.move(tmp_file, final_file)  # synchronous rename/move
class SentencepieceTokenizer():
    """Thin wrapper around a SentencePiece model.

    When *model_path* is empty, a default model is fetched via
    ``download_model(ModelTypeEnum.SENTENCEPIECE)``.
    """

    def __init__(self, model_path: str=''):
        path = model_path or download_model(ModelTypeEnum.SENTENCEPIECE)
        self.model = bsp.SentencePieceProcessor()
        self.model.Load(path)

    def tokenize(self, text: str) -> List[str]:
        """Split *text* into sentencepiece tokens."""
        return self.model.EncodeAsPieces(text)

    def text2id(self, text: str) -> List[int]:
        """Encode *text* as a list of piece ids."""
        return self.model.EncodeAsIds(text)

    def id2text(self, ids: List[int]) -> str:
        """Decode a list of piece ids back into text."""
        return self.model.DecodeIds(ids)
def _shadows_builtin_name(name: str) -> bool:
return ((name in builtins.__dict__) or (name in keyword.kwlist)) |
class RCCAModule(nn.Module):
    """Recurrent Criss-Cross Attention head (CCNet-style).

    Reduces channels, applies criss-cross attention ``recurrence`` times,
    then fuses the attended features with the input and predicts
    per-pixel class logits.
    """

    def __init__(self, in_channels, out_channels, num_classes):
        super(RCCAModule, self).__init__()
        # Work at a quarter of the input channel width.
        inter_channels = (in_channels // 4)
        self.conv1a = nn.Sequential(nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False), InPlaceABNSync(inter_channels))
        self.cca = CrissCrossAttention(inter_channels)
        self.conv1b = nn.Sequential(nn.Conv2d(inter_channels, inter_channels, 3, padding=1, bias=False), InPlaceABNSync(inter_channels))
        # Fuse original input with attended features, then 1x1 classifier.
        self.bottleneck = nn.Sequential(nn.Conv2d((in_channels + inter_channels), out_channels, kernel_size=3, padding=1, dilation=1, bias=False), InPlaceABNSync(out_channels), nn.Dropout2d(0.1), nn.Conv2d(out_channels, num_classes, kernel_size=1, stride=1, padding=0, bias=True))

    def forward(self, x, recurrence=1):
        output = self.conv1a(x)
        # Repeated criss-cross attention; two iterations already propagate
        # information across the full image (per the CCNet design).
        for i in range(recurrence):
            output = self.cca(output)
        output = self.conv1b(output)
        output = self.bottleneck(torch.cat([x, output], 1))
        return output
class ReferenceArea(AreaMetric):
    """Area metric evaluated on the reference segmentation.

    ``slice_number`` is forwarded to ``_calculate_area``; the default is
    -1 (presumably meaning the whole volume / all slices — confirm
    against ``AreaMetric._calculate_area``).
    """

    def __init__(self, slice_number: int=(- 1), metric: str='REFAREA'):
        super().__init__(metric)
        self.slice_number = slice_number

    def calculate(self):
        # self.reference is provided by the AreaMetric base class.
        return self._calculate_area(self.reference, self.slice_number)
def lora_iterframes(module):
    """Lazily yield frames fetched from *module*.

    Iteration stops when ``fetch_frame()`` returns ``None`` or when a
    yielded frame reports no further data via its ``'has_more'`` flag.
    """
    current = module.fetch_frame()
    if current is None:
        return []
    while current is not None:
        yield current
        if current['has_more'] == False:
            current = None
        else:
            current = module.fetch_frame()
class TableSummaryLabel():
    """A two-cell label row displayed under a table widget, kept aligned
    with the table's two columns (e.g. a "Total" summary line).
    """

    def __init__(self, target_table, table_col_widths, label_key, label_value):
        # Only the two-column layout is supported: widths must describe
        # exactly (index column, value column), in percent.
        if (len(table_col_widths) != 2):
            raise NotImplementedError('Currently TableSummaryLabel can only be created for a table widget with exactly two columns (including index column)')
        self.target_table = target_table
        self.table_col_widths = table_col_widths
        self.widget = self._create(label_key, label_value)

    def update_and_resize(self, value):
        """Set the label's value cell and re-align the cell widths with
        the target table (compensating for its scrollbar when shown)."""
        self.widget.children[1].value = str(value)
        try:
            table_width = int(self.target_table.layout.width.rstrip('px'))
        except AttributeError:
            # No fixed width on the table layout -> nothing to align to.
            logger.warning("target_table doesn't have any fixed width defined, label cannot be resized!", exc_info=1)
            return
        max_rows_allowed = self.target_table.grid_options['maxVisibleRows']
        if (len(self.target_table.df) > max_rows_allowed):
            # Table shows a vertical scrollbar (~12px wide); shrink to match.
            table_width -= 12
        # Split the table width between the two cells by percentage.
        self.widget.children[0].layout.width = f'{((table_width * self.table_col_widths[0]) / 100)}px'
        self.widget.children[1].layout.width = f'{((table_width * self.table_col_widths[1]) / 100)}px'

    def _create(self, key, value):
        # Build the [key, value] HTML cells; flex '0 0 auto' keeps each
        # cell at its assigned size.
        component_layout_options = dict(flex='0 0 auto', padding='0px 2px', margin='0px')
        return ipw.Box([ipw.HTML(f"<div style='text-align:right;'> <b>{key}:<b> </div>", layout=component_layout_options), ipw.HTML(str(value), layout=component_layout_options)], layout=dict(display='flex', justify_content='flex-start'))
class grounding_dataset(Dataset):
    """Visual-grounding dataset loaded from one or more JSON files.

    In ``'train'`` mode each sample is ``(image, caption, image id)``
    where the id is a dense integer per distinct image file; otherwise it
    is ``(image, caption, ref_id)``.
    """

    def __init__(self, ann_file, transform, image_root, max_words=30, mode='train'):
        self.ann = []
        # Concatenate the annotation lists of all provided files.
        for f in ann_file:
            self.ann += json.load(open(f, 'r'))
        self.transform = transform
        self.image_root = image_root
        self.max_words = max_words
        self.mode = mode
        if (self.mode == 'train'):
            # Map each distinct image filename to a dense integer id.
            self.img_ids = {}
            n = 0
            for ann in self.ann:
                img_id = ann['image'].split('/')[(- 1)]
                if (img_id not in self.img_ids.keys()):
                    self.img_ids[img_id] = n
                    n += 1

    def __len__(self):
        return len(self.ann)

    def __getitem__(self, index):
        ann = self.ann[index]
        image_path = os.path.join(self.image_root, ann['image'])
        image = Image.open(image_path).convert('RGB')
        image = self.transform(image)
        # Normalize/truncate the caption to at most max_words tokens.
        caption = pre_caption(ann['text'], self.max_words)
        if (self.mode == 'train'):
            img_id = ann['image'].split('/')[(- 1)]
            return (image, caption, self.img_ids[img_id])
        else:
            return (image, caption, ann['ref_id'])
class QuantumCliffordAlgebraRootUnity(QuantumCliffordAlgebra):
    """Quantum Clifford algebra with ``q`` a ``2k``-th root of unity.

    Basis monomials are indexed by pairs ``(p, w)``: each ``p[i]`` in
    ``{-1, 0, 1, 2}`` encodes the fermionic content of mode ``i``
    (``psid_i``, ``1``, ``psi_i``, ``psi_i*psid_i`` respectively) and
    each ``w[i]`` in ``{0, ..., 2k-1}`` is the exponent of the
    root-of-unity generator ``w_i``.
    """

    def __init__(self, n, k, q, F):
        """Initialize ``self`` with ``n`` modes over the base ring ``F``."""
        psi = cartesian_product(([((- 1), 0, 1, 2)] * n))
        indices = [(tuple(p), tuple(w)) for p in psi for w in product(list(range((2 * k))), repeat=n)]
        super().__init__(n, k, q, F, psi, indices)

    def _repr_term(self, m):
        """Return a plain-text representation of the basis monomial ``m``."""
        (p, v) = m
        def ppr(i):
            # Text form of the psi-content of mode i (0 contributes nothing).
            val = p[i]
            if (val == (- 1)):
                return ('psid%s' % i)
            elif (val == 1):
                return ('psi%s' % i)
            elif (val == 2):
                return ('psi%s*psid%s' % (i, i))
        rp = '*'.join((ppr(i) for i in range(self._n) if (p[i] != 0)))
        gen_str = (lambda e: ('' if (e == 1) else ('^%s' % e)))
        rv = '*'.join(((('w%s' % i) + gen_str(v[i])) for i in range(self._n) if (v[i] != 0)))
        if rp:
            if rv:
                return ((rp + '*') + rv)
            return rp
        if rv:
            return rv
        return '1'

    def _latex_term(self, m):
        """Return a LaTeX representation of the basis monomial ``m``."""
        (p, v) = m
        def ppr(i):
            val = p[i]
            if (val == (- 1)):
                return ('\\psi^{\\dagger}_{%s}' % i)
            elif (val == 1):
                return ('\\psi_{%s}' % i)
            elif (val == 2):
                return ('\\psi_{%s}\\psi^{\\dagger}_{%s}' % (i, i))
        rp = ''.join((ppr(i) for i in range(self._n) if (p[i] != 0)))
        gen_str = (lambda e: ('' if (e == 1) else ('^{%s}' % e)))
        rv = ''.join(((('\\omega_{%s}' % i) + gen_str(v[i])) for i in range(self._n) if (v[i] != 0)))
        if ((not rp) and (not rv)):
            return '1'
        return (rp + rv)

    # NOTE(review): the original source had the bare name "_method" here,
    # which raises NameError when the class body executes. It is restored
    # to @cached_method; upstream SageMath imports cached_method from
    # sage.misc.cachefunc at module level — confirm that import exists in
    # this module's header.
    @cached_method
    def product_on_basis(self, m1, m2):
        """Return the product of the basis monomials ``m1 * m2``."""
        (p1, w1) = m1
        (p2, w2) = m2
        k = self._k
        tk = (2 * k)
        # Vanishing products: psi_i^2 = psid_i^2 = 0 and the mixed cases
        # that annihilate against psi_i*psid_i.
        if any((((((p1[i] % 2) != 0) and (p1[i] == p2[i])) or ((p1[i] == 2) and (p2[i] == (- 1))) or ((p2[i] == 2) and (p1[i] == 1))) for i in range(self._n))):
            return self.zero()
        # Root-of-unity exponents add modulo 2k.
        v = [((w1[i] + w2[i]) % tk) for i in range(self._n)]
        q_power = 0
        sign = 1
        pairings = []
        p = ([0] * self._n)
        num_cross = 0
        total_cross = 0
        for i in range(self._n):
            num_cross += p2[i]
            if (p2[i] == 2):
                if (p1[i] != 0):
                    v[i] = ((v[i] + k) % tk)
                    p[i] = p1[i]
                else:
                    p[i] = p2[i]
            elif (p2[i] != 0):
                # Commuting psi-type generators past the w's costs q-powers.
                q_power += (w1[i] * p2[i])
                if (p1[i] == (- 1)):
                    # psid_i * psi_i pairing: expanded over subsets below.
                    pairings.append(i)
                    total_cross -= 1
                    p[i] = None
                elif (p1[i] == 1):
                    total_cross -= 1
                    p[i] = 2
                elif (p1[i] == 2):
                    q_power += k
                    v[i] = ((v[i] + k) % tk)
                    p[i] = p2[i]
                else:
                    p[i] = p2[i]
            else:
                p[i] = p1[i]
            if (abs(p1[i]) == 1):
                # Fermionic sign from crossings of odd generators.
                total_cross += num_cross
        if (total_cross % 2):
            sign = (- sign)
        def key(X):
            # Resolve each pairing i either to psi_i*psid_i (i in X) or to
            # the scalar branch with a k-shifted w-exponent.
            e = list(v)
            for i in pairings:
                if (i in X):
                    p[i] = 2
                else:
                    p[i] = 0
                    e[i] = ((e[i] + k) % tk)
            return (self._psi(p), tuple(e))
        q = self._q
        ret = {key(X): ((((- 1) ** len(X)) * sign) * (q ** (q_power + (k * (len(pairings) % 2))))) for X in powerset(pairings)}
        return self._from_dict(ret)

    class Element(QuantumCliffordAlgebra.Element):
        def inverse(self):
            """Return the inverse of ``self``.

            Fast path for a single monomial purely in the ``w_i``
            generators (a unit); everything else falls back to the
            generic inversion of the parent category.
            """
            if (not self):
                raise ZeroDivisionError
            if (len(self) != 1):
                return super().__invert__()
            Cl = self.parent()
            (((p, w), coeff),) = list(self._monomial_coefficients.items())
            if any(((p[i] != 0) for i in range(Cl._n))):
                # psi-content present: not invertible by exponent negation.
                return super().__invert__()
            tk = (2 * Cl._k)
            # Each w_i has order 2k: invert by negating exponents mod 2k.
            w = tuple([((tk - val) if val else 0) for val in w])
            return Cl.element_class(Cl, {(p, w): coeff.inverse_of_unit()})
        __invert__ = inverse
def eval_parametric_match(y_pred: np.array, y_true: np.array, ubiquitous_types: str, common_types: set, label_enc, top_n: int=10):
    """Compute top-``top_n`` accuracy with *parametric* matching.

    A prediction counts as a parametric match when its outer type
    constructor (e.g. ``List`` in ``List[str]``) equals the true type's
    outer constructor, even if the type parameters differ.

    Returns a 3-tuple of percentages:
    ``(overall, parametric-common, parametric-rare)``.

    NOTE(review): the last two ratios divide by counters that stay zero
    when ``y_true`` contains no common (resp. rare) types — confirm
    callers guarantee both classes occur, else this raises
    ZeroDivisionError.
    """
    corr_ubiq_types = 0
    all_param_common_types = 0
    corr_param_common_types = 0
    all_param_rare_types = 0
    corr_param_rare_types = 0
    # Pattern for parametric types: "Outer[Inner]".
    param_type_match = '(.+)\\[(.+)\\]'
    def pred_param_types(pred_types: np.array, true_param_type):
        # Return 1 if any prediction's outer constructor matches the true
        # type's outer constructor, else 0. (Despite the name "no_match",
        # this variable counts matches.)
        no_match = 0
        for p in label_enc.inverse_transform(pred_types):
            if re.match(param_type_match, p):
                if (true_param_type.group(1) == re.match(param_type_match, p).group(1)):
                    no_match += 1
                    break
        return no_match
    for (idx, t) in enumerate(tqdm(y_true, total=len(y_true), desc='Calculating parametric match')):
        if (t in ubiquitous_types):
            # Ubiquitous types: exact top-n hit only, no parametric fallback.
            if (t in y_pred[idx][:top_n]):
                corr_ubiq_types += 1
        elif (t in common_types):
            all_param_common_types += 1
            if (t in y_pred[idx][:top_n]):
                corr_param_common_types += 1
            else:
                # Fall back to outer-constructor (parametric) matching.
                matched_param_type = re.match(param_type_match, label_enc.inverse_transform([t])[0])
                if matched_param_type:
                    corr_param_common_types += pred_param_types(y_pred[idx], matched_param_type)
        else:
            # Everything else is treated as a rare type.
            all_param_rare_types += 1
            if (t in y_pred[idx][:top_n]):
                corr_param_rare_types += 1
            else:
                matched_param_type = re.match(param_type_match, label_enc.inverse_transform([t])[0])
                if matched_param_type:
                    corr_param_rare_types += pred_param_types(y_pred[idx], matched_param_type)
    return (((((corr_ubiq_types + corr_param_common_types) + corr_param_rare_types) / len(y_pred)) * 100.0), ((corr_param_common_types / all_param_common_types) * 100.0), ((corr_param_rare_types / all_param_rare_types) * 100.0))
def test_line_coverage_is_covered(subject_properties_mock, trace_mock):
    """Fitness must report full line coverage when the trace covers every
    known line of the subject."""
    # Two known lines (ids 0 and 1) in module 'foo'; the trace covers both.
    subject_properties_mock.existing_lines = {0: LineMetaData(0, 'foo', 0), 1: LineMetaData(0, 'foo', 1)}
    trace_mock.covered_line_ids = {0, 1}
    assert ff.compute_line_coverage_fitness_is_covered(trace_mock, subject_properties_mock)
class TestSynctex(TemporaryShowyourworkRepository):
    """Check that the generated synctex file references ms.tex under the
    expected repository-relative path."""

    def check_build(self):
        """Assert ms.tex appears in the synctex Input records at
        ``src/tex/ms.tex``; fail if it is absent entirely."""
        synctex_path = self.cwd / 'ms.synctex.gz'
        with gzip.open(synctex_path) as fh:
            content = fh.read().decode('utf-8')
        for record in content.splitlines():
            # Only "Input:" records carry source file paths.
            if not record.startswith('Input:'):
                continue
            raw = record.split(':')[-1].strip()
            if not len(raw):
                continue
            rel = Path(raw).relative_to(self.cwd)
            if rel.name == 'ms.tex':
                assert rel.as_posix() == 'src/tex/ms.tex'
                return
        raise AssertionError('ms.tex not found in synctex')
class Optimizer():
def __init__(self, name: str='Train', tf_optimizer: str='tf.train.AdamOptimizer', learning_rate: TfExpressionEx=0.001, minibatch_multiplier: TfExpressionEx=None, share: 'Optimizer'=None, use_loss_scaling: bool=False, loss_scaling_init: float=64.0, loss_scaling_inc: float=0.0005, loss_scaling_dec: float=1.0, report_mem_usage: bool=False, **kwargs):
self.name = name
self.learning_rate = learning_rate
self.minibatch_multiplier = minibatch_multiplier
self.id = self.name.replace('/', '.')
self.scope = tf.get_default_graph().unique_name(self.id)
self.optimizer_class = util.get_obj_by_name(tf_optimizer)
self.optimizer_kwargs = dict(kwargs)
self.use_loss_scaling = use_loss_scaling
self.loss_scaling_init = loss_scaling_init
self.loss_scaling_inc = loss_scaling_inc
self.loss_scaling_dec = loss_scaling_dec
self._updates_applied = False
self._devices = OrderedDict()
self._shared_optimizers = OrderedDict()
self._gradient_shapes = None
self._report_mem_usage = report_mem_usage
assert callable(self.optimizer_class)
if (share is not None):
assert isinstance(share, Optimizer)
assert (self.optimizer_class is share.optimizer_class)
assert (self.learning_rate is share.learning_rate)
assert (self.optimizer_kwargs == share.optimizer_kwargs)
self._shared_optimizers = share._shared_optimizers
def _get_device(self, device_name: str):
tfutil.assert_tf_initialized()
if (device_name in self._devices):
return self._devices[device_name]
device = util.EasyDict()
device.name = device_name
device.optimizer = None
device.loss_scaling_var = None
device.grad_raw = OrderedDict()
device.grad_clean = OrderedDict()
device.grad_acc_vars = OrderedDict()
device.grad_acc_count = None
device.grad_acc = OrderedDict()
with tfutil.absolute_name_scope((self.scope + '/Devices')), tf.device(device_name), tf.control_dependencies(None):
if (device_name not in self._shared_optimizers):
optimizer_name = (self.scope.replace('/', '_') + ('_opt%d' % len(self._shared_optimizers)))
self._shared_optimizers[device_name] = self.optimizer_class(name=optimizer_name, learning_rate=self.learning_rate, **self.optimizer_kwargs)
device.optimizer = self._shared_optimizers[device_name]
if self.use_loss_scaling:
device.loss_scaling_var = tf.Variable(np.float32(self.loss_scaling_init), trainable=False, name='loss_scaling_var')
self._devices[device_name] = device
return device
def register_gradients(self, loss: TfExpression, trainable_vars: Union[(List, dict)]) -> None:
tfutil.assert_tf_initialized()
assert (not self._updates_applied)
device = self._get_device(loss.device)
if isinstance(trainable_vars, dict):
trainable_vars = list(trainable_vars.values())
assert (isinstance(trainable_vars, list) and (len(trainable_vars) >= 1))
assert all((tfutil.is_tf_expression(expr) for expr in (trainable_vars + [loss])))
assert all(((var.device == device.name) for var in trainable_vars))
if (self._gradient_shapes is None):
self._gradient_shapes = [var.shape.as_list() for var in trainable_vars]
assert (len(trainable_vars) == len(self._gradient_shapes))
assert all(((var.shape.as_list() == var_shape) for (var, var_shape) in zip(trainable_vars, self._gradient_shapes)))
deps = []
if self._report_mem_usage:
self._report_mem_usage = False
try:
with tf.name_scope((self.id + '_mem')), tf.device(device.name), tf.control_dependencies([loss]):
deps.append(autosummary.autosummary((self.id + '/mem_usage_gb'), (tf.contrib.memory_stats.BytesInUse() / (2 ** 30))))
except tf.errors.NotFoundError:
pass
with tf.name_scope((self.id + '_grad')), tf.device(device.name), tf.control_dependencies(deps):
loss = self.apply_loss_scaling(tf.cast(loss, tf.float32))
gate = tf.train.Optimizer.GATE_NONE
grad_list = device.optimizer.compute_gradients(loss=loss, var_list=trainable_vars, gate_gradients=gate)
for (grad, var) in grad_list:
if (var not in device.grad_raw):
device.grad_raw[var] = []
device.grad_raw[var].append(grad)
def apply_updates(self, allow_no_op: bool=False) -> tf.Operation:
tfutil.assert_tf_initialized()
assert (not self._updates_applied)
self._updates_applied = True
all_ops = []
if (allow_no_op and (len(self._devices) == 0)):
with tfutil.absolute_name_scope(self.scope):
return tf.no_op(name='TrainingOp')
for (device_idx, device) in enumerate(self._devices.values()):
with tfutil.absolute_name_scope((self.scope + ('/Clean%d' % device_idx))), tf.device(device.name):
for (var, grad) in device.grad_raw.items():
grad = [g for g in grad if (g is not None)]
grad = [tf.cast(g, tf.float32) for g in grad]
if (len(grad) == 0):
grad = tf.zeros(var.shape)
elif (len(grad) == 1):
grad = grad[0]
else:
grad = tf.add_n(grad)
scale = ((1.0 / len(device.grad_raw[var])) / len(self._devices))
scale = tf.constant(scale, dtype=tf.float32, name='scale')
if (self.minibatch_multiplier is not None):
scale /= tf.cast(self.minibatch_multiplier, tf.float32)
scale = self.undo_loss_scaling(scale)
device.grad_clean[var] = (grad * scale)
if (len(self._devices) > 1):
with tfutil.absolute_name_scope((self.scope + '/Broadcast')), tf.device(None):
for all_vars in zip(*[device.grad_clean.keys() for device in self._devices.values()]):
if ((len(all_vars) > 0) and all(((dim > 0) for dim in all_vars[0].shape.as_list()))):
all_grads = [device.grad_clean[var] for (device, var) in zip(self._devices.values(), all_vars)]
all_grads = nccl_ops.all_sum(all_grads)
for (device, var, grad) in zip(self._devices.values(), all_vars, all_grads):
device.grad_clean[var] = grad
for (device_idx, device) in enumerate(self._devices.values()):
with tfutil.absolute_name_scope((self.scope + ('/Apply%d' % device_idx))), tf.device(device.name):
if (self.minibatch_multiplier is None):
acc_ok = tf.constant(True, name='acc_ok')
device.grad_acc = OrderedDict(device.grad_clean)
else:
with tf.control_dependencies(None):
for var in device.grad_clean.keys():
device.grad_acc_vars[var] = tf.Variable(tf.zeros(var.shape), trainable=False, name='grad_acc_var')
device.grad_acc_count = tf.Variable(tf.zeros([]), trainable=False, name='grad_acc_count')
count_cur = (device.grad_acc_count + 1.0)
count_inc_op = (lambda : tf.assign(device.grad_acc_count, count_cur))
count_reset_op = (lambda : tf.assign(device.grad_acc_count, tf.zeros([])))
acc_ok = (count_cur >= tf.cast(self.minibatch_multiplier, tf.float32))
all_ops.append(tf.cond(acc_ok, count_reset_op, count_inc_op))
for (var, grad) in device.grad_clean.items():
print(var.name)
print(grad.name)
acc_var = device.grad_acc_vars[var]
acc_cur = (acc_var + tf.cast(grad, tf.float32))
device.grad_acc[var] = acc_cur
with tf.control_dependencies([acc_cur]):
acc_inc_op = (lambda : tf.assign(acc_var, acc_cur))
acc_reset_op = (lambda : tf.assign(acc_var, tf.zeros(var.shape)))
all_ops.append(tf.cond(acc_ok, acc_reset_op, acc_inc_op))
all_ok = tf.reduce_all(tf.stack(([acc_ok] + [tf.reduce_all(tf.is_finite(g)) for g in device.grad_acc.values()])))
apply_op = (lambda : device.optimizer.apply_gradients([(tf.cast(grad, var.dtype), var) for (var, grad) in device.grad_acc.items()]))
all_ops.append(tf.cond(all_ok, apply_op, tf.no_op))
if self.use_loss_scaling:
ls_inc_op = (lambda : tf.assign_add(device.loss_scaling_var, self.loss_scaling_inc))
ls_dec_op = (lambda : tf.assign_sub(device.loss_scaling_var, self.loss_scaling_dec))
ls_update_op = (lambda : tf.group(tf.cond(all_ok, ls_inc_op, ls_dec_op)))
all_ops.append(tf.cond(acc_ok, ls_update_op, tf.no_op))
if (device_idx == (len(self._devices) - 1)):
all_ops.append(autosummary.autosummary((self.id + '/learning_rate'), self.learning_rate))
all_ops.append(autosummary.autosummary((self.id + '/overflow_frequency'), tf.where(all_ok, 0, 1), condition=acc_ok))
if self.use_loss_scaling:
all_ops.append(autosummary.autosummary((self.id + '/loss_scaling_log2'), device.loss_scaling_var))
self.reset_optimizer_state()
if self.use_loss_scaling:
tfutil.init_uninitialized_vars([device.loss_scaling_var for device in self._devices.values()])
if (self.minibatch_multiplier is not None):
tfutil.run([var.initializer for device in self._devices.values() for var in (list(device.grad_acc_vars.values()) + [device.grad_acc_count])])
with tfutil.absolute_name_scope(self.scope):
return tf.group(*all_ops, name='TrainingOp')
def reset_optimizer_state(self) -> None:
tfutil.assert_tf_initialized()
tfutil.run([var.initializer for device in self._devices.values() for var in device.optimizer.variables()])
def get_loss_scaling_var(self, device: str) -> Union[(tf.Variable, None)]:
return self._get_device(device).loss_scaling_var
def apply_loss_scaling(self, value: TfExpression) -> TfExpression:
assert tfutil.is_tf_expression(value)
if (not self.use_loss_scaling):
return value
return (value * tfutil.exp2(self.get_loss_scaling_var(value.device)))
def undo_loss_scaling(self, value: TfExpression) -> TfExpression:
    """Divide `value` by 2**loss_scaling for its device; no-op when scaling is disabled."""
    assert tfutil.is_tf_expression(value)
    if not self.use_loss_scaling:
        return value
    inv_scale = tfutil.exp2(-self.get_loss_scaling_var(value.device))
    return value * inv_scale
class BaseDecodeHead(BaseModule, metaclass=ABCMeta):
    """Base class for segmentation decode heads.

    Handles input selection/transformation, loss construction, optional pixel
    sampling, dropout, and the final 1x1 classification conv (``conv_seg``).
    Subclasses implement :meth:`forward`.

    Args:
        in_channels (int | Sequence[int]): Input channels.
        channels (int): Channels after modules, before ``conv_seg``.
        num_classes (int): Number of classes.
        out_channels (int, optional): Output channels of ``conv_seg``;
            defaults to ``num_classes``. Use 1 for binary segmentation
            together with ``threshold``.
        threshold (float, optional): Prediction threshold when
            ``out_channels == 1``; defaults to 0.3.
        dropout_ratio (float): Dropout ratio applied before ``conv_seg``.
        conv_cfg / norm_cfg / act_cfg (dict): Layer config dicts.
        in_index (int | Sequence[int]): Index/indices of backbone features.
        input_transform (str, optional): One of 'resize_concat',
            'multiple_select', or None (use a single feature map).
        loss_decode (dict | Sequence[dict]): Loss config(s).
        ignore_index (int): Label value ignored by the loss.
        sampler (dict, optional): Pixel-sampler config.
        align_corners (bool): Passed to ``resize``.
        init_cfg (dict): Weight-initialization config.
    """

    def __init__(self, in_channels, channels, *, num_classes, out_channels=None, threshold=None, dropout_ratio=0.1, conv_cfg=None, norm_cfg=None, act_cfg=dict(type='ReLU'), in_index=(-1), input_transform=None, loss_decode=dict(type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), ignore_index=255, sampler=None, align_corners=False, init_cfg=dict(type='Normal', std=0.01, override=dict(name='conv_seg'))):
        super(BaseDecodeHead, self).__init__(init_cfg)
        self._init_inputs(in_channels, in_index, input_transform)
        self.channels = channels
        self.dropout_ratio = dropout_ratio
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        self.in_index = in_index
        self.ignore_index = ignore_index
        self.align_corners = align_corners
        if out_channels is None:
            if num_classes == 2:
                # Message spacing fixed: words previously ran together.
                warnings.warn('For binary segmentation, we suggest using '
                              '`out_channels = 1` to define the output '
                              'channels of segmentor, and use `threshold` '
                              'to convert seg_logits into a prediction '
                              'applying a threshold')
            out_channels = num_classes
        if (out_channels != num_classes) and (out_channels != 1):
            # Message spacing fixed as above.
            raise ValueError('out_channels should be equal to num_classes, '
                             'except binary segmentation set out_channels == 1 and '
                             f'num_classes == 2, but got out_channels={out_channels} '
                             f'and num_classes={num_classes}')
        if (out_channels == 1) and (threshold is None):
            threshold = 0.3
            warnings.warn('threshold is not defined for binary, and defaults to 0.3')
        self.num_classes = num_classes
        self.out_channels = out_channels
        self.threshold = threshold
        if isinstance(loss_decode, dict):
            self.loss_decode = build_loss(loss_decode)
        elif isinstance(loss_decode, (list, tuple)):
            self.loss_decode = nn.ModuleList()
            for loss in loss_decode:
                self.loss_decode.append(build_loss(loss))
        else:
            raise TypeError(f'loss_decode must be a dict or sequence of dict, but got {type(loss_decode)}')
        if sampler is not None:
            self.sampler = build_pixel_sampler(sampler, context=self)
        else:
            self.sampler = None
        self.conv_seg = nn.Conv2d(channels, self.out_channels, kernel_size=1)
        if dropout_ratio > 0:
            self.dropout = nn.Dropout2d(dropout_ratio)
        else:
            self.dropout = None
        self.fp16_enabled = False

    def extra_repr(self):
        """Extra repr string describing input handling."""
        s = f'input_transform={self.input_transform}, ignore_index={self.ignore_index}, align_corners={self.align_corners}'
        return s

    def _init_inputs(self, in_channels, in_index, input_transform):
        """Validate and record how backbone features are selected/combined."""
        if input_transform is not None:
            assert input_transform in ['resize_concat', 'multiple_select']
        self.input_transform = input_transform
        self.in_index = in_index
        if input_transform is not None:
            assert isinstance(in_channels, (list, tuple))
            assert isinstance(in_index, (list, tuple))
            assert len(in_channels) == len(in_index)
            if input_transform == 'resize_concat':
                # Selected feature maps are resized and concatenated on channels.
                self.in_channels = sum(in_channels)
            else:
                self.in_channels = in_channels
        else:
            assert isinstance(in_channels, int)
            assert isinstance(in_index, int)
            self.in_channels = in_channels

    def _transform_inputs(self, inputs):
        """Select (and possibly resize + concat) the configured input feature maps."""
        if self.input_transform == 'resize_concat':
            inputs = [inputs[i] for i in self.in_index]
            upsampled_inputs = [resize(input=x, size=inputs[0].shape[2:], mode='bilinear', align_corners=self.align_corners) for x in inputs]
            inputs = torch.cat(upsampled_inputs, dim=1)
        elif self.input_transform == 'multiple_select':
            inputs = [inputs[i] for i in self.in_index]
        else:
            inputs = inputs[self.in_index]
        return inputs

    # Restored as a decorator: a bare `_fp16()` statement here had no effect
    # on `forward` (mmseg-style @auto_fp16 pattern).
    @_fp16()
    def forward(self, inputs):
        """Placeholder: subclasses compute segmentation logits from backbone features."""
        pass

    def forward_train(self, inputs, img_metas, gt_semantic_seg, train_cfg):
        """Forward pass plus loss computation for training; returns the loss dict."""
        seg_logits = self(inputs)
        losses = self.losses(seg_logits, gt_semantic_seg)
        return losses

    def forward_test(self, inputs, img_metas, test_cfg):
        """Forward pass for inference (no loss computation)."""
        return self.forward(inputs)

    def cls_seg(self, feat):
        """Apply optional dropout, then the 1x1 classification conv."""
        if self.dropout is not None:
            feat = self.dropout(feat)
        output = self.conv_seg(feat)
        return output

    # Restored as a decorator: was a bare `_fp32(apply_to=('seg_logit',))`
    # statement (mmseg-style @force_fp32 pattern).
    @_fp32(apply_to=('seg_logit',))
    def losses(self, seg_logit, seg_label):
        """Resize logits to the label size and accumulate every configured loss."""
        loss = dict()
        seg_logit = resize(input=seg_logit, size=seg_label.shape[2:], mode='bilinear', align_corners=self.align_corners)
        if self.sampler is not None:
            seg_weight = self.sampler.sample(seg_logit, seg_label)
        else:
            seg_weight = None
        seg_label = seg_label.squeeze(1)
        if not isinstance(self.loss_decode, nn.ModuleList):
            losses_decode = [self.loss_decode]
        else:
            losses_decode = self.loss_decode
        for loss_decode in losses_decode:
            # Sum contributions when the same loss_name appears more than once.
            if loss_decode.loss_name not in loss:
                loss[loss_decode.loss_name] = loss_decode(seg_logit, seg_label, weight=seg_weight, ignore_index=self.ignore_index)
            else:
                loss[loss_decode.loss_name] += loss_decode(seg_logit, seg_label, weight=seg_weight, ignore_index=self.ignore_index)
        loss['acc_seg'] = accuracy(seg_logit, seg_label, ignore_index=self.ignore_index)
        return loss
def __init__(self, embed_dim, num_heads, dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False, kdim=None, vdim=None, batch_first=False, device=None, dtype=None, search=False) -> None:
    """Set up multi-head attention projections; `search=True` adds a per-head gate `alpha`."""
    factory_kwargs = {'device': device, 'dtype': dtype}
    super(MultiheadAttention, self).__init__()
    self.embed_dim = embed_dim
    self.kdim = embed_dim if kdim is None else kdim
    self.vdim = embed_dim if vdim is None else vdim
    self._qkv_same_embed_dim = (self.kdim == embed_dim) and (self.vdim == embed_dim)
    self.num_heads = num_heads
    self.dropout = dropout
    self.batch_first = batch_first
    self.head_dim = embed_dim // num_heads
    assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads'
    if self._qkv_same_embed_dim:
        # Single packed projection when q/k/v dims all match.
        self.in_proj_weight = Parameter(torch.empty((3 * embed_dim, embed_dim), **factory_kwargs))
        for name in ('q_proj_weight', 'k_proj_weight', 'v_proj_weight'):
            self.register_parameter(name, None)
    else:
        # Separate projections when key/value dims differ from embed_dim.
        self.q_proj_weight = Parameter(torch.empty((embed_dim, embed_dim), **factory_kwargs))
        self.k_proj_weight = Parameter(torch.empty((embed_dim, self.kdim), **factory_kwargs))
        self.v_proj_weight = Parameter(torch.empty((embed_dim, self.vdim), **factory_kwargs))
        self.register_parameter('in_proj_weight', None)
    if bias:
        self.in_proj_bias = Parameter(torch.empty(3 * embed_dim, **factory_kwargs))
    else:
        self.register_parameter('in_proj_bias', None)
    self.out_proj = NonDynamicallyQuantizableLinear(embed_dim, embed_dim, bias=bias, **factory_kwargs)
    if add_bias_kv:
        self.bias_k = Parameter(torch.empty((1, 1, embed_dim), **factory_kwargs))
        self.bias_v = Parameter(torch.empty((1, 1, embed_dim), **factory_kwargs))
    else:
        self.bias_k = self.bias_v = None
    self.add_zero_attn = add_zero_attn
    # Initialize the registered projections before adding the search gate.
    self._reset_parameters()
    if search:
        self.alpha = Parameter(torch.ones(1, 1, self.head_dim))
def register_Ns3CallbackImpl__Void_Ns3Time_Ns3Time_WifiPhyState_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
    # Auto-generated PyBindGen registration for the C++ class
    # ns3::CallbackImpl<void, Time, Time, WifiPhyState, empty x6>.
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor taking a const reference to the same specialization.
    cls.add_constructor([param('ns3::CallbackImpl< void, ns3::Time, ns3::Time, WifiPhyState, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')])
    # Static type-id helper and its const virtual accessor.
    cls.add_method('DoGetTypeid', 'std::string', [], is_static=True)
    cls.add_method('GetTypeid', 'std::string', [], is_const=True, is_virtual=True)
    # operator() is pure virtual in C++ and exposed to Python as __call__.
    cls.add_method('operator()', 'void', [param('ns3::Time', 'arg0'), param('ns3::Time', 'arg1'), param('WifiPhyState', 'arg2')], is_pure_virtual=True, is_virtual=True, custom_name=u'__call__')
    return
def not_number_date_field(identifier):
    """True iff `identifier` is not '*' and fully matches none of the
    module-level number/datetime/field patterns."""
    if identifier == '*':
        return False
    patterns = (number_pattern, datetime_pattern, field_pattern)
    return not any(re.fullmatch(p, identifier) for p in patterns)
class XmlElem(object):
    """Base class for declarative XML element (de)serialization.

    Subclasses set ``tag`` and define a ``Meta`` class whose ``AttrDecl``
    members describe how XML attributes/children map to instance attributes.
    """
    tag = None
    Meta = None

    # Bug fix: from_xml/get_attrs take `cls` and are invoked class-style
    # (e.g. self.__class__.get_attrs() in __str__); without @classmethod
    # those calls raised TypeError for a missing `cls` argument.
    @classmethod
    def from_xml(cls, xml):
        """Parse an ElementTree element into an instance of ``cls``.

        Raises:
            ValueError: on a tag mismatch or an unrecognized XML attribute.
        """
        if cls.tag != xml.tag:
            raise ValueError('Tag mismatch: expected %s but got %s' % (cls.tag, xml.tag))
        attrs = cls.get_attrs()
        inst = cls()
        used_attrs = []
        for (name, attr) in attrs:
            val = attr.from_xml(xml)
            if isinstance(attr, XmlAttr):
                used_attrs.append(attr.name)
            if val is not None:
                setattr(inst, name, val)
        for attr in list(xml.attrib.keys()):
            if attr not in used_attrs:
                raise ValueError('Unrecognized attribute: %s' % attr)
        return inst

    @classmethod
    def get_attrs(cls):
        """Return (and cache per class) the (name, AttrDecl) pairs declared in Meta."""
        # Bug fix: check cls.__dict__ rather than hasattr, so a subclass does
        # not silently reuse the cache computed for its parent class.
        if '_attrs' not in cls.__dict__:
            all_attrs = dir(cls.Meta)
            attrs = [(attr, getattr(cls.Meta, attr)) for attr in all_attrs if isinstance(getattr(cls.Meta, attr), AttrDecl)]
            cls._attrs = attrs
        return cls._attrs

    def __str__(self):
        attrs = []
        for (name, _) in self.__class__.get_attrs():
            attrs.append('%s=%s' % (name, str(getattr(self, name))))
        return self.__class__.__name__ + '(' + ', '.join(attrs) + ')'

    def __repr__(self):
        return str(self)
class OneHyperplaneClassifier(nn.Module):
    """Soft single-hyperplane classifier.

    Learns a hyperplane (a, b), with `a` projected through `P`, and
    classifies via a steep sigmoid: yhat = sigmoid(ksig * (a^T x - b)).
    """

    def __init__(self, x_dim, y_dim, P, a=None, b=None, ksig=5):
        super(OneHyperplaneClassifier, self).__init__()
        if a is None:
            # Random init, projected through P.
            self.a = Parameter(torch.matmul(torch.randn(int(y_dim), int(x_dim)), torch.t(P)))
        else:
            assert (a.shape == (int(y_dim), int(x_dim)))
            self.a = Parameter(torch.matmul(torch.Tensor(a), torch.t(P)))
        if b is None:
            self.b = Parameter(torch.Tensor(int(y_dim)))
            nn.init.constant_(self.b, 0.0)
        else:
            # Bug fix: compare against the shape *tuple* — the previous
            # `b.shape == int(y_dim)` compared a Size to an int and was
            # always False, so any provided `b` failed the assert.
            assert (b.shape == (int(y_dim),))
            self.b = Parameter(torch.Tensor(b))
        self.ksig = ksig

    def forward(self, x):
        """Perform classification: yhat = sig(ksig * (a^T x - b)).

        Inputs:
            x : input data sample, shape (N, x_dim)

        Outputs:
            yhat : (p(yhat=0), p(yhat=1)) concatenated along dim 1
            a : slope of hyperplane
        """
        z = F.linear(x, self.a, (-1) * self.b)
        yhat_class0 = torch.sigmoid(self.ksig * z)
        yhat_class1 = 1.0 - yhat_class0
        yhat = torch.cat((yhat_class0, yhat_class1), 1)
        return (yhat, self.a)
def get_pixel_grids(height, width):
    """Build homogeneous pixel-center coordinates.

    Returns a (3, height*width) tensor whose rows are x coordinates,
    y coordinates, and ones, with pixel centers at half-integer positions,
    flattened in row-major order (x varies fastest).
    """
    with torch.no_grad():
        xs = torch.linspace(0.5, width - 0.5, width)
        ys = torch.linspace(0.5, height - 0.5, height)
        x_row = xs.repeat(height)                # x pattern repeated per row
        y_row = ys.repeat_interleave(width)      # y constant within a row
        ones_row = torch.ones(height * width)
        return torch.stack((x_row, y_row, ones_row), dim=0)
def pop_func(pop_tensor, pop_coeff):
    """Scale `pop_tensor` by `pop_coeff`, clamp the result at 1.0, and return exp(-result)."""
    scaled = tf.multiply(pop_tensor, pop_coeff)
    clamped = tf.where(scaled >= 1.0, tf.ones_like(scaled), scaled)
    return tf.exp(-clamped)
class LanguageModelingAdapter(Adapter):
    """Adapter that converts language-modeling eval instances into windowed LM requests.

    Each instance's text is tokenized and split into overlapping context
    windows no longer than the window service's max request length; every
    window becomes one echo-prompt Request whose conditioning tokens are
    excluded from scoring.
    """
    (None)  # NOTE(review): stray no-op expression — possibly a stripped docstring/marker; harmless.
    def adapt(self, instances: List[Instance], parallelism: int) -> ScenarioState:
        """Build a ScenarioState with one or more requests per eval instance."""
        eval_instances: List[Instance] = [instance for instance in instances if (instance.split in EVAL_SPLITS)]
        hlog(f'{len(eval_instances)} eval instances')
        # Language modeling is evaluation-only: any non-eval instance is an error.
        assert (len(eval_instances) == len(instances)), ('Non-evaluation instances were passed to LanguageModelingAdapter, but LanguageModelingAdapter ' + 'expects evaluation instances only. Please open a GitHub issue with your RunSpec.')
        all_request_states: List[RequestState] = flatten_list(parallel_map(self._generate_requests, eval_instances, parallelism))
        hlog(f'{len(all_request_states)} requests')
        return ScenarioState(self.adapter_spec, all_request_states)
    def _generate_requests(self, eval_instance: Instance) -> List[RequestState]:
        """Split one instance's tokens into context windows and build a request per window."""
        max_sequence_length: int = self.window_service.max_sequence_length
        max_request_length: int = self.window_service.max_request_length
        # Leave room for the generated tokens within the model's combined limit.
        max_request_length = min(max_request_length, (self.window_service.max_sequence_and_generated_tokens_length - self.adapter_spec.max_tokens))
        prefix_token: str = self.window_service.prefix_token
        encode_result: EncodeResult = self.window_service.encode(eval_instance.input.text)
        tokens: List[TokenizationToken] = encode_result.tokens
        text: str = encode_result.text
        request_states: List[RequestState] = []
        num_predicted_tokens: int = 0
        # First window: condition only on the prefix token (if the service has one).
        first_seq_len: int = min(max_sequence_length, len(tokens))
        (prompt_text, num_conditioning_tokens) = self.construct_language_modeling_prompt(self.window_service.encode(prefix_token).tokens, tokens[:first_seq_len], max_request_length, text)
        request = Request(model=self.adapter_spec.model, model_deployment=self.adapter_spec.model_deployment, prompt=prompt_text, num_completions=1, temperature=0, max_tokens=self.adapter_spec.max_tokens, stop_sequences=self.adapter_spec.stop_sequences, echo_prompt=True, random=self.adapter_spec.random)
        # Only the prefix token (when present) is conditioning for the first window.
        request_state = RequestState(instance=eval_instance, reference_index=None, request_mode=None, train_trial_index=0, output_mapping=None, request=request, result=None, num_conditioning_tokens=(1 if (len(prefix_token) > 0) else 0), num_train_instances=self.adapter_spec.max_train_instances, prompt_truncated=False)
        request_states.append(request_state)
        num_predicted_tokens += first_seq_len
        # Subsequent windows: condition on the trailing context, predict the next chunk.
        while (num_predicted_tokens < len(tokens)):
            # Reserve at least one conditioning token per window.
            window_pred_len: int = min((len(tokens) - num_predicted_tokens), (max_request_length - 1))
            window_end: int = (num_predicted_tokens + window_pred_len)
            conditioning_tokens: List[TokenizationToken] = tokens[(window_end - max_request_length):num_predicted_tokens]
            pred_tokens: List[TokenizationToken] = tokens[num_predicted_tokens:window_end]
            (prompt_text, num_conditioning_tokens) = self.construct_language_modeling_prompt(conditioning_tokens, pred_tokens, max_request_length, text)
            request = Request(model=self.adapter_spec.model, model_deployment=self.adapter_spec.model_deployment, prompt=prompt_text, num_completions=1, temperature=0, max_tokens=self.adapter_spec.max_tokens, stop_sequences=self.adapter_spec.stop_sequences, echo_prompt=True)
            request_state = RequestState(instance=eval_instance, reference_index=None, request_mode=None, train_trial_index=0, output_mapping=None, request=request, result=None, num_conditioning_tokens=num_conditioning_tokens, num_train_instances=self.adapter_spec.max_train_instances, prompt_truncated=False)
            request_states.append(request_state)
            num_predicted_tokens += window_pred_len
        return request_states
    def construct_language_modeling_prompt(self, conditioning_tokens: List[TokenizationToken], pred_tokens: List[TokenizationToken], max_req_len: int, text: str) -> Tuple[(str, int)]:
        """Decode the window into a prompt and count its conditioning tokens.

        Adjusts the conditioning-token count when stripping boundary
        characters changes the tokenization at the window edges.
        """
        raw_prompt: str
        (raw_prompt, pred_tokens) = self.fits_tokens_within_context_window(conditioning_tokens, pred_tokens, max_req_len, text)
        # NOTE(review): strip('') is a no-op, so len(prompt) == len(raw_prompt)
        # always holds and the else-branch below looks unreachable — possibly a
        # mangled special-character strip (e.g. a replacement char); confirm upstream.
        prompt: str = raw_prompt.strip('')
        if (len(prompt) == len(raw_prompt)):
            num_conditioning_tokens = len(conditioning_tokens)
        else:
            # Boundary bytes were stripped; recount conditioning tokens by re-encoding.
            num_leading_byte_tokens: int = (max_req_len - len(self.window_service.encode(raw_prompt.lstrip('')).tokens))
            num_trailing_byte_tokens: int = (max_req_len - len(self.window_service.encode(raw_prompt.rstrip('')).tokens))
            if (num_trailing_byte_tokens >= len(pred_tokens)):
                num_conditioning_tokens = len(self.window_service.encode(prompt).tokens)
            elif (num_leading_byte_tokens >= len(conditioning_tokens)):
                num_conditioning_tokens = 1
            else:
                num_conditioning_tokens = (len(conditioning_tokens) - num_leading_byte_tokens)
        return (prompt, num_conditioning_tokens)
    def fits_tokens_within_context_window(self, conditioning_tokens: List[TokenizationToken], pred_tokens: List[TokenizationToken], max_req_len: int, text: Optional[str]=None) -> Tuple[(str, List[TokenizationToken])]:
        """Trim `pred_tokens` until decode+re-encode of the window fits in max_req_len.

        Re-encoding can merge/split tokens, so trimming iterates until the
        re-encoded prompt length is within the limit.

        Raises:
            ValueError: if trimming exhausts pred_tokens (would loop forever).
        """
        prompt: str = self.window_service.decode((conditioning_tokens + pred_tokens), text)
        prompt_length: int = len(self.window_service.encode(prompt).tokens)
        while (prompt_length > max_req_len):
            pred_tokens = pred_tokens[:(- (prompt_length - max_req_len))]
            prompt = self.window_service.decode((conditioning_tokens + pred_tokens), text)
            prompt_length = len(self.window_service.encode(prompt).tokens)
            if (len(pred_tokens) == 0):
                raise ValueError('Truncating pred_tokens to fit them in the context window, got len(pred_tokens) == 0, which will lead to an infinite loop.')
        return (prompt, pred_tokens)
def encode_all_types(df_ret: pd.DataFrame, df_params: pd.DataFrame, df_vars: pd.DataFrame, output_dir: str):
    """Label-encode return/arg/var types with one shared LabelEncoder.

    Adds `return_type_enc_all` / `arg_type_enc_all` / `var_type_enc_all`
    columns, writes a frequency-sorted CSV of unique types to
    `_most_frequent_all_types.csv` in `output_dir`, and returns
    (df_ret, df_params, le_all).
    """
    all_types = np.concatenate((df_ret['return_type'].values, df_params['arg_type'].values, df_vars['var_type'].values), axis=0)
    le_all = LabelEncoder()
    le_all.fit(all_types)
    df_ret['return_type_enc_all'] = le_all.transform(df_ret['return_type'].values)
    df_params['arg_type_enc_all'] = le_all.transform(df_params['arg_type'].values)
    df_vars['var_type_enc_all'] = le_all.transform(df_vars['var_type'].values)
    (unq_types, count_unq_types) = np.unique(all_types, return_counts=True)
    # Bug fix: sort everything by descending frequency ONCE so the
    # enc/type/count columns stay row-aligned. Previously the 'enc' column
    # was computed from the *unsorted* unique types while 'type' and 'count'
    # were frequency-sorted, misaligning every row of the CSV.
    order = np.argsort(count_unq_types)[::-1]
    sorted_types = unq_types[order]
    sorted_counts = count_unq_types[order]
    pd.DataFrame(list(zip(le_all.transform(sorted_types), sorted_types, sorted_counts)), columns=['enc', 'type', 'count']).to_csv(os.path.join(output_dir, '_most_frequent_all_types.csv'), index=False)
    logger.info(f'Total no. of extracted types: {len(all_types):,}')
    logger.info(f'Total no. of unique types: {len(unq_types):,}')
    return (df_ret, df_params, le_all)
def CntNonZNodes(tspec, *args):
    """Dispatch to the graph-type-specific CntNonZNodes implementation
    based on the exact type of `tspec`; raise TypeError otherwise."""
    dispatch = {
        PUNGraph: CntNonZNodes_PUNGraph,
        PUndirNet: CntNonZNodes_PUndirNet,
        PDirNet: CntNonZNodes_PDirNet,
        PNGraph: CntNonZNodes_PNGraph,
        PNEANet: CntNonZNodes_PNEANet,
        PNGraphMP: CntNonZNodes_PNGraphMP,
        PNEANetMP: CntNonZNodes_PNEANetMP,
    }
    impl = dispatch.get(type(tspec))
    if impl is None:
        raise TypeError('First argument has invalid type')
    return impl(tspec, *args)
def test_BitMaskedArray():
    """A BitMaskedArray built with MSB-first bit order should report no validity errors."""
    mask_bits = np.packbits(np.array([1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1], dtype=np.uint8))
    content = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6])
    layout = ak.contents.bitmaskedarray.BitMaskedArray(
        ak.index.Index(mask_bits),
        ak.contents.numpyarray.NumpyArray(content),
        valid_when=True,
        length=13,
        lsb_order=False,
    )
    assert ak.validity_error(layout) == ''
    assert ak.validity_error(layout.to_typetracer()) == ''
def collate(samples, pad_idx, eos_idx):
    """Collate a list of sample dicts into a fairseq-style training batch."""
    if not samples:
        return {}

    def merge(key, is_list=False):
        # Pad-and-stack a field across samples; handles either a single
        # tensor per sample or a list of tensors per sample.
        if is_list:
            merged = []
            for i in range(len(samples[0][key])):
                merged.append(data_utils.collate_tokens([s[key][i] for s in samples], pad_idx, eos_idx, left_pad=False))
            return merged
        return data_utils.collate_tokens([s[key] for s in samples], pad_idx, eos_idx, left_pad=False)

    src_tokens = merge('source')
    first_target = samples[0]['target']
    if first_target is not None:
        target = merge('target', isinstance(first_target, list))
    else:
        # No target provided: fall back to the source tokens.
        target = src_tokens
    return {
        'id': torch.LongTensor([s['id'] for s in samples]),
        'nsentences': len(samples),
        'ntokens': sum(len(s['source']) for s in samples),
        'net_input': {
            'src_tokens': src_tokens,
            'src_lengths': torch.LongTensor([s['source'].numel() for s in samples]),
        },
        'target': target,
    }
def test_get_time_line_value_last(sequence_factory):
    """Querying far past the final timestamp should return the last recorded value."""
    base = time.time_ns()
    sequence_factory._time_stamps = [base, base + 1, base + 2]
    sequence_factory._values = ['val_0', 'val_1', 'val_2']
    assert sequence_factory._get_time_line_value(base * 2) == 'val_2'
def test_dbnet_ignore_texts():
    """ignore_texts should move polygons tagged as ignore into gt_masks_ignore."""
    target_generator = textdet_targets.DBNetTargets()
    ignore_tags = [True, False]
    kept_polys = [[np.array([0, 0, 10, 0, 10, 10, 0, 10])], [np.array([20, 0, 30, 0, 30, 10, 20, 10])]]
    pre_ignored_polys = [[np.array([0, 0, 15, 0, 15, 10, 0, 10])]]
    results = {
        'gt_masks_ignore': PolygonMasks(pre_ignored_polys, 40, 40),
        'gt_masks': PolygonMasks(kept_polys, 40, 40),
        'gt_bboxes': np.array([[0, 0, 10, 10], [20, 0, 30, 10]]),
        'gt_labels': np.array([0, 1]),
    }
    target_generator.ignore_texts(results, ignore_tags)
    # Only the second (non-ignored) instance survives in gt_masks/gt_labels;
    # the first polygon is appended to gt_masks_ignore.
    assert np.allclose(results['gt_labels'], np.array([1]))
    assert len(results['gt_masks_ignore'].masks) == 2
    assert np.allclose(results['gt_masks_ignore'].masks[1][0], kept_polys[0][0])
    assert len(results['gt_masks'].masks) == 1
def acquire_from_twitter_api(input_data):
    """Fetch full tweets by id from the Twitter API.

    Relies on the module-level ``args`` namespace for API credentials.
    Returns (tweets_by_API, wrong_ones) where wrong_ones pairs each failed
    item with the TweepError it raised.
    """
    auth = tweepy.OAuthHandler(args.API_key, args.API_secret_key)
    auth.set_access_token(args.access_token, args.access_token_secret)
    api = tweepy.API(auth, parser=tweepy.parsers.JSONParser(), wait_on_rate_limit=True)
    fetched = []
    failures = []
    for idx, item in enumerate(input_data):
        # Periodic progress report.
        if idx % 500 == 0:
            print('[I] number of ids processed:', idx)
        try:
            fetched.append(api.get_status(item['id'], tweet_mode='extended'))
        except tweepy.TweepError as err:
            # Keep going on per-tweet failures (deleted/protected tweets etc.).
            failures.append([item, err])
    return (fetched, failures)
def load_examples_dbpedia(path, label_path='/gscratch/zlab/swj0419/knnlm/data/label_word/datasets/dbpedia/label_names_lot.txt'):
    """Load DBpedia topic-classification examples from a CSV file.

    Args:
        path: CSV file with 'Class' (1-based label index) and 'Text' columns.
        label_path: label-word synonym file passed to ``load_label``
            (generalized from a hard-coded path; default preserves the
            original behavior).

    Returns:
        A list of example dicts with 'options' (one per hypothesis),
        'label', 'label2synonym', and 'label_list'.
    """
    hypotheses = (' company', ' school', ' artist', ' athlete', ' politics', ' transportation', ' building', ' river', ' village', ' animal', ' plant', ' album', ' film', ' book')
    label2synonym = load_label(label_path)
    examples = []
    with open(path) as fp:
        reader = csv.DictReader(fp)
        for row in reader:
            # 'Class' is 1-based in the CSV; convert to a 0-based index.
            label = int(row['Class']) - 1
            premise = f'''{row['Text']}
This topic is about'''
            options = []
            for h in hypotheses:
                options.append({
                    'premise': premise,
                    'hypothesis': h,
                    'uncond_premise': '\n This topic is about',
                    'uncond_hypothesis': h,
                })
            examples.append({'options': options, 'label': label, 'label2synonym': label2synonym, 'label_list': hypotheses})
    return examples
def get_cars_data():
    """Load the UCI 'car' dataset.

    Returns (X, y, mapping): the feature frame, label-encoded targets, and
    an ordinal-mapping spec for the categorical feature columns.
    """
    df = pd.read_csv('source_data/cars/car.data.txt')
    feature_cols = [c for c in df.columns if c != 'class']
    X = df.reindex(columns=feature_cols)
    y_frame = df.reindex(columns=['class'])
    y = preprocessing.LabelEncoder().fit_transform(y_frame.to_numpy().ravel())
    four_level = [('vhigh', 0), ('high', 1), ('med', 2), ('low', 3)]
    mapping = [
        {'col': 'buying', 'mapping': list(four_level)},
        {'col': 'maint', 'mapping': list(four_level)},
        {'col': 'doors', 'mapping': [('2', 0), ('3', 1), ('4', 2), ('5more', 3)]},
        {'col': 'persons', 'mapping': [('2', 0), ('4', 1), ('more', 2)]},
        {'col': 'lug_boot', 'mapping': [('small', 0), ('med', 1), ('big', 2)]},
        {'col': 'safety', 'mapping': [('high', 0), ('med', 1), ('low', 2)]},
    ]
    return (X, y, mapping)
class AllReduce(Function):
    # Autograd Function that sum-reduces groups of tensors across GPUs and
    # broadcasts the results back to every device. Backward repeats the same
    # reduce+broadcast, since all-reduce is its own gradient pattern.
    # NOTE(review): forward/backward lack @staticmethod — works in Python 3
    # via plain class-attribute access, but verify against the torch version in use.
    def forward(ctx, num_inputs, *inputs):
        # `inputs` is a flat sequence: num_inputs tensors per GPU, concatenated.
        ctx.num_inputs = num_inputs
        # One representative tensor per GPU yields the participating device ids.
        ctx.target_gpus = [inputs[i].get_device() for i in range(0, len(inputs), num_inputs)]
        # Regroup the flat sequence into per-GPU chunks of num_inputs tensors.
        inputs = [inputs[i:(i + num_inputs)] for i in range(0, len(inputs), num_inputs)]
        # Stable device ordering for the coalesced reduce/broadcast calls.
        inputs = sorted(inputs, key=(lambda i: i[0].get_device()))
        results = comm.reduce_add_coalesced(inputs, ctx.target_gpus[0])
        outputs = comm.broadcast_coalesced(results, ctx.target_gpus)
        # Flatten the per-GPU outputs back into a single tuple.
        return tuple([t for tensors in outputs for t in tensors])
    def backward(ctx, *inputs):
        # Gradients flow through the same reduce+broadcast pattern as forward.
        inputs = [i.data for i in inputs]
        inputs = [inputs[i:(i + ctx.num_inputs)] for i in range(0, len(inputs), ctx.num_inputs)]
        results = comm.reduce_add_coalesced(inputs, ctx.target_gpus[0])
        outputs = comm.broadcast_coalesced(results, ctx.target_gpus)
        # Leading None matches the non-tensor `num_inputs` forward argument.
        return ((None,) + tuple([Variable(t) for tensors in outputs for t in tensors]))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.