code stringlengths 101 5.91M |
|---|
class NoamOpt():
    """Noam learning-rate schedule wrapped around an optimizer.

    The rate grows linearly for ``warmup`` steps and then decays with the
    inverse square root of the step count, scaled by ``factor`` and
    ``model_size ** -0.5`` (the Transformer schedule).
    """

    def __init__(self, model_size, factor, warmup, optimizer):
        self.optimizer = optimizer
        self._step = 0
        self.warmup = warmup
        self.factor = factor
        self.model_size = model_size
        self._rate = 0

    def zero_grad(self):
        """Delegate gradient clearing to the wrapped optimizer."""
        self.optimizer.zero_grad()

    def step(self):
        """Advance one step: refresh every param group's lr, then step."""
        self._step += 1
        new_rate = self.rate()
        for group in self.optimizer.param_groups:
            group['lr'] = new_rate
        self._rate = new_rate
        self.optimizer.step()

    def rate(self, step=None):
        """Return the Noam rate at ``step`` (defaults to the current step)."""
        current = self._step if step is None else step
        warmup_term = current * (self.warmup ** (-1.5))
        decay_term = current ** (-0.5)
        return self.factor * ((self.model_size ** (-0.5)) * min(decay_term, warmup_term))
def init_pretrained_weights(key):
    """Download (if needed) and load pretrained weights for ``key``.

    The checkpoint URL is looked up in the module-level ``model_urls``
    mapping, cached under ``$TORCH_HOME/checkpoints``, and loaded onto the
    CPU. Only the main process downloads; all processes then synchronize.

    Returns:
        dict: the checkpoint state dict loaded with ``torch.load``.
    """
    import os
    import gdown

    def _get_torch_home():
        # Mirror torch.hub's cache-dir resolution: $TORCH_HOME, else
        # $XDG_CACHE_HOME/torch, else ~/.cache/torch.
        ENV_TORCH_HOME = 'TORCH_HOME'
        ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME'
        DEFAULT_CACHE_DIR = '~/.cache'
        torch_home = os.path.expanduser(os.getenv(ENV_TORCH_HOME, os.path.join(os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'torch')))
        return torch_home

    torch_home = _get_torch_home()
    model_dir = os.path.join(torch_home, 'checkpoints')
    # exist_ok replaces the old try/except-errno.EEXIST dance.
    os.makedirs(model_dir, exist_ok=True)
    filename = model_urls[key].split('/')[-1]
    cached_file = os.path.join(model_dir, filename)
    if not os.path.exists(cached_file):
        logger.info(f"Pretrain model don't exist, downloading from {model_urls[key]}")
        if comm.is_main_process():
            gdown.download(model_urls[key], cached_file, quiet=False)
    comm.synchronize()
    logger.info(f'Loading pretrained model from {cached_file}')
    state_dict = torch.load(cached_file, map_location=torch.device('cpu'))
    return state_dict
def _random_replay_eval(*, self, source, idx: int, **_kwargs):
    """Layer eval hook that replays pre-recorded random tensors.

    Instead of sampling fresh randomness, fetch the next recorded array
    from the global random journal and emit it as this layer's output, so
    a previous run can be reproduced deterministically.
    """
    from returnn.tf.layers.basic import LayerBase
    assert isinstance(self, LayerBase)
    idx  # unused; evaluated only to mark the parameter as intentionally ignored
    def _py_func() -> numpy.ndarray:
        # Pull the next journal entry shaped like this layer's output template.
        elem = ReturnnLayersBackend._random_journal.get_next(new_out_template=self.output)
        assert isinstance(elem.out, Tensor)
        assert isinstance(elem.out.raw_tensor, numpy.ndarray)
        return elem.out.raw_tensor
    def _func() -> tf.Tensor:
        # Bridge the Python-side journal read into the TF graph.
        (out,) = tf.numpy_function(_py_func, [], [self.output.dtype])
        assert isinstance(out, tf.Tensor)
        # numpy_function loses static shape info; restore it from the template.
        out.set_shape(self.output.batch_shape)
        return out
    # Force source layers to run first so journal reads happen in the same
    # order as they were recorded.
    with (tf.control_dependencies([source(i, auto_convert=False) for i in range(len(self.sources))]) if self.sources else contextlib.nullcontext()):
        return _func()
class SemimonomialActionVec(Action):
    """Action of a semimonomial transformation group on an ambient free module.

    The element acts by applying its field automorphism entrywise, then
    permuting coordinates, then multiplying componentwise by the inverse
    of its monomial (diagonal) part — see ``_act_``.
    """

    def __init__(self, G, V, check=True):
        """Set up the action of ``G`` on the dense module of ``V``.

        With ``check=True``, validate that ``G`` is a semimonomial
        transformation group and that ``V`` is an ambient free module whose
        dimension matches ``G``'s degree and whose base ring matches
        ``G``'s; raise ``ValueError`` otherwise.
        """
        if check:
            from sage.modules.free_module import FreeModule_generic
            if (not isinstance(G, SemimonomialTransformationGroup)):
                raise ValueError(('%s is not a semimonomial group' % G))
            if (not isinstance(V, FreeModule_generic)):
                raise ValueError(('%s is not a free module' % V))
            if (V.ambient_module() != V):
                raise ValueError(('%s is not equal to its ambient module' % V))
            if (V.dimension() != G.degree()):
                raise ValueError(('%s has a dimension different to the degree of %s' % (V, G)))
            if (V.base_ring() != G.base_ring()):
                raise ValueError(('%s and %s have different base rings' % (V, G)))
        Action.__init__(self, G, V.dense_module())

    def _act_(self, a, b):
        """Apply group element ``a`` to vector ``b``: automorphism, then
        permutation, then componentwise product with a's inverse monomial part."""
        b = b.apply_map(a.get_autom())
        b = self.codomain()(a.get_perm().action(b))
        return b.pairwise_product(self.codomain()(a.get_v_inverse()))
def calc_matrix_error(act_tiles, pred_tiles, ncol_tiles, nrow_tiles):
    """Average per-frame Frobenius distance between one-hot tile matrices.

    For each frame the actual tile index is wrapped modulo the grid size
    and the predicted tile index is clamped into the grid. Both are
    conceptually one-hot (nrow, ncol) matrices, so their Frobenius
    distance is 0 when the cells coincide and sqrt(2) otherwise — there is
    no need to materialize the matrices (the original built two full
    grids per frame).

    Returns:
        float: mean distance over all frames (raises ZeroDivisionError on
        empty input, same as before).
    """
    user_matrix_error = 0.0
    for fr in range(len(pred_tiles)):
        act_tile = act_tiles[fr]
        pred_tile = pred_tiles[fr]
        # Actual cell wraps around the grid.
        act_row = act_tile[0] % nrow_tiles
        act_col = act_tile[1] % ncol_tiles
        # Predicted cell is clamped to the grid boundaries.
        pred_row = min(max(pred_tile[0], 0), nrow_tiles - 1)
        pred_col = min(max(pred_tile[1], 0), ncol_tiles - 1)
        if (pred_row, pred_col) != (act_row, act_col):
            user_matrix_error += np.sqrt(2.0)
    return (user_matrix_error / len(pred_tiles))
def _IfExp(t, symbols, inferred_symbols):
    """Infer the type of a conditional expression ``body if test else orelse``.

    The test is dispatched first for its effect on the symbol tables; the
    result is the common type of the two value branches.
    """
    _dispatch(t.test, symbols, inferred_symbols)
    body_type = _dispatch(t.body, symbols, inferred_symbols)
    orelse_type = _dispatch(t.orelse, symbols, inferred_symbols)
    return dtypes.result_type_of(body_type, orelse_type)
class Policy():
    """Base class for scheduling policies.

    Provides helpers to convert between nested throughput dicts
    (d[job_id][worker_type]) and dense numpy matrices, plus the cvxpy
    constraints shared by concrete policies.
    """

    def __init__(self, solver='ECOS'):
        self._name = None
        self._solver = solver

    def name(self):
        """Return the policy's name (None for the base class)."""
        return self._name

    def scale_factors_array(self, scale_factors, job_ids, m, n):
        """Build an (m, n) matrix whose row i repeats scale_factors[job_ids[i]]."""
        out = np.zeros((m, n))
        for row in range(m):
            out[row, :] = scale_factors[job_ids[row]]
        return out

    def flatten(self, d, cluster_spec):
        """Convert d[job_id][worker_type] into (matrix, (job_ids, worker_types)).

        Also records ``self._num_workers`` from ``cluster_spec``, ordered
        like ``worker_types``. Returns (None, None) for empty input.
        """
        job_ids = sorted(d.keys())
        if not job_ids:
            return (None, None)
        worker_types = sorted(d[job_ids[0]].keys())
        # Recorded before the emptiness check, exactly as before.
        self._num_workers = [cluster_spec[wt] for wt in worker_types]
        if not worker_types:
            return (None, None)
        matrix = [[d[job_id][wt] for wt in worker_types] for job_id in job_ids]
        return (np.array(matrix), (job_ids, worker_types))

    def unflatten(self, m, index):
        """Inverse of flatten: rebuild the nested dict from matrix ``m``."""
        (job_ids, worker_types) = index
        return {job_id: {wt: m[i][j] for (j, wt) in enumerate(worker_types)}
                for (i, job_id) in enumerate(job_ids)}

    def get_base_constraints(self, x, scale_factors_array):
        """Constraints shared by all policies: nonnegative allocation,
        per-worker-type capacity, and per-job total allocation <= 1."""
        return [(x >= 0),
                (cp.sum(cp.multiply(scale_factors_array, x), axis=0) <= self._num_workers),
                (cp.sum(x, axis=1) <= 1)]
def list_of_dicts__to__dict_of_lists(lst):
    """Transpose a list of dicts into a defaultdict of lists.

    Keys come from the first dict; every later dict must contain at least
    those keys (asserted). Extra keys in later dicts are ignored.
    """
    if not lst:
        return {}
    keys = set(lst[0].keys())
    transposed = collections.defaultdict(list)
    for record in lst:
        assert keys.issubset(record.keys())
        for key in keys:
            transposed[key].append(record[key])
    return transposed
def parse_file(args):
    """Convert one model input file to CSV with a 't0: <time> day' header.

    Density and abundance tables are concatenated column-wise, given a
    (name, quantity) MultiIndex header, and appended to the output file
    after the time-of-model line.
    """
    (abundances_df, density_df, time_of_model, quantities_row) = convert_format(args.input_path)
    base_name = os.path.splitext(os.path.basename(args.input_path))[0]
    save_file_path = os.path.join(args.output_path, base_name + '.csv')
    combined = pd.concat([density_df, abundances_df], axis=1)
    combined.columns = pd.MultiIndex.from_tuples(zip(combined.columns, quantities_row))
    # Write the header line first, then append the table to the same file.
    with open(save_file_path, 'w') as f:
        f.write(' '.join(('t0:', str(time_of_model), 'day')))
        f.write('\n')
    combined.to_csv(save_file_path, index=False, sep=' ', mode='a')
class CosineSchedule(BaseSchedule):
    """Cosine beta schedule for diffusion models.

    The cumulative alpha product follows a squared cosine offset by ``s``
    (so betas are not vanishingly small near t=0); betas are clipped to
    [1e-4, 0.9999].
    """

    def __init__(self, timesteps: int, device: Optional[torch.device]=None, s: float=0.008, *args, **kwargs) -> None:
        # ``s`` is stored before super().__init__, which presumably
        # triggers _get_betas — keep this ordering.
        self.s = s
        super().__init__(timesteps, device, *args, **kwargs)

    def _get_betas(self, timesteps: int) -> Tensor:
        n_points = timesteps + 1
        t = torch.linspace(0, timesteps, n_points)
        # Squared cosine, rescaled so that alpha_bar(0) == 1.
        alphas_cumprod = torch.cos((((t / timesteps) + self.s) / (1 + self.s)) * torch.pi * 0.5) ** 2
        alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
        betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
        return torch.clip(betas, 0.0001, 0.9999)
def tweakval(val, identifier):
    """Return a command-line override for ``val``, or ``val`` unchanged.

    Scans the collected CLI args for a key equal to ``identifier`` after
    turning dashes into underscores; a match is cast to ``type(val)``.

    Raises:
        ValueError: if ``identifier`` is empty/falsy.
    """
    if not identifier:
        raise ValueError('Must provide an identifier for tweakval to work')
    for (key, replacement) in collect_args().items():
        normalized = key.replace('-', '_')
        if normalized == identifier:
            log(('replacing %s in %s with %s' % (normalized, str(val), str(replacement))))
            return type(val)(replacement)
    return val
def evaluate(model, instances, iterator, device):
    """Run ``model`` over ``instances`` without gradients and return metrics.

    Switches the model to eval mode and MST decoding, streams batches from
    the iterator for one epoch, and shows running metrics in a tqdm bar.

    Returns:
        dict: final metrics from ``model.get_metrics(reset=True)``.
    """
    with torch.no_grad():
        model.eval()
        model.decode_type = 'mst'
        test_generator = iterator(instances=instances, shuffle=False, num_epochs=1)
        logger.info('Iterating over dataset')
        generator_tqdm = Tqdm.tqdm(test_generator, total=iterator.get_num_batches(instances))
        for batch in generator_tqdm:
            batch = move_to_device(batch, device)
            # NOTE(review): for_training=True during evaluation looks odd —
            # presumably it makes the forward pass compute loss/metrics; confirm.
            model(batch, for_training=True)
            metrics = model.get_metrics()
            description = (', '.join([('%s: %.2f' % (name, value)) for (name, value) in metrics.items()]) + ' ||')
            generator_tqdm.set_description(description, refresh=False)
        return model.get_metrics(reset=True)
class GaussianPolicy(nn.Module):
    """MLP policy producing a multivariate Gaussian action distribution.

    The mean comes from the MLP; the state-independent log std is a
    learned parameter clamped to [LOG_STD_MIN, LOG_STD_MAX] and used as a
    diagonal scale.
    """

    def __init__(self, obs_dim, act_dim, hidden_dim=256, n_hidden=2):
        super().__init__()
        layer_sizes = [obs_dim] + [hidden_dim] * n_hidden + [act_dim]
        self.net = mlp(layer_sizes)
        self.log_std = nn.Parameter(torch.zeros(act_dim, dtype=torch.float32))

    def forward(self, obs):
        mean = self.net(obs)
        clamped_log_std = self.log_std.clamp(LOG_STD_MIN, LOG_STD_MAX)
        return MultivariateNormal(mean, scale_tril=torch.diag(torch.exp(clamped_log_std)))

    def act(self, obs, deterministic=False, enable_grad=False):
        """Return an action: the distribution mean if deterministic, else a sample."""
        with torch.set_grad_enabled(enable_grad):
            dist = self(obs)
            if deterministic:
                return dist.mean
            return dist.sample()
def test_ufunc_afterward():
    """A values_astype result must still support ufuncs on a record field."""
    records = ak.highlevel.Array([{'x': 1.1}, {'x': 3.3}])
    converted = ak.operations.values_astype(records, np.float32)
    assert (converted['x'] + 1).to_list() == [2.0, 4.0]
class TestExampleKeyORM(ORMTester):
    """Runs the shared ORMTester suite against ExampleKeyORM."""

    def object(self):
        # Sample object the harness round-trips through the ORM.
        return ('bob', 4)

    def orm(self):
        # ORM implementation under test.
        return ExampleKeyORM()
def check_exists(path, preserve=DO_PRESERVE_RUNS):
    """Report whether ``path`` exists; optionally delete it.

    When the path exists and ``preserve`` is falsy, the whole tree is
    removed (removal errors ignored). Returns True iff the path existed
    on entry.
    """
    if not osp.exists(path):
        return False
    print(f'{path} exists')
    if not preserve:
        print(f'removing {path}')
        shutil.rmtree(path, ignore_errors=True)
    return True
def _destinsrc(src, dst):
src = abspath(src)
dst = abspath(dst)
if (not src.endswith(os.path.sep)):
src += os.path.sep
if (not dst.endswith(os.path.sep)):
dst += os.path.sep
return dst.startswith(src) |
class BaseBackbone(nn.Module, metaclass=ABCMeta):
    """Abstract base class for backbone networks.

    Concrete backbones override ``forward``; ``init_weights`` optionally
    loads a checkpoint when given a path string.
    """

    def __init__(self):
        super(BaseBackbone, self).__init__()

    def init_weights(self, pretrained=None):
        """Load weights from the ``pretrained`` checkpoint path, or do nothing.

        Raises:
            TypeError: if ``pretrained`` is neither a str nor None.
        """
        if pretrained is None:
            return
        if isinstance(pretrained, str):
            load_checkpoint(self, pretrained, strict=False, logger=logging.getLogger())
            return
        raise TypeError(f'pretrained must be a str or None. But received {type(pretrained)}.')

    def forward(self, x):
        # Intentionally a no-op; concrete backbones override this.
        pass

    def train(self, mode=True):
        """Delegate train/eval mode switching to nn.Module."""
        super(BaseBackbone, self).train(mode)
def _load_data(name):
    """Load every array from the bundled ``data/<name>`` .npz file as a dict."""
    data_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'data')
    with np.load(os.path.join(data_dir, name)) as archive:
        return dict(archive.items())
def partial_apply_nontensors(fn, args, **kwargs):
    """Split ``args`` into tensor and non-tensor parts for later re-application.

    Returns ``(new_fn, tensor_args)`` where ``new_fn(*tensors)`` calls
    ``fn`` with the original non-tensor args kept in place and the supplied
    tensors substituted positionally (in order) for the tensor-like args.
    Tensor-like means a torch.Tensor or an iterable of tensors.
    """
    # Per-position flag: 't' for tensor-like args, 's' for everything else.
    source = [('t' if (isinstance(arg, torch.Tensor) or is_iterable_of_tensors(arg)) else 's') for arg in args]
    def new_fn(*tensors_):
        # Consume the supplied tensors left-to-right wherever a 't' slot occurs.
        tensors = iter(tensors_)
        return fn(*((args[i] if (s == 's') else next(tensors)) for (i, s) in enumerate(source)), **kwargs)
    return (new_fn, [arg for arg in args if (isinstance(arg, torch.Tensor) or is_iterable_of_tensors(arg))])
def mix_labels(target, lam, n_classes, label_smoothing=0.1):
    """Return mixup-style targets: lam * smoothed + (1 - lam) * flipped.

    The label-smoothed one-hot targets are mixed with the same batch in
    reversed order, pairing sample i with sample (batch_size - 1 - i).
    """
    smoothed = label_smooth(target, n_classes, label_smoothing)
    reversed_batch = torch.flip(smoothed, dims=[0])
    return lam * smoothed + (1 - lam) * reversed_batch
def read_ptb():
    """Read PTB constituency parses and return them as Sentence objects.

    Side effects: writes every tokenized sentence to ``ptb.sents`` and
    interns tokens / POS tags / constituent labels into the module-level
    VOCDICT / POSDICT / CLABELDICT dictionaries.

    Returns:
        list: Sentence objects in corpus order.
    """
    sys.stderr.write((('\nReading PTB data from ' + PTB_DATA_DIR) + ' ...\n'))
    sentences = []
    senno = 0
    with codecs.open('ptb.sents', 'w', 'utf-8') as ptbsf:
        for constitfile in os.listdir(PTB_DATA_DIR):
            reader = BracketParseCorpusReader(PTB_DATA_DIR, constitfile)
            parses = reader.parsed_sents()
            for p in parses:
                ptbsf.write((' '.join(p.leaves()) + '\n'))
                tokpos = p.pos()
                tokens = [VOCDICT.addstr(tok) for (tok, pos) in tokpos]
                postags = [POSDICT.addstr(pos) for (tok, pos) in tokpos]
                s = Sentence('constit', sentnum=senno, tokens=tokens, postags=postags)
                s.get_all_parts_of_ctree(p, CLABELDICT, False)
                sentences.append(s)
                senno += 1
    sys.stderr.write(('# PTB sentences: %d\n' % len(sentences)))
    # NOTE(review): redundant — the with-statement above already closed the
    # file; close() on a closed file is a harmless no-op.
    ptbsf.close()
    return sentences
def propagate_through_equivalence(links_by_name, set_2_nodes, node_2_set):
    """Propagate links through node-equivalence classes.

    Each (relation, _, (node1, node2)) link is lifted to a link between
    the nodes' equivalence sets, the set-level graph is reduced via
    ``compute_reduced_graph``, and the reduced links are expanded back to
    every node pair across the two sets. Links whose endpoints fall in
    the same set would become self links and are dropped with a warning.

    Returns:
        dict: name -> list of (node1, node2, relation) expanded links.
    """
    all_expanded_links = {}
    # .items() replaces the Python 2-only .iteritems(); the rest of this
    # file is Python 3 (f-strings), so iteritems() would raise here.
    for (name, links) in links_by_name.items():
        set_links = []
        for link in links:
            relation = link[0]
            (arg1, arg2) = link[2]
            set1 = node_2_set[arg1]
            set2 = node_2_set[arg2]
            if set1 == set2:
                # logger.warning replaces the deprecated logger.warn alias.
                logger.warning(('Link between %s and %s will create a self link by propagation, ignored.' % (arg1, arg2)))
            else:
                set_links.append((set1, set2, relation))
        reduced_set_links = compute_reduced_graph(set_links)
        expanded_links = set()
        for (arg1, arg2, relation) in reduced_set_links:
            for node1 in set_2_nodes[arg1]:
                for node2 in set_2_nodes[arg2]:
                    expanded_links.add((node1, node2, relation))
        all_expanded_links[name] = list(expanded_links)
    return all_expanded_links
def convert_conv2convws_model(module, process_group=None, channel_last=False):
    """Recursively replace every _ConvNd in ``module`` with a Conv2dWS.

    Weights (and bias, when present) are copied into the replacement.
    ``process_group`` and ``channel_last`` are threaded through the
    recursion unchanged (Conv2dWS itself is constructed without them here).

    Returns:
        The converted module tree (a new Conv2dWS for conv nodes, the
        original object otherwise, with children converted in place).
    """
    mod = module
    if isinstance(module, torch.nn.modules.conv._ConvNd):
        if isinstance(module.bias, torch.Tensor):
            bias = True
        else:
            bias = False
        mod = Conv2dWS(module.in_channels, module.out_channels, module.kernel_size, module.stride, module.padding, module.dilation, module.groups, bias=bias)
        # clone + detach so the replacement owns its own parameter storage
        mod.weight.data = module.weight.data.clone().detach()
        if bias:
            mod.bias.data = module.bias.data.clone().detach()
    for (name, child) in module.named_children():
        mod.add_module(name, convert_conv2convws_model(child, process_group=process_group, channel_last=channel_last))
    # drop the local reference to the original module; its children have
    # been re-registered on the replacement above
    del module
    return mod
class EvernoteManagerCreateNote(VirtualFunctionTool):
    """Declarative tool spec: create an Evernote note.

    Only metadata lives here; the VirtualFunctionTool base class consumes
    these class attributes to drive the (virtual) tool call.
    """
    name = 'EvernoteManagerCreateNote'
    summary = 'Create a new note with a title, content, and optional attachments.'
    # Input schema: title and content required; attachments/notebook_id optional.
    parameters: List[ArgParameter] = [{'name': 'title', 'type': 'string', 'description': 'The title of the note.', 'required': True}, {'name': 'content', 'type': 'string', 'description': 'The content of the note.', 'required': True}, {'name': 'attachments', 'type': 'array', 'description': 'Local file paths of attachments (optional).', 'required': False}, {'name': 'notebook_id', 'type': 'string', 'description': 'The ID of the notebook to add the note to (optional).', 'required': False}]
    # Output schema: the created note's identifier.
    returns: List[ArgReturn] = [{'name': 'note_id', 'type': 'string', 'description': 'The unique identifier of the created note.'}]
    # Declared failure mode for invalid inputs.
    exceptions: List[ArgException] = [{'name': 'InvalidRequestException', 'description': 'The title or content is empty or the attachments have invalid file paths.'}]
class EigenDataset(EigenDatasetBase, MatrixDataset):
    """Matrix-form variant of the Eigen dataset; defaults to 20 sparse features."""

    def __init__(self, num_features=20, sparse=True, **kwargs):
        super().__init__(num_features=num_features, sparse=sparse, **kwargs)
def test_not_captured(capfd):
    """raw_output bypasses redirect_stdout; captured_output honors it."""
    msg = 'Something that should not show up in log'
    # raw_output presumably writes below the Python layer, so the
    # redirect_stdout context sees nothing and capfd sees the message.
    raw_stream = StringIO()
    with redirect_stdout(raw_stream):
        m.raw_output(msg)
    (stdout, stderr) = capfd.readouterr()
    assert stdout == msg
    assert stderr == ''
    assert raw_stream.getvalue() == ''
    # captured_output goes through Python's sys.stdout, so the redirect
    # swallows it and capfd sees nothing.
    captured_stream = StringIO()
    with redirect_stdout(captured_stream):
        m.captured_output(msg)
    (stdout, stderr) = capfd.readouterr()
    assert stdout == ''
    assert stderr == ''
    assert captured_stream.getvalue() == msg
def main(rag_example_args: 'RagExampleArguments', processing_args: 'ProcessingArguments', index_hnsw_args: 'IndexHnswArguments'):
    """Build a RAG knowledge dataset from a TSV file and index it with FAISS.

    Step 1: load title/text rows, split them into passages, embed with a
    DPR context encoder, and save the dataset to disk.
    Step 2: build an HNSW inner-product index over the embeddings and save it.
    """
    logger.info('Step 1 - Create the dataset')
    assert os.path.isfile(rag_example_args.csv_path), 'Please provide a valid path to a csv file'
    # Input is tab-separated with two unnamed columns: title, text.
    dataset = load_dataset('csv', data_files=[rag_example_args.csv_path], split='train', delimiter='\t', column_names=['title', 'text'])
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    # The embed map adds a float32 'embeddings' column alongside the text.
    new_features = Features({'text': Value('string'), 'title': Value('string'), 'embeddings': Sequence(Value('float32'))})
    dataset = dataset.map(partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer), batched=True, batch_size=processing_args.batch_size, features=new_features)
    passages_path = os.path.join(rag_example_args.output_dir, 'my_knowledge_dataset')
    dataset.save_to_disk(passages_path)
    logger.info('Step 2 - Index the dataset')
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index('embeddings', custom_index=index)
    index_path = os.path.join(rag_example_args.output_dir, 'my_knowledge_dataset_hnsw_index.faiss')
    dataset.get_index('embeddings').save(index_path)
def enum_product_projective_finite_field(X):
    """Enumerate the rational points of a subscheme of a product of
    projective spaces over a finite base field.

    Accepts either a scheme (converted to its point set over its base
    ring) or a point set / morphism whose codomain lives in such a
    product. Every rational point of the ambient product is tried; points
    not on ``X`` raise TypeError in the constructor and are skipped.

    Returns:
        list: the sorted points of ``X``.
    """
    if is_Scheme(X):
        if (not is_ProductProjectiveSpaces(X.ambient_space())):
            # NOTE(review): the message mentions "the rational field" although
            # this routine targets finite fields — possibly copied wording from
            # a QQ variant; confirm before changing user-visible text.
            raise TypeError('ambient space must be product of projective space over the rational field')
        X = X(X.base_ring())
    elif (not is_ProductProjectiveSpaces(X.codomain().ambient_space())):
        raise TypeError('codomain must be product of projective space over the rational field')
    R = X.codomain().ambient_space()
    pts = []
    for P in R.rational_points():
        try:
            pts.append(X(P))
        except TypeError:
            # P does not satisfy X's defining equations; skip it.
            pass
    pts.sort()
    return pts
class CartesianProduct_iters(EnumeratedSetFromIterator):
    """Cartesian product of a sequence of iterables, as an enumerated set.

    The product is backed by ``xmrange_iter`` for iteration/cardinality and
    is tagged Finite/Infinite when that can be decided from the factors.
    """

    def __init__(self, *iters):
        """Store the factor iterables and build the enumerated-set wrapper."""
        self.iters = iters
        self._mrange = xmrange_iter(iters)
        category = EnumeratedSets()
        try:
            # Refine the category when finiteness is decidable.
            category = (category.Finite() if self.is_finite() else category.Infinite())
        except ValueError:
            # Finiteness undecidable for these factors; keep the bare category.
            pass
        def iterfunc():
            return self.__iterate__()
        name = ('Cartesian product of ' + ', '.join(map(str, self.iters)))
        EnumeratedSetFromIterator.__init__(self, iterfunc, name=name, category=category, cache=False)

    def __contains__(self, x):
        """True iff ``x`` has one entry per factor and each lies in its factor."""
        try:
            return ((len(x) == len(self.iters)) and all(((x[i] in self.iters[i]) for i in range(len(self.iters)))))
        except (TypeError, IndexError):
            # x has no len() / is not indexable: it cannot be a product element.
            return False

    def __reduce__(self):
        # Pickle by reconstructing from the factor iterables.
        return (self.__class__, self.iters)

    def __repr__(self):
        return ('Cartesian product of ' + ', '.join(map(str, self.iters)))

    def cardinality(self):
        """Number of elements, delegated to the underlying xmrange_iter."""
        return self._mrange.cardinality()

    def __len__(self):
        return len(self._mrange)

    def list(self):
        """Materialize all elements (only sensible for finite products)."""
        return [e for e in self]

    def __iterate__(self):
        return iter(self._mrange)

    def is_finite(self):
        """Decide finiteness, raising ValueError when it cannot be determined.

        Finite if every factor is finite, or if any factor is empty
        (making the whole product empty); otherwise infinite.
        """
        finites = [_is_finite(L, fallback=None) for L in self.iters]
        if any(((f is None) for f in finites)):
            raise ValueError('unable to determine whether this product is finite')
        if all(((f is True) for f in finites)):
            return True
        lens = [_len(L) for L in self.iters]
        if any(((l == 0) for l in lens)):
            return True
        return False

    def unrank(self, x):
        """Return the element of rank ``x`` via mixed-radix decomposition.

        The last factor varies fastest, matching iteration order.
        """
        try:
            lens = [_len(it) for it in self.iters]
        except (TypeError, AttributeError):
            # NOTE(review): this calls the very same method and will recompute
            # lens and fail again — infinite recursion. The intended fallback
            # was presumably the parent class's unrank; confirm.
            return CartesianProduct_iters.unrank(self, x)
        positions = []
        for n in lens:
            if (n is infinity):
                # NOTE(review): same self-recursion concern as above.
                return CartesianProduct_iters.unrank(self, x)
            if (n == 0):
                raise IndexError('Cartesian Product is empty')
            positions.append((x % n))
            x = (x // n)
        if (x != 0):
            raise IndexError('x larger than the size of the Cartesian Product')
        positions.reverse()
        return [unrank(L, i) for (L, i) in zip(self.iters, positions)]

    def random_element(self):
        """Pick one element uniformly per factor (factors must support choice)."""
        return [rnd.choice(w) for w in self.iters]
def linear_warmup_decay(learning_rate, warmup_steps, num_train_steps):
    """Paddle LR schedule: linear warmup, then linear (power=1) decay to 0.

    The effective rate is min(warmup_lr, poly_decay_lr): it ramps linearly
    up to ``learning_rate`` over ``warmup_steps`` and then follows the
    polynomial decay down to 0 at ``num_train_steps``.

    Returns:
        A persistable global scalar variable holding the scheduled rate.
    """
    with F.default_main_program()._lr_schedule_guard():
        lr = L.tensor.create_global_var(shape=[1], value=0.0, dtype='float32', persistable=True, name='scheduled_learning_rate')
        global_step = L.learning_rate_scheduler._decay_step_counter()
        warmup_lr = (learning_rate * (global_step / warmup_steps))
        poly_decay_lr = L.learning_rate_scheduler.polynomial_decay(learning_rate=learning_rate, decay_steps=num_train_steps, end_learning_rate=0.0, power=1.0, cycle=False)
        # min() keeps the warmup ramp until it crosses the decay line.
        decayed_lr = L.elementwise_min(warmup_lr, poly_decay_lr)
        L.assign(decayed_lr, lr)
    return lr
def write_to_json_file(file_path, dict):
    """Serialize ``dict`` to ``file_path`` as JSON, creating parent dirs.

    numpy float32/float64 values are converted to native floats in place
    (this mutates the caller's mapping, as before) since json cannot
    serialize numpy scalars. The parameter name ``dict`` shadows the
    builtin but is kept for interface compatibility.
    """
    directory = os.path.dirname(file_path)
    # Bare filenames have an empty dirname, and makedirs('') raises
    # FileNotFoundError — only create directories when there are any.
    if directory:
        os.makedirs(directory, exist_ok=True)
    for k in dict.keys():
        if isinstance(dict[k], (np.float32, np.float64)):
            dict[k] = dict[k].item()
    # with-statement guarantees the file is closed even if dumping fails.
    with open(file_path, 'w') as fout:
        fout.write(json.dumps(dict))
@register_model
def gluon_resnet50_v1e(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """Construct a Gluon ResNet-50-V1e (deep stem, avg-pool downsampling).

    NOTE(review): the original source had a bare ``_model`` expression on
    the line above the def — a NameError at import time. It looks like a
    truncated ``@register_model`` decorator (the timm registry convention);
    restored here, please confirm against the original file.
    """
    default_cfg = default_cfgs['gluon_resnet50_v1e']
    model = ResNet(Bottleneck, [3, 4, 6, 3], num_classes=num_classes, in_chans=in_chans, stem_width=64, stem_type='deep', avg_down=True, **kwargs)
    model.default_cfg = default_cfg
    if pretrained:
        # Download/load weights described by the default config.
        load_pretrained(model, default_cfg, num_classes, in_chans)
    return model
def setup_distributed(local_rank: int, no_cuda: bool) -> typing.Tuple[(torch.device, int, bool)]:
    """Pick the torch device and initialize distributed training if requested.

    ``local_rank == -1`` means non-distributed. Returns (device, n_gpu,
    is_master), where is_master is True for ranks -1 and 0.
    """
    distributed = (local_rank != -1) and (not no_cuda)
    if distributed:
        # One process per GPU; NCCL backend for GPU collectives.
        torch.cuda.set_device(local_rank)
        device: torch.device = torch.device('cuda', local_rank)
        n_gpu = 1
        dist.init_process_group(backend='nccl')
    elif no_cuda or (not torch.cuda.is_available()):
        device = torch.device('cpu')
        n_gpu = 1
    else:
        # Single-process, all visible GPUs (DataParallel-style).
        device = torch.device('cuda')
        n_gpu = torch.cuda.device_count()
    return (device, n_gpu, local_rank in (-1, 0))
def read_image(image_path):
    """Open an image file and return it as a grayscale numpy array.

    Returns a dict with:
        data: 2-D numpy array of the image converted to mode 'L',
        raw_image: the converted (mode 'L') PIL image,
        limit_value: max representable value for the ORIGINAL mode's bit depth.

    Raises:
        ImportError: if numpy or pillow is not installed.
        ValueError: for unsupported container formats.
    """
    msg = '{0} library is not installed. Use "pip install {0}" to install it.'
    # Catch ImportError specifically; the original bare excepts would also
    # swallow KeyboardInterrupt/SystemExit.
    try:
        import numpy as np
    except ImportError:
        raise ImportError(msg.format('numpy'))
    try:
        from PIL import Image
    except ImportError:
        raise ImportError(msg.format('pillow'))
    Image.MAX_IMAGE_PIXELS = None  # disable PIL's decompression-bomb limit
    raw_image = Image.open(image_path)
    # Capture format and mode BEFORE convert(): the converted copy has
    # format=None (the old error message printed "None") and mode 'L'
    # (the old bpp lookup therefore always yielded 8 despite the table).
    image_format = raw_image.format
    original_mode = raw_image.mode
    raw_image = raw_image.convert('L')
    data = np.array(raw_image)
    if (image_format not in ('TIFF', 'PNG', 'BMP', 'GIF', 'JPEG')):
        raise ValueError('"{}" format is not supported at the moment.'.format(image_format))
    mode_to_bpp = {'1': 1, 'L': 8, 'P': 8, 'I;16': 16, 'RGB': 24, 'RGBA': 32, 'CMYK': 32, 'YCbCr': 24, 'I': 32, 'F': 32}
    bpp = mode_to_bpp[original_mode]
    limit_value = float(((2 ** bpp) - 1))
    return {'data': data, 'raw_image': raw_image, 'limit_value': limit_value}
def clean_data_home(data_home: Optional[Union[(str, Path)]]=None):
    """Delete every regular file directly inside the data-home directory.

    Subdirectories are left untouched. ``data_home`` defaults to whatever
    ``get_data_home`` resolves.
    """
    home = get_data_home(data_home)
    for entry in listdir(home):
        full_path = join(home, entry)
        if isfile(full_path):
            remove(full_path)
class AutoConfig(object):
    """Factory that maps a model type / checkpoint name to its config class.

    Not instantiable: use ``AutoConfig.for_model`` or
    ``AutoConfig.from_pretrained``. Both were written to take ``cls`` but
    lacked the ``@classmethod`` decorator, so calling them on the class
    consumed the model name as ``cls`` and failed — fixed here.
    """

    def __init__(self):
        raise EnvironmentError('AutoConfig is designed to be instantiated using the `AutoConfig.from_pretrained(pretrained_model_name_or_path)` method.')

    @classmethod
    def for_model(cls, model_type, *args, **kwargs):
        """Instantiate the config class matching ``model_type``.

        Matching is by substring, so more specific names (e.g. 'roberta')
        are checked before their substrings (e.g. 'bert').

        Raises:
            ValueError: if no known model identifier is found.
        """
        if ('distilbert' in model_type):
            return DistilBertConfig(*args, **kwargs)
        elif ('roberta' in model_type):
            return RobertaConfig(*args, **kwargs)
        elif ('bert' in model_type):
            return BertConfig(*args, **kwargs)
        elif ('openai-gpt' in model_type):
            return OpenAIGPTConfig(*args, **kwargs)
        elif ('gpt2' in model_type):
            return GPT2Config(*args, **kwargs)
        elif ('transfo-xl' in model_type):
            return TransfoXLConfig(*args, **kwargs)
        elif ('xlnet' in model_type):
            return XLNetConfig(*args, **kwargs)
        elif ('xlm' in model_type):
            return XLMConfig(*args, **kwargs)
        elif ('ctrl' in model_type):
            return CTRLConfig(*args, **kwargs)
        elif ('albert' in model_type):
            return AlbertConfig(*args, **kwargs)
        elif ('camembert' in model_type):
            return CamembertConfig(*args, **kwargs)
        raise ValueError("Unrecognized model identifier in {}. Should contains one of 'distilbert', 'bert', 'openai-gpt', 'gpt2', 'transfo-xl', 'xlnet', 'xlm', 'roberta', 'ctrl', 'camembert', 'albert'".format(model_type))

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load the matching config class's ``from_pretrained`` for the name.

        Ordering matters: specific identifiers ('xlm-roberta', 'distilbert')
        are checked before their substrings ('roberta', 'bert').

        Raises:
            ValueError: if no known model identifier is found.
        """
        if ('t5' in pretrained_model_name_or_path):
            return T5Config.from_pretrained(pretrained_model_name_or_path, **kwargs)
        elif ('distilbert' in pretrained_model_name_or_path):
            return DistilBertConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
        elif ('albert' in pretrained_model_name_or_path):
            return AlbertConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
        elif ('camembert' in pretrained_model_name_or_path):
            return CamembertConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
        elif ('xlm-roberta' in pretrained_model_name_or_path):
            return XLMRobertaConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
        elif ('roberta' in pretrained_model_name_or_path):
            return RobertaConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
        elif ('bert' in pretrained_model_name_or_path):
            return BertConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
        elif ('openai-gpt' in pretrained_model_name_or_path):
            return OpenAIGPTConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
        elif ('gpt2' in pretrained_model_name_or_path):
            return GPT2Config.from_pretrained(pretrained_model_name_or_path, **kwargs)
        elif ('transfo-xl' in pretrained_model_name_or_path):
            return TransfoXLConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
        elif ('xlnet' in pretrained_model_name_or_path):
            return XLNetConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
        elif ('xlm' in pretrained_model_name_or_path):
            return XLMConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
        elif ('ctrl' in pretrained_model_name_or_path):
            return CTRLConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
        raise ValueError("Unrecognized model identifier in {}. Should contains one of 'bert', 'openai-gpt', 'gpt2', 'transfo-xl', 'xlnet', 'xlm-roberta', 'xlm', 'roberta', 'distilbert', 'camembert', 'ctrl', 'albert'".format(pretrained_model_name_or_path))
def _read(fileid):
    """Populate _MAPS[fileid] from the fine-to-coarse tag map file.

    Each non-empty line of ``MAP_DIR/<fileid>.map`` is 'fine<TAB>coarse';
    every coarse tag must belong to COARSE_TAGS and each fine tag may
    appear only once (both asserted).
    """
    path = (((MAP_DIR + '/') + fileid) + '.map')
    with open(path) as map_file:
        for raw_line in map_file:
            line = raw_line.strip()
            if not line:
                continue
            (fine, coarse) = line.split('\t')
            assert coarse in COARSE_TAGS, 'Unexpected coarse tag: {}'.format(coarse)
            assert fine not in _MAPS[fileid], 'Multiple entries for original tag: {}'.format(fine)
            _MAPS[fileid][fine] = coarse
def execute(bbox: BoundingBox, n5_dir: str=None, group_path: str=None, voxel_size: tuple=None, type: str=None, driver: str='n5'):
    """Read the voxels inside ``bbox`` from an N5 store into a Chunk.

    Only the 'n5' driver is supported (asserted). A plain tuple
    ``voxel_size`` is normalized to a Cartesian. The store is opened
    anonymously (no credentials).

    Returns:
        Chunk: the image data with voxel offset/size/type metadata attached.
    """
    assert (driver == 'n5')
    if isinstance(voxel_size, tuple):
        voxel_size = Cartesian.from_collection(voxel_size)
    fsstore = zarr.N5FSStore(n5_dir, anon=True)
    img_zarr = zarr.open(fsstore, path=group_path)
    # Only the bbox region is read out of the store here.
    img_arr = img_zarr[bbox.slices]
    img_chk = Chunk(img_arr, voxel_offset=bbox.start, voxel_size=voxel_size, type=type)
    return img_chk
def load_cam_dtu(file, num_depth=0, interval_scale=1.0):
    """Parse a DTU-style camera file into a (2, 4, 4) array.

    cam[0] holds the 4x4 extrinsic matrix (tokens 1..16) and cam[1][:3,:3]
    the 3x3 intrinsic matrix (tokens 18..26). Row 3 of cam[1] carries the
    depth sampling info (min, interval, count, max); how much of it is
    filled depends on how many tokens the file provides (29/30/31, else
    zeros). The interval is scaled by ``interval_scale``.
    """
    cam = np.zeros((2, 4, 4))
    words = file.read().split()
    # Extrinsic 4x4: tokens 1..16 (token 0 is a header word).
    for r in range(4):
        for c in range(4):
            cam[0][r][c] = words[4 * r + c + 1]
    # Intrinsic 3x3: tokens 18..26 (token 17 is a separator word).
    for r in range(3):
        for c in range(3):
            cam[1][r][c] = words[3 * r + c + 18]
    n_words = len(words)
    if n_words == 29:
        # depth_min and interval given; count comes from the argument.
        cam[1][3][0] = words[27]
        cam[1][3][1] = float(words[28]) * interval_scale
        cam[1][3][2] = num_depth
        cam[1][3][3] = cam[1][3][0] + cam[1][3][1] * (num_depth - 1)
    elif n_words == 30:
        # depth count also present in the file; max still derived from num_depth.
        cam[1][3][0] = words[27]
        cam[1][3][1] = float(words[28]) * interval_scale
        cam[1][3][2] = words[29]
        cam[1][3][3] = cam[1][3][0] + cam[1][3][1] * (num_depth - 1)
    elif n_words == 31:
        # fully specified: min, interval, count, max.
        cam[1][3][0] = words[27]
        cam[1][3][1] = float(words[28]) * interval_scale
        cam[1][3][2] = words[29]
        cam[1][3][3] = words[30]
    else:
        cam[1][3][0] = 0
        cam[1][3][1] = 0
        cam[1][3][2] = 0
        cam[1][3][3] = 0
    return cam
def _map_multiprocess(func, iterable, chunksize=1):
    """Map ``func`` over ``iterable`` lazily using a process pool.

    Returns the (unordered) iterator from ``imap_unordered``.
    NOTE(review): ``closing`` closes the pool as soon as this function
    returns while the result iterator is consumed later — presumably the
    pool still drains already-submitted work after close(); confirm
    against the ProcessPool implementation in use.
    """
    with closing(ProcessPool()) as pool:
        return pool.imap_unordered(func, iterable, chunksize)
def buildargs(command):
    """Expand placeholder tokens in a solver command line.

    Drops the executable (first token) and substitutes <instance>, <seed>
    and <tempdir> placeholders inside each remaining argument.
    """
    substitutions = [('<instance>', 'FILECNF'), ('<seed>', 'RANDOMSEED'), ('<tempdir>', '/tmp')]
    expanded = []
    for token in command.split()[1:]:
        for (placeholder, value) in substitutions:
            token = token.replace(placeholder, value)
        expanded.append(token)
    return expanded
def log(spark):
    """Build a small interaction-log DataFrame fixture.

    Rows follow INTERACTIONS_SCHEMA; presumably (user_id, item_id,
    timestamp, relevance) — confirm against the schema definition. Covers
    repeated users/items and duplicate (user, item) pairs on one date.
    """
    return spark.createDataFrame(data=[[0, 0, datetime(2019, 8, 22), 4.0], [0, 2, datetime(2019, 8, 23), 3.0], [0, 1, datetime(2019, 8, 27), 2.0], [1, 3, datetime(2019, 8, 24), 3.0], [1, 0, datetime(2019, 8, 25), 4.0], [2, 1, datetime(2019, 8, 26), 5.0], [2, 0, datetime(2019, 8, 26), 5.0], [2, 2, datetime(2019, 8, 26), 3.0], [3, 1, datetime(2019, 8, 26), 5.0], [3, 0, datetime(2019, 8, 26), 5.0], [3, 0, datetime(2019, 8, 26), 1.0]], schema=INTERACTIONS_SCHEMA)
def read(filename, mmap=False):
    """Read a WAV file (path or open file object) into (sample_rate, data).

    Walks the RIFF chunks: 'fmt ' establishes format/channels/rate/bit
    depth, 'data' yields the samples; 'fact', 'LIST', 'JUNK'/'Fake' and
    unknown chunks are skipped. ``mmap`` memory-maps the data (forced off
    for file objects). An open file object is rewound, not closed.

    Returns:
        tuple: (fs, data) sample rate and sample array.
    """
    if hasattr(filename, 'read'):
        # Already a file-like object: use it directly, no mmap possible.
        fid = filename
        mmap = False
    else:
        fid = open(filename, 'rb')
    try:
        (file_size, is_big_endian) = _read_riff_chunk(fid)
        fmt_chunk_received = False
        data_chunk_received = False
        # Defaults used if chunks are missing; overwritten by 'fmt '.
        channels = 1
        bit_depth = 8
        format_tag = WAVE_FORMAT_PCM
        while (fid.tell() < file_size):
            chunk_id = fid.read(4)
            if (not chunk_id):
                # EOF: tolerable (with a warning) once data has been read.
                if data_chunk_received:
                    warnings.warn('Reached EOF prematurely; finished at {:d} bytes, expected {:d} bytes from header.'.format(fid.tell(), file_size), WavFileWarning, stacklevel=2)
                    break
                else:
                    raise ValueError('Unexpected end of file.')
            elif (len(chunk_id) < 4):
                raise ValueError('Incomplete wav chunk.')
            if (chunk_id == b'fmt '):
                fmt_chunk_received = True
                fmt_chunk = _read_fmt_chunk(fid, is_big_endian)
                (format_tag, channels, fs) = fmt_chunk[1:4]
                bit_depth = fmt_chunk[6]
                if (bit_depth not in (8, 16, 32, 64, 96, 128)):
                    raise ValueError('Unsupported bit depth: the wav file has {}-bit data.'.format(bit_depth))
            elif (chunk_id == b'fact'):
                _skip_unknown_chunk(fid, is_big_endian)
            elif (chunk_id == b'data'):
                data_chunk_received = True
                # A 'fmt ' chunk must precede the data to interpret it.
                if (not fmt_chunk_received):
                    raise ValueError('No fmt chunk before data')
                data = _read_data_chunk(fid, format_tag, channels, bit_depth, is_big_endian, mmap)
            elif (chunk_id == b'LIST'):
                # Could contain metadata (title/artist...); not parsed here.
                _skip_unknown_chunk(fid, is_big_endian)
            elif (chunk_id in (b'JUNK', b'Fake')):
                _skip_unknown_chunk(fid, is_big_endian)
            else:
                warnings.warn('Chunk (non-data) not understood, skipping it.', WavFileWarning, stacklevel=2)
                _skip_unknown_chunk(fid, is_big_endian)
    finally:
        # Close only files we opened; rewind caller-provided file objects.
        if (not hasattr(filename, 'read')):
            fid.close()
        else:
            fid.seek(0)
    # NOTE(review): fs/data are only bound by the fmt/data chunks; a file
    # missing either raises NameError here rather than a friendly error.
    return (fs, data)
def test_get_visual_block_single_estimator():
    """A lone estimator yields a 'single' visual block carrying the
    estimator itself, its class name, and its repr as details."""
    est = LogisticRegression(C=10.0)
    est_html_info = _get_visual_block(est)
    assert (est_html_info.kind == 'single')
    assert (est_html_info.estimators == est)
    assert (est_html_info.names == est.__class__.__name__)
    assert (est_html_info.name_details == str(est))
def BLMatrixMult(LensMatrX, LensMatrY, DriftMatr, DriftMatr0):
    """Compose beamline transfer matrices for both transverse planes.

    For each plane the chain is Drift * (Lens * Drift0): the initial
    drift, then the plane's lens, then the main drift.

    Returns:
        tuple: (x-plane matrix, y-plane matrix).
    """
    x_chain = matr_prod(DriftMatr, matr_prod(LensMatrX, DriftMatr0))
    y_chain = matr_prod(DriftMatr, matr_prod(LensMatrY, DriftMatr0))
    return (x_chain, y_chain)
class childnodeTypeSub(supermod.childnodeType):
    """Subclass hook for the generated childnodeType binding; customize
    behavior here instead of editing the generated superclass."""

    def __init__(self, relation=None, refid=None, edgelabel=None):
        supermod.childnodeType.__init__(self, relation, refid, edgelabel)
def ed_decode_line(bin_line):
    """Decode one binary 'key=value' line into a typed (key, value) pair.

    The key is lower-cased. Integer-looking values become int — except for
    the 'data' key, which stays a string — and hex byte strings like
    'ab cd' become lists of ints. Lines without '=' or with an empty key
    yield (None, None).
    """
    sep_idx = bin_line.find(b'=')
    if (sep_idx <= 0):
        return (None, None)
    key = bin_line[:sep_idx].decode('utf8').lower()
    val = bin_line[(sep_idx + 1):].decode('utf8')
    # Fix: the original used `key is not 'data'` — an identity comparison
    # against a string literal, which is interning-dependent and unreliable
    # (SyntaxWarning on modern Python). Equality is what was intended.
    if (re.match('^-?[0-9]+$', val) and (key != 'data')):
        val = int(val)
    elif re.match('^[0-9a-fA-F]{2}( [0-9a-fA-F]{2})*$', val):
        val = [int(x, 16) for x in val.split(' ')]
    return (key, val)
def construct_slurm_args(experiment_name: str, slurm_args: dict):
    """Build the sbatch argument string for an experiment.

    Always logs to logs/<name>_%j.log (the directory is created if
    missing). '_num_gpu' maps to --gres=gpu:N (only when N > 0), 'node'
    maps to -w <node>, and any other key is passed through convert_flag.
    """
    Path('logs').mkdir(exist_ok=True)
    result = f'--output=logs/{experiment_name}_%j.log'
    for (key, value) in slurm_args.items():
        if key == '_num_gpu':
            if value > 0:
                result = f'{result} --gres=gpu:{value}'
        elif key == 'node':
            result = f'{result} -w {value}'
        else:
            # strip() guards against convert_flag returning an empty flag.
            result = f'{result} {convert_flag(key, value)}'.strip()
    return result
def _execute_nD(func_str, pocketfft_func, x, s, axes, norm, overwrite_x, workers, plan):
    """Dispatch an n-D FFT to pocketfft or to x's own array namespace.

    NumPy inputs go straight to pocketfft with every option. For other
    namespaces, the pocketfft-only arguments are validated first, then the
    namespace's own ``fft`` submodule is preferred; failing that, the
    input is round-tripped through NumPy and converted back.
    """
    xp = array_namespace(x)
    if is_numpy(xp):
        return pocketfft_func(x, s=s, axes=axes, norm=norm, overwrite_x=overwrite_x, workers=workers, plan=plan)
    # workers/plan/overwrite_x are pocketfft-specific; this call validates
    # them and normalizes `norm` for the generic paths below.
    norm = _validate_fft_args(workers, plan, norm)
    if hasattr(xp, 'fft'):
        xp_func = getattr(xp.fft, func_str)
        return xp_func(x, s=s, axes=axes, norm=norm)
    # Fallback: compute with pocketfft on a NumPy copy, convert back.
    x = np.asarray(x)
    y = pocketfft_func(x, s=s, axes=axes, norm=norm)
    return xp.asarray(y)
class Minecraft2DmazeProblem(Problem):
    def __init__(self):
        """Initialize the 2-D Minecraft maze problem with its defaults."""
        super().__init__()
        self._width = 14   # level width in tiles
        self._height = 14  # level height in tiles
        self._prob = {'AIR': 0.5, 'DIRT': 0.5}  # initial tile sampling probabilities
        self._border_tile = 'DIRT'
        self._target_path = 20  # path-length gain at which the episode may end
        self._random_probs = True  # re-randomize tile probabilities on reset
        self._reward_weights = {'regions': 5, 'path-length': 1}
        self.path_coords = []   # coordinates of the current longest path
        self.path_length = None  # length of the current longest path
'\n Get a list of all the different tile names\n\n Returns:\n string[]: that contains all the tile names\n '
    def get_tile_types(self):
        """Return the names of all tile types used by this problem."""
        return ['AIR', 'DIRT']
'\n Adjust the parameters for the current problem\n\n Parameters:\n width (int): change the width of the problem level\n height (int): change the height of the problem level\n probs (dict(string, float)): change the probability of each tile\n intiialization, the names are "empty", "solid"\n target_path (int): the current path length that the episode turn when it reaches\n rewards (dict(string,float)): the weights of each reward change between the new_stats and old_stats\n '
    def adjust_param(self, **kwargs):
        """Override problem parameters: target_path, random_probs, rewards."""
        super().adjust_param(**kwargs)
        self._target_path = kwargs.get('target_path', self._target_path)
        self._random_probs = kwargs.get('random_probs', self._random_probs)
        rewards = kwargs.get('rewards')
        if (rewards is not None):
            for t in rewards:
                # only already-known reward terms may be re-weighted
                if (t in self._reward_weights):
                    self._reward_weights[t] = rewards[t]
'\n Resets the problem to the initial state and save the start_stats from the starting map.\n Also, it can be used to change values between different environment resets\n\n Parameters:\n start_stats (dict(string,any)): the first stats of the map\n '
def reset(self, start_stats):
super().reset(start_stats)
if self._random_probs:
self._prob['AIR'] = self._random.random()
self._prob['DIRT'] = (1 - self._prob['AIR'])
'\n Get the current stats of the map\n\n Returns:\n dict(string,any): stats of the current map to be used in the reward, episode_over, debug_info calculations.\n The used status are "reigons": number of connected empty tiles, "path-length": the longest path across the map\n '
def get_stats(self, map):
map_locations = get_tile_locations(map, self.get_tile_types())
(self.path_length, self.path_coords) = calc_longest_path(map, map_locations, ['AIR'], get_path=self.render_path)
return {'regions': calc_num_regions(map, map_locations, ['AIR']), 'path-length': self.path_length}
'\n Get the current game reward between two stats\n\n Parameters:\n new_stats (dict(string,any)): the new stats after taking an action\n old_stats (dict(string,any)): the old stats before taking an action\n\n Returns:\n float: the current reward due to the change between the old map stats and the new map stats\n '
def get_reward(self, new_stats, old_stats):
rewards = {'regions': get_range_reward(new_stats['regions'], old_stats['regions'], 1, 1), 'path-length': get_range_reward(new_stats['path-length'], old_stats['path-length'], np.inf, np.inf)}
return ((rewards['regions'] * self._reward_weights['regions']) + (rewards['path-length'] * self._reward_weights['path-length']))
'\n Uses the stats to check if the problem ended (episode_over) which means reached\n a satisfying quality based on the stats\n\n Parameters:\n new_stats (dict(string,any)): the new stats after taking an action\n old_stats (dict(string,any)): the old stats before taking an action\n\n Returns:\n boolean: True if the level reached satisfying quality based on the stats and False otherwise\n '
def get_episode_over(self, new_stats, old_stats):
return ((new_stats['regions'] == 1) and ((new_stats['path-length'] - self._start_stats['path-length']) >= self._target_path))
'\n Get any debug information need to be printed\n\n Parameters:\n new_stats (dict(string,any)): the new stats after taking an action\n old_stats (dict(string,any)): the old stats before taking an action\n\n Returns:\n dict(any,any): is a debug information that can be used to debug what is\n happening in the problem\n '
def get_debug_info(self, new_stats, old_stats):
return {'regions': new_stats['regions'], 'path-length': new_stats['path-length'], 'path-imp': (new_stats['path-length'] - self._start_stats['path-length'])}
'\n Get an image on how the map will look like for a specific map\n\n Parameters:\n map (string[][]): the current game map\n\n Returns:\n Image: a pillow image on how the map will look like using the binary graphics\n '
def render(self, map):
if (self._graphics == None):
self._graphics = {'AIR': Image.open((PROB_DIR + '/common/empty.png')).convert('RGBA'), 'DIRT': Image.open((PROB_DIR + '/common/solid.png')).convert('RGBA'), 'path': Image.open((PROB_DIR + '/common/path_g.png')).convert('RGBA')}
spawn_2D_maze(map, self._border_tile, self._border_size)
spawn_2D_path(path=self.path_coords)
return super().render(map, render_path=self.path_coords) |
def test_detector_tokenizer():
    """Smoke-test the Detector tokenizer on edge-case strings (empty,
    punctuation-only, a lone digit) in both 'default' and 'search' modes."""
    sents = [',', '', '', '', '', ',', ',,,,', '3', ':?', '', '']
    d = Detector()
    d.check_detector_initialized()
    detector_tokenizer = d.tokenizer
    for text in sents:
        print(text)
        # Fix: corrected the printed label typo 'deault' -> 'default'.
        print('default', detector_tokenizer.tokenize(text, 'default'))
        print('search', detector_tokenizer.tokenize(text, 'search'))
class ProtoCLS(nn.Module):
    """Prototype classifier head: L2-normalized features pass through a
    bias-free linear layer (rows act as prototypes) and are scaled by a
    temperature."""

    def __init__(self, in_dim, out_dim, temp=0.05):
        super(ProtoCLS, self).__init__()
        self.fc = nn.Linear(in_dim, out_dim, bias=False)
        self.tmp = temp
        # Start from unit-norm prototypes.
        self.weight_norm()

    def forward(self, x):
        normalized = F.normalize(x)
        return self.fc(normalized) / self.tmp

    def weight_norm(self):
        """Re-normalize each prototype (weight row) to unit L2 norm, in place."""
        weight = self.fc.weight.data
        row_norms = weight.norm(p=2, dim=1, keepdim=True)
        self.fc.weight.data = weight.div(row_norms.expand_as(weight))
class TwoSourceModel():
    """DyNet encoder-decoder that generates a target sequence from two
    source sequences, with per-source attention and optional
    pointer-generator, coverage loss and diagonal-attention loss.

    Fix: in ``generate_beam`` the fallback branch called ``sorted(...)``
    and discarded its return value, so an arbitrary (unsorted) hypothesis
    was selected; it now sorts in place.
    """

    def __init__(self, src1_vocab, src2_vocab, tgt_vocab, single, pointer_gen, coverage, diag_loss, load_model, model_file, beam_size, best_val_cer):
        """Build embeddings, BiLSTM encoders for both sources, attention,
        decoder LSTM, output projection and pointer/coverage parameters;
        optionally populate weights from ``load_model``."""
        self.model = dy.ParameterCollection()
        self.src1_vocab = src1_vocab
        self.src2_vocab = src2_vocab
        self.tgt_vocab = tgt_vocab
        self.src1_lookup = self.model.add_lookup_parameters((src1_vocab.length(), EMBEDDING_DIM))
        self.src2_lookup = self.model.add_lookup_parameters((src2_vocab.length(), EMBEDDING_DIM))
        self.tgt_lookup = self.model.add_lookup_parameters((tgt_vocab.length(), EMBEDDING_DIM))
        self.enc1_fwd_lstm = dy.CoupledLSTMBuilder(LSTM_NUM_OF_LAYERS, EMBEDDING_DIM, HIDDEN_DIM, self.model)
        self.enc1_bwd_lstm = dy.CoupledLSTMBuilder(LSTM_NUM_OF_LAYERS, EMBEDDING_DIM, HIDDEN_DIM, self.model)
        self.pret1_w = self.model.add_parameters((src1_vocab.length(), HIDDEN_DIM))
        self.pret1_b = self.model.add_parameters(src1_vocab.length())
        self.enc2_fwd_lstm = dy.CoupledLSTMBuilder(LSTM_NUM_OF_LAYERS, EMBEDDING_DIM, HIDDEN_DIM, self.model)
        self.enc2_bwd_lstm = dy.CoupledLSTMBuilder(LSTM_NUM_OF_LAYERS, EMBEDDING_DIM, HIDDEN_DIM, self.model)
        self.pret2_w = self.model.add_parameters((src2_vocab.length(), HIDDEN_DIM))
        self.pret2_b = self.model.add_parameters(src2_vocab.length())
        # Attention: w1 projects encoder states, w2 projects decoder state.
        self.att1_w1 = self.model.add_parameters((ATTENTION_SIZE, (HIDDEN_DIM * 2)))
        self.att1_w2 = self.model.add_parameters((ATTENTION_SIZE, ((HIDDEN_DIM * LSTM_NUM_OF_LAYERS) * 2)))
        self.att1_v = self.model.add_parameters((1, ATTENTION_SIZE))
        self.att2_w1 = self.model.add_parameters((ATTENTION_SIZE, (HIDDEN_DIM * 2)))
        self.att2_w2 = self.model.add_parameters((ATTENTION_SIZE, ((HIDDEN_DIM * LSTM_NUM_OF_LAYERS) * 2)))
        self.att2_v = self.model.add_parameters((1, ATTENTION_SIZE))
        # Decoder consumes both context vectors plus the previous embedding.
        self.dec_lstm = dy.CoupledLSTMBuilder(LSTM_NUM_OF_LAYERS, ((HIDDEN_DIM * 4) + EMBEDDING_DIM), HIDDEN_DIM, self.model)
        self.W_s = self.model.add_parameters((HIDDEN_DIM, (HIDDEN_DIM * 4)))
        self.b_s = self.model.add_parameters(HIDDEN_DIM)
        self.dec_w = self.model.add_parameters((tgt_vocab.length(), HIDDEN_DIM))
        self.dec_b = self.model.add_parameters(tgt_vocab.length())
        # Pointer-generator gate parameters (context, state, input).
        self.ptr_w_c = self.model.add_parameters((1, (2 * HIDDEN_DIM)))
        self.ptr_w_s = self.model.add_parameters((1, (2 * HIDDEN_DIM)))
        self.ptr_w_x = self.model.add_parameters((1, (EMBEDDING_DIM + (4 * HIDDEN_DIM))))
        self.w_cov = self.model.add_parameters((ATTENTION_SIZE, 1))
        self.single_source = single
        self.pointer_gen = pointer_gen
        self.coverage = coverage
        self.diag_loss = diag_loss
        self.model_file = model_file
        if load_model:
            self.model.populate(load_model)
            logging.info('Loaded model: {}'.format(load_model))
        self.beam_size = beam_size
        self.best_val_cer = best_val_cer

    def save(self):
        """Serialize all parameters to ``self.model_file``."""
        self.model.save(self.model_file)

    def run_lstm(self, init_state, input_vecs):
        """Transduce a sequence of vectors through an LSTM initial state."""
        out_vectors = init_state.transduce(input_vecs)
        return out_vectors

    def embed_idx(self, idx_list, embed_lookup):
        """Map a list of vocabulary indices to their embedding expressions."""
        return [embed_lookup[idx] for idx in idx_list]

    def encode(self, embeds, fwd_lstm, bwd_lstm):
        """Run a BiLSTM and return per-position [fwd; bwd] concatenations."""
        embeds_rev = list(reversed(embeds))
        fwd_vectors = self.run_lstm(fwd_lstm.initial_state(), embeds)
        bwd_vectors = self.run_lstm(bwd_lstm.initial_state(), embeds_rev)
        bwd_vectors = list(reversed(bwd_vectors))
        vectors = [dy.concatenate(list(p)) for p in zip(fwd_vectors, bwd_vectors)]
        return vectors

    def encoder_forward(self, src1, src2):
        """Encode both sources and build the initial decoder state.

        Returns (src1_mat, src2_mat, src1_w1dt, src2_w1dt, decoder_state),
        where the *_w1dt are the precomputed attention projections.
        """
        embedded_src1 = self.embed_idx(src1, self.src1_lookup)
        if self.single_source:
            # Second source is replaced by zero vectors in single-source mode.
            embedded_src2 = [dy.vecInput(EMBEDDING_DIM) for idx in src2]
        else:
            embedded_src2 = self.embed_idx(src2, self.src2_lookup)
        encoded_src1 = self.encode(embedded_src1, self.enc1_fwd_lstm, self.enc1_bwd_lstm)
        encoded_src2 = self.encode(embedded_src2, self.enc2_fwd_lstm, self.enc2_bwd_lstm)
        src1_mat = dy.concatenate_cols(encoded_src1)
        src1_w1dt = (self.att1_w1 * src1_mat)
        src2_mat = dy.concatenate_cols(encoded_src2)
        src2_w1dt = (self.att2_w1 * src2_mat)
        if (not self.single_source):
            start = ((self.W_s * dy.concatenate([encoded_src1[(- 1)], encoded_src2[(- 1)]])) + self.b_s)
        else:
            start = ((self.W_s * dy.concatenate([encoded_src1[(- 1)], dy.vecInput((2 * HIDDEN_DIM))])) + self.b_s)
        last_output_embeddings = self.tgt_lookup[self.tgt_vocab.str2int(EOS)]
        c1_t = dy.vecInput((2 * HIDDEN_DIM))
        c2_t = dy.vecInput((2 * HIDDEN_DIM))
        decoder_state = self.dec_lstm.initial_state([start, dy.tanh(start)]).add_input(dy.concatenate([c1_t, c2_t, last_output_embeddings]))
        return (src1_mat, src2_mat, src1_w1dt, src2_w1dt, decoder_state)

    def attend(self, input_mat, state, w1dt, w2, v, coverage):
        """Additive attention; returns (weights a_t, context vector)."""
        w2dt = (w2 * dy.concatenate(list(state.s())))
        if coverage:
            # Bias attention away from already-covered positions.
            w1dt = (w1dt + (self.w_cov * dy.transpose(coverage)))
        a_t = dy.transpose((v * dy.tanh(dy.colwise_add(w1dt, w2dt))))
        a_t = dy.softmax(a_t)
        return (a_t, (input_mat * a_t))

    def get_pointergen_probs(self, c_t, state, x_t, a_t, probs, src1):
        """Mix generation probs with copy probs from source-1 attention.

        Returns (final_probs, p_gen scalar); identity when pointer-gen off.
        """
        if (not self.pointer_gen):
            return (probs, 1.0)
        unk_idx = self.tgt_vocab.str2int(UNK)
        p_gen = dy.logistic((((self.ptr_w_c * c_t) + (self.ptr_w_s * dy.concatenate(list(state.s())))) + (self.ptr_w_x * x_t)))
        gen_probs = (probs * p_gen)
        copy_probs = (a_t * (1 - p_gen))
        copy_probs_update = []
        for i in gen_probs:
            copy_probs_update.append([i])
        for (char, prob) in zip(src1, copy_probs):
            cur_idx = self.tgt_vocab.str2int(self.src1_vocab.int2str(char))
            if (cur_idx == unk_idx):
                continue
            if isinstance(cur_idx, int):
                copy_probs_update[cur_idx].append(prob)
            else:
                # Multi-index mapping: split the copy mass evenly.
                for idx in cur_idx:
                    copy_probs_update[idx].append((prob / len(cur_idx)))
        sum_probs = dy.concatenate([dy.esum(exps) for exps in copy_probs_update])
        return (sum_probs, p_gen.scalar_value())

    def get_coverage(self, a_t, prev_coverage, training=True):
        """Accumulate attention into coverage; in training also return the
        coverage loss term (sum of elementwise min of a_t and coverage)."""
        if (not self.coverage):
            if (not training):
                return None
            return (dy.scalarInput(0), None)
        coverage = (a_t + prev_coverage)
        if training:
            return (dy.sum_elems(dy.min_dim(dy.concatenate([a_t, coverage], d=1), d=1)), coverage)
        return coverage

    def get_diag_loss(self, a_t, t):
        """Penalize attention mass outside a +/- diag_loss band around t."""
        if (self.diag_loss < 0):
            return dy.scalarInput(0)
        off_diag_elems = [dy.scalarInput(0)]
        for (i, prob) in enumerate(a_t):
            if ((i < (t - self.diag_loss)) or (i > (t + self.diag_loss))):
                off_diag_elems.append(prob)
        return dy.esum(off_diag_elems)

    def decode_loss(self, src1, src2, tgt):
        """Teacher-forced decoding loss: NLL + weighted coverage/diag terms."""
        (src1_mat, src2_mat, src1_w1dt, src2_w1dt, decoder_state) = self.encoder_forward(src1, src2)
        (_, prev_coverage) = self.get_coverage(a_t=dy.vecInput(len(src1)), prev_coverage=dy.vecInput(len(src1)))
        loss = []
        cov_loss = []
        diag_loss = []
        embedded_tgt = self.embed_idx(tgt, self.tgt_lookup)
        last_output_embeddings = self.tgt_lookup[self.tgt_vocab.str2int(EOS)]
        for (t, (char, embedded_char)) in enumerate(zip(tgt, embedded_tgt)):
            (a_t, c1_t) = self.attend(src1_mat, decoder_state, src1_w1dt, self.att1_w2, self.att1_v, prev_coverage)
            if (not self.single_source):
                (_, c2_t) = self.attend(src2_mat, decoder_state, src2_w1dt, self.att2_w2, self.att2_v, None)
            else:
                c2_t = dy.vecInput((2 * HIDDEN_DIM))
            x_t = dy.concatenate([c1_t, c2_t, last_output_embeddings])
            decoder_state = decoder_state.add_input(x_t)
            out_vector = ((self.dec_w * decoder_state.output()) + self.dec_b)
            probs = dy.softmax(out_vector)
            (probs, _) = self.get_pointergen_probs(c1_t, decoder_state, x_t, a_t, probs, src1)
            loss.append((- dy.log(dy.pick(probs, char))))
            (cov_loss_cur, prev_coverage) = self.get_coverage(a_t, prev_coverage)
            cov_loss.append(cov_loss_cur)
            diag_loss.append(self.get_diag_loss(a_t, t))
            last_output_embeddings = embedded_char
        loss = dy.esum(loss)
        cov_loss = dy.esum(cov_loss)
        diag_loss = dy.esum(diag_loss)
        return ((loss + (COV_LOSS_WEIGHT * cov_loss)) + (DIAG_LOSS_WEIGHT * diag_loss))

    def get_loss(self, src1, src2, tgt):
        """Public training-loss entry point."""
        return self.decode_loss(src1, src2, tgt)

    def generate_beam(self, src1, src2):
        """Beam-search decode; returns (output string, inner p_gen values)."""
        (src1_mat, src2_mat, src1_w1dt, src2_w1dt, decoder_state) = self.encoder_forward(src1, src2)
        hypothesis_list = [Hypothesis(text_list=[self.tgt_vocab.str2int(EOS)], decoder_state=decoder_state, c1_t=dy.vecInput((2 * HIDDEN_DIM)), c2_t=dy.vecInput((2 * HIDDEN_DIM)), prev_coverage=self.get_coverage(a_t=dy.vecInput(len(src1)), training=False, prev_coverage=dy.vecInput(len(src1))), score=0.0, p_gens=[])]
        completed_list = []
        for t in range(int((len(src1) * 1.1))):
            new_hyp_list = []
            new_hyp_scores = []
            for hyp in hypothesis_list:
                last_output_embeddings = self.tgt_lookup[hyp.text_list[(- 1)]]
                (a_t, c1_t) = self.attend(src1_mat, hyp.decoder_state, src1_w1dt, self.att1_w2, self.att1_v, hyp.prev_coverage)
                if (not self.single_source):
                    (_, c2_t) = self.attend(src2_mat, hyp.decoder_state, src2_w1dt, self.att2_w2, self.att2_v, None)
                else:
                    c2_t = dy.vecInput((2 * HIDDEN_DIM))
                x_t = dy.concatenate([c1_t, c2_t, last_output_embeddings])
                decoder_state = hyp.decoder_state.add_input(x_t)
                probs = dy.softmax(((self.dec_w * decoder_state.output()) + self.dec_b))
                (probs, cur_p_gen) = self.get_pointergen_probs(c1_t, decoder_state, x_t, a_t, probs, src1)
                probs = probs.npvalue()
                for ind in range(len(probs)):
                    text_list = (hyp.text_list + [ind])
                    p_gens = (hyp.p_gens + [cur_p_gen])
                    # NOTE(review): exponent 0.0 makes the length-normalization
                    # divisor 1 (no normalization); presumably a tunable alpha
                    # was intended here -- confirm before changing.
                    score = ((hyp.score + math.log(probs[ind])) / (len(text_list) ** 0.0))
                    coverage = self.get_coverage(a_t, hyp.prev_coverage, training=False)
                    new_hyp_list.append(Hypothesis(text_list=text_list, decoder_state=decoder_state, c1_t=c1_t, c2_t=c2_t, prev_coverage=coverage, score=score, p_gens=p_gens))
                    new_hyp_scores.append(score)
            top_inds = np.argpartition(np.array(new_hyp_scores), (- self.beam_size))[(- self.beam_size):]
            new_hyp_list = np.array(new_hyp_list)[top_inds]
            hypothesis_list = []
            for new_hyp in new_hyp_list:
                if ((new_hyp.text_list[(- 1)] == self.tgt_vocab.str2int(EOS)) and (t > 0)):
                    completed_list.append(new_hyp)
                else:
                    hypothesis_list.append(new_hyp)
            if (len(completed_list) >= self.beam_size):
                break
        if (len(completed_list) == 0):
            # Fix: sorted() returns a new list and its result was discarded,
            # so an arbitrary hypothesis was picked; sort in place instead.
            hypothesis_list.sort(key=(lambda x: x.score), reverse=True)
            completed_list = [hypothesis_list[0]]
        for hyp in completed_list:
            hyp.text_list = [self.tgt_vocab.int2str(i) for i in hyp.text_list]
        top_hyp = sorted(completed_list, key=(lambda x: x.score), reverse=True)[0]
        return (''.join(top_hyp.text_list).replace(EOS, '').strip(), top_hyp.p_gens[1:(- 1)])
def FruchtGraph():
    """Return the Frucht graph.

    Built from a fixed adjacency list on vertices 0..11, with vertices
    0-6 on an outer circle, 7-10 on an inner circle, and 11 at the origin.
    """
    adjacency = {
        0: [1, 6, 7], 1: [2, 7], 2: [3, 8], 3: [4, 9], 4: [5, 9],
        5: [6, 10], 6: [10], 7: [11], 8: [9, 11], 10: [11],
    }
    frucht = Graph(adjacency, format='dict_of_lists', name='Frucht graph')
    # Fixed embedding for a reproducible drawing.
    frucht._circle_embedding(range(7), radius=2, angle=(pi / 2))
    frucht._circle_embedding(range(7, 11), radius=1, angle=(pi / 2))
    frucht._pos[11] = (0, 0)
    return frucht
@pytest.fixture(scope='module')
def sdec_ref_data_path(tardis_ref_path):
    """Absolute path to the SDEC reference-data HDF5 file.

    Fix: the decorator line had been reduced to the bare (syntactically
    invalid) expression ``(scope='module')``; restored the presumed
    ``@pytest.fixture`` decorator. Assumes ``pytest`` is imported at the
    top of this test module -- confirm.
    """
    return os.path.abspath(os.path.join(tardis_ref_path, 'sdec_ref.h5'))
class LxmertPreTrainedModel():
    """Import-time placeholder used when PyTorch is not installed.

    Every entry point delegates to ``requires_pytorch``, which is expected
    to surface a "PyTorch required" error at use time rather than at
    import time.
    """
    def __init__(self, *args, **kwargs):
        requires_pytorch(self)
    def from_pretrained(self, *args, **kwargs):
        # NOTE(review): upstream placeholder classes usually declare
        # from_pretrained as a classmethod -- confirm whether a decorator
        # was lost here.
        requires_pytorch(self)
class OpenNLPSentenceDetector(object):
    """Sentence-boundary detector backed by Apache OpenNLP through pyjnius."""

    def __init__(self):
        # The JVM must be up before any autoclass lookups succeed.
        init_java()
        from jnius import autoclass
        java_file_cls = autoclass('java.io.File')
        model_cls = autoclass('opennlp.tools.sentdetect.SentenceModel')
        detector_cls = autoclass('opennlp.tools.sentdetect.SentenceDetectorME')
        # English sentence model bundled with this package.
        model_path = pkg_resources.resource_filename(__name__, 'opennlp/en-sent.bin')
        self._detector = detector_cls(model_cls(java_file_cls(model_path)))

    def sent_pos_detect(self, text):
        """Return (start, end) character offsets of each detected sentence."""
        spans = self._detector.sentPosDetect(text)
        return [(span.getStart(), span.getEnd()) for span in spans]
def gumbel_softmax_sample(logits, temperature, dim=1):
    """Draw a Gumbel-softmax sample: perturb the logits with Gumbel noise
    and apply a temperature-scaled softmax along ``dim``."""
    noisy_logits = logits + sample_gumbel(logits.shape, tens_type=type(logits.data))
    return F.softmax(noisy_logits / temperature, dim=dim)
class InfoSubprocVecEnv(ShareVecEnv):
    """Vectorized environment running each env in its own subprocess.

    Communication uses one duplex pipe per env, serviced by ``infoworker``;
    commands are (name, payload) tuples sent over the parent-side pipes.
    """
    def __init__(self, env_fns, spaces=None):
        # `waiting` tracks an outstanding step_async; `closed` guards close().
        self.waiting = False
        self.closed = False
        nenvs = len(env_fns)
        # forkserver start method: workers are spawned from a clean server process.
        self._mp_ctx = mp.get_context('forkserver')
        (self.remotes, self.work_remotes) = zip(*[self._mp_ctx.Pipe(duplex=True) for _ in range(nenvs)])
        # env_fns are wrapped so they survive pickling to the worker process.
        self.ps = [self._mp_ctx.Process(target=infoworker, args=(work_remote, remote, CloudpickleWrapper(env_fn))) for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
        for p in self.ps:
            # Daemonize so workers die with the parent process.
            p.daemon = True
            p.start()
        # Close the worker ends in the parent; only the workers hold them now.
        for remote in self.work_remotes:
            remote.close()
        # Query the first env for the space definitions shared by all envs.
        self.remotes[0].send(('get_spaces', None))
        (observation_space, share_observation_space, action_space) = self.remotes[0].recv()
        ShareVecEnv.__init__(self, len(env_fns), observation_space, share_observation_space, action_space)
    def step_async(self, actions):
        # Dispatch one action per env without blocking.
        for (remote, action) in zip(self.remotes, actions):
            remote.send(('step', action))
        self.waiting = True
    def step_wait(self):
        # Block until every env has replied, then stack the results.
        results = [remote.recv() for remote in self.remotes]
        self.waiting = False
        (obs, rews, dones, infos) = zip(*results)
        return (np.stack(obs), np.stack(rews), np.stack(dones), infos)
    def reset(self):
        """Reset all envs; returns stacked (observations, infos)."""
        for remote in self.remotes:
            remote.send(('reset', None))
        results = [remote.recv() for remote in self.remotes]
        (obs, infos) = zip(*results)
        return (np.stack(obs), np.stack(infos))
    def get_short_term_goal(self, data):
        """Ask each env for its short-term goal given per-env `data`."""
        for (remote, da) in zip(self.remotes, data):
            remote.send(('get_short_term_goal', da))
        return np.stack([remote.recv() for remote in self.remotes])
    def reset_task(self):
        for remote in self.remotes:
            remote.send(('reset_task', None))
        return np.stack([remote.recv() for remote in self.remotes])
    def close(self):
        """Drain any pending replies, ask workers to exit, and join them."""
        if self.closed:
            return
        if self.waiting:
            # Consume outstanding step results so the send below isn't blocked.
            for remote in self.remotes:
                remote.recv()
        for remote in self.remotes:
            remote.send(('close', None))
        for p in self.ps:
            p.join()
        self.closed = True
    def render(self, mode='human'):
        # Only 'rgb_array' mode returns frames; other modes render in-worker.
        for remote in self.remotes:
            remote.send(('render', mode))
        if (mode == 'rgb_array'):
            frame = [remote.recv() for remote in self.remotes]
            return np.stack(frame)
def get_metric(pred_list, topk=10):
    """Compute (HR@topk, NDCG@topk, MRR) from a list of 0-based ranks.

    Parameters:
        pred_list: iterable of integer ranks (0 = best) of the true item.
        topk: cutoff for the hit-rate and NDCG terms.

    Returns:
        (HIT/n, NDCG/n, MRR/n) averaged over the list; (0.0, 0.0, 0.0)
        for an empty list.

    Note: MRR uses the full (untruncated) rank, matching the original
    behavior; only HIT and NDCG are cut off at ``topk``.
    """
    if not pred_list:
        # Fix: avoid ZeroDivisionError on an empty evaluation set.
        return (0.0, 0.0, 0.0)
    NDCG = 0.0
    HIT = 0.0
    MRR = 0.0
    for rank in pred_list:
        MRR += (1.0 / (rank + 1.0))
        if (rank < topk):
            NDCG += (1.0 / np.log2((rank + 2.0)))
            HIT += 1.0
    n = len(pred_list)
    return ((HIT / n), (NDCG / n), (MRR / n))
class CausalLMOutput(ModelOutput):
    """Output container for causal language models.

    NOTE(review): comparable containers upstream are declared with
    ``@dataclass``; confirm whether that decorator was dropped here.
    """
    # Optional language-modeling loss.
    loss: Optional[torch.FloatTensor] = None
    # Prediction scores over the vocabulary (pre-softmax logits).
    logits: torch.FloatTensor = None
    # Optional tuple of per-layer hidden states.
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    # Optional tuple of per-layer attention tensors.
    attentions: Optional[Tuple[torch.FloatTensor]] = None
# NOTE(review): this reads like a Typer command whose registration decorator
# (e.g. @app.command()) was stripped -- confirm against the surrounding file.
def sync(src: str, dst: str, debug: bool=typer.Option(False, help='If true, will write debug information to debug directory.'), multipart: bool=typer.Option(cloud_config.get_flag('multipart_enabled'), help='If true, will use multipart uploads.'), confirm: bool=typer.Option(cloud_config.get_flag('autoconfirm'), '--confirm', '-y', '-f', help='Confirm all transfer prompts'), max_instances: int=typer.Option(cloud_config.get_flag('max_instances'), '--max-instances', '-n', help='Number of gateways'), max_connections: int=typer.Option(cloud_config.get_flag('num_connections'), '--max-connections', help='Number of connections per gateway'), solver: str=typer.Option('direct', '--solver', help='Solver to use for transfer'), solver_required_throughput_gbits: float=typer.Option(1, '--tput', '-t', help='Required throughput to be solved for')):
    """CLI entry point: sync ``src`` to ``dst`` by delegating to
    ``run_transfer`` in 'sync' mode (non-recursive-delete semantics are
    whatever run_transfer implements -- not visible here)."""
    return run_transfer(src, dst, False, debug, multipart, confirm, max_instances, max_connections, solver, 'sync')
def gen_plot_from_dict(fn_to_contour, plot_fn, out_base_name, out_dir='results'):
    """Overlay one plot per (name, contour) pair on a shared figure, then
    write the combined figure to ``out_dir/out_base_name.png``."""
    shared = dict(fig=None, plot_fn=plot_fn)
    for name, contour in fn_to_contour.items():
        # Each call reuses (and updates) the shared figure handle.
        shared['fig'], _ax = add_plot(name, contour, **shared)
    gen_plot(out_dir=out_dir, out_base_name=f'{out_base_name}.png')
class ReflexiveModule_tensor(ReflexiveModule_abstract):
    """Reflexive module realized as a tensor product of copies of the base
    module and its dual."""

    def tensor_factors(self):
        """Return the ordered factors of this tensor module: ``p`` copies of
        the base module followed by ``q`` copies of its dual, where
        ``(p, q)`` is the tensor type. Type (0, 1) is not supported."""
        ttype = self.tensor_type()
        if ttype == (0, 1):
            raise NotImplementedError
        base = self.base_module()
        factors = [base] * ttype[0]
        dual = base.dual()
        if ttype[1]:
            factors += [dual] * ttype[1]
        return factors
class ProjectivePlaneCurvePoint_finite_field(ProjectivePlaneCurvePoint_field, SchemeMorphism_point_projective_finite_field):
    """Point of a projective plane curve over a finite field.

    Pure mix-in combination: all behavior comes from the two parents.
    """
    pass
def binary_weight_convolution_backward(grad_inputs, inputs, input_shapes, outputs, output_shapes, base_axis=1, pad=None, stride=None, dilation=None, group=1, quantize_zero_to=1.0):
    """Backward pass for binary-weight convolution (not yet implemented).

    The incoming gradient and the first input are unpacked for signature
    parity with the other backward functions, but the computation itself
    is unavailable, so this always raises ``NotImplementedError``.
    """
    incoming_grad = grad_inputs[0]
    data = inputs[0]
    raise NotImplementedError('binary_weight_convolution_backward is not implemented.')
def flop_count_operators(model: nn.Module, inputs: list, **kwargs) -> typing.DefaultDict[(str, float)]:
    """Count per-operator FLOPs of ``model`` on ``inputs``.

    Thin wrapper that delegates to ``_wrapper_count_operators`` with
    ``mode=FLOPS_MODE``; returns a mapping of operator name to count
    (units per the wrapper's convention -- not visible here).
    """
    return _wrapper_count_operators(model=model, inputs=inputs, mode=FLOPS_MODE, **kwargs)
# Fix: the decorator expression '_driver.jit(device=True)' appeared as a bare
# statement (missing the leading '@'), so the function was never compiled as
# a device function; restored the '@'.
@_driver.jit(device=True)
def _get_ob(state_arr, observation_arr, kEnvId, kThisAgentId):
    """Write this agent's observation from its state, in place.

    The first two state entries are angles (emitted as cos/sin pairs);
    the last two entries are copied through unchanged -- presumably
    angular velocities, confirm against the environment definition.
    """
    state = state_arr[(kEnvId, kThisAgentId)]
    observation_arr[(kEnvId, kThisAgentId, 0)] = math.cos(state[0])
    observation_arr[(kEnvId, kThisAgentId, 1)] = math.sin(state[0])
    observation_arr[(kEnvId, kThisAgentId, 2)] = math.cos(state[1])
    observation_arr[(kEnvId, kThisAgentId, 3)] = math.sin(state[1])
    observation_arr[(kEnvId, kThisAgentId, 4)] = state[2]
    observation_arr[(kEnvId, kThisAgentId, 5)] = state[3]
def _create_schema_embeddings(bert_config, schema_embedding_file, dataset_config):
    """Precompute BERT embeddings for all schema elements and save them.

    Builds a TPUEstimator around the configured BERT checkpoint, loads the
    schemas for the selected dataset split, and delegates extraction and
    serialization to ``SchemaEmbeddingGenerator``.
    """
    if (not tf.io.gfile.exists(FLAGS.schema_embedding_dir)):
        tf.io.gfile.makedirs(FLAGS.schema_embedding_dir)
    is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
    schema_emb_run_config = tf.contrib.tpu.RunConfig(master=FLAGS.master, tpu_config=tf.contrib.tpu.TPUConfig(num_shards=FLAGS.num_tpu_cores, per_host_input_for_training=is_per_host))
    # Schemas live at <data_dir>/<split>/schema.json.
    schema_json_path = os.path.join(FLAGS.dstc8_data_dir, FLAGS.dataset_split, 'schema.json')
    schemas = schema.Schema(schema_json_path)
    bert_init_ckpt = os.path.join(FLAGS.bert_ckpt_dir, 'bert_model.ckpt')
    schema_emb_model_fn = extract_schema_embedding.model_fn_builder(bert_config=bert_config, init_checkpoint=bert_init_ckpt, use_tpu=FLAGS.use_tpu, use_one_hot_embeddings=FLAGS.use_one_hot_embeddings)
    # TPUEstimator also runs fine on CPU/GPU when use_tpu is False.
    schema_emb_estimator = tf.contrib.tpu.TPUEstimator(use_tpu=FLAGS.use_tpu, model_fn=schema_emb_model_fn, config=schema_emb_run_config, predict_batch_size=FLAGS.predict_batch_size)
    vocab_file = os.path.join(FLAGS.bert_ckpt_dir, 'vocab.txt')
    tokenizer = tokenization.FullTokenizer(vocab_file=vocab_file, do_lower_case=FLAGS.do_lower_case)
    emb_generator = extract_schema_embedding.SchemaEmbeddingGenerator(tokenizer, schema_emb_estimator, FLAGS.max_seq_length)
    emb_generator.save_embeddings(schemas, schema_embedding_file, dataset_config)
class DomainNameCachingService(Service):
    """Service layer that installs caching DNS servers.

    With ``autoRoot`` enabled (the default) the service fetches the glue
    records of the emulated root zone from the DomainNameService layer and
    points every caching server at them.
    """

    __auto_root: bool

    def __init__(self, autoRoot: bool = True):
        super().__init__()
        self.__auto_root = autoRoot
        self.addDependency('Base', False, False)
        if autoRoot:
            # Root auto-configuration needs the DNS layer configured first.
            self.addDependency('DomainNameService', False, False)

    def _createServer(self) -> DomainNameCachingServer:
        return DomainNameCachingServer()

    def getName(self) -> str:
        return 'DomainNameCachingService'

    def getConflicts(self) -> List[str]:
        return ['DomainNameService']

    def configure(self, emulator: Emulator):
        """Configure each target server; when auto-root is on, push the
        emulated root zone's glue records to every server."""
        super().configure(emulator)
        targets = self.getTargets()
        for server, node in targets:
            server.configure(emulator)
        if not self.__auto_root:
            return
        dns_layer: DomainNameService = emulator.getRegistry().get('seedemu', 'layer', 'DomainNameService')
        root_servers = dns_layer.getRootZone().getGuleRecords()
        for server, node in targets:
            server.setRootServers(root_servers)

    def print(self, indent: int) -> str:
        pad = ' ' * indent
        inner = ' ' * (indent + 4)
        return (pad + 'DomainNameCachingService:\n'
                + inner + 'Configure root hint: {}\n'.format(self.__auto_root))
class EditableCandidate(_InstallRequirementBackedCandidate):
    """Resolver candidate for an editable requirement (``pip install -e``)."""
    is_editable = True
    def __init__(self, link, template, factory, name=None, version=None):
        # For editables the link is both the candidate link and the source
        # link; the install requirement is derived from link + template.
        super(EditableCandidate, self).__init__(link=link, source_link=link, ireq=make_install_req_from_editable(link, template), factory=factory, name=name, version=version)
    def _prepare_abstract_distribution(self):
        # Delegates build/metadata preparation to the factory's preparer.
        return self._factory.preparer.prepare_editable_requirement(self._ireq)
class Environment(object):
    """Abstract web RL environment; concrete subclasses are chosen by
    domain through the ``make`` factory."""

    @classmethod
    def make(cls, domain, subdomain):
        """Instantiate the environment for ``domain`` ('miniwob'/'formwob').

        Fix: this factory takes ``cls`` and is meant to be called through
        the class, but the ``@classmethod`` decorator was missing, which
        shifted all arguments by one; restored it. Imports are deferred so
        only the selected backend is loaded.
        """
        if (domain == 'miniwob'):
            from wge.miniwob.environment import MiniWoBEnvironment
            return MiniWoBEnvironment(subdomain)
        elif (domain == 'formwob'):
            from wge.formwob.environment import FormWoBEnvironment
            return FormWoBEnvironment(subdomain)
        raise ValueError('Unknown domain name {}'.format(domain))

    def configure(self, num_instances=1, **kwargs):
        """Configure the environment instances; subclass responsibility."""
        raise NotImplementedError

    def reset(self):
        """Reset all instances; subclass responsibility."""
        raise NotImplementedError

    def step(self, actions):
        """Advance all instances with the given actions; subclass responsibility."""
        raise NotImplementedError

    def close(self):
        """Release resources; subclass responsibility."""
        raise NotImplementedError

    def num_instances(self):
        # NOTE(review): reads like an accessor; a @property decorator may
        # have been stripped -- confirm against callers.
        raise NotImplementedError
class Seq2VecEncoder(_EncoderBase):
    """Abstract encoder mapping a sequence of vectors to a single vector.

    Subclasses must report their input/output dimensionalities so callers
    can wire layers together.
    """
    def get_input_dim(self) -> int:
        """Return the expected dimensionality of each input vector."""
        raise NotImplementedError
    def get_output_dim(self) -> int:
        """Return the dimensionality of the encoded output vector."""
        raise NotImplementedError
class BDFormat(Enum):
    """Numeric element formats; the integer values are fixed codes that
    presumably match an external table -- do not renumber."""
    INT8 = 0
    FP16 = 1
    FP32 = 2
    INT16 = 3
    INT32 = 4
    BFP16 = 5
    # Sentinel for an unrecognized format.
    UNKNOWN = (- 1)
def import_from_path(name):
    """Resolve a dotted path such as ``'pkg.module.ClassName'``.

    Imports everything before the last dot as a module, then returns the
    final component fetched from that module with ``getattr``.
    """
    *module_parts, attr_name = name.split('.')
    module = importlib.import_module('.'.join(module_parts))
    return getattr(module, attr_name)
class GraphHandler(object):
    """Owns TF1 session bookkeeping for a model: variable init, checkpoint
    save/restore, and summary writing (train mode only)."""
    def __init__(self, model):
        self.model = model
        # Keep at most the 3 most recent checkpoints.
        self.saver = tf.train.Saver(max_to_keep=3)
        self.writer = None
    def initialize(self, sess):
        """Init variables, restore a checkpoint when configured, and open a
        summary writer in train mode."""
        sess.run(tf.global_variables_initializer())
        # Non-train modes always need a checkpoint; train mode only when asked.
        if (cfg.load_model or (cfg.mode != 'train')):
            self.restore(sess)
        if (cfg.mode == 'train'):
            self.writer = tf.summary.FileWriter(logdir=cfg.summary_dir, graph=None)
    def add_summary(self, summary, global_step):
        _logger.add()
        _logger.add('saving summary...')
        self.writer.add_summary(summary, global_step)
        _logger.done()
    def add_summaries(self, summaries, global_step):
        for summary in summaries:
            self.add_summary(summary, global_step)
    def save(self, sess, global_step=None):
        """Write a checkpoint to cfg.ckpt_path."""
        _logger.add()
        _logger.add(('saving model to %s' % cfg.ckpt_path))
        self.saver.save(sess, cfg.ckpt_path, global_step)
        _logger.done()
    def restore(self, sess):
        """Restore from an explicit path, a specific step, or the latest
        checkpoint in cfg.ckpt_dir; outside train mode a missing checkpoint
        is a hard error."""
        _logger.add()
        if (cfg.load_step is None):
            if (cfg.load_path is None):
                _logger.add(('trying to restore from dir %s' % cfg.ckpt_dir))
                latest_checkpoint_path = tf.train.latest_checkpoint(cfg.ckpt_dir)
            else:
                latest_checkpoint_path = cfg.load_path
        else:
            # Specific step: checkpoint files are named "<ckpt_path>-<step>".
            latest_checkpoint_path = ((cfg.ckpt_path + '-') + str(cfg.load_step))
        if (latest_checkpoint_path is not None):
            _logger.add(('trying to restore from ckpt file %s' % latest_checkpoint_path))
            try:
                self.saver.restore(sess, latest_checkpoint_path)
                _logger.add('success to restore')
            except tf.errors.NotFoundError:
                _logger.add('failure to restore')
                if (cfg.mode != 'train'):
                    raise FileNotFoundError('canot find model file')
        else:
            _logger.add(('No check point file in dir %s ' % cfg.ckpt_dir))
            if (cfg.mode != 'train'):
                raise FileNotFoundError('canot find model file')
        _logger.done()
def register_Ns3LteRrcSapRlcConfig_methods(root_module, cls):
    """Register constructors and attributes of ns3::LteRrcSap::RlcConfig on
    its pybindgen class wrapper (generated-style binding code)."""
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor([param('ns3::LteRrcSap::RlcConfig const &', 'arg0')])
    cls.add_instance_attribute('choice', 'ns3::LteRrcSap::RlcConfig::direction', is_const=False)
    return
def tensor2imgs(tensor, mean=(0, 0, 0), std=(1, 1, 1), to_rgb=True):
    """Convert a batched normalized image tensor (N, C, H, W) back into a
    list of contiguous uint8 HWC arrays, undoing mean/std normalization.
    """
    if torch is None:
        raise RuntimeError('pytorch is not installed')
    assert torch.is_tensor(tensor) and tensor.ndim == 4
    assert len(mean) == 3
    assert len(std) == 3
    mean_arr = np.array(mean, dtype=np.float32)
    std_arr = np.array(std, dtype=np.float32)
    images = []
    for chw in tensor:
        # CHW -> HWC on the host, then undo the normalization.
        hwc = chw.cpu().numpy().transpose(1, 2, 0)
        restored = mmcv.imdenormalize(hwc, mean_arr, std_arr, to_bgr=to_rgb).astype(np.uint8)
        images.append(np.ascontiguousarray(restored))
    return images
class Power2OrZPred(FunPred):
    """Function predicate on a single value; emitted in generated code as
    the ``isPowerOf2OrZero`` helper (per ``code`` below)."""
    # One argument of kind Value.
    sig = (Value,)
    code = 'isPowerOf2OrZero'
    # Constrained to a single integer type.
    type_constraints = _one_int
# Fix: the route decorator had been reduced to a bare tuple expression; the
# handler reads `app.web3_url`, so `app` is in scope and the decorator is
# restored as @app.route(...).
@app.route('/get_block/<blockNumber>', methods=('GET',))
def get_block(blockNumber):
    """HTTP endpoint: return the requested block from geth as JSON.

    'latest' is resolved to the current head's number before the lookup.
    """
    web3 = connect_to_geth(app.web3_url, app.consensus)
    if (blockNumber == 'latest'):
        blockNumber = web3.eth.getBlock('latest').number
    block = web3.eth.get_block(int(blockNumber))
    # HexJsonEncoder handles the hex/bytes fields web3 returns.
    resp = Response(json.dumps(dict(block), cls=HexJsonEncoder, indent=5))
    resp.headers['Content-Type'] = 'application/json'
    return resp
def _sinkhorn_distance(x, y, d):
    """Entropy-regularized OT distance between histograms ``x`` and ``y``
    with cost matrix ``d`` via POT's sinkhorn2 (reg=0.1); logs timing."""
    start = time.time()
    distance = ot.sinkhorn2(x, y, d, 0.1, method='sinkhorn')
    elapsed = time.time() - start
    logger.debug(('%8f secs for Sinkhorn dist. \t#source_nbr: %d, #target_nbr: %d' % (elapsed, len(x), len(y))))
    return distance
class _ASPPModule(nn.Module):
def __init__(self, inplanes, planes, kernel_size, padding, dilation, BatchNorm):
super(_ASPPModule, self).__init__()
self.atrous_conv = nn.Conv2d(inplanes, planes, kernel_size=kernel_size, stride=1, padding=padding, dilation=dilation, bias=False)
self.bn = BatchNorm(planes)
self.relu = nn.ReLU()
self._init_weight()
def forward(self, x):
x = self.atrous_conv(x)
x = self.bn(x)
return self.relu(x)
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_() |
class Net(nn.Module):
    """Super-resolution network: mean shift in, down-block head, residual
    body with a global skip connection, upsampling tail, mean shift out."""

    def __init__(self, opt):
        super().__init__()
        self.sub_mean = ops.MeanShift(255)
        self.add_mean = ops.MeanShift(255, sign=1)
        head_layers = [
            ops.DownBlock(opt.scale),
            # DownBlock packs scale^2 pixels into channels, hence 3*scale^2.
            nn.Conv2d(3 * (opt.scale ** 2), opt.num_channels, 3, 1, 1),
        ]
        body_layers = [ops.ResBlock(opt.num_channels, opt.res_scale)
                       for _ in range(opt.num_blocks)]
        body_layers.append(nn.Conv2d(opt.num_channels, opt.num_channels, 3, 1, 1))
        tail_layers = [
            ops.Upsampler(opt.num_channels, opt.scale),
            nn.Conv2d(opt.num_channels, 3, 3, 1, 1),
        ]
        self.head = nn.Sequential(*head_layers)
        self.body = nn.Sequential(*body_layers)
        self.tail = nn.Sequential(*tail_layers)
        self.opt = opt

    def forward(self, x):
        shifted = self.sub_mean(x)
        features = self.head(shifted)
        # Global residual connection around the body.
        residual = self.body(features) + features
        return self.add_mean(self.tail(residual))
def _denominator(t_slice_shape, precision, unroll=1):
def fwd(qs, ks):
def body(p, qk):
(q, k) = qk
p += k
x = jnp.einsum('...m,...m->...', q, p, precision=precision)
return (p, x)
p = jnp.zeros(t_slice_shape)
(p, R) = lax.scan(body, p, (qs, ks), unroll=unroll)
return (R, (qs, ks, p))
def bwd(qkp, R_ct):
def body(carry, qkx):
(p, p_ct) = carry
(q, k, x_ct) = qkx
q_ct = jnp.einsum('...,...m->...m', x_ct, p, precision=precision)
p_ct += jnp.einsum('...,...m->...m', x_ct, q, precision=precision)
k_ct = p_ct
p -= k
return ((p, p_ct), (q_ct, k_ct))
(qs, ks, p) = qkp
(_, (qs_ct, ks_ct)) = lax.scan(body, (p, jnp.zeros_like(p)), (qs, ks, R_ct), reverse=True, unroll=unroll)
return (qs_ct, ks_ct)
_vjp
def _denominator_impl(qs, ks):
(R, _) = fwd(qs, ks)
return R
_denominator_impl.defvjp(fwd, bwd)
return _denominator_impl |
class Statistics():
    """Accumulates TP/FP/FN counts over a sweep of thresholds and derives
    precision/recall, a PR curve, mAP (trapezoidal area) and mean F1.
    """

    def __init__(self):
        self.tp = {}
        self.fp = {}
        # NOTE(review): `tn` is allocated but never updated or read below;
        # kept for interface compatibility.
        self.tn = {}
        self.fn = {}
        # 21 thresholds spanning [0, 1]; the endpoints are nudged to
        # 0.001/0.999 so the >= comparisons stay meaningful at the extremes.
        self.t0 = [round(t, 2) for t in np.linspace(0, 1, 21)]
        self.t0[0] = 0.001
        self.t0[(- 1)] = 0.999
        for t in self.t0:
            self.tp[t] = 0
            self.fp[t] = 0
            self.fn[t] = 0
        assert (0.5 in self.tp)

    def pr_curve(self):
        """Return [[threshold, precision, recall], ...] for all thresholds."""
        pr = []
        for t in self.t0:
            pr.append([t, self.precision(t), self.recall(t)])
        return pr

    def update(self, a0, gt0):
        """Accumulate counts from prediction scores a0 and ground truth gt0.

        Generalization fix: the thresholded masks are converted with
        ``.long()`` instead of ``.type(torch.cuda.LongTensor)``, so the
        update works on whatever device the tensors already live on
        (results on CUDA inputs are unchanged).
        """
        for t in self.t0:
            a = (a0 >= t).long()
            gt = (gt0 >= t).long()
            tp = int(torch.sum(torch.mul(a, gt)).item())
            fp = int(torch.sum(torch.mul(a, (1 - gt))).item())
            fn = int(torch.sum(torch.mul((1 - a), gt)).item())
            self.tp[t] += tp
            self.fp[t] += fp
            self.fn[t] += fn

    def mAP(self):
        """Area under the precision-recall curve (trapezoidal rule)."""
        p = [self.precision(t) for t in self.t0]
        r = [self.recall(t) for t in self.t0]
        integral = np.abs(np.trapz(p, x=r))
        return integral

    def precision(self, t=0.5):
        """tp / (tp + fp) at threshold t; 0 when nothing was predicted."""
        p = (self.tp[t] + self.fp[t])
        if (p != 0):
            return (self.tp[t] / p)
        else:
            return 0

    def recall(self, t=0.5):
        """tp / (tp + fn) at threshold t; 0 when there are no positives."""
        r = (self.tp[t] + self.fn[t])
        if (r != 0):
            return (self.tp[t] / r)
        else:
            return 0

    def f1(self):
        """Mean F1 across all thresholds (thresholds with P+R == 0 add 0)."""
        f1 = 0
        for t in self.t0:
            tmp = (self.precision(t) + self.recall(t))
            if (tmp != 0):
                f1 += ((2.0 * (self.precision(t) * self.recall(t))) / tmp)
        return (f1 / len(self.t0))
def get_tree_starting_at(module, edges):
    """Return the dependency tree rooted at ``module``.

    The result is ``[module, level_1_edges, level_2_edges, ...]`` where
    each level holds the edges whose targets are first reached at that
    depth; edges into ``__init__.py`` files and self-loops are ignored.
    """
    seen = [module]
    frontier = [
        e for e in edges
        if e[0] == module and e[1] != module and '__init__.py' not in e[1]
    ]
    tree = [module]
    while frontier:
        tree.append(frontier)
        next_vertices = list({e[1] for e in frontier})
        seen.extend(next_vertices)
        frontier = [
            e for e in edges
            if e[0] in next_vertices and e[1] not in seen and '__init__.py' not in e[1]
        ]
    return tree
class XLNetTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for XLNet.

    Unlike BERT-style tokenizers, the special tokens go at the END of a
    sequence: ``tokens <sep> <cls>`` for a single sequence and
    ``A <sep> B <sep> <cls>`` for a pair (segment id 2 marks ``<cls>``).
    Contains Python 2 compatibility paths via ``six``.
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=False, bos_token='<s>', eos_token='</s>', unk_token='<unk>', sep_token='<sep>', pad_token='<pad>', cls_token='<cls>', mask_token='<mask>', additional_special_tokens=['<eop>', '<eod>'], **kwargs):
        """Load the SentencePiece model at ``vocab_file``.

        Args:
            vocab_file: path to the SentencePiece ``.model`` file.
            do_lower_case: lowercase text during preprocessing.
            remove_space: collapse and strip whitespace during preprocessing.
            keep_accents: if False, strip combining accents (NFKD).

        NOTE(review): mutable default for ``additional_special_tokens`` is
        only read and passed through, so the shared default is harmless here.
        """
        super(XLNetTokenizer, self).__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs)
        # Reserve positions for the trailing <sep> <cls> (single sequence)
        # and <sep> ... <sep> <cls> (sequence pair).
        self.max_len_single_sentence = (self.max_len - 2)
        self.max_len_sentences_pair = (self.max_len - 3)
        try:
            import sentencepiece as spm
        except ImportError:
            # NOTE(review): this only warns; if sentencepiece is missing,
            # the spm usage a few lines below raises NameError anyway.
            logger.warning('You need to install SentencePiece to use XLNetTokenizer: install sentencepiece')
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)
    def vocab_size(self):
        """Number of pieces in the SentencePiece model.

        NOTE(review): defined as a plain method here; later library versions
        expose this as a property — confirm how callers invoke it.
        """
        return len(self.sp_model)
    def __getstate__(self):
        # The SentencePiece processor is not picklable: drop it here and
        # reload it from self.vocab_file in __setstate__.
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state
    def __setstate__(self, d):
        """Restore state and re-load the SentencePiece model from disk."""
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning('You need to install SentencePiece to use XLNetTokenizer: install sentencepiece')
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        """Normalize raw text: whitespace cleanup, quote normalization,
        optional accent stripping (NFKD) and lowercasing."""
        if self.remove_space:
            outputs = ' '.join(inputs.strip().split())
        else:
            outputs = inputs
        # Normalize LaTeX-style double quotes to plain ASCII quotes.
        outputs = outputs.replace('``', '"').replace("''", '"')
        if (six.PY2 and isinstance(outputs, str)):
            outputs = outputs.decode('utf-8')
        if (not self.keep_accents):
            # Decompose, then drop combining marks to strip accents.
            outputs = unicodedata.normalize('NFKD', outputs)
            outputs = ''.join([c for c in outputs if (not unicodedata.combining(c))])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize(self, text, return_unicode=True, sample=False):
        """Tokenize text into SentencePiece pieces.

        Pieces ending in ",<digit>" (e.g. from numbers like "1,000") are
        re-split so the trailing comma becomes its own piece.
        """
        text = self.preprocess_text(text)
        if (six.PY2 and isinstance(text, unicode)):
            text = text.encode('utf-8')
        if (not sample):
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            # Stochastic segmentation (subword regularization).
            pieces = self.sp_model.SampleEncodeAsPieces(text, 64, 0.1)
        new_pieces = []
        for piece in pieces:
            if ((len(piece) > 1) and (piece[(- 1)] == ',') and piece[(- 2)].isdigit()):
                # Re-encode the piece without its trailing comma, then
                # re-attach the comma as a separate piece.
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:(- 1)].replace(SPIECE_UNDERLINE, ''))
                if ((piece[0] != SPIECE_UNDERLINE) and (cur_pieces[0][0] == SPIECE_UNDERLINE)):
                    # Drop the spurious leading underline introduced by re-encoding.
                    if (len(cur_pieces[0]) == 1):
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[(- 1)])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        if (six.PY2 and return_unicode):
            # Python 2: convert byte-string pieces to unicode on request.
            ret_pieces = []
            for piece in new_pieces:
                if isinstance(piece, str):
                    piece = piece.decode('utf-8')
                ret_pieces.append(piece)
            new_pieces = ret_pieces
        return new_pieces
    def _convert_token_to_id(self, token):
        """Map a piece (string) to its vocabulary id."""
        return self.sp_model.PieceToId(token)
    def _convert_id_to_token(self, index, return_unicode=True):
        """Map a vocabulary id back to its piece (string)."""
        token = self.sp_model.IdToPiece(index)
        if (six.PY2 and return_unicode and isinstance(token, str)):
            token = token.decode('utf-8')
        return token
    def convert_tokens_to_string(self, tokens):
        """Join pieces and restore spaces from the SentencePiece underline marker."""
        out_string = ''.join(tokens).replace(SPIECE_UNDERLINE, ' ').strip()
        return out_string
    def add_special_tokens_single_sequence(self, token_ids):
        """XLNet format for one sequence: ids + <sep> + <cls>."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        return ((token_ids + sep) + cls)
    def add_special_tokens_sequence_pair(self, token_ids_0, token_ids_1):
        """XLNet format for a pair: A + <sep> + B + <sep> + <cls>."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        return ((((token_ids_0 + sep) + token_ids_1) + sep) + cls)
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1):
        """Segment ids: 0 for A+<sep>, 1 for B+<sep>, and 2 for the final <cls>."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        cls_segment_id = [2]
        return (((len((token_ids_0 + sep)) * [0]) + (len((token_ids_1 + sep)) * [1])) + cls_segment_id)
    def save_vocabulary(self, save_directory):
        """Copy the SentencePiece model file into ``save_directory``.

        Returns:
            A 1-tuple with the written path, or None (implicit) when
            ``save_directory`` is not a directory.
        """
        if (not os.path.isdir(save_directory)):
            logger.error('Vocabulary path ({}) should be a directory'.format(save_directory))
            return
        out_vocab_file = os.path.join(save_directory, VOCAB_FILES_NAMES['vocab_file'])
        # Skip the copy when saving over the original file.
        if (os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file)):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
class EnhancedSet():
    """A thin wrapper around `set` adding filter/categorize helpers and a
    sorted, deterministic repr/str."""

    def __init__(self, data=None):
        # Copy the input so the wrapper owns its own set.
        if data:
            self.data = set(data)
        else:
            self.data = set()

    def __iter__(self):
        return self.data.__iter__()

    def __contains__(self, datum):
        return (datum in self.data)

    def __len__(self):
        return len(self.data)

    def __eq__(self, other):
        # Equal only to another EnhancedSet with the same elements.
        if isinstance(other, EnhancedSet):
            return (self.data == other.data)
        else:
            return False

    def __hash__(self):
        # BUG FIX: `set` is unhashable, so the original hash(self.data)
        # always raised TypeError. Hash a frozenset view instead — this is
        # consistent with __eq__ (equal sets -> equal frozensets -> equal
        # hashes).
        return hash(frozenset(self.data))

    def __repr__(self):
        return sorted(self.data).__repr__()

    def __str__(self):
        return sorted(self.data).__str__()

    def filter(self, function):
        """Return a new EnhancedSet of the elements for which function(x) is truthy."""
        filtered = set()
        for datum in self.data:
            if function(datum):
                filtered.add(datum)
        return EnhancedSet(filtered)

    def categorize(self, categorizer, corpora=None, reference=None):
        """Group elements by categorizer(x) and wrap the result in a
        StructuredCoreferenceAnalysis (defined elsewhere in this module)."""
        categorized = {}
        for datum in self.data:
            category = categorizer(datum)
            if (category not in categorized):
                categorized[category] = EnhancedSet()
            categorized[category].data.add(datum)
        return StructuredCoreferenceAnalysis(categorized, corpora, reference)

    def intersection(self, other):
        """Elements present in both this set and `other`."""
        return EnhancedSet(self.data.intersection(other.data))

    def difference(self, other):
        """Elements present in this set but not in `other`."""
        return EnhancedSet(self.data.difference(other.data))
def check_path(path):
    """Create directory `path` (including parents) if it does not exist.

    Prints a confirmation message only when the directory was missing.
    Uses exist_ok=True so a concurrent creator cannot race us between the
    existence check and the makedirs call (the original would crash with
    FileExistsError in that window).
    """
    if not os.path.exists(path):
        os.makedirs(path, exist_ok=True)
        print(f'{path} created')
@app.route('/accumulate', methods=['POST'])
def getUpdateNotification():
    """Flask endpoint: record the subscriptionId from an incoming JSON
    notification in the module-level `subId` list and acknowledge.

    NOTE(review): the original decorator line was truncated to just
    `('/accumulate', methods=['POST'])` (a syntax error). `@app.route`
    is restored based on the route/methods signature and the use of
    flask's `request` — confirm the actual Flask app object's name.
    """
    print(dir(request))  # debug: dump available request attributes
    data = request.get_json()
    print(data)
    pload = data['subscriptionId']
    subId.append(data['subscriptionId'])
    print(pload)
    return 'Done'
class MLPHead(nn.Module):
    """Two-layer projection head: Linear -> BatchNorm1d -> ReLU -> Linear.

    Maps (batch, in_channels) inputs to (batch, projection_size) outputs,
    as used in BYOL/SimCLR-style self-supervised models.
    """

    def __init__(self, in_channels, mlp_hidden_size, projection_size):
        super(MLPHead, self).__init__()
        layers = [
            nn.Linear(in_channels, mlp_hidden_size),
            nn.BatchNorm1d(mlp_hidden_size),
            nn.ReLU(inplace=True),
            nn.Linear(mlp_hidden_size, projection_size),
        ]
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        """Project x through the MLP."""
        return self.net(x)
def _make_constant(nodes, predicate):
for n in nodes.values():
if predicate(n):
for i in n.in_edges:
i.remove_output(n)
n.args.clear()
n.kwargs.clear()
n.type = NodeTypes.CONSTANT |
def infer_abbr(class_type):
    """Infer a short abbreviation ('bn'/'gn'/'ln'/'in'/...) for a norm layer class.

    Resolution order: an explicit ``_abbr_`` attribute wins, then known
    base classes, then a keyword search of the lowercased class name,
    falling back to 'norm_layer'.

    Raises:
        TypeError: if `class_type` is not a class.
    """
    if not inspect.isclass(class_type):
        raise TypeError(f'class_type must be a type, but got {type(class_type)}')
    if hasattr(class_type, '_abbr_'):
        return class_type._abbr_
    # Known base classes, checked in the same priority order as before
    # (instance norm before batch norm, since their hierarchies are related).
    known_bases = (
        (_InstanceNorm, 'in'),
        (_BatchNorm, 'bn'),
        (nn.GroupNorm, 'gn'),
        (nn.LayerNorm, 'ln'),
    )
    for base, abbr in known_bases:
        if issubclass(class_type, base):
            return abbr
    # Fall back to keyword matching on the class name.
    lowered = class_type.__name__.lower()
    keywords = (('batch', 'bn'), ('group', 'gn'), ('layer', 'ln'), ('instance', 'in'))
    for keyword, abbr in keywords:
        if keyword in lowered:
            return abbr
    return 'norm_layer'
def preprocess_for_train(image, height, width, bbox, fast_mode=True, scope=None):
    """Distort an image for training (Inception-style preprocessing).

    Pipeline: random bbox-constrained crop, resize to (height, width) with
    a randomly chosen interpolation method, random horizontal flip, random
    color distortion, then rescale to [-1, 1].

    NOTE(review): uses legacy pre-TF-1.0 ops (tf.image_summary, tf.sub,
    tf.mul), so this targets an old TensorFlow version.

    Args:
        image: 3-D image tensor; converted to float32 in [0, 1] if needed.
        height: output image height.
        width: output image width.
        bbox: bounding boxes tensor (normalized coords), or None to use
            the whole image as the single box.
        fast_mode: when True, use a single resize method and cheaper
            color distortion.
        scope: optional name scope for the ops.

    Returns:
        3-D float tensor of the distorted image with values in [-1, 1].
    """
    with tf.name_scope(scope, 'distort_image', [image, height, width, bbox]):
        if (bbox is None):
            # No boxes supplied: treat the entire image as the box.
            bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
        if (image.dtype != tf.float32):
            image = tf.image.convert_image_dtype(image, dtype=tf.float32)
        # Summary image with the supplied boxes drawn on it.
        image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0), bbox)
        tf.image_summary('image_with_bounding_boxes', image_with_box)
        (distorted_image, distorted_bbox) = distorted_bounding_box_crop(image, bbox)
        # The random crop loses the static shape; restore the channel count.
        distorted_image.set_shape([None, None, 3])
        image_with_distorted_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0), distorted_bbox)
        tf.image_summary('images_with_distorted_bounding_box', image_with_distorted_box)
        # Randomize the resize interpolation so the model cannot overfit
        # to a single resampler (fast_mode uses just one method).
        num_resize_cases = (1 if fast_mode else 4)
        distorted_image = apply_with_random_selector(distorted_image, (lambda x, method: tf.image.resize_images(x, [height, width], method=method)), num_cases=num_resize_cases)
        tf.image_summary('cropped_resized_image', tf.expand_dims(distorted_image, 0))
        distorted_image = tf.image.random_flip_left_right(distorted_image)
        # Randomly pick one of 4 orderings of the color distortion ops.
        distorted_image = apply_with_random_selector(distorted_image, (lambda x, ordering: distort_color(x, ordering, fast_mode)), num_cases=4)
        tf.image_summary('final_distorted_image', tf.expand_dims(distorted_image, 0))
        # Rescale [0, 1] -> [-1, 1].
        distorted_image = tf.sub(distorted_image, 0.5)
        distorted_image = tf.mul(distorted_image, 2.0)
        return distorted_image
class chi_gen(rv_continuous):
    """Chi continuous distribution (rv_continuous subclass).

    The chi distribution with ``df`` degrees of freedom is the distribution
    of the square root of a chi-squared random variable (see _rvs below).
    """
    def _shape_info(self):
        # df is a positive real; endpoints open, not integer-constrained.
        return [_ShapeInfo('df', False, (0, np.inf), (False, False))]
    def _rvs(self, df, size=None, random_state=None):
        # Sample via chi = sqrt(chi2).
        return np.sqrt(chi2.rvs(df, size=size, random_state=random_state))
    def _pdf(self, x, df):
        # Exponentiate the log-pdf for numerical stability.
        return np.exp(self._logpdf(x, df))
    def _logpdf(self, x, df):
        # Normalization: log 2 - (df/2) log 2 - log Gamma(df/2).
        l = ((np.log(2) - ((0.5 * np.log(2)) * df)) - sc.gammaln((0.5 * df)))
        # sc.xlogy handles the x = 0 boundary of (df-1)*log(x).
        return ((l + sc.xlogy((df - 1.0), x)) - (0.5 * (x ** 2)))
    def _cdf(self, x, df):
        # CDF via the regularized lower incomplete gamma function.
        return sc.gammainc((0.5 * df), (0.5 * (x ** 2)))
    def _sf(self, x, df):
        # Survival function via the regularized upper incomplete gamma.
        return sc.gammaincc((0.5 * df), (0.5 * (x ** 2)))
    def _ppf(self, q, df):
        # Inverse CDF: invert the incomplete gamma, then undo x^2/2.
        return np.sqrt((2 * sc.gammaincinv((0.5 * df), q)))
    def _isf(self, q, df):
        # Inverse survival function, mirroring _ppf.
        return np.sqrt((2 * sc.gammainccinv((0.5 * df), q)))
    def _stats(self, df):
        # Mean via the Pochhammer symbol: sqrt(2) * Gamma((df+1)/2)/Gamma(df/2).
        mu = (np.sqrt(2) * sc.poch((0.5 * df), 0.5))
        # Variance.
        mu2 = (df - (mu * mu))
        # Skewness.
        g1 = (((2 * (mu ** 3.0)) + (mu * (1 - (2 * df)))) / np.asarray(np.power(mu2, 1.5)))
        # Excess kurtosis.
        g2 = ((((2 * df) * (1.0 - df)) - (6 * (mu ** 4))) + ((4 * (mu ** 2)) * ((2 * df) - 1)))
        g2 /= np.asarray((mu2 ** 2.0))
        return (mu, mu2, g1, g2)
    def _entropy(self, df):
        def regular_formula(df):
            # Exact entropy: log Gamma(df/2) + (df - log 2 - (df-1) psi(df/2)) / 2.
            return (sc.gammaln((0.5 * df)) + (0.5 * ((df - np.log(2)) - ((df - 1) * sc.digamma((0.5 * df))))))
        def asymptotic_formula(df):
            # Large-df series expansion; avoids cancellation in the exact formula.
            return (((((0.5 + (np.log(np.pi) / 2)) - ((df ** (- 1)) / 6)) - ((df ** (- 2)) / 6)) - ((4 / 45) * (df ** (- 3)))) + ((df ** (- 4)) / 15))
        # Exact formula below df = 300, asymptotic expansion above.
        return _lazywhere((df < 300.0), (df,), regular_formula, f2=asymptotic_formula)
def configure_logger(model_args: ModelArguments, training_args: TrainingArguments):
    """Configure root logging and set this module's logger level.

    Level selection: DEBUG when verbose logging is requested, INFO on the
    main distributed process, WARNING on every other rank.
    """
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    if model_args.verbose_logging:
        level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        level = logging.INFO
    else:
        level = logging.WARNING
    logger.setLevel(level)
def brightness_up_mapping(level, src_img):
    """Brighten `src_img` by a level-dependent factor with small random jitter.

    Level 1 maps to a half-strength factor (0.5); any other level is used
    as the factor directly. The enhancement factor is 1 + 0.2 * factor
    plus uniform noise in [-0.01, 0.01).
    """
    factor = 0.5 if level == 1 else level
    jitter = np.random.uniform(-0.01, 0.01)
    noisy_factor = 1 + factor * 0.2 + jitter
    return ImageEnhance.Brightness(src_img).enhance(noisy_factor)
class SingleImageDataset(VisionDataset):
    """Panorama dataset producing ControlNet-style training dicts.

    Each subdirectory of ``root`` is expected to contain ``panorama.jpg``;
    the matching text prompt is read from a JSON file keyed by the
    subdirectory name. A fixed binary mask zeroes part of the source
    (hint) image.
    """
    def __init__(self, root, pairs_file=None, num_images=1000, extensions='.jpg', height=256, Train=True, down_scale=1):
        # NOTE(review): the `height` and `pairs_file` parameters are
        # accepted but effectively ignored — resolution is hard-coded to
        # 1024x512 here; confirm whether that is intentional.
        self.height = 512
        self.width = 1024
        self.num_images = num_images
        assert ((down_scale == 1) or (down_scale == 2)), 'only support resolution of 1024X512 and 512X256'
        self.downscale = down_scale
        (transform, target_transform) = self.init_crops_transform()
        super(SingleImageDataset, self).__init__(root, transform=transform, target_transform=target_transform)
        # One entry per panorama directory, sorted for deterministic indexing.
        self.pano_frames = sorted(os.listdir(root))
        self.extensions = extensions
        self.train = Train
        # Prompt lookup table keyed by panorama directory name.
        prompt_json_file_train = './prompts/my_sun360_prompts_no360.json'
        self.prompts = json.load(open(prompt_json_file_train, 'r'))
        # Binary mask at (height, width); 0 marks the region to hide in the hint.
        self.mask = read_mask('datasets/90binarymask.png', self.height, self.width)
    def __getitem__(self, index):
        """Return dict(jpg=target in [-1, 1], txt=prompt, hint=masked source
        in [0, 1], mask=1 where the region is hidden)."""
        pano = self.pano_frames[index]
        full_path = os.path.join(self.root, pano, 'panorama.jpg')
        target = cv2.imread(full_path)
        # OpenCV loads BGR; convert to RGB.
        target = cv2.cvtColor(target, cv2.COLOR_BGR2RGB)
        # The hint is the target with the masked-out region zeroed.
        source = target.copy()
        source[(self.mask == 0)[(..., 0)]] = 0
        # Hint normalized to [0, 1]; target to [-1, 1].
        source = (source.astype(np.float32) / 255.0)
        target = ((target.astype(np.float32) / 127.5) - 1.0)
        # The panorama directory name keys into the prompt table.
        img_idx = full_path.split('/')[(- 2)]
        prompt = self.prompts[img_idx]
        return dict(jpg=target, txt=prompt, hint=source, mask=np.where((self.mask == 0), 1, 0))
    def __len__(self):
        return len(self.pano_frames)
    def init_crops_transform(self):
        """Build (transform, target_transform): square resize to ``height``,
        to-tensor, ImageNet normalization.

        NOTE(review): both transforms are identical and __getitem__ does its
        own normalization instead of using them — confirm they are needed.
        """
        transform = transforms.Compose([transforms.Resize((int(self.height), int(self.height))), transforms.ToTensor(), transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))])
        target_transform = transforms.Compose([transforms.Resize((int(self.height), int(self.height))), transforms.ToTensor(), transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))])
        return (transform, target_transform)
    def normal_transform(self, image):
        """Scale an image to float32 in [0, 1] and wrap it as a tensor."""
        image_arr = np.array(image)
        image_arr = (image_arr / 255).astype(np.float32)
        return torch.tensor(image_arr)
    def loader(self, path, down_scale=1):
        """Load an RGB PIL image from ``path``, optionally downscaled by
        ``down_scale`` in both dimensions."""
        with open(path, 'rb') as f:
            img = Image.open(f)
            img = img.convert('RGB')
            if (down_scale > 1):
                (width, height) = img.size
                new_width = int((width / down_scale))
                new_height = int((height / down_scale))
                new_size = (new_width, new_height)
                img = img.resize(new_size)
            return img
def outmess(line, flag=1):
    """Write `line` to stdout, optionally preceded by the module-global
    file-position text.

    Output is suppressed entirely unless the module-global `verbose` is
    truthy, and also suppressed while `quiet` is set. When `flag` is
    truthy, `filepositiontext` is emitted before the message.
    """
    global filepositiontext
    if not verbose:
        return
    if quiet:
        return
    if flag:
        sys.stdout.write(filepositiontext)
    sys.stdout.write(line)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.