code stringlengths 101 5.91M |
|---|
def is_per_channel(qscheme):
    """Return True if *qscheme* is one of torch's per-channel quantization schemes."""
    per_channel_schemes = (
        torch.per_channel_affine,
        torch.per_channel_affine_float_qparams,
        torch.per_channel_symmetric,
    )
    return qscheme in per_channel_schemes
def post_process_result(result):
    """Mask out sentinel entries in *result* that equal ``sys.float_info.max``.

    Entries equal to ``float_info.max`` are treated as invalid markers: they
    are zeroed in-place and the array is wrapped in a numpy MaskedArray hiding
    them. If no sentinel is present the input array is returned unchanged.

    Fix: the original evaluated ``result == float_info.max`` twice; the mask
    is now computed once and reused.
    """
    mask = (result == float_info.max)
    if mask.any():
        result[mask] = 0
        result = np.ma.MaskedArray(result, mask)
    return result
def benchmark_backward(fn, *inputs, grad=None, repeats=10, desc='', verbose=True, amp=False, amp_dtype=torch.float16, **kwinputs):
    """Benchmark the backward pass of ``fn(*inputs, **kwinputs)``.

    Runs the forward pass once (optionally under CUDA autocast when ``amp`` is
    True), then times ``repeats`` backward passes with torch.utils.benchmark.

    Args:
        fn: callable under test; may return a tensor or a tuple (only the
            first tuple element is used).
        grad: upstream gradient for backward; ``randn_like`` of the output
            when None. Raises RuntimeError if its shape mismatches the output.
        repeats: number of timed iterations.
        amp / amp_dtype: enable ``torch.autocast`` for the forward pass.

    Returns:
        (timer, measurement) — the benchmark Timer and its timeit result.
    """
    if verbose:
        print(desc, '- Backward pass')
    with torch.autocast(device_type='cuda', dtype=amp_dtype, enabled=amp):
        y = fn(*inputs, **kwinputs)
        if (type(y) is tuple):
            # Convention: only the first output is benchmarked.
            y = y[0]
    if (grad is None):
        grad = torch.randn_like(y)
    elif (grad.shape != y.shape):
        raise RuntimeError('Grad shape does not match output shape')
    def f(*inputs, y, grad):
        # Clear stale gradients so every timed iteration does identical work.
        for x in inputs:
            if isinstance(x, torch.Tensor):
                x.grad = None
        # retain_graph so the same autograd graph survives across repeats.
        y.backward(grad, retain_graph=True)
    # NOTE(review): `benchmark` is presumably torch.utils.benchmark imported
    # elsewhere in the file — confirm.
    t = benchmark.Timer(stmt='f(*inputs, y=y, grad=grad)', globals={'f': f, 'inputs': inputs, 'y': y, 'grad': grad}, num_threads=torch.get_num_threads())
    m = t.timeit(repeats)
    if verbose:
        print(m)
    return (t, m)
def set_weight_decay(model, skip_list=(), skip_keywords=(), lr=None):
    """Split a model's parameters into weight-decay and no-weight-decay groups.

    Parameters belonging to LayerNorm modules, parameters a module reports via
    ``no_weight_decay()``, 1-D parameters, biases, and anything matched by
    ``skip_list`` / ``skip_keywords`` go into the zero-decay group.
    """
    assert lr
    # Fully-qualified name prefixes whose parameters must skip weight decay.
    skip_prefixes = []
    for module_name, module in model.named_modules():
        if isinstance(module, nn.LayerNorm):
            skip_prefixes.append(module_name)
            continue
        if hasattr(module, 'no_weight_decay'):
            for suffix in module.no_weight_decay():
                full_name = suffix if module_name == '' else module_name + '.' + suffix
                skip_prefixes.append(full_name)
    decay_params = []
    no_decay_params = []
    for param_name, param in model.named_parameters():
        if not param.requires_grad:
            continue
        skip = (
            len(param.shape) == 1
            or param_name.endswith('.bias')
            or param_name in skip_list
            or check_keywords_in_name(param_name, skip_keywords)
            or check_keywords_match_name_prefix(param_name, skip_prefixes)
        )
        (no_decay_params if skip else decay_params).append(param)
    return [{'params': decay_params}, {'params': no_decay_params, 'weight_decay': 0.0}]
_utils.in_tempdir
def test_dory_estimate_query_abundance(location):
    """End-to-end test: k-mer-index the dory catlas, then estimate query abundance.

    Fix: the output file is now read via a context manager instead of leaking
    the open handle.
    """
    copy_dory_catlas()
    copy_dory_head()
    args = '-k 21 dory_k21 --contigs-db dory_k21/bcalm.unitigs.db'.split()
    assert (index_cdbg_by_kmer.main(args) == 0)
    args = 'dory_k21 dory-head.fa -o abundances.csv -k 21'.split()
    print('** running estimate_query_abundance')
    assert (estimate_query_abundance.main(args) == 0)
    with open('abundances.csv', 'rt') as outfile:
        abunds = outfile.read()
_datapipe('concat')
class ConcaterMapDataPipe(MapDataPipe):
    """Concatenate multiple MapDataPipes into one contiguous index space.

    Indexing walks the pipes in order; the total length is computed lazily
    on the first ``len()`` call and cached.
    """
    datapipes: Tuple[MapDataPipe]
    length: int

    def __init__(self, *datapipes: MapDataPipe):
        if len(datapipes) == 0:
            raise ValueError('Expected at least one DataPipe, but got nothing')
        if not all(isinstance(dp, MapDataPipe) for dp in datapipes):
            raise TypeError('Expected all inputs to be `MapDataPipe`')
        if not all(isinstance(dp, Sized) for dp in datapipes):
            raise TypeError('Expected all inputs to be `Sized`')
        self.datapipes = datapipes
        # Sentinel: length not yet computed.
        self.length = -1

    def __getitem__(self, index) -> T_co:
        # Walk pipes, discounting each pipe's size until the index fits.
        remaining = index
        for dp in self.datapipes:
            size = len(dp)
            if remaining < size:
                return dp[remaining]
            remaining -= size
        raise IndexError('Index {} is out of range.'.format(index))

    def __len__(self) -> int:
        if self.length == -1:
            self.length = sum(len(dp) for dp in self.datapipes)
        return self.length
def main(config):
    """Evaluate a trained no-reference UGC video-quality model on a test set.

    Loads the trained ResNet-50-based model, runs it over the test videos,
    prints PLCC/SRCC/KRCC/RMSE against the MOS labels, and writes per-video
    predicted scores to ``config.output_name`` as ``name,score`` lines.

    Fixes: ``stats.stats.kendalltau`` (deprecated private scipy submodule)
    replaced by the public ``stats.kendalltau``; the redundant
    ``os.system('touch ...')`` removed (``open(..., 'w')`` creates the file)
    and the output file is now closed via a context manager.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = UGCVQA_NR_model.resnet50(pretrained=True)
    model = model.to(device)
    print('loading the trained model')
    model.load_state_dict(torch.load(config.trained_model))
    if config.database == 'UGCCompressed':
        videos_dir_test = config.videos_dir_test
        datainfo_test = config.datainfo_test
        transformations_test = transforms.Compose([transforms.Resize(520), transforms.CenterCrop(448), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
        valset = VideoDataset_NR(videos_dir_test, datainfo_test, transformations_test, is_train=False)
    val_loader = torch.utils.data.DataLoader(valset, batch_size=1, shuffle=False, num_workers=config.num_workers)
    print('Starting testing:')
    with torch.no_grad():
        model.eval()
        label = np.zeros([len(valset)])
        y_val = np.zeros([len(valset)])
        videos_name = []
        for i, (video, mos, video_name) in enumerate(val_loader):
            print(video_name[0])
            videos_name.append(video_name)
            video = video.to(device)
            label[i] = mos.item()
            outputs = model(video)
            y_val[i] = outputs.item()
            print(y_val[i])
        val_PLCC = stats.pearsonr(y_val, label)[0]
        val_SRCC = stats.spearmanr(y_val, label)[0]
        val_KRCC = stats.kendalltau(y_val, label)[0]
        val_RMSE = np.sqrt(((y_val - label) ** 2).mean())
        print('SRCC: {:.4f}, KRCC: {:.4f}, PLCC: {:.4f}, and RMSE: {:.4f}'.format(val_SRCC, val_KRCC, val_PLCC, val_RMSE))
        output_name = config.output_name
        with open(output_name, 'w') as f:
            for i in range(len(valset)):
                f.write(videos_name[i][0])
                f.write(',')
                f.write(str(y_val[i]))
                f.write('\n')
def _init_dist_slurm(backend, port=29500, **kwargs):
    """Initialise torch.distributed inside a SLURM allocation.

    Derives rank/world-size from SLURM environment variables, pins the local
    GPU, exports the rendezvous variables, and calls init_process_group.
    """
    rank = int(os.environ['SLURM_PROCID'])
    world_size = int(os.environ['SLURM_NTASKS'])
    node_list = os.environ['SLURM_NODELIST']
    gpus_per_node = torch.cuda.device_count()
    # Round-robin GPU assignment for the tasks on this node.
    torch.cuda.set_device(rank % gpus_per_node)
    # The first hostname of the allocation acts as the rendezvous master.
    master_addr = subprocess.getoutput('scontrol show hostname {} | head -n1'.format(node_list))
    os.environ['MASTER_ADDR'] = master_addr
    os.environ['MASTER_PORT'] = str(port)
    os.environ['WORLD_SIZE'] = str(world_size)
    os.environ['RANK'] = str(rank)
    dist.init_process_group(backend=backend)
_function_dispatch(_logspace_dispatcher)
def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, axis=0):
    """Return numbers spaced evenly on a log scale: base ** linspace(start, stop)."""
    exponents = linspace(start, stop, num=num, endpoint=endpoint, axis=axis)
    result = _nx.power(base, exponents)
    if dtype is None:
        return result
    return result.astype(dtype, copy=False)
def test_sis_model():
    """SIS diffusion sanity check on the karate graph: adding random edges
    should increase total diffusion and 'min'/ns_node intervention should
    decrease it relative to the untouched baseline."""
    params = {'model': 'SIS', 'b': 0.00208, 'd': 0.01, 'c': 1, 'runs': 10, 'steps': 5000, 'diffusion': 'max', 'method': 'add_edge_random', 'k': 15, 'seed': 1, 'plot_transition': False, 'gif_animation': False}
    graph = karate()
    # Max-diffusion setting: add 15 random edges.
    ds = Diffusion(graph, **params)
    increased_diffusion = ds.run_simulation()
    # Baseline: no structural intervention.
    params['diffusion'] = None
    params['method'] = None
    params['k'] = 0
    ds = Diffusion(graph, **params)
    baseline_diffusion = ds.run_simulation()
    # Min-diffusion setting: remove 4 nodes chosen by the ns_node strategy.
    params['diffusion'] = 'min'
    params['method'] = 'ns_node'
    params['k'] = 4
    ds = Diffusion(graph, **params)
    decreased_diffusion = ds.run_simulation()
    assert (sum(decreased_diffusion) < sum(baseline_diffusion) < sum(increased_diffusion))
_PREDICTOR_REGISTRY.register()
class DensePoseChartWithConfidencePredictor(DensePoseChartConfidencePredictorMixin, DensePoseChartPredictor):
    """Chart-based DensePose predictor with confidence estimation mixed in.

    All behavior comes from the two bases; the mixin presumably augments the
    base predictor's outputs with confidence estimates — confirm against the
    mixin's implementation.
    """
    pass
class MultiHeadAttention(nn.Module):
    """Multi-head scaled dot-product attention.

    Splits ``dim`` evenly across ``num_attention_heads`` heads, projects
    query/key/value, applies scaled dot-product attention per head, and
    concatenates the heads back.
    """
    def __init__(self, dim: int=512, num_attention_heads: int=8) -> None:
        super(MultiHeadAttention, self).__init__()
        assert ((dim % num_attention_heads) == 0), 'hidden_dim % num_attention_heads should be zero.'
        # Per-head dimensionality; dim is divided evenly across heads.
        self.d_head = int((dim / num_attention_heads))
        self.num_attention_heads = num_attention_heads
        self.query_proj = nn.Linear(dim, (self.d_head * num_attention_heads))
        self.key_proj = nn.Linear(dim, (self.d_head * num_attention_heads))
        self.value_proj = nn.Linear(dim, (self.d_head * num_attention_heads))
        self.scaled_dot_attn = DotProductAttention(dim, scale=True)
    def forward(self, query: torch.FloatTensor, key: torch.FloatTensor, value: torch.FloatTensor, mask: Optional[torch.FloatTensor]=None) -> Tuple[(torch.FloatTensor, torch.FloatTensor)]:
        """Return (context, attention weights) for the given query/key/value.

        Assumes 3-D inputs of shape (batch, seq, dim) — the view/transpose
        below fixes that layout; TODO confirm against callers.
        """
        batch_size = value.size(0)
        # Project and split into heads: (batch, seq, dim) -> (batch, heads, seq, d_head).
        query = self.query_proj(query).view(batch_size, (- 1), self.num_attention_heads, self.d_head).transpose(1, 2)
        key = self.key_proj(key).view(batch_size, (- 1), self.num_attention_heads, self.d_head).transpose(1, 2)
        value = self.value_proj(value).view(batch_size, (- 1), self.num_attention_heads, self.d_head).transpose(1, 2)
        if (mask is not None):
            # Replicate the mask across all heads.
            mask = mask.unsqueeze(1).repeat(1, self.num_attention_heads, 1, 1)
        (context, attn) = self.scaled_dot_attn(query, key, value, mask)
        # Merge heads back: (batch, heads, seq, d_head) -> (batch, seq, heads*d_head).
        context = context.transpose(1, 2).reshape(batch_size, (- 1), (self.num_attention_heads * self.d_head))
        return (context, attn)
def get_logical_forms_from_entities(entities):
    """Enumerate WebQSP candidate logical forms for the linked entities.

    Collects one-hop and two-hop candidates per entity; when exactly two
    entities are present, two-entity candidates are added as well.
    """
    if not entities:
        return []
    candidates = []
    for entity in entities:
        candidates.extend(webqsp_enum_one_hop_one_entity_candidates(entity))
        candidates.extend(webqsp_enum_two_hop_one_entity_candidates(entity))
    if len(entities) == 2:
        candidates.extend(webqsp_enum_two_entity_candidates(entities[0], entities[1]))
    return candidates
class AppendableSequenceTester(object):
    """Reusable test suite exercising the appendable-sequence contract.

    ``empty_list`` / ``reference_list`` are stubs expected to be provided by
    subclasses or a test fixture mechanism (the originals return nothing).
    """
    def empty_list(self):
        pass

    def reference_list(self):
        pass

    def test_append_getitem(self, empty_list, reference_list):
        # Appending one element makes it readable at index 0.
        seq = empty_list
        first = reference_list[0]
        seq.append(first)
        assert seq[0] == first

    def test_extend(self, empty_list, reference_list):
        # extend() copies every element in order and the length matches.
        seq = empty_list
        seq.extend(reference_list)
        for idx, expected in enumerate(reference_list):
            assert seq[idx] == expected
        assert len(seq) == len(reference_list)

    def test_len(self, empty_list, reference_list):
        # Four appends yield length four.
        seq = empty_list
        first = reference_list[0]
        for _ in range(4):
            seq.append(first)
        assert len(seq) == 4

    def test_iter(self, empty_list, reference_list):
        # Iteration visits elements in insertion order.
        seq = empty_list
        seq.extend(reference_list)
        for idx, value in enumerate(seq):
            assert value == reference_list[idx]

    def test_slice(self, empty_list, reference_list):
        # Extended slicing behaves like list slicing.
        seq = empty_list
        seq.extend(reference_list)
        assert list(seq[0:2:5]) == reference_list[0:2:5]
def _decode_record(record, name_to_features):
    """Parse a serialized tf.Example and cast all int64 features to int32."""
    example = tf.parse_single_example(record, name_to_features)
    # Iterate a snapshot of the items since we reassign entries in place.
    for key, tensor in list(example.items()):
        if tensor.dtype == tf.int64:
            tensor = tf.to_int32(tensor)
        example[key] = tensor
    return example
def create_sentencepiece(filenames, model_type, vocab_size, output_prefix):
    """Train a SentencePiece model over *filenames* and dump a text vocabulary.

    Trains with fixed ids for the UNK/BOS/EOS/PAD specials, then writes
    ``{output_prefix}.txt`` containing one ``<piece> 1`` line per ordinary
    piece, in id order, with the special tokens filtered out.
    """
    sp.SentencePieceTrainer.train(input=','.join(filenames), model_prefix=output_prefix, vocab_size=vocab_size, model_type=model_type, character_coverage=1.0, unk_id=UNK_TOKEN_ID, bos_id=BOS_TOKEN_ID, eos_id=EOS_TOKEN_ID, pad_id=PAD_TOKEN_ID)
    spm = sp.SentencePieceProcessor(model_file=f'{output_prefix}.model')
    # id -> piece mapping for the trained model.
    vocab = {i: spm.IdToPiece(i) for i in range(spm.GetPieceSize())}
    # The reserved ids must have landed on the expected special tokens.
    assert (vocab.get(UNK_TOKEN_ID) == UNK_TOKEN)
    assert (vocab.get(BOS_TOKEN_ID) == BOS_TOKEN)
    assert (vocab.get(EOS_TOKEN_ID) == EOS_TOKEN)
    assert (vocab.get(PAD_TOKEN_ID) == PAD_TOKEN)
    # Drop the special tokens before writing the plain vocabulary file.
    vocab = {i: s for (i, s) in vocab.items() if (s not in {UNK_TOKEN, BOS_TOKEN, EOS_TOKEN, PAD_TOKEN})}
    with open(f'{output_prefix}.txt', 'w') as f:
        for (_, s) in sorted(vocab.items(), key=(lambda x: x[0])):
            print(f'{s} 1', file=f)
def recall_at_k(actual, predicted, topk):
    """Mean Recall@k over users.

    For each user with at least one ground-truth item, recall is the fraction
    of that user's actual items appearing in the top-``topk`` predictions;
    users with empty ground truth are excluded from the mean.

    Fix: the original raised ZeroDivisionError when every user had empty
    ground truth; 0.0 is returned in that case now.
    """
    sum_recall = 0.0
    true_users = 0
    for act, pred in zip(actual, predicted):
        act_set = set(act)
        if not act_set:
            continue  # no ground truth: excluded from the average
        pred_set = set(pred[:topk])
        sum_recall += len(act_set & pred_set) / float(len(act_set))
        true_users += 1
    return sum_recall / true_users if true_users else 0.0
class Example(UniqueRepresentation, Parent):
    """Sage example of a facade finite enumerated set containing {1, 2, 3}."""
    def __init__(self):
        # Backing elements as Sage Integers; the parent acts as a facade
        # over the integer ring within the FiniteEnumeratedSets category.
        self._set = [Integer(_) for _ in [1, 2, 3]]
        Parent.__init__(self, facade=IntegerRing(), category=FiniteEnumeratedSets())
    def _repr_(self):
        return 'An example of a finite enumerated set: {1,2,3}'
    def __contains__(self, o):
        return (o in self._set)
    def __iter__(self):
        return iter(self._set)
def _move_files(dirname, file_prefixes):
    """For each prefix, rename the unique ``{prefix}_*`` file in *dirname* to
    ``{prefix}{ext}``, keeping its extension.

    Raises NotImplementedError unless exactly one file matches a prefix.
    """
    print(file_prefixes)
    for prefix in file_prefixes:
        pattern = '{}_*'.format(osp.join(dirname, prefix))
        matching = glob.glob(pattern)
        print(matching)
        if len(matching) != 1:
            # Ambiguous or missing match: refuse to guess.
            raise NotImplementedError
        src = matching[0]
        ext = osp.splitext(src)[1]
        output = '{}/{}{}'.format(dirname, prefix, ext)
        print(src, output)
        shutil.move(src, output)
class StopwatchMeter(Meter):
    """Accumulates the total duration of repeated timed events (seconds).

    Fix: ``avg`` and ``elapsed_time`` are declared as properties.
    ``smoothed_value`` reads them as plain values (``self.avg``), which with
    the original method definitions yielded bound-method objects instead of
    numbers; fairseq's StopwatchMeter defines both as ``@property``.
    """
    def __init__(self, round: Optional[int]=None):
        self.round = round      # decimal places for smoothed_value, or None
        self.sum = 0            # total accumulated duration
        self.n = 0              # number of counted stop() events
        self.start_time = None  # None means the stopwatch is not running

    def start(self):
        self.start_time = time.perf_counter()

    def stop(self, n=1):
        # Ignored when the stopwatch was never started.
        if (self.start_time is not None):
            delta = (time.perf_counter() - self.start_time)
            self.sum = (self.sum + delta)
            self.n = (type_as(self.n, n) + n)

    def reset(self):
        self.sum = 0
        self.n = 0
        self.start()

    def state_dict(self):
        return {'sum': self.sum, 'n': self.n, 'round': self.round}

    def load_state_dict(self, state_dict):
        self.sum = state_dict['sum']
        self.n = state_dict['n']
        self.start_time = None
        self.round = state_dict.get('round', None)

    @property
    def avg(self):
        """Average duration per event; falls back to the raw sum when n == 0."""
        return ((self.sum / self.n) if (self.n > 0) else self.sum)

    @property
    def elapsed_time(self):
        """Seconds since start(), or 0.0 when not running."""
        if (self.start_time is None):
            return 0.0
        return (time.perf_counter() - self.start_time)

    def smoothed_value(self) -> float:
        # Prefer the accumulated average; fall back to the live elapsed time.
        val = (self.avg if (self.sum > 0) else self.elapsed_time)
        if ((self.round is not None) and (val is not None)):
            val = safe_round(val, self.round)
        return val
class train_discriminator():
    """One discriminator optimization step for a GAN over imbalanced classes.

    Real samples are labelled with the minority class id and generated samples
    with the majority class id, both scored with BCEWithLogitsLoss.

    NOTE(review): targets of 2.0/3.0 are unusual for BCEWithLogitsLoss (its
    targets are normally probabilities in [0, 1]) — confirm intent.

    Fix: the original wrote ``if self.minority_class == 3`` after the elif
    chain, so any other minority value left ``real_targets`` unbound and
    raised NameError; the chain is now a single if/elif with explicit
    ValueError for unsupported class ids (same for majority_class).
    """
    def __init__(self, real_data, latent_data, opt_d, generator, discriminator, device, minority_class, majority_class):
        self.real_data = real_data
        self.latent_data = latent_data
        self.opt_d = opt_d
        self.discriminator = discriminator
        self.generator = generator
        self.device = device
        self.minority_class = minority_class
        self.majority_class = majority_class

    def __call__(self):
        """Run one step; returns (loss, mean real score, mean fake score)."""
        self.opt_d.zero_grad()
        # Real samples are labelled with the minority class id.
        real_preds = self.discriminator(self.real_data)
        if (self.minority_class == 0):
            real_targets = torch.zeros_like(real_preds, device=self.device)
        elif (self.minority_class == 1):
            real_targets = torch.ones_like(real_preds, device=self.device)
        elif (self.minority_class == 2):
            real_targets = torch.full_like(real_preds, 2.0, device=self.device)
        elif (self.minority_class == 3):
            real_targets = torch.full_like(real_preds, 3.0, device=self.device)
        else:
            raise ValueError(f'unsupported minority_class: {self.minority_class}')
        criterion = nn.BCEWithLogitsLoss()
        real_loss = criterion(real_preds, real_targets)
        real_score = torch.mean(real_preds).item()
        # Generated (fake) samples are labelled with the majority class id.
        fake_data = self.generator(self.latent_data)
        fake_preds = self.discriminator(fake_data)
        if (self.majority_class == 0):
            fake_targets = torch.zeros_like(fake_preds, device=self.device)
        elif (self.majority_class == 1):
            fake_targets = torch.ones_like(fake_preds, device=self.device)
        else:
            raise ValueError(f'unsupported majority_class: {self.majority_class}')
        fake_loss = criterion(fake_preds, fake_targets)
        fake_score = torch.mean(fake_preds).item()
        loss = ((real_loss + fake_loss) / 2)
        loss.backward()
        self.opt_d.step()
        return (loss.item(), real_score, fake_score)
_cmd('python')
class Python():
    """Dev-CLI command: build the project, adjust PYTHONPATH/sys.path, then
    run a script as __main__ or drop into an interactive interpreter."""
    ctx = CONTEXT
    # CLI option: os.pathsep-separated paths to prepend to sys.path.
    pythonpath = Option(['--pythonpath', '-p'], metavar='PYTHONPATH', default=None, help='Paths to prepend to PYTHONPATH')
    # Positional args; first one is the script path, the rest its argv.
    extra_argv = Argument(['extra_argv'], nargs=(- 1), metavar='ARGS', required=False)
    def _setup(cls, pythonpath, **kwargs):
        # NOTE(review): first parameter is `cls` — presumably a @classmethod
        # whose decorator was lost in this copy; confirm against upstream.
        vals = Build.opt_defaults()
        vals.update(kwargs)
        Build.run(add_path=True, **vals)
        if pythonpath:
            # reversed() so the first entry ends up first on sys.path.
            for p in reversed(pythonpath.split(os.pathsep)):
                sys.path.insert(0, p)
    def run(cls, pythonpath, extra_argv=None, **kwargs):
        # NOTE(review): also takes `cls` — see the _setup note above.
        cls._setup(pythonpath, **kwargs)
        if extra_argv:
            # Execute the given script as if it were run directly.
            sys.argv = extra_argv
            with open(extra_argv[0]) as f:
                script = f.read()
            sys.modules['__main__'] = new_module('__main__')
            ns = dict(__name__='__main__', __file__=extra_argv[0])
            exec(script, ns)
        else:
            # No script given: open an interactive REPL.
            import code
            code.interact()
class TestFeatureImportance(unittest.TestCase):
    """Round-trip JSON serialization test for FeatureImportance explanations."""
    def test(self):
        exp = FeatureImportance(mode='classification')
        exp.add(instance=pd.DataFrame([['a', 'b'], ['c', 'd']], columns=['col 1', 'col 2']), target_label=0, feature_names=['a', 'b', 'c'], feature_values=[1, 2, 3], importance_scores=[0.1, 0.2, 0.3])
        # The serialized form must match the expected schema exactly.
        s = exp.to_json()
        self.assertEqual(s, '{"module": "omnixai.explanations.tabular.feature_importance", "class": "FeatureImportance", "data": {"mode": "classification", "explanations": [{"instance": {"col 1": {"0": "a", "1": "c"}, "col 2": {"0": "b", "1": "d"}}, "features": ["a", "b", "c"], "values": [1, 2, 3], "scores": [0.1, 0.2, 0.3], "target_label": 0}]}}')
        # Deserialize and check the explanation round-trips field by field.
        e = ExplanationBase.from_json(s)
        (a, b) = (exp.get_explanations(0), e.get_explanations(0))
        for name in ['features', 'values', 'scores']:
            self.assertListEqual(a[name], b[name])
        self.assertEqual(a['target_label'], b['target_label'])
        self.assertListEqual(list(a['instance'].columns), list(b['instance'].columns))
        self.assertListEqual(a['instance'].values.tolist(), b['instance'].values.tolist())
.parametrize('impl', MKL_AND_CUBLAS)
def test_4x4(impl):
    """Validate a batched 4-D einsum matmul SDFG against numpy on MKL/cuBLAS.

    Fixes: the final check was the syntactically invalid ``np.allclose((A B), C)``
    — the einsum 'abik,abkj->abij' is a batched matmul, so the reference is
    ``A @ B``. The ``@dace.program`` decorator is restored (``to_sdfg()`` below
    requires a dace program; it appears to have been lost in this copy).
    """
    A_desc = dace.float32[(8, 12, 5, 3)]
    B_desc = dace.float32[(8, 12, 3, 6)]
    C_desc = dace.float32[(8, 12, 5, 6)]
    with change_default(blas, impl):
        @dace.program
        def test_4x4(A: A_desc, B: B_desc, C: C_desc):
            C[:] = np.einsum('abik,abkj->abij', A, B)
        A = np.random.rand(*A_desc.shape).astype(np.float32)
        B = np.random.rand(*B_desc.shape).astype(np.float32)
        C = np.zeros(C_desc.shape).astype(np.float32)
        sdfg: dace.SDFG = test_4x4.to_sdfg()
        sdfg.name = (impl + '_einsum_4x4')
        if (impl == 'cuBLAS'):
            sdfg.apply_gpu_transformations()
        sdfg.expand_library_nodes()
        assert_used_environment(sdfg, impl)
        sdfg(A=A, B=B, C=C)
        assert np.allclose(A @ B, C)
class Argument():
    """Value object describing a single interface argument.

    Attributes: name, typename, direction, and an optional role
    (defaults to 'default'). Slotted — instances carry no __dict__.
    """
    __slots__ = ('name', 'typename', 'direction', 'role')

    def __init__(self, name, typename, direction, role='default'):
        # Assign each slot from the matching constructor argument.
        for attr, value in zip(self.__slots__, (name, typename, direction, role)):
            setattr(self, attr, value)
def register_coco_instances(name, metadata, json_file, image_root):
    """Register a COCO-format dataset in the DatasetCatalog/MetadataCatalog.

    Args:
        name: dataset key used for both catalogs.
        metadata: extra key/value metadata forwarded to MetadataCatalog.
        json_file: path to the COCO annotations json.
        image_root: directory containing the images.
    """
    # Lazy loader: the lambda captures json_file/image_root/name by closure,
    # so the json is only parsed when the dataset is actually requested.
    DatasetCatalog.register(name, (lambda : load_coco_json(json_file, image_root, name)))
    MetadataCatalog.get(name).set(json_file=json_file, image_root=image_root, evaluator_type='coco', **metadata)
def normalize_index(phyche_index, is_convert_dict=False):
    """Standardize each physicochemical index list to zero mean / unit SD.

    Each value is z-scored against its own list and rounded to 2 decimals;
    optionally converts the result to dict form.
    """
    normalized = []
    for values in phyche_index:
        mean_value = sum(values) * 1.0 / len(values)
        sd = standard_deviation(values)
        normalized.append([round((v - mean_value) / sd, 2) for v in values])
    if is_convert_dict is True:
        return convert_phyche_index_to_dict(normalized)
    return normalized
_serialization_tests
def test_keras_testing_util_layer_test_multidim(kernel_cls, batch_size, n_dims, n_components):
    """Run Keras' generic layer_test harness on QuadratureFourierFeatures.

    Parameters look pytest-parametrized (the decorator appears to have been
    lost in this copy — confirm against the original test module).
    """
    kernel = kernel_cls()
    # Register the custom layer so layer_test can rebuild it from its config.
    tf.keras.utils.get_custom_objects()['QuadratureFourierFeatures'] = QuadratureFourierFeatures
    layer_test(QuadratureFourierFeatures, kwargs={'kernel': kernel, 'n_components': n_components, 'input_dim': n_dims, 'dtype': 'float64', 'dynamic': True}, input_shape=(batch_size, n_dims), input_dtype='float64')
class TruncExpon(ReferenceDistribution):
    """Arbitrary-precision reference for the truncated exponential on [0, b],
    computed with mpmath (``mp``)."""
    def __init__(self, *, b):
        super().__init__(b=b)
    def _support(self, b):
        # Supported on the interval [0, b].
        return (0, b)
    def _pdf(self, x, b):
        # pdf(x) = exp(-x) / (1 - exp(-b)); written via expm1 for accuracy:
        # -exp(-x) / expm1(-b) == exp(-x) / (1 - exp(-b)).
        return ((- mp.exp((- x))) / mp.expm1((- b)))
    def _sf(self, x, b):
        # sf(x) = (exp(-x) - exp(-b)) / (1 - exp(-b)),
        # expressed as (exp(-b) - exp(-x)) / expm1(-b).
        return ((mp.exp((- b)) - mp.exp((- x))) / mp.expm1((- b)))
class FrozenRequirement(object):
    """A requirement line for ``pip freeze`` output, possibly editable (VCS).

    Fixes relative to the source copy:
    - ``from_dist`` is restored as a @classmethod and ``egg_name`` as a
      @staticmethod (both decorators were lost; ``from_dist`` references
      ``cls`` and is called as ``FrozenRequirement.from_dist(...)``).
    - The SVN editable requirement used the broken format string
      ``'%%s#egg=%s'`` with three arguments (a guaranteed TypeError); the
      correct pip format is ``'%s@%s#egg=%s'``.
    """
    def __init__(self, name, req, editable, comments=()):
        self.name = name
        self.req = req
        self.editable = editable
        self.comments = comments

    # Version suffixes produced by SVN checkouts: '-r1234' or '-20180102...'.
    _rev_re = re.compile('-r(\\d+)$')
    _date_re = re.compile('-(20\\d\\d\\d\\d\\d\\d)$')

    @classmethod
    def from_dist(cls, dist, dependency_links):
        """Build a FrozenRequirement from an installed distribution."""
        location = os.path.normcase(os.path.abspath(dist.location))
        comments = []
        from pip._internal.vcs import vcs, get_src_requirement
        if (dist_is_editable(dist) and vcs.get_backend_name(location)):
            editable = True
            try:
                req = get_src_requirement(dist, location)
            except InstallationError as exc:
                logger.warning('Error when trying to get requirement for VCS system %s, falling back to uneditable format', exc)
                req = None
            if (req is None):
                # VCS detected but the URL could not be derived; fall back.
                logger.warning('Could not determine repository location of %s', location)
                comments.append('## !! Could not determine repository location')
                req = dist.as_requirement()
                editable = False
        else:
            editable = False
            req = dist.as_requirement()
            specs = req.specs
            assert ((len(specs) == 1) and (specs[0][0] in ['==', '==='])), ('Expected 1 spec with == or ===; specs = %r; dist = %r' % (specs, dist))
            version = specs[0][1]
            ver_match = cls._rev_re.search(version)
            date_match = cls._date_re.search(version)
            if (ver_match or date_match):
                # Version carries an SVN revision/date marker: try to recover
                # the SVN URL and emit an editable requirement instead.
                svn_backend = vcs.get_backend('svn')
                if svn_backend:
                    svn_location = svn_backend().get_location(dist, dependency_links)
                if (not svn_location):
                    logger.warning('Warning: cannot find svn location for %s', req)
                    comments.append('## FIXME: could not find svn URL in dependency_links for this package:')
                else:
                    deprecated('SVN editable detection based on dependency links will be dropped in the future.', replacement=None, gone_in='18.2', issue=4187)
                    comments.append(('# Installing as editable to satisfy requirement %s:' % req))
                    if ver_match:
                        rev = ver_match.group(1)
                    else:
                        rev = ('{%s}' % date_match.group(1))
                    editable = True
                    # Fix: was '%%s#egg=%s' with three args (TypeError).
                    req = ('%s@%s#egg=%s' % (svn_location, rev, cls.egg_name(dist)))
        return cls(dist.project_name, req, editable, comments)

    @staticmethod
    def egg_name(dist):
        """Return the dist's egg name with any trailing '-pyX.Y' stripped."""
        name = dist.egg_name()
        match = re.search('-py\\d\\.\\d$', name)
        if match:
            name = name[:match.start()]
        return name

    def __str__(self):
        req = self.req
        if self.editable:
            req = ('-e %s' % req)
        return ('\n'.join((list(self.comments) + [str(req)])) + '\n')
def get_weights(weights):
    """Download pretrained weights for *weights* from Google Drive via gdown.

    Saves the file as ``./{weights}.pkl``. The ids in ``weights_list`` look
    like Google Drive file ids, and the original URL line was corrupted in
    this copy — the standard gdown 'uc?id=' form is reconstructed below;
    TODO confirm against the project's original source.
    """
    weights_list = {'resnet': '1Bw4gUsRBxy8XZDGchPJ_URQjbHItikjw', 'resnet18': '1k_v1RrDO6da_NDhBtMZL5c0QSogCmiRn', 'vgg11': '1vZcB-NaPUCovVA-pH-g-3NNJuUA948ni'}
    url = f'https://drive.google.com/uc?id={weights_list[weights]}'
    output = f'./{weights}.pkl'
    gdown.download(url, output, quiet=False)
def get_dm_mujoco():
    """Return the ``dm_control.mujoco`` module, importing it on first use.

    The import is cached in the module-level ``_DM_MUJOCO_MODULE``; when
    dm_control is not installed an error is printed to stderr and the
    process exits with status 1.
    """
    global _DM_MUJOCO_MODULE
    if _DM_MUJOCO_MODULE:
        # Already imported on a previous call.
        return _DM_MUJOCO_MODULE
    try:
        from dm_control import mujoco
    except ImportError:
        print('Failed to import dm_control.mujoco. Ensure that dm_control (using MuJoCo v2.00) is installed.', file=sys.stderr)
        sys.exit(1)
    _DM_MUJOCO_MODULE = mujoco
    return mujoco
def frechet_distance(mu, cov, mu2, cov2):
    """Fréchet distance between two Gaussians (mu, cov) and (mu2, cov2).

    Computes ||mu - mu2||^2 + Tr(cov + cov2 - 2*sqrtm(cov @ cov2)); the real
    part is taken because sqrtm can return a complex-valued matrix.
    """
    covmean, _ = linalg.sqrtm(np.dot(cov, cov2), disp=False)
    mean_term = np.sum((mu - mu2) ** 2)
    trace_term = np.trace(cov + cov2 - 2 * covmean)
    return np.real(mean_term + trace_term)
class SearchAlgorithm():
    """Abstract interface for substring-search algorithms."""
    def __init__(self) -> None:
        pass
    def search(self, pattern: str, text: str) -> int:
        """Find *pattern* in *text*; the annotation implies subclasses return
        a match index. This base-class stub returns None."""
        pass
class Replace(Common):
    """Rewrites 'Plan Rows' estimates in an EXPLAIN plan tree using fitted
    regression parameters (Reg_Params), one node type at a time.

    ``replace_plan_rows`` visits node depths from ``numNode`` down to 1, so
    child estimates produced in earlier passes feed parent estimates.
    """
    def __init__(self, log_level=Log.info):
        # __numNode / __ar are initialised here but not used in this view.
        self.__numNode = 0
        self.__ar = []
        # Verbosity threshold; higher Log levels mean more debug output.
        self.LogLevel = log_level
    def __calc(self, plan, param, queryid, planid, depth):
        # Re-estimate the 'Plan Rows' of a single node from its type and
        # the regression parameters in `param`.
        def get_children_plan_rows(plan):
            # Collect rows and normalization params of the two children,
            # index 0 = 'Outer' child, index 1 = 'Inner' child.
            _X = [[], []]
            _NP = [[], []]
            _NPP = [[], []]
            for i in range(0, 2):
                p = plan['Plans'][i]
                k = (0 if (p['Parent Relationship'] == 'Outer') else 1)
                _X[k] = p['Plan Rows']
                _NP[k] = p['NormalizeParam']
                _NPP[k] = p['NormalizePlanParam']
            return (_X[0], _X[1], _NP[0], _NP[1], _NPP[0], _NPP[1])
        def get_inputs(plan):
            # Children inputs plus the node's removed-row count.
            (_Xouter, _Xinner, _NPouter, _NPinner, _NPPouter, _NPPinner) = get_children_plan_rows(plan)
            if (Log.debug3 <= self.LogLevel):
                print('Debug3: Xouter={} Xinner={} normalize({}, {}) normalizePlanPalam({}, {})'.format(_Xouter, _Xinner, _NPouter, _NPinner, _NPPouter, _NPPinner))
            _RR = self.count_removed_rows(plan)
            return (_Xouter, _Xinner, _NPouter, _NPinner, _NPPouter, _NPPinner, _RR)
        '\n Start processing\n '
        _node_type = plan['Node Type']
        if (Log.debug1 <= self.LogLevel):
            print('Debug1: count={} depth={} Node Type={}'.format(self._count, self._depth, plan['Node Type']))
        '\n nested loop type\n '
        # Nested-loop-like nodes: rows = coef * Xouter * Xinner.
        for n in ('Append', 'Merge Append', 'Recursive Union', 'Nested Loop', 'BitmapAnd', 'BitmapOr'):
            if (n == _node_type):
                (_Xouter, _Xinner, _NPouter, _NPinner, _NPPouter, _NPPinner, _RR) = get_inputs(plan)
                if (Log.debug1 <= self.LogLevel):
                    print('Debug1: Y ActualRows={} NormalizeParam={}'.format(plan['Actual Rows'], plan['NormalizeParam']))
                    print('Debug1: Xouter ={} NormalizeParam={}'.format(_Xouter, _NPouter))
                    print('Debug1: Xinner ={} NormalizeParam={}'.format(_Xinner, _NPinner))
                _EstimatedRows = round(((param['Coefficient'][0] * _Xouter) * _Xinner))
                if (Log.debug1 <= self.LogLevel):
                    print('Debug1: EstimatedRows({}) = Coef({}) * Xouter({}) * Xinner({})'.format(_EstimatedRows, param['Coefficient'][0], _Xouter, _Xinner))
                plan.update(Coefficient=param['Coefficient'][0])
                plan.update({'Plan Rows': _EstimatedRows})
                if (Log.debug1 <= self.LogLevel):
                    # NOTE(review): recorded *after* 'Plan Rows' was overwritten,
                    # so OriginalPlanRows holds the new estimate — confirm intent.
                    plan.update(OriginalPlanRows=plan['Plan Rows'])
                return
        '\n hash or merge join\n '
        # Join nodes: either the multiplicative model (Coefficient2) when the
        # linear coefficients are both zero, or the linear model otherwise.
        for n in ('Merge Join', 'Hash Join'):
            if (n == _node_type):
                (_Xouter, _Xinner, _NPouter, _NPinner, _NPPouter, _NPPinner, _RR) = get_inputs(plan)
                if (Log.debug1 <= self.LogLevel):
                    print('Debug1: Y ActualRows={} NormalizeParam={}'.format(plan['Actual Rows'], plan['NormalizeParam']))
                    print('Debug1: Xouter ={} NormalizeParam={}'.format(_Xouter, _NPouter))
                    print('Debug1: Xinner ={} NormalizeParam={}'.format(_Xinner, _NPinner))
                if ((param['Coefficient'][0] == 0) and (param['Coefficient'][1] == 0)):
                    # rows = coef2 * Xouter * Xinner + intercept
                    _EstimatedRows = round((((param['Coefficient2'][0] * _Xouter) * _Xinner) + param['Intercept'][0]))
                    plan.update(Coefficient=[0, 0])
                    plan.update(Coefficient2=param['Coefficient2'][0])
                    plan.update(Intercept=param['Intercept'])
                    if (Log.debug1 <= self.LogLevel):
                        print('Debug1: EstimatedRows({}) = Coef({}) * Xouter({}) * Xinner({}) + {}'.format(_EstimatedRows, param['Coefficient2'][0], _Xouter, _Xinner, param['Intercept']))
                else:
                    # rows = coef0 * Xouter + coef1 * Xinner + intercept
                    _EstimatedRows = round((((param['Coefficient'][0] * _Xouter) + (param['Coefficient'][1] * _Xinner)) + param['Intercept'][0]))
                    plan.update(Coefficient=param['Coefficient'])
                    plan.update(Coefficient2=0)
                    plan.update(Intercept=param['Intercept'])
                    if (Log.debug1 <= self.LogLevel):
                        print('Debug1: EstimatedRows({}) = Coef({}) * Xouter({}) + Coef({}) * Xinner({}) + {}'.format(_EstimatedRows, param['Coefficient'][0], _Xouter, param['Coefficient'][1], _Xinner, param['Intercept']))
                plan.update({'Plan Rows': _EstimatedRows})
                if (Log.debug1 <= self.LogLevel):
                    # NOTE(review): same post-overwrite recording as above.
                    plan.update(OriginalPlanRows=plan['Plan Rows'])
                return
        '\n scan type\n '
        # Leaf/scan nodes: linear model on the node's own planned rows,
        # rescaled by the normalization parameters.
        if ('Plan Rows' in plan):
            _EstimatedRows = ((param['Coefficient'][0] * plan['Plan Rows']) + param['Intercept'][0])
            _EstimatedRows = round(((_EstimatedRows * plan['NormalizePlanParam']) / plan['NormalizeParam']))
            if (Log.debug1 <= self.LogLevel):
                print('Debug1: EstimatedRows({}) = [Coef({}) * PlanRows({}) + Intercept({})] * NormalizePlan({}) / Normalize({})'.format(_EstimatedRows, param['Coefficient'][0], plan['Plan Rows'], param['Intercept'][0], plan['NormalizePlanParam'], plan['NormalizeParam']))
            plan.update(Coefficient=param['Coefficient'][0])
            plan.update({'Plan Rows': _EstimatedRows})
            if (Log.debug1 <= self.LogLevel):
                plan.update(OriginalPlanRows=plan['Plan Rows'])
            return
    def __replace(self, Plans, Reg_Params, depth, queryid, planid):
        # Walk the plan tree in parallel with the params tree and apply
        # __calc to the node whose visit order equals `depth`.
        def incr(plan):
            if ('Node Type' in plan):
                self._count += 1
        def op(Plans, Reg_Params, queryid, planid):
            if isinstance(Plans, list):
                for i in range(0, len(Plans)):
                    incr(Plans[i])
                    if (self._depth == self._count):
                        self.__calc(Plans[i], Reg_Params[i], queryid, planid, self._count)
                        return
                    elif ('Plans' in Plans[i]):
                        op(Plans[i]['Plans'], Reg_Params[i]['Plans'], queryid, planid)
            else:
                incr(Plans)
                if (self._depth == self._count):
                    self.__calc(Plans, Reg_Params, queryid, planid, self._count)
                    return
                elif ('Plans' in Plans):
                    op(Plans['Plans'], Reg_Params['Plans'], queryid, planid)
        self._depth = depth
        self._count = 0
        op(Plans, Reg_Params, queryid, planid)
    '\n Public method\n '
    def replace_plan_rows(self, Plans, Reg_Params, numNode, queryid, planid):
        # Re-estimate each of the numNode nodes, from the deepest visit
        # index down to 1 (children before parents).
        i = numNode
        if (Log.debug1 <= self.LogLevel):
            print('Debug1: >>> Start replace')
        while (0 < i):
            if (Log.debug1 <= self.LogLevel):
                print('Debug1: >>> replace i = {}'.format(i))
            self.__replace(Plans, Reg_Params, i, queryid, planid)
            i -= 1
class DoubleSignal(Signal):
    """Wrapper around a named simulator double signal (``sim`` API)."""
    def set(self, value) -> None:
        """Write *value* to the signal."""
        sim.simSetDoubleSignal(self._name, value)
    def get(self) -> float:
        """Read the signal's current value; _check_signal validates the
        simulator return code before the value is returned."""
        (ret, value) = sim.simGetDoubleSignal(self._name)
        self._check_signal(ret, 'double')
        return value
    def clear(self) -> int:
        """Clear the signal; returns the simulator's result code."""
        return sim.simClearDoubleSignal(self._name)
def models_all_close(*models):
    """Check every model against the first via are_same_models.

    Requires at least two models.
    """
    assert len(models) > 1
    reference = models[0]
    for candidate in models[1:]:
        are_same_models(reference, candidate)
def main(args):
    """Deblur every image in ``{args.dpath}/image`` with NAFNet-REDS-width64,
    then swap the deblurred directory in place of the original one.

    Fix: the gdown download line was corrupted in this copy (the URL literal
    was lost, leaving invalid syntax); the call structure is restored with a
    placeholder URL variable.
    """
    weights_path = './experiments/pretrained_models/NAFNet-REDS-width64.pth'
    if not os.path.exists(weights_path):
        # NOTE(review): the original Google Drive URL for
        # NAFNet-REDS-width64.pth was lost in this copy — restore it from the
        # NAFNet README before use; TODO confirm.
        weights_url = ''
        gdown.download(weights_url, './experiments/pretrained_models/', quiet=False)
    opt_path = '/opt/NAFNet/options/test/REDS/NAFNet-width64.yml'
    opt = parse(opt_path, is_train=False)
    opt['dist'] = False
    model = create_model(opt)
    fpaths = sorted(glob.glob(f'{args.dpath}/image/*'))
    os.makedirs(f'{args.dpath}/image_deblurred', exist_ok=True)
    for fpath in fpaths:
        print(f'Deblurring {fpath}...')
        img = imread(fpath)
        img = img2tensor(img)
        model.feed_data(data={'lq': img.unsqueeze(dim=0)})
        # Tile-based inference when the config enables 'grids'.
        if model.opt['val'].get('grids', False):
            model.grids()
        model.test()
        if model.opt['val'].get('grids', False):
            model.grids_inverse()
        visuals = model.get_current_visuals()
        sr_img = tensor2img([visuals['result']])
        path = Path(fpath)
        opath = f'{args.dpath}/image_deblurred/{path.name}'
        imwrite(sr_img, opath)
    # Swap directories: keep originals under image_origin.
    os.rename(f'{args.dpath}/image', f'{args.dpath}/image_origin')
    os.rename(f'{args.dpath}/image_deblurred', f'{args.dpath}/image')
class _CVObjects(_Constraint):
    """Constraint accepting cross-validation specifiers: an int >= 2 (fold
    count), a CV splitter object (has split/get_n_splits), a non-string
    iterable of splits, or None."""
    def __init__(self):
        super().__init__()
        self._constraints = [Interval(Integral, 2, None, closed='left'), HasMethods(['split', 'get_n_splits']), _IterablesNotString(), _NoneConstraint()]
    def is_satisfied_by(self, val):
        # Satisfied when any sub-constraint accepts the value.
        return any((c.is_satisfied_by(val) for c in self._constraints))
    def __str__(self):
        # Human-readable union: "a, b, c or d".
        return f"{', '.join([str(c) for c in self._constraints[:(- 1)]])} or {self._constraints[(- 1)]}"
def patch_runtime():
    """Monkey-patch the local runtime module: wrap BlockReference calls and
    Macro invocation with the project's wrapper functions.

    Imported lazily to avoid a circular import with .runtime.
    """
    from .runtime import BlockReference, Macro
    BlockReference.__call__ = wrap_block_reference_call(BlockReference.__call__)
    Macro._invoke = wrap_macro_invoke(Macro._invoke)
class Sequence(object):
    """Minimal dataset-sequence interface: indexed batch access, a length,
    and an end-of-epoch hook (Keras-style — confirm against callers)."""
    def __getitem__(self, index):
        """Return the item/batch at *index*; must be implemented by subclasses."""
        raise NotImplementedError
    def __len__(self):
        """Return the number of items/batches; must be implemented by subclasses."""
        raise NotImplementedError
    def on_epoch_end(self):
        """Hook invoked at the end of each epoch; default is a no-op."""
        pass
def build_default_pretrains(default_treebanks):
    """Derive the default pretrain map from the default treebank map.

    Starts from a copy of ``default_treebanks``, drops the languages listed
    in the module-level ``no_pretrain_languages``, then applies the
    ``specific_default_pretrains`` overrides.

    Improvement: the manual key-by-key override loop is replaced with the
    equivalent ``dict.update``.
    """
    default_pretrains = dict(default_treebanks)
    for lang in no_pretrain_languages:
        default_pretrains.pop(lang, None)
    default_pretrains.update(specific_default_pretrains)
    return default_pretrains
def test_rosetta():
    """Smoke test of the RosettaFolding landscape on the 3msi problem; warns
    and skips when PyRosetta is not installed (ImportError path)."""
    try:
        problem = flexs.landscapes.rosetta.registry()['3msi']
        landscape = flexs.landscapes.RosettaFolding(**problem['params'])
        seq_length = len(landscape.wt_pose.sequence())
        # 100 random amino-acid sequences matching the wild-type length.
        test_seqs = s_utils.generate_random_sequences(seq_length, 100, s_utils.AAS)
        landscape.get_fitness(test_seqs)
    except ImportError:
        warnings.warn('Skipping RosettaFolding landscape test since PyRosetta not installed.')
def pad_lr(x, fsize, fshift):
    """Compute (left, right) padding so framed signal *x* covers all samples.

    The right pad adds the remainder needed for the last frame of size
    *fsize* with hop *fshift* to fit after symmetric padding.
    """
    frame_count = num_frames(len(x), fsize, fshift)
    left_pad = fsize - fshift
    padded_len = len(x) + 2 * left_pad
    remainder = (frame_count - 1) * fshift + fsize - padded_len
    return (left_pad, left_pad + remainder)
class TestModels(TestCase):
keep_initializers_as_inputs = False
from torch.onnx.symbolic_helper import _export_onnx_opset_version
opset_version = _export_onnx_opset_version
def exportTest(self, model, inputs, rtol=0.01, atol=1e-07):
with torch.onnx.select_model_mode_for_export(model, None):
graph = torch.onnx.utils._trace(model, inputs, OperatorExportTypes.ONNX)
torch._C._jit_pass_lint(graph)
verify(model, inputs, backend, rtol=rtol, atol=atol)
def test_ops(self):
x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
self.exportTest(toC(DummyNet()), toC(x))
def test_prelu(self):
x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
self.exportTest(PReluNet(), x)
()
def test_concat(self):
input_a = Variable(torch.randn(BATCH_SIZE, 3))
input_b = Variable(torch.randn(BATCH_SIZE, 3))
inputs = ((toC(input_a), toC(input_b)),)
self.exportTest(toC(ConcatNet()), inputs)
def test_permute(self):
x = Variable(torch.randn(BATCH_SIZE, 3, 10, 12))
self.exportTest(PermuteNet(), x)
()
def test_embedding_sequential_1(self):
x = Variable(torch.randint(0, 10, (BATCH_SIZE, 3)))
self.exportTest(EmbeddingNetwork1(), x)
()
def test_embedding_sequential_2(self):
x = Variable(torch.randint(0, 10, (BATCH_SIZE, 3)))
self.exportTest(EmbeddingNetwork2(), x)
('This model takes too much memory')
def test_srresnet(self):
x = Variable(torch.randn(1, 3, 224, 224).fill_(1.0))
self.exportTest(toC(SRResNet(rescale_factor=4, n_filters=64, n_blocks=8)), toC(x))
def test_super_resolution(self):
x = Variable(torch.randn(BATCH_SIZE, 1, 224, 224).fill_(1.0))
self.exportTest(toC(SuperResolutionNet(upscale_factor=3)), toC(x), atol=1e-06)
def test_alexnet(self):
x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
self.exportTest(toC(alexnet()), toC(x))
()
def test_mnist(self):
x = Variable(torch.randn(BATCH_SIZE, 1, 28, 28).fill_(1.0))
self.exportTest(toC(MNIST()), toC(x))
('This model takes too much memory')
def test_vgg16(self):
x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
self.exportTest(toC(vgg16()), toC(x))
# NOTE(review): bare string below is likely a stripped skip decorator's argument.
('This model takes too much memory')
def test_vgg16_bn(self):
    """Export/verify torchvision VGG-16 with batch norm."""
    x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
    self.exportTest(toC(vgg16_bn()), toC(x))
# NOTE(review): bare string below is likely a stripped skip decorator's argument.
('This model takes too much memory')
def test_vgg19(self):
    """Export/verify torchvision VGG-19."""
    x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
    self.exportTest(toC(vgg19()), toC(x))
# NOTE(review): bare string below is likely a stripped skip decorator's argument.
('This model takes too much memory')
def test_vgg19_bn(self):
    """Export/verify torchvision VGG-19 with batch norm."""
    x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
    self.exportTest(toC(vgg19_bn()), toC(x))
def test_resnet(self):
    """Export/verify torchvision ResNet-50 (looser atol)."""
    x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
    self.exportTest(toC(resnet50()), toC(x), atol=1e-06)
# NOTE(review): bare `()` below looks like a mangled decorator -- restore if known.
()
def test_inception(self):
    """Export/verify torchvision Inception v3 (299x299 input)."""
    x = Variable((torch.randn(BATCH_SIZE, 3, 299, 299) + 1.0))
    self.exportTest(toC(inception_v3()), toC(x))
def test_squeezenet(self):
    """Export/verify SqueezeNet in two configurations."""
    x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
    # NOTE(review): variable is named v1_0 but is constructed with version=1.1 --
    # presumably intended to be version 1.0; confirm before changing.
    sqnet_v1_0 = SqueezeNet(version=1.1)
    self.exportTest(toC(sqnet_v1_0), toC(x))
    x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
    sqnet_v1_1 = SqueezeNet(version=1.1)
    self.exportTest(toC(sqnet_v1_1), toC(x))
# NOTE(review): bare `()` below looks like a mangled decorator -- restore if known.
()
def test_densenet(self):
    """Export/verify torchvision DenseNet-121 (loosened tolerances)."""
    x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
    self.exportTest(toC(densenet121()), toC(x), rtol=0.01, atol=1e-05)
# NOTE(review): bare `()` below looks like a mangled decorator -- restore if known.
()
def test_dcgan_netD(self):
    """Export/verify the DCGAN discriminator with normal-initialized weights."""
    netD = _netD(1)
    netD.apply(weights_init)
    input = Variable(torch.Tensor(bsz, 3, imgsz, imgsz).normal_(0, 1))
    self.exportTest(toC(netD), toC(input))
# NOTE(review): bare `()` below looks like a mangled decorator -- restore if known.
()
def test_dcgan_netG(self):
    """Export/verify the DCGAN generator fed with a latent noise vector."""
    netG = _netG(1)
    netG.apply(weights_init)
    input = Variable(torch.Tensor(bsz, nz, 1, 1).normal_(0, 1))
    self.exportTest(toC(netG), toC(input))
# NOTE(review): bare `(10)` below is likely the argument of a stripped decorator
# (e.g. a min-opset marker) -- restore the decorator if known.
(10)
def test_fake_quant(self):
    """Export/verify a network containing fake-quantization ops."""
    x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
    self.exportTest(toC(FakeQuantNet()), toC(x))
# NOTE(review): bare `(10)` below is likely a stripped min-opset decorator argument.
(10)
def test_qat_resnet(self):
    """Export/verify a quantization-aware-training-prepared ResNet-50.

    Prepares QAT with fake-quant observers, runs one forward pass so the
    observers see data, freezes qparams, then exports."""
    x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
    qat_resnet50 = resnet50()
    qat_resnet50.qconfig = quantization.QConfig(activation=quantization.default_fake_quant, weight=quantization.default_fake_quant)
    quantization.prepare_qat(qat_resnet50, inplace=True)
    qat_resnet50.apply(torch.quantization.enable_observer)
    qat_resnet50.apply(torch.quantization.enable_fake_quant)
    # One calibration pass so the observers collect statistics.
    _ = qat_resnet50(x)
    for module in qat_resnet50.modules():
        if isinstance(module, quantization.FakeQuantize):
            module.calculate_qparams()
    qat_resnet50.apply(torch.quantization.disable_observer)
    self.exportTest(toC(qat_resnet50), toC(x))
# NOTE(review): bare `()` below looks like a mangled decorator -- restore if known.
()
def test_googlenet(self):
    """Export/verify torchvision GoogLeNet (loosened tolerances)."""
    x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
    self.exportTest(toC(googlenet()), toC(x), rtol=0.001, atol=1e-05)
def test_mnasnet(self):
    """Export/verify torchvision MnasNet 1.0 (loosened tolerances)."""
    x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
    self.exportTest(toC(mnasnet1_0()), toC(x), rtol=0.001, atol=1e-05)
def test_mobilenet(self):
    """Export/verify torchvision MobileNet v2 (loosened tolerances)."""
    x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
    self.exportTest(toC(mobilenet_v2()), toC(x), rtol=0.001, atol=1e-05)
# NOTE(review): bare `()` below looks like a mangled decorator -- restore if known.
()
def test_shufflenet(self):
    """Export/verify torchvision ShuffleNet v2 x1.0 (loosened tolerances)."""
    x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
    self.exportTest(toC(shufflenet_v2_x1_0()), toC(x), rtol=0.001, atol=1e-05)
# NOTE(review): bare `(11)` below is likely a stripped min-opset decorator argument.
(11)
def test_fcn(self):
    """Export/verify torchvision FCN-ResNet101 segmentation model."""
    x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
    self.exportTest(toC(fcn_resnet101()), toC(x), rtol=0.001, atol=1e-05)
# NOTE(review): bare `(11)` below is likely a stripped min-opset decorator argument.
(11)
def test_deeplab(self):
    """Export/verify torchvision DeepLabV3-ResNet101 segmentation model."""
    x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
    self.exportTest(toC(deeplabv3_resnet101()), toC(x), rtol=0.001, atol=1e-05)
def test_r3d_18_video(self):
    """Export/verify the R3D-18 video model (5-D clip input)."""
    x = Variable(torch.randn(1, 3, 4, 112, 112).fill_(1.0))
    self.exportTest(toC(r3d_18()), toC(x), rtol=0.001, atol=1e-05)
def test_mc3_18_video(self):
    """Export/verify the MC3-18 video model (5-D clip input)."""
    x = Variable(torch.randn(1, 3, 4, 112, 112).fill_(1.0))
    self.exportTest(toC(mc3_18()), toC(x), rtol=0.001, atol=1e-05)
def test_r2plus1d_18_video(self):
    """Export/verify the R(2+1)D-18 video model (5-D clip input)."""
    x = Variable(torch.randn(1, 3, 4, 112, 112).fill_(1.0))
    self.exportTest(toC(r2plus1d_18()), toC(x), rtol=0.001, atol=1e-05)
class MIRNet(nn.Module):
    """MIRNet image-restoration backbone.

    A feature-lifting conv, a stack of recursive residual groups (RRG), and a
    projection conv back to image space, with a global residual connection.
    Note: the residual `+ x` requires out_channels == in_channels.
    """

    def __init__(self, in_channels=3, out_channels=3, n_feat=64, kernel_size=3, stride=2, n_RRG=3, n_MSRB=2, height=3, width=2, bias=False):
        super(MIRNet, self).__init__()
        same_pad = (kernel_size - 1) // 2  # "same" padding for odd kernels
        self.conv_in = nn.Conv2d(in_channels, n_feat, kernel_size=kernel_size, padding=same_pad, bias=bias)
        self.body = nn.Sequential(*[RRG(n_feat, n_MSRB, height, width, stride, bias) for _ in range(n_RRG)])
        self.conv_out = nn.Conv2d(n_feat, out_channels, kernel_size=kernel_size, padding=same_pad, bias=bias)

    def forward(self, x):
        """Restore `x`; returns conv_out(body(conv_in(x))) + x."""
        features = self.conv_in(x)
        features = self.body(features)
        return self.conv_out(features) + x
def main():
    """Entry point for Detectron-style Mask R-CNN step-based training.

    Parses CLI args, adapts cfg to the available GPUs/batch size, builds the
    roidb dataloader and Generalized_RCNN model, sets up per-group optimizer
    params, optionally resumes from a checkpoint, and runs the training loop
    with warm-up / step-decay learning-rate scheduling and periodic checkpoints.
    """
    args = parse_args()
    print('Called with args:')
    print(args)
    if (not torch.cuda.is_available()):
        sys.exit('Need a CUDA device to run the code.')
    if (args.cuda or (cfg.NUM_GPUS > 0)):
        cfg.CUDA = True
    else:
        raise ValueError('Need Cuda device to run !')
    # --- dataset selection -> cfg.TRAIN.DATASETS / NUM_CLASSES ---
    if (args.dataset == 'coco2017'):
        cfg.TRAIN.DATASETS = ('coco_2017_train',)
        cfg.MODEL.NUM_CLASSES = 81
    elif (args.dataset == 'keypoints_coco2017'):
        cfg.TRAIN.DATASETS = ('keypoints_coco_2017_train',)
        cfg.MODEL.NUM_CLASSES = 2
    elif (args.dataset == 'pascal_voc'):
        cfg.TRAIN.DATASETS = ('voc_2007_train', 'voc_2007_val')
        cfg.MODEL.NUM_CLASSES = 21
    elif (args.dataset == 'pascal_voc_0712'):
        cfg.TRAIN.DATASETS = ('voc_2007_train', 'voc_2007_val', 'voc_2012_train', 'voc_2012_val')
        cfg.MODEL.NUM_CLASSES = 21
    elif args.dataset.startswith('vg'):
        # Visual-Genome-style datasets; NUM_CLASSES presumably set by cfg file.
        cfg.TRAIN.DATASETS = (('%s_train' % args.dataset),)
    else:
        raise ValueError('Unexpected args.dataset: {}'.format(args.dataset))
    cfg_from_file(args.cfg_file)
    if (args.set_cfgs is not None):
        cfg_from_list(args.set_cfgs)
    # --- adapt batch size / LR / schedule to the actual GPU count ---
    original_batch_size = (cfg.NUM_GPUS * cfg.TRAIN.IMS_PER_BATCH)
    original_ims_per_batch = cfg.TRAIN.IMS_PER_BATCH
    original_num_gpus = cfg.NUM_GPUS
    if (args.batch_size is None):
        args.batch_size = original_batch_size
    cfg.NUM_GPUS = torch.cuda.device_count()
    assert ((args.batch_size % cfg.NUM_GPUS) == 0), ('batch_size: %d, NUM_GPUS: %d' % (args.batch_size, cfg.NUM_GPUS))
    cfg.TRAIN.IMS_PER_BATCH = (args.batch_size // cfg.NUM_GPUS)
    effective_batch_size = (args.iter_size * args.batch_size)
    print(('effective_batch_size = batch_size * iter_size = %d * %d' % (args.batch_size, args.iter_size)))
    print('Adaptive config changes:')
    print(('    effective_batch_size: %d --> %d' % (original_batch_size, effective_batch_size)))
    print(('    NUM_GPUS:             %d --> %d' % (original_num_gpus, cfg.NUM_GPUS)))
    print(('    IMS_PER_BATCH:        %d --> %d' % (original_ims_per_batch, cfg.TRAIN.IMS_PER_BATCH)))
    # Linear LR scaling with batch size.
    old_base_lr = cfg.SOLVER.BASE_LR
    cfg.SOLVER.BASE_LR *= (args.batch_size / original_batch_size)
    print('Adjust BASE_LR linearly according to batch_size change:\n    BASE_LR: {} --> {}'.format(old_base_lr, cfg.SOLVER.BASE_LR))
    # Rescale the decay steps / max iter so the schedule covers the same epochs.
    step_scale = (original_batch_size / effective_batch_size)
    old_solver_steps = cfg.SOLVER.STEPS
    old_max_iter = cfg.SOLVER.MAX_ITER
    cfg.SOLVER.STEPS = list(map((lambda x: int(((x * step_scale) + 0.5))), cfg.SOLVER.STEPS))
    cfg.SOLVER.MAX_ITER = int(((cfg.SOLVER.MAX_ITER * step_scale) + 0.5))
    print('Adjust SOLVER.STEPS and SOLVER.MAX_ITER linearly based on effective_batch_size change:\n    SOLVER.STEPS: {} --> {}\n    SOLVER.MAX_ITER: {} --> {}'.format(old_solver_steps, cfg.SOLVER.STEPS, old_max_iter, cfg.SOLVER.MAX_ITER))
    if (cfg.FPN.FPN_ON and cfg.MODEL.FASTER_RCNN):
        cfg.FPN.RPN_COLLECT_SCALE = (cfg.TRAIN.IMS_PER_BATCH / original_ims_per_batch)
        print('Scale FPN rpn_proposals collect size directly propotional to the change of IMS_PER_BATCH:\n    cfg.FPN.RPN_COLLECT_SCALE: {}'.format(cfg.FPN.RPN_COLLECT_SCALE))
    if (args.num_workers is not None):
        cfg.DATA_LOADER.NUM_THREADS = args.num_workers
    print(('Number of data loading threads: %d' % cfg.DATA_LOADER.NUM_THREADS))
    # CLI overrides for optimizer hyper-parameters.
    if (args.optimizer is not None):
        cfg.SOLVER.TYPE = args.optimizer
    if (args.lr is not None):
        cfg.SOLVER.BASE_LR = args.lr
    if (args.lr_decay_gamma is not None):
        cfg.SOLVER.GAMMA = args.lr_decay_gamma
    assert_and_infer_cfg()
    timers = defaultdict(Timer)
    if args.auto_resume:
        # Infers args.load_ckpt from the output directory, if a ckpt exists.
        misc_utils.infer_load_ckpt(args)
    if (args.resume and ('model_final.pth' in args.load_ckpt)):
        logging.info('model_final.pth exists; no need to train!')
        return
    # --- dataset / dataloader construction ---
    timers['roidb'].tic()
    (roidb, ratio_list, ratio_index) = combined_roidb_for_training(cfg.TRAIN.DATASETS, cfg.TRAIN.PROPOSAL_FILES)
    timers['roidb'].toc()
    roidb_size = len(roidb)
    logger.info('{:d} roidb entries'.format(roidb_size))
    logger.info('Takes %.2f sec(s) to construct roidb', timers['roidb'].average_time)
    # Effective epoch size, truncated to a whole number of batches.
    train_size = ((roidb_size // args.batch_size) * args.batch_size)
    batchSampler = BatchSampler(sampler=MinibatchSampler(ratio_list, ratio_index), batch_size=args.batch_size, drop_last=True)
    dataset = RoiDataLoader(roidb, cfg.MODEL.NUM_CLASSES, training=True)
    dataloader = torch.utils.data.DataLoader(dataset, batch_sampler=batchSampler, num_workers=cfg.DATA_LOADER.NUM_THREADS, collate_fn=collate_minibatch)
    # --- model construction and optional freezing ---
    maskRCNN = Generalized_RCNN()
    if ('word_embeddings' in dataset._extra_info):
        maskRCNN.Box_Outs.set_word_embedding(dataset._extra_info['word_embeddings'])
    if cfg.MODEL.IGNORE_CLASSES:
        if (cfg.MODEL.IGNORE_CLASSES == 'all'):
            dataset._extra_info['all'] = (dataset._extra_info['source'] + dataset._extra_info['target'])
        maskRCNN._ignore_classes = dataset._extra_info[cfg.MODEL.IGNORE_CLASSES]
        maskRCNN.Box_Outs._ignore_classes = dataset._extra_info[cfg.MODEL.IGNORE_CLASSES]
    if (cfg.MODEL.NUM_RELATIONS > 0):
        maskRCNN.Rel_Outs.relationship_dict = dataset._extra_info['relationships']
    if ((cfg.MODEL.NUM_RELATIONS > 0) and cfg.TRAIN.FIX_BACKBONE):
        # Train only the relationship head.
        for (key, value) in maskRCNN.named_parameters():
            if ('Rel_Outs' not in key):
                value.requires_grad = False
    if cfg.TRAIN.FIX_CLASSIFIER:
        for param in maskRCNN.Box_Outs.cls_score.parameters():
            param.requires_grad = False
    if ((cfg.FAST_RCNN.LOSS_TYPE == 'max_margin') and cfg.TRAIN.FIX_BACKBONE):
        # Train only the cls_score MLP when using max-margin loss.
        for (key, value) in maskRCNN.named_parameters():
            if ('cls_score.mlp' not in key):
                value.requires_grad = False
    if cfg.CUDA:
        maskRCNN.cuda()
    # --- partition parameters into optimizer groups (GN / bias / proj / rest) ---
    gn_param_nameset = set()
    for (name, module) in maskRCNN.named_modules():
        if isinstance(module, nn.GroupNorm):
            gn_param_nameset.add((name + '.weight'))
            gn_param_nameset.add((name + '.bias'))
    gn_params = []
    gn_param_names = []
    bias_params = []
    bias_param_names = []
    nonbias_params = []
    nonbias_param_names = []
    nograd_param_names = []
    proj_params = []
    proj_param_names = []
    for (key, value) in maskRCNN.named_parameters():
        if value.requires_grad:
            if ('bias' in key):
                bias_params.append(value)
                bias_param_names.append(key)
            elif (key in gn_param_nameset):
                gn_params.append(value)
                gn_param_names.append(key)
            elif ((cfg.FAST_RCNN.PROJ_LR_SCALE != 1) and ('cls_score.mlp' in key)):
                proj_params.append(value)
                proj_param_names.append(key)
            else:
                nonbias_params.append(value)
                nonbias_param_names.append(key)
        else:
            nograd_param_names.append(key)
    # Every GN param must have landed in gn_params (unless frozen or a bias).
    assert (((gn_param_nameset - set(nograd_param_names)) - set(bias_param_names)) == set(gn_param_names))
    # NOTE(review): 'lr_scale' values below are *strings* containing lambda
    # source, not callables -- presumably consumed (eval'd?) by the LR-update
    # utility; confirm downstream handling.
    params = [{'params': nonbias_params, 'lr': 0, 'weight_decay': cfg.SOLVER.WEIGHT_DECAY}, {'params': bias_params, 'lr': (0 * (cfg.SOLVER.BIAS_DOUBLE_LR + 1)), 'lr_scale': 'lambda x: x * (cfg.SOLVER.BIAS_DOUBLE_LR + 1)', 'weight_decay': (cfg.SOLVER.WEIGHT_DECAY if cfg.SOLVER.BIAS_WEIGHT_DECAY else 0)}, {'params': gn_params, 'lr': 0, 'weight_decay': cfg.SOLVER.WEIGHT_DECAY_GN}, {'params': proj_params, 'lr': 0, 'lr_scale': 'lambda x: x * cfg.FAST_RCNN.PROJ_LR_SCALE', 'weight_decay': cfg.SOLVER.WEIGHT_DECAY}]
    param_names = [nonbias_param_names, bias_param_names, gn_param_names, proj_param_names]
    if (cfg.SOLVER.TYPE == 'SGD'):
        optimizer = torch.optim.SGD(params, momentum=cfg.SOLVER.MOMENTUM)
    elif (cfg.SOLVER.TYPE == 'Adam'):
        optimizer = torch.optim.Adam(params)
    # --- checkpoint / detectron-weight loading ---
    if args.load_ckpt:
        load_name = args.load_ckpt
        logging.info('loading checkpoint %s', load_name)
        checkpoint = torch.load(load_name, map_location=(lambda storage, loc: storage))
        net_utils.load_ckpt(maskRCNN, checkpoint['model'])
        if args.resume:
            args.start_step = (checkpoint['step'] + 1)
            if ('train_size' in checkpoint):
                if (checkpoint['train_size'] != train_size):
                    print(('train_size value: %d different from the one in checkpoint: %d' % (train_size, checkpoint['train_size'])))
            misc_utils.load_optimizer_state_dict(optimizer, checkpoint['optimizer'])
            batchSampler.sampler.load_state_dict(checkpoint.get('sampler', None))
        del checkpoint
        torch.cuda.empty_cache()
    if args.load_detectron:
        logging.info('loading Detectron weights %s', args.load_detectron)
        load_detectron_weight(maskRCNN, args.load_detectron)
    lr = optimizer.param_groups[0]['lr']
    maskRCNN = mynn.DataParallel(maskRCNN, cpu_keywords=['im_info', 'roidb'], minibatch=True)
    # --- output dir / config snapshot / tensorboard ---
    args.run_name = (misc_utils.get_run_name(args) + '_step')
    output_dir = misc_utils.get_output_dir(args, args.run_name)
    args.cfg_filename = os.path.basename(args.cfg_file)
    if (not args.no_save):
        if (not os.path.exists(output_dir)):
            os.makedirs(output_dir)
        blob = {'cfg': yaml.dump(cfg), 'args': args}
        with open(os.path.join(output_dir, 'config_and_args.pkl'), 'wb') as f:
            pickle.dump(blob, f, pickle.HIGHEST_PROTOCOL)
        if args.use_tfboard:
            from tensorboardX import SummaryWriter
            tblogger = SummaryWriter(output_dir)
    # --- training loop ---
    maskRCNN.train()
    dataiterator = iter(dataloader)
    CHECKPOINT_PERIOD = int((cfg.TRAIN.SNAPSHOT_ITERS / effective_batch_size))
    # Find the first decay step not yet passed when resuming.
    decay_steps_ind = None
    for i in range(1, len(cfg.SOLVER.STEPS)):
        if (cfg.SOLVER.STEPS[i] >= args.start_step):
            decay_steps_ind = i
            break
    if (decay_steps_ind is None):
        decay_steps_ind = len(cfg.SOLVER.STEPS)
    training_stats = TrainingStats(args, args.disp_interval, (tblogger if (args.use_tfboard and (not args.no_save)) else None))
    try:
        logger.info('Training starts !')
        step = args.start_step
        for step in range(args.start_step, cfg.SOLVER.MAX_ITER):
            # Warm-up phase, then step decay at cfg.SOLVER.STEPS.
            if (step < cfg.SOLVER.WARM_UP_ITERS):
                method = cfg.SOLVER.WARM_UP_METHOD
                if (method == 'constant'):
                    warmup_factor = cfg.SOLVER.WARM_UP_FACTOR
                elif (method == 'linear'):
                    alpha = (step / cfg.SOLVER.WARM_UP_ITERS)
                    warmup_factor = ((cfg.SOLVER.WARM_UP_FACTOR * (1 - alpha)) + alpha)
                else:
                    raise KeyError('Unknown SOLVER.WARM_UP_METHOD: {}'.format(method))
                lr_new = (cfg.SOLVER.BASE_LR * warmup_factor)
                net_utils.update_learning_rate(optimizer, lr, lr_new)
                lr = optimizer.param_groups[0]['lr']
                assert (lr == lr_new)
            elif (step == cfg.SOLVER.WARM_UP_ITERS):
                net_utils.update_learning_rate(optimizer, lr, cfg.SOLVER.BASE_LR)
                lr = optimizer.param_groups[0]['lr']
                assert (lr == cfg.SOLVER.BASE_LR)
            if ((decay_steps_ind < len(cfg.SOLVER.STEPS)) and (step == cfg.SOLVER.STEPS[decay_steps_ind])):
                logger.info('Decay the learning on step %d', step)
                lr_new = (lr * cfg.SOLVER.GAMMA)
                net_utils.update_learning_rate(optimizer, lr, lr_new)
                lr = optimizer.param_groups[0]['lr']
                assert (lr == lr_new)
                decay_steps_ind += 1
            training_stats.IterTic()
            optimizer.zero_grad()
            # Gradient accumulation over iter_size minibatches.
            for inner_iter in range(args.iter_size):
                try:
                    input_data = next(dataiterator)
                except StopIteration:
                    # Epoch exhausted; restart the dataloader.
                    dataiterator = iter(dataloader)
                    input_data = next(dataiterator)
                for key in input_data:
                    if (key != 'roidb'):
                        input_data[key] = list(map(Variable, input_data[key]))
                net_outputs = maskRCNN(**input_data)
                training_stats.UpdateIterStats(net_outputs, inner_iter)
                loss = net_outputs['total_loss']
                loss.backward()
            optimizer.step()
            training_stats.IterToc()
            training_stats.LogIterStats(step, lr)
            if (((step + 1) % CHECKPOINT_PERIOD) == 0):
                save_ckpt(output_dir, args, step, train_size, maskRCNN, optimizer, dataiterator)
        # Final checkpoint after completing all steps.
        save_ckpt(output_dir, args, step, train_size, maskRCNN, optimizer, final=True)
        print('Checkpoint saved.')
    except (RuntimeError, KeyboardInterrupt):
        # Best-effort checkpoint on crash/interrupt, then re-print the trace.
        logger.info('Save ckpt on exception ...')
        save_ckpt(output_dir, args, step, train_size, maskRCNN, optimizer, dataiterator)
        del dataiterator
        logger.info('Save ckpt done.')
        stack_trace = traceback.format_exc()
        print(stack_trace)
    finally:
        if (args.use_tfboard and (not args.no_save)):
            tblogger.close()
def test_multi_objective_set_normalized():
    """Check MultiObjectiveCDV loss normalization.

    Uses module-level fixtures (analytical, max_empirical_losses, losses,
    gradient, max_empirical_loss_1/2, alpha_norm_base) defined elsewhere
    in this file."""
    multi_cdv_tmp = MultiObjectiveCDV(analytical, max_empirical_losses=max_empirical_losses)
    multi_cdv_tmp.set_normalized(True)
    (final_loss, alphas) = multi_cdv_tmp.get_descent_vector(losses, gradient)
    # Normalized final loss should be the alpha-weighted sum of the max losses.
    assert (final_loss.data == ((alphas[0] * max_empirical_loss_1) + (alphas[1] * max_empirical_loss_2)).data)
    assert (alphas == alpha_norm_base)
class SGDOptimizer(BaseOptimizer):
    """Vanilla stochastic gradient descent: step = learning_rate * gradient."""

    def _apply_dense(self, cache):
        """Compute the dense update and store it in cache['s_t']."""
        cache['s_t'] = self.learning_rate * cache['g_t']
        return cache

    def _apply_sparse(self, cache):
        """Compute the sparse update, first summing duplicate-index gradients."""
        grad, indices = cache['g_t'], cache['idxs']
        unique_idxs, positions = tf.unique(indices)
        # Accumulate gradient rows that refer to the same index.
        summed_grad = tf.unsorted_segment_sum(grad, positions, tf.size(unique_idxs))
        cache['g_t'] = summed_grad
        cache['idxs'] = unique_idxs
        cache['s_t'] = self.learning_rate * summed_grad
        return cache
def _print_net(net):
    """Debug helper: print a (Caffe2-style) net's external inputs/outputs and,
    for each operator, its type plus input and output blob names."""
    for i in net.external_input:
        print('Input: {}'.format(i))
    for i in net.external_output:
        print('Output: {}'.format(i))
    for op in net.op:
        print('Op {}'.format(op.type))
        for x in op.input:
            print(' input: {}'.format(x))
        for y in op.output:
            print(' output: {}'.format(y))
class DenseBlock(nn.Module):
    """Densely-connected block with exponentially growing time-axis dilation.

    Layer i sees the channel-wise concatenation of the input and every earlier
    layer's output, and uses dilation (2**i, 1). Returns the last layer's
    output (not the accumulated concatenation).
    """

    def __init__(self, h, kernel_size=(3, 3), depth=4):
        super(DenseBlock, self).__init__()
        self.h = h
        self.depth = depth
        self.dense_block = nn.ModuleList([])
        for i in range(depth):
            dilation = (2 ** i, 1)  # dilate only along the first spatial axis
            layer = nn.Sequential(
                nn.Conv2d(h.dense_channel * (i + 1), h.dense_channel, kernel_size,
                          dilation=dilation,
                          padding=get_padding_2d(kernel_size, dilation)),
                nn.InstanceNorm2d(h.dense_channel, affine=True),
                nn.PReLU(h.dense_channel))
            self.dense_block.append(layer)

    def forward(self, x):
        out = x
        accumulated = x
        for layer in self.dense_block:
            out = layer(accumulated)
            accumulated = torch.cat([out, accumulated], dim=1)
        return out
class PrimeNumbers_Inherits(PrimeNumbers_Abstract):
    """Sage parent of prime numbers whose elements inherit from Integer,
    with a coercion embedding into the integer ring."""
    def __init__(self):
        super().__init__()
        # Register the natural embedding PrimeNumbers -> ZZ with Sage's
        # coercion framework.
        self._populate_coercion_lists_(embedding=IntegerRing())
    def __contains__(self, p):
        """An object is contained if it is one of our elements, or a prime Integer."""
        return ((isinstance(p, self.element_class) and (p.parent() is self)) or (isinstance(p, Integer) and p.is_prime()))
    def _from_integer_(self, p):
        # Construct an element of this parent from a plain integer.
        return self.element_class(self, p)
    class Element(IntegerWrapper, PrimeNumbers_Abstract.Element):
        # Elements are integers wrapped with this parent.
        def __init__(self, parent, p):
            IntegerWrapper.__init__(self, parent, p)
# NOTE(review): `.torch` below is not valid Python -- almost certainly a mangled
# decorator (e.g. a pytest marker such as `@pytest.mark.torch`); restore it.
.torch
def test_can_get_windowed_sequence(sequential_dataset: SequentialDataset):
    """Sliding windows of length 3 / step 2 over each user's sequence,
    left-padded with -1, should yield these 6 windows."""
    sd = TorchSequentialDataset(sequential_dataset, max_sequence_length=3, sliding_window_step=2, padding_value=(- 1))
    assert (len(sd) == 6)
    _compare_sequence(sd, 0, 'item_id', [(- 1), 0, 1])
    _compare_sequence(sd, 1, 'item_id', [0, 2, 3])
    _compare_sequence(sd, 2, 'item_id', [(- 1), (- 1), 1])
    _compare_sequence(sd, 3, 'item_id', [3, 4, 5])
    _compare_sequence(sd, 4, 'item_id', [1, 2, 3])
    _compare_sequence(sd, 5, 'item_id', [0, 1, 2])
def isSession(timestamp1, timestamp2):
    """Return True when the gap from `timestamp2` to `timestamp1` is under 30 minutes.

    Both arguments are Unix timestamps. The gap is computed as whole
    days*86400 + seconds of the timedelta (microseconds ignored), so a
    negative gap (timestamp1 earlier) is always within the session window.
    """
    delta = datetime.fromtimestamp(timestamp1) - datetime.fromtimestamp(timestamp2)
    elapsed_seconds = delta.days * 24 * 3600 + delta.seconds
    return elapsed_seconds < 30 * 60
def load_op_dep_graph(fname):
    """Parse a YAML operator-dependency file into {op_name: set(dep_names)}.

    Names are normalized via `canonical_name`; operators with no `depends`
    entry get no key (the returned defaultdict creates sets on demand).
    """
    with open(fname, 'r') as stream:
        entries = yaml.safe_load(stream)
    graph = defaultdict(set)
    for entry in entries:
        op_name = canonical_name(entry['name'])
        for dep in entry.get('depends', []):
            graph[op_name].add(canonical_name(dep['name']))
    return graph
def is_valid_parameter(object):
    """Duck-type check: True iff `object` exposes the parameter protocol
    (`value`, `set_value`, `floating` attributes)."""
    # NOTE: parameter name shadows the builtin `object`; kept for
    # backward compatibility with keyword callers.
    return all(hasattr(object, attr) for attr in ('value', 'set_value', 'floating'))
# NOTE(review): `_utils.test()` below is likely a mangled decorator
# (e.g. taichi's `@test_utils.test()`); restore it if known.
_utils.test()
def test_func_default_value():
    """A Taichi-callable inner function's default argument should be honored."""
    def bar(s, t=1):
        return (s + t)
    def foo() -> ti.i32:
        return bar(1)
    assert (foo() == 2)
def process_grouped_by_first_item(lst):
    """Generator grouping consecutive items of `lst` by their first element.

    Yields (group_key, items) each time the key changes; single trailing
    elements are unwrapped from their 1-tuple. Precondition (asserted): a
    key never reappears after its run ends. The full mapping is also the
    generator's return value (available via StopIteration.value).

    NOTE(review): the final group is never yielded, only returned in the
    mapping -- presumably intentional, but confirm against callers.
    """
    groups = defaultdict(list)
    started = False
    last_group = None
    for (first, *rest) in lst:
        # Unwrap (x,) -> x so two-column rows yield scalars.
        rest = (rest[0] if (len(rest) == 1) else rest)
        if (started and (first != last_group)):
            (yield (last_group, groups[last_group]))
            assert (first not in groups), f'{first} seen earlier --- violates precondition.'
        groups[first].append(rest)
        last_group = first
        started = True
    return groups
def train(args):
    """Benchmark-style DeepSpeed training loop on a causal LM with a fixed
    random batch (gradient checkpointing enabled, KV cache disabled)."""
    set_seed(args.seed)
    print('initializing model')
    config = AutoConfig.from_pretrained(args.model)
    config.gradient_checkpointing = True
    config.use_cache = False  # incompatible with gradient checkpointing
    model = AutoModelForCausalLM.from_pretrained(args.model, config=config)
    model.train()
    model.gradient_checkpointing_enable()
    print('initializing deepspeed')
    model_parameters = list(filter((lambda p: p.requires_grad), model.parameters()))
    # NOTE(review): args.deepspeed_config is indexed like a dict below --
    # presumably a parsed config mapping, not a file path; confirm.
    (model_engine, optimizer, _, _) = deepspeed.initialize(config=args.deepspeed_config, model=model, model_parameters=model_parameters)
    torch.cuda.empty_cache()
    print('starting training')
    # One fixed random batch reused every step (language-model objective:
    # labels == input_ids).
    input_ids = torch.randint(low=0, high=10, size=[args.deepspeed_config['train_micro_batch_size_per_gpu'], 1024], dtype=torch.int64).cuda()
    for step in range((args.opt_steps_train + 1)):
        loss = model_engine(input_ids=input_ids, labels=input_ids).loss
        model_engine.backward(loss)
        model_engine.step()
        print(f'{step} {loss:8.3f}')
def digraph_one_root():
    """Build a HierarchicalClassifier fixture whose hierarchy is the simple
    chain a -> b -> c -> d (a single root)."""
    classifier = HierarchicalClassifier()
    classifier.logger_ = logging.getLogger('HC')
    classifier.hierarchy_ = nx.DiGraph([('a', 'b'), ('b', 'c'), ('c', 'd')])
    return classifier
class BioGptForTokenClassification(metaclass=DummyObject):
    """Auto-generated placeholder: raises a helpful ImportError when torch
    is not installed (standard transformers dummy-object pattern)."""
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def verify_task_dof_maps(dof_maps, id_map, field, use_expand_dofs=False, verbose=False):
    """Verify the consistency of per-task DOF maps.

    Checks that every DOF is assigned exactly once, that owned/overlap DOF
    ids fall inside each task's [offset, offset + n_owned) window (only
    meaningful without expanded DOFs), and that the totals match `id_map`.

    Parameters
    ----------
    dof_maps : dict
        Per-task maps; dof_map[0] are owned DOFs, dof_map[1] overlap DOF
        arrays, dof_map[3] the owned count, dof_map[4] the offset.
    id_map : array
        Global DOF renumbering.
    field : Field
        Provides n_nod (and n_components when use_expand_dofs is set).
    use_expand_dofs : bool
        Whether DOFs were expanded per component.
    verbose : bool
        Print progress via `output`.

    Returns
    -------
    vec : array
        Per-DOF task ownership vector.
    """
    timer = Timer(start=True)
    if verbose:
        output('verifying...')
        output('total number of DOFs:', field.n_nod)
        output('number of tasks:', len(dof_maps))
    count = count2 = 0
    dofs = []
    if use_expand_dofs:
        vec = nm.empty(field.n_nod * field.n_components, dtype=nm.float64)
    else:
        vec = nm.empty(field.n_nod, dtype=nm.float64)
    for ir, dof_map in ordered_iteritems(dof_maps):
        n_owned = dof_map[3]
        offset = dof_map[4]
        o2 = offset + n_owned
        if verbose:
            output('task %d: %d owned on offset %d' % (ir, n_owned, offset))
        if not use_expand_dofs:
            aux = dof_map[0]
            assert_(nm.all((id_map[aux] >= offset) & (id_map[aux] < o2)))
        count2 += dof_map[3]
        count += len(dof_map[0])
        dofs.append(dof_map[0])
        vec[dof_map[0]] = ir
        for aux in dof_map[1]:
            if not use_expand_dofs:
                assert_(nm.all((id_map[aux] >= offset) & (id_map[aux] < o2)))
            count += len(aux)
            dofs.append(aux)
            vec[aux] = ir
    dofs = nm.concatenate(dofs)
    n_dof = vec.shape[0]
    assert_(n_dof == len(dofs))
    # BUG FIX: was `if (not expand_dofs):` -- `expand_dofs` is undefined here
    # (NameError); the parameter is `use_expand_dofs`.
    if not use_expand_dofs:
        assert_(nm.all(nm.sort(dofs) == nm.sort(id_map)))
    dofs = nm.unique(dofs)
    assert_(n_dof == len(dofs))
    assert_(n_dof == (dofs[-1] + 1))
    assert_(n_dof == count)
    assert_(n_dof == count2)
    assert_(n_dof == len(id_map))
    assert_(n_dof == len(nm.unique(id_map)))
    output('...done in', timer.stop(), verbose=verbose)
    return vec
def composite(image1, image2, mask):
    """Paste `image1` over a copy of `image2` through `mask` and return the
    result; neither input image is modified."""
    result = image2.copy()
    result.paste(image1, None, mask)
    return result
def auto_augment_policy(name='original'):
    """Return the named AutoAugment policy built with the default hparams.

    Known names: 'original', 'originalr', 'v0', 'v0r'. Any other name
    trips the assertion below (mirroring the original behavior).
    """
    hparams = _HPARAMS_DEFAULT
    builders = {
        'original': auto_augment_policy_original,
        'originalr': auto_augment_policy_originalr,
        'v0': auto_augment_policy_v0,
        'v0r': auto_augment_policy_v0r,
    }
    if name in builders:
        return builders[name](hparams)
    assert False, 'Unknown AA policy (%s)' % name
def register_Ns3TransmissionModesLayers_methods(root_module, cls):
    """PyBindGen registration (auto-generated style) for
    ns3::TransmissionModesLayers: default/copy constructors and the static
    TxMode2LayerNum(uint8_t) -> uint8_t method."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TransmissionModesLayers const &', 'arg0')])
    cls.add_method('TxMode2LayerNum', 'uint8_t', [param('uint8_t', 'txMode')], is_static=True)
    return
class L2Loss(_Loss):
    """Mean squared difference between two states, or element-wise over
    matching tuples of states (summed before the mean)."""

    logger = logging.getLogger()

    def forward(self, state_a, state_b):
        # Exact-type check on purpose: only plain tuples get the
        # component-wise treatment.
        if type(state_a) is tuple:
            losses = 0.0
            for part_a, part_b in zip(state_a, state_b):
                losses = losses + (part_a - part_b) ** 2
        else:
            losses = (state_a - state_b) ** 2
        return torch.mean(losses)
class BitConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration class for a BiT (Big Transfer) backbone.

    Validates `layer_type` against `layer_types` and `global_padding`
    against `supported_padding`, then stores the architecture
    hyper-parameters and derives `stage_names` / aligned backbone output
    features and indices.
    """
    model_type = 'bit'
    layer_types = ['preactivation', 'bottleneck']
    supported_padding = ['SAME', 'VALID']

    def __init__(self, num_channels=3, embedding_size=64, hidden_sizes=None, depths=None, layer_type='preactivation', hidden_act='relu', global_padding=None, num_groups=32, drop_path_rate=0.0, embedding_dynamic_padding=False, output_stride=32, width_factor=1, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        # BUG FIX: the defaults used to be mutable lists shared across all
        # instances; use None sentinels and materialize per-instance copies
        # (backward compatible: explicit callers are unaffected).
        if hidden_sizes is None:
            hidden_sizes = [256, 512, 1024, 2048]
        if depths is None:
            depths = [3, 4, 6, 3]
        if (layer_type not in self.layer_types):
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if (global_padding is not None):
            if (global_padding.upper() in self.supported_padding):
                # Normalize to upper case ('same' -> 'SAME').
                global_padding = global_padding.upper()
            else:
                raise ValueError(f'Padding strategy {global_padding} not supported')
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        # One stage name per depth entry, preceded by the stem.
        self.stage_names = (['stem'] + [f'stage{idx}' for idx in range(1, (len(depths) + 1))])
        (self._out_features, self._out_indices) = get_aligned_output_features_output_indices(out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
class HandBlockEnv(ManipulateEnv):
    """Shadow-hand block-manipulation environment with an episode step limit.

    Wraps ManipulateEnv to terminate episodes after `max_step` steps.
    """

    def __init__(self, max_step=100, target_position='random', target_rotation='xyz', reward_type='sparse', distance_threshold=0.01, rotation_threshold=0.1):
        self.num_step = 0  # steps taken in the current episode
        self.max_step = max_step
        # BUG FIX: `reward_type` was accepted but ignored -- the parent was
        # always constructed with the hard-coded 'sparse'. Forward the
        # parameter (default unchanged, so existing callers see no change).
        super(HandBlockEnv, self).__init__(model_path=MANIPULATE_BLOCK_XML, target_position=target_position, target_rotation=target_rotation, target_position_range=np.array([((- 0.04), 0.04), ((- 0.06), 0.02), (0.0, 0.06)]), reward_type=reward_type, distance_threshold=distance_threshold, rotation_threshold=rotation_threshold)

    def step(self, action):
        """Step the underlying env; force `done` once max_step is reached."""
        (obs, reward, _, info) = super().step(action)
        self.num_step += 1
        done = (True if (self.num_step >= self.max_step) else False)
        return (obs, reward, done, info)

    def reset(self):
        """Reset the underlying env and the episode step counter."""
        obs = super().reset()
        self.num_step = 0
        return obs
def layout_grid(img, grid_w=None, grid_h=1, float_to_uint8=True, chw_to_hwc=True, to_numpy=True):
    """Tile a batch of images (N, C, H, W) into one grid image.

    grid_w defaults to N // grid_h; N must equal grid_w * grid_h.
    With float_to_uint8, values in [-1, 1] are mapped to uint8 [0, 255].
    Returns CHW or HWC (chw_to_hwc) as a tensor or numpy array (to_numpy).
    """
    batch, channels, height, width = img.shape
    if grid_w is None:
        grid_w = batch // grid_h
    assert batch == grid_w * grid_h
    if float_to_uint8:
        img = (img * 127.5 + 128).clamp(0, 255).to(torch.uint8)
    grid = img.reshape(grid_h, grid_w, channels, height, width)
    # (C, gh, H, gw, W) -> collapse the grid axes into the spatial axes.
    grid = grid.permute(2, 0, 3, 1, 4).reshape(channels, grid_h * height, grid_w * width)
    if chw_to_hwc:
        grid = grid.permute(1, 2, 0)
    if to_numpy:
        grid = grid.cpu().numpy()
    return grid
# NOTE(review): the three lines below are mangled decorators (`.parametrize`
# is not valid Python on its own) -- likely `@pytest.mark.parametrize(...)`,
# an autograd guard, and a taichi `@test_utils.test()`; restore them.
.parametrize('tifunc,npfunc', [((lambda x: ti.sqrt(x)), (lambda x: np.sqrt(x))), ((lambda x: ti.rsqrt(x)), (lambda x: (1 / np.sqrt(x)))), ((lambda x: ti.exp(x)), (lambda x: np.exp(x))), ((lambda x: ti.log(x)), (lambda x: np.log(x)))])
_has_autograd
_utils.test()
def test_unary(tifunc, npfunc):
    """Gradient-check each taichi unary op against its numpy reference,
    in both reverse and forward mode."""
    grad_test(tifunc, npfunc)
    grad_test_fwd(tifunc, npfunc)
def test_count_string_tokens_empty_input():
    """An empty string should tokenize to zero tokens for this model."""
    assert (count_string_tokens('', model_name='gpt-3.5-turbo-0301') == 0)
def test_skipper():
    """Exercise doctest_skip_parser's `# skip if <expr>` rewriting.

    The parser mutates __doc__ in place (so `f is f2`), turning matched
    `# skip if` markers into `# doctest: +SKIP` depending on the truth of
    module-level flags, and raising NameError for undefined flag names.
    """
    def f():
        pass
    class c():
        def __init__(self):
            self.me = 'I think, therefore...'
    docstring = '    Header\n\n        >>> something # skip if not HAVE_AMODULE\n        >>> something + else\n        >>> a = 1 # skip if not HAVE_BMODULE\n        >>> something2   # skip if HAVE_AMODULE\n    '
    f.__doc__ = docstring
    c.__doc__ = docstring
    # The parser evaluates these names in (module) global scope.
    global HAVE_AMODULE, HAVE_BMODULE
    HAVE_AMODULE = False
    HAVE_BMODULE = True
    f2 = doctest_skip_parser(f)
    c2 = doctest_skip_parser(c)
    # Docstrings are rewritten in place on the same objects.
    assert (f is f2)
    assert (c is c2)
    expected = '    Header\n\n        >>> something # doctest: +SKIP\n        >>> something + else\n        >>> a = 1\n        >>> something2\n    '
    assert_equal(f2.__doc__, expected)
    assert_equal(c2.__doc__, expected)
    # Flip the flags: the other set of lines should now be skipped.
    HAVE_AMODULE = True
    HAVE_BMODULE = False
    f.__doc__ = docstring
    c.__doc__ = docstring
    f2 = doctest_skip_parser(f)
    c2 = doctest_skip_parser(c)
    assert (f is f2)
    expected = '    Header\n\n        >>> something\n        >>> something + else\n        >>> a = 1 # doctest: +SKIP\n        >>> something2   # doctest: +SKIP\n    '
    assert_equal(f2.__doc__, expected)
    assert_equal(c2.__doc__, expected)
    # An undefined flag name must raise NameError.
    del HAVE_AMODULE
    f.__doc__ = docstring
    c.__doc__ = docstring
    with testing.raises(NameError):
        doctest_skip_parser(f)
    with testing.raises(NameError):
        doctest_skip_parser(c)
def tensorize(arr):
    """Convert a numpy array to a float32 CUDA tensor; 1-D input is reshaped
    to a column vector (n, 1). Requires an available CUDA device."""
    tensor = torch.from_numpy(arr).float().cuda()
    if arr.ndim == 1:
        tensor = tensor.reshape(-1, 1)
    return tensor
def extract_embeddings_vggish(annotation_path, dataset_dir, output_dir, vggish_resource_dir, frame_duration=0.96, hop_duration=0.96, progress=True, vggish_embedding_size=128):
    """Extract VGGish embeddings for every audio file in an annotation CSV.

    Embeddings are written as gzipped .npy files under <output_dir>/vggish.
    `make_extract_vggish_embedding` returns a coroutine/generator: it is
    primed with next(), fed (audio_path, output_path) pairs via send(),
    and closed when done.
    """
    print('* Loading annotations.')
    annotation_data = pd.read_csv(annotation_path).sort_values('audio_filename')
    extract_vggish_embedding = make_extract_vggish_embedding(frame_duration, hop_duration, input_op_name='vggish/input_features', output_op_name='vggish/embedding', resources_dir=vggish_resource_dir, embedding_size=vggish_embedding_size)
    # Prime the coroutine so it is ready to receive work.
    next(extract_vggish_embedding)
    out_dir = os.path.join(output_dir, 'vggish')
    os.makedirs(out_dir, exist_ok=True)
    # One embedding per unique (split, file) pair.
    df = annotation_data[['split', 'audio_filename']].drop_duplicates()
    row_iter = df.iterrows()
    if progress:
        row_iter = tqdm(row_iter, total=len(df))
    print('* Extracting embeddings.')
    for (_, row) in row_iter:
        filename = row['audio_filename']
        split_str = row['split']
        audio_path = os.path.join(dataset_dir, split_str, filename)
        emb_path = os.path.join(out_dir, (os.path.splitext(filename)[0] + '.npy.gz'))
        extract_vggish_embedding.send((audio_path, emb_path))
    extract_vggish_embedding.close()
def main(args):
    """Generate background masks for every image in <args.dpath>/image,
    writing each mask to <args.dpath>/mask/<name>.png via rembg."""
    base_path = f'{args.dpath}/image'
    # BUG FIX: this used to be `assert f"..."`, which asserts a non-empty
    # string -- always truthy, so the check never fired. Fail explicitly.
    if not os.path.exists(base_path):
        raise FileNotFoundError(f"args.dpath ({args.dpath}) must contain 'image' directory")
    base_opath = f'{args.dpath}/mask'
    os.makedirs(base_opath, exist_ok=True)
    fpaths = glob.glob(f'{base_path}/*')
    for fpath in fpaths:
        print(f'Background matting {fpath}...')
        input = cv2.imread(fpath)
        # rembg: return only the post-processed alpha-matted mask.
        output = remove(input, alpha_matting=True, only_mask=True, post_process_mask=True)
        path = Path(fpath)
        opath = f'{base_opath}/{path.name}.png'
        cv2.imwrite(opath, output)
def _file_rendezvous_handler(url, **kwargs):
    """Rendezvous handler for file:// URLs (torch.distributed).

    Parses rank and world_size from the URL query string, yields a
    (FileStore, rank, world_size) triple once, and raises if a second
    rendezvous is attempted on the same generator.
    """
    def _error(msg):
        return _rendezvous_error(('file:// rendezvous: ' + msg))
    result = urlparse(url)
    path = result.path
    if (sys.platform == 'win32'):
        # Convert /C:/... URL paths to native Windows paths.
        import urllib.request
        path = urllib.request.url2pathname(result.path)
    if (not path):
        raise _error('path missing')
    # Query string like 'rank=0&world_size=2' -> {'rank': '0', ...}.
    query = dict((pair.split('=') for pair in filter(None, result.query.split('&'))))
    if ('rank' not in query):
        raise _error('rank parameter missing')
    if ('world_size' not in query):
        raise _error('world size parameter missing')
    rank = int(query['rank'])
    world_size = int(query['world_size'])
    store = FileStore(path, world_size)
    (yield (store, rank, world_size))
    # Resuming the generator means a re-rendezvous was requested.
    raise RuntimeError('Unable to perform rerendezvous using file:// method')
def calculate_contrastive_empowerment(discriminator, obs, next_obs, latents, num_prior_samples=512, distribution_type='uniform', split_group=(4096 * 32), obs_mean=None, obs_std=None, return_diagnostics=False, prior=None):
    """Contrastive intrinsic reward: log-density of the taken latent vs.
    `num_prior_samples` latents drawn from a prior (uniform or a policy).

    Returns (intrinsic_reward, (logp, logp_altz, logp - intrinsic_reward))
    plus a diagnostics dict when `return_diagnostics` is set.
    """
    discriminator.eval()
    if (obs_mean is not None):
        # Normalize observations with the provided running statistics.
        obs = ((obs - obs_mean) / (obs_std + 1e-06))
        next_obs = ((next_obs - obs_mean) / (obs_std + 1e-06))
    # Tile observations once per prior sample: shape (num_prior_samples * N, ...).
    obs_altz = np.concatenate(([obs] * num_prior_samples), axis=0)
    with torch.no_grad():
        # Log-prob of the actually executed latents.
        logp = discriminator.get_log_prob(ptu.from_numpy(obs), ptu.from_numpy(latents), ptu.from_numpy(next_obs))
        logp = ptu.get_numpy(logp)
    if (distribution_type == 'uniform'):
        latent_altz = np.random.uniform(low=(- 1), high=1, size=(obs_altz.shape[0], latents.shape[1]))
    elif (distribution_type == 'prior'):
        if (prior is None):
            raise AssertionError('prior specified but not passed in')
        obs_t = ptu.from_numpy(obs_altz)
        (latent_altz, *_) = prior.get_action(obs_t, deterministic=False)
    else:
        raise NotImplementedError('distribution_type not found')
    next_obs_altz = np.concatenate(([next_obs] * num_prior_samples), axis=0)
    with torch.no_grad():
        if (obs_altz.shape[0] <= split_group):
            logp_altz = ptu.get_numpy(discriminator.get_log_prob(ptu.from_numpy(obs_altz), ptu.from_numpy(latent_altz), ptu.from_numpy(next_obs_altz)))
        else:
            # Chunked evaluation to bound memory use.
            logp_altz = []
            for split_idx in range((obs_altz.shape[0] // split_group)):
                start_split = (split_idx * split_group)
                end_split = ((split_idx + 1) * split_group)
                logp_altz.append(ptu.get_numpy(discriminator.get_log_prob(ptu.from_numpy(obs_altz[start_split:end_split]), ptu.from_numpy(latent_altz[start_split:end_split]), ptu.from_numpy(next_obs_altz[start_split:end_split]))))
            if (obs_altz.shape[0] % split_group):
                # Remainder chunk (last `shape[0] % split_group` rows).
                start_split = (obs_altz.shape[0] % split_group)
                logp_altz.append(ptu.get_numpy(discriminator.get_log_prob(ptu.from_numpy(obs_altz[(- start_split):]), ptu.from_numpy(latent_altz[(- start_split):]), ptu.from_numpy(next_obs_altz[(- start_split):]))))
            logp_altz = np.concatenate(logp_altz)
    # (num_prior_samples, N): per-sample log-probs of the alternative latents.
    logp_altz = np.array(np.array_split(logp_altz, num_prior_samples))
    if return_diagnostics:
        diagnostics = dict()
        orig_rep = np.repeat(np.expand_dims(logp, axis=0), axis=0, repeats=num_prior_samples)
        diagnostics['Pct Random Skills > Original'] = (orig_rep < logp_altz).mean()
    # Clipped log-sum-exp contrastive estimator of the empowerment bound.
    intrinsic_reward = (np.log((num_prior_samples + 1)) - np.log((1 + np.exp(np.clip((logp_altz - logp.reshape(1, (- 1))), (- 50), 50)).sum(axis=0))))
    if (not return_diagnostics):
        return (intrinsic_reward, (logp, logp_altz, (logp - intrinsic_reward)))
    else:
        return (intrinsic_reward, (logp, logp_altz, (logp - intrinsic_reward)), diagnostics)
def GetBfsTree_PNGraph(Graph, StartNId, FollowOut, FollowIn):
    """Return the BFS tree of ``Graph`` rooted at ``StartNId``.

    Thin delegation to the SWIG-generated ``_snap`` backend; ``FollowOut`` /
    ``FollowIn`` select which edge directions the traversal follows.
    """
    bfs_tree = _snap.GetBfsTree_PNGraph(Graph, StartNId, FollowOut, FollowIn)
    return bfs_tree
def get_padding_2d(kernel_size, dilation=(1, 1)):
    """Return the per-side (h, w) padding that keeps a 2-D convolution 'same'-sized.

    For each dimension the required one-sided padding is
    ``dilation * (kernel - 1) // 2``.

    Improvement: the original computed ``int((k * d - d) / 2)`` — a float
    division followed by truncation; exact integer arithmetic (``//``) gives
    the same result for the non-negative integer inputs used here without
    a float round-trip.
    """
    return (dilation[0] * (kernel_size[0] - 1) // 2,
            dilation[1] * (kernel_size[1] - 1) // 2)
def load_exp_data(exp_path):
    """Collect one experiment directory's metadata.

    Returns a dict with the progress CSV path, the parsed ``variant.json``,
    the checkpoint path list and the experiment name, or ``None`` if the
    variant file cannot be read.
    """
    try:
        variant = load_json(os.path.join(exp_path, 'variant.json'))
    except IOError as e:
        # Best-effort: report and signal "no data" rather than aborting a scan.
        print(e)
        return None
    return dict(
        csv=os.path.join(exp_path, 'progress.csv'),
        json=variant,
        pkl=[os.path.join(exp_path, 'offline_itr_2000.pt')],
        exp_name=exp_path,
    )
class BartTokenizerFast(RobertaTokenizerFast):
    """Fast BART tokenizer.

    Reuses RoBERTa's fast byte-level BPE implementation unchanged and only
    rebinds the BART-specific vocabulary tables and the matching slow class.
    """
    # Names of the vocabulary files expected on disk / the hub.
    vocab_files_names = VOCAB_FILES_NAMES
    # Checkpoint-name -> vocab-file-URL mapping.
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    # Checkpoint-name -> maximum supported input length.
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # Python (non-Rust) counterpart used for conversion fallbacks.
    slow_tokenizer_class = BartTokenizer
def _test_size_in_bytes():
    """Check element-size / element-count bookkeeping for Taichi ndarrays."""
    # Scalar i32 array: 4 bytes per element, 8 elements.
    scalar_arr = ti.ndarray(ti.i32, 8)
    assert (scalar_arr._get_element_size(), scalar_arr._get_nelement()) == (4, 8)
    # Vector ndarray of 10 f64 lanes: 10 * 8 = 80 bytes per element, 5 elements.
    vector_arr = ti.Vector.ndarray(10, ti.f64, 5)
    assert (vector_arr._get_element_size(), vector_arr._get_nelement()) == (80, 5)
class Partition0(nn.Module):
    """Pipeline-parallel partition 0 of a T5ForConditionalGeneration model.

    NOTE(review): this looks machine-generated by a model-partitioning tool —
    confirm before hand-editing. It owns the encoder token embedding, the
    encoder dropout and encoder blocks 0-3, and forwards the intermediate
    tensors needed by the next partition.
    """
    # Fully-qualified scopes (in the original model) of the layers owned here.
    LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[encoder]/StatelessEmbedding[embed_tokens]', 'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[0]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[1]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[2]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[3]']
    # Shared tensors held by this partition (the tied embedding weight).
    TENSORS = ['T5ForConditionalGeneration/Parameter[shared_embed_weight]']

    def __init__(self, layers, tensors, device='cuda:0'):
        """Bind the owned layers/tensors from the partitioning maps and move to device."""
        super().__init__()
        # Register owned layers as l_0 .. l_5 in LAYER_SCOPES order.
        for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        b = p = 0
        # Register each shared tensor as a parameter (p_i) or buffer (b_i).
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        # Leaf counts of the three flattened forward inputs
        # (attention_mask, decoder_input_ids, input_ids).
        self.input_structure = [1, 1, 1]
        # Maps local names back to the original model's state-dict scopes.
        self.lookup = {'l_0': 'encoder.embed_tokens', 'l_1': 'encoder.dropout', 'l_2': 'encoder.block.0', 'l_3': 'encoder.block.1', 'l_4': 'encoder.block.2', 'l_5': 'encoder.block.3', 'p_0': 'shared_embed_weight'}
        self.to(self.device)

    def forward(self, *args):
        # Inputs arrive flattened by the pipeline runtime; rebuild them.
        (attention_mask, decoder_input_ids, input_ids) = unflatten(args, self.input_structure)
        t_0 = decoder_input_ids.size()  # decoder shape, passed through untouched
        t_1 = input_ids.size()
        t_1 = t_1[(- 1)]
        t_1 = input_ids.view((- 1), t_1)  # collapse batch dims to (batch, seq)
        t_1 = self.l_0(self.p_0, t_1)  # stateless embedding with the shared weight
        t_1 = self.l_1(t_1)  # encoder dropout
        # Build the additive attention mask (1 -> 0.0, 0 -> -10000.0),
        # broadcastable to (batch, heads, seq, seq).
        t_2 = attention_mask[(slice(None, None, None), None, None, slice(None, None, None))]
        t_2 = t_2.to(dtype=torch.float32)
        t_2 = (1.0 - t_2)
        t_2 = (t_2 * (- 10000.0))
        # Encoder blocks 0-3. Each returns a tuple whose [0] is the hidden
        # states and whose [2] is the relative position bias; the bias computed
        # by block 0 is fed to every later block.
        t_1 = self.l_2(t_1, attention_mask=t_2, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False)
        t_3 = t_1[slice(None, 2, None)]
        t_3 = t_3[0]  # hidden states
        t_1 = t_1[2]  # position bias
        t_1 = self.l_3(t_3, attention_mask=t_2, position_bias=t_1, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False)
        t_3 = t_1[slice(None, 2, None)]
        t_3 = t_3[0]
        t_1 = t_1[2]
        t_1 = self.l_4(t_3, attention_mask=t_2, position_bias=t_1, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False)
        t_3 = t_1[slice(None, 2, None)]
        t_3 = t_3[0]
        t_1 = t_1[2]
        t_1 = self.l_5(t_3, attention_mask=t_2, position_bias=t_1, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False)
        t_3 = t_1[slice(None, 2, None)]
        t_3 = t_3[0]
        t_1 = t_1[2]
        # Hand off to the next partition: decoder shape, shared embedding
        # weight, extended attention mask, hidden states, position bias.
        return list(flatten((t_0, self.p_0, t_2, t_3, t_1)))

    # (De)serialization and device movement are delegated to free functions
    # supplied by the partitioning runtime so names map back via self.lookup.
    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)

    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)

    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
def mass_center(model, sim):
    """Return the x-coordinate of the simulated model's center of mass.

    Computes the mass-weighted average of the body positions (``xipos``) and
    returns only its first (x) component.
    """
    body_mass = np.expand_dims(model.body_mass, 1)  # (n_bodies, 1) for broadcasting
    positions = sim.data.xipos                       # (n_bodies, 3)
    center = np.sum(body_mass * positions, 0) / np.sum(body_mass)
    return center[0]
# NOTE(review): bare registry call — registries of this style usually take the
# decorator form `@_OUTPUTS.register('conv1x1_outputs')` applied to the class
# below; confirm the missing '@' is intentional and the class really gets
# registered under this key.
_OUTPUTS.register('conv1x1_outputs')
class Conv1x1Outputs(nn.Module):
def __init__(self, cfg, dim_in, spatial_in):
super().__init__()
self.dim_in = dim_in[(- 1)]
self.spatial_in = spatial_in
self.classify = nn.Conv2d(self.dim_in, cfg.MASK.NUM_CLASSES, kernel_size=1, stride=1, padding=0)
self.dim_out = [cfg.MASK.NUM_CLASSES]
self.spatial_out = [1.0]
def forward(self, x, labels=None):
x = x[(- 1)]
if (labels is None):
labels = torch.zeros(x.shape[0]).long()
x = self.classify(x)
x = x[(range(len(labels)), labels)].unsqueeze(1)
up_scale = int((1 / self.spatial_in[0]))
if (up_scale > 1):
x = F.interpolate(x, scale_factor=up_scale, mode='bilinear', align_corners=False)
return [x] |
class WorldConstants():
    """Static constants describing the simulated tri-finger robot world.

    NOTE(review): the id tables below look like PyBullet body/link/visual-shape
    indices — confirm against the simulator setup code.
    """
    # Simulator body ids.
    ROBOT_ID = 1
    FLOOR_ID = 2
    STAGE_ID = 3
    # Heights (units presumably meters — confirm against the URDF).
    FLOOR_HEIGHT = 0.011
    ROBOT_HEIGHT = 0.34
    # Axis-aligned arena bounding box: [[x_min, y_min, z_min], [x_max, y_max, z_max]].
    ARENA_BB = np.array([[(- 0.15), (- 0.15), 0], [0.15, 0.15, 0.3]])
    # Finger-link name -> link index (one group per finger at 60/120/300 degrees).
    LINK_IDS = {'robot_finger_60_link_0': 1, 'robot_finger_60_link_1': 2, 'robot_finger_60_link_2': 3, 'robot_finger_60_link_3': 4, 'robot_finger_120_link_0': 6, 'robot_finger_120_link_1': 7, 'robot_finger_120_link_2': 8, 'robot_finger_120_link_3': 9, 'robot_finger_300_link_0': 11, 'robot_finger_300_link_1': 12, 'robot_finger_300_link_2': 13, 'robot_finger_300_link_3': 14}
    # Finger-link name -> visual shape index (distinct numbering from LINK_IDS).
    VISUAL_SHAPE_IDS = {'robot_finger_60_link_0': 0, 'robot_finger_60_link_1': 4, 'robot_finger_60_link_2': 5, 'robot_finger_60_link_3': 6, 'robot_finger_120_link_0': 7, 'robot_finger_120_link_1': 11, 'robot_finger_120_link_2': 12, 'robot_finger_120_link_3': 13, 'robot_finger_300_link_0': 14, 'robot_finger_300_link_1': 15, 'robot_finger_300_link_2': 16, 'robot_finger_300_link_3': 17}
    # Actuated joint names, three per finger (upper/middle/lower).
    JOINT_NAMES = ['finger_upper_link_0', 'finger_middle_link_0', 'finger_lower_link_0', 'finger_upper_link_120', 'finger_middle_link_120', 'finger_lower_link_120', 'finger_upper_link_240', 'finger_middle_link_240', 'finger_lower_link_240']
    # Fingertip link names, one per finger.
    TIP_LINK_NAMES = ['finger_tip_link_0', 'finger_tip_link_120', 'finger_tip_link_240']
def test_anntorchdataset_getitem_pro_exp(adata):
    """AnnTorchDataset items with protein expression set up should expose every
    field as a numpy array.
    """
    adata.obsm['protein_expression'] = pd.DataFrame(adata.obsm['protein_expression'], index=adata.obs_names)
    adata_manager = generic_setup_adata_manager(adata, batch_key='batch', protein_expression_obsm_key='protein_expression')
    bd = AnnTorchDataset(adata_manager)
    for value in bd[1].values():
        # Idiom fix: use isinstance rather than exact type comparison
        # (`type(x) == np.ndarray`), which would spuriously fail on ndarray
        # subclasses.
        assert isinstance(value, np.ndarray)
def main():
    """Train a simple least-squares GAN: alternate discriminator updates
    (fakes -> 0, reals -> 1) and generator updates (fakes -> 1) using MSE loss.
    """
    args = get_args()
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    # Dead duplicate `transform = transforms.Compose([transforms.Scale()])`
    # line removed — it was immediately overwritten.
    # NOTE(review): transforms.Scale is deprecated (use Resize), and
    # (512, 512, 3) looks like an HWC image shape rather than a (h, w) size —
    # confirm the intended resize target.
    transform = transforms.Compose([transforms.Scale((512, 512, 3)), transforms.ToTensor(), transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))])
    G = Generator().to(device)
    D = Discriminator().to(device)
    loss_func = nn.MSELoss()
    # Bug fix: torch.optim has no `adam` attribute; the optimizer class is Adam.
    G_optim = torch.optim.Adam(G.parameters(), lr=0.01)
    D_optim = torch.optim.Adam(D.parameters(), lr=0.01)
    epochs = 500
    # Bug fix: `import tqdm` binds the module, which is not callable; import
    # the tqdm progress-bar callable itself.
    from tqdm import tqdm
    # NOTE(review): train_loader and batch_size are assumed to be module-level
    # globals — confirm.
    for epoch in tqdm(range(epochs)):
        for batch_idx, (x, labels) in tqdm(enumerate(train_loader)):
            # Bug fix: real images must live on the same device as D.
            x = x.to(device)
            # Latent noise z ~ N(0, 0.1); torch.randn replaces the deprecated
            # init.normal call on an uninitialized tensor.
            z = torch.randn(batch_size, 100, device=device) * 0.1
            # --- Discriminator step ---
            D_z, _ = D(G(z))
            D_x, _ = D(x)
            zeros = torch.zeros(batch_size, 1).to(device)
            ones = torch.ones(batch_size, 1).to(device)
            # Bug fix: the original referenced undefined D_imgs / ones_label;
            # the real images' scores are D_x and the real-label target is ones.
            D_loss = torch.sum(loss_func(D_z, zeros)) + torch.sum(loss_func(D_x, ones))
            D_optim.zero_grad()
            D_loss.backward()
            D_optim.step()
            # --- Generator step: fool D into scoring fakes as real ---
            z = torch.randn(batch_size, 100, device=device) * 0.1
            G_z = G(z)
            # Bug fix: D returns a (score, features) tuple above; unpack it
            # consistently instead of feeding the raw tuple into the loss.
            D_z, _ = D(G_z)
            G_loss = torch.sum(loss_func(D_z, ones))
            G_optim.zero_grad()
            # retain_graph=True dropped: the graph is rebuilt every iteration.
            G_loss.backward()
            G_optim.step()
def train_imgf(opt):
    """Train the img2state image-prediction model.

    Runs 50 epochs of MSE training, checkpointing the weights and saving a
    side-by-side (input | target | prediction) preview of the last batch after
    each epoch.
    """
    model = img2state(opt).cuda()
    dataset = Robotdata.get_loader(opt)
    optimizer = torch.optim.Adam(model.parameters())
    loss_fn = nn.MSELoss()
    out_dir = './tmp/imgpred'
    # Bug fix: img.save below fails with FileNotFoundError if the preview
    # directory does not already exist.
    os.makedirs(out_dir, exist_ok=True)
    for epoch in range(50):
        for i, item in enumerate(dataset):
            state, action, result = item[0]
            state = state.float().cuda()
            action = action.float().cuda()
            result = result.float().cuda()
            out = model(state, action)
            # x100 only scales the printed value / gradient magnitude.
            loss = loss_fn(out, result) * 100
            print(epoch, i, loss.item())
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        torch.save(model.state_dict(), './imgpred.pth')
        # Visualize the last batch: concatenate input | target | prediction.
        # Bug fix: `out` still requires grad; detach before the PIL conversion,
        # which needs a plain numpy-convertible tensor.
        merge_img = []
        for before, after, pred in zip(state, result, out.detach()):
            img = torch.cat([before, after, pred], 1)
            merge_img.append(img)
        merge_img = torch.cat(merge_img, 2).cpu()
        merge_img = (merge_img + 1) / 2  # undo [-1, 1] normalization
        img = transforms.ToPILImage()(merge_img)
        img.save(os.path.join(out_dir, 'img_{}.jpg'.format(epoch)))
def test_control_newton_cr_multiple(state_forms, bcs_list, J, states, controls, adjoints, config_ocp):
    """Newton's method with CR inner iterations should converge to the stated
    tolerance within two outer iterations."""
    config_ocp.set('AlgoTNM', 'inner_newton', 'cr')
    problem = cashocs.OptimalControlProblem(state_forms, bcs_list, J, states, controls, adjoints, config=config_ocp)
    problem.solve(algorithm='newton', rtol=0.01, atol=0.0, max_iter=2)
    assert problem.solver.relative_norm <= 0.0001
# NOTE(review): bare registry call — in fairseq-style code this is normally the
# decorator form `@register_tokenizer('space', dataclass=FairseqDataclass)`
# applied to the class below; confirm the missing '@' is intentional.
_tokenizer('space', dataclass=FairseqDataclass)
class SpaceTokenizer(object):
    """Whitespace-normalizing tokenizer.

    Encoding collapses every run of whitespace to a single space; decoding is
    the identity, since the original spacing cannot be recovered.
    """

    def __init__(self, *unused):
        # One-or-more whitespace characters (spaces, tabs, newlines, ...).
        self.space_tok = re.compile(r'\s+')

    def encode(self, x: str) -> str:
        """Replace each whitespace run in ``x`` with a single space."""
        return self.space_tok.sub(' ', x)

    def decode(self, x: str) -> str:
        """Return ``x`` unchanged (space tokenization is not invertible)."""
        return x
class Mixed_4c(nn.Module):
    """Inception-style 3-D mixed block (stage 4c).

    Four parallel branches over a 512-channel input whose outputs are
    concatenated along the channel dimension (160 + 224 + 64 + 64 channels).
    """

    def __init__(self):
        super(Mixed_4c, self).__init__()
        # 1x1 bottleneck branch.
        self.branch0 = nn.Sequential(
            BasicConv3d(512, 160, kernel_size=1, stride=1),
        )
        # 1x1 reduce followed by a separable 3x3x3 convolution.
        self.branch1 = nn.Sequential(
            BasicConv3d(512, 112, kernel_size=1, stride=1),
            SepConv3d(112, 224, kernel_size=3, stride=1, padding=1),
        )
        self.branch2 = nn.Sequential(
            BasicConv3d(512, 24, kernel_size=1, stride=1),
            SepConv3d(24, 64, kernel_size=3, stride=1, padding=1),
        )
        # Max-pool branch with a 1x1 projection.
        self.branch3 = nn.Sequential(
            nn.MaxPool3d(kernel_size=(3, 3, 3), stride=1, padding=1),
            BasicConv3d(512, 64, kernel_size=1, stride=1),
        )

    def forward(self, x):
        branches = (self.branch0, self.branch1, self.branch2, self.branch3)
        return torch.cat([branch(x) for branch in branches], 1)
class EfficientDMC():
    """Holds clusterings and a smoothed count cache for a dependency-measure
    computation (measure_type suggests mutual information — confirm usage)."""

    def __init__(self, clusterings: List[Clustering], measure_type='mutual_info'):
        # NOTE(review): measure_type is accepted but never stored or used in
        # this class as visible here — confirm whether it should be kept.
        self.clusterings = clusterings
        # Additive smoothing constant so counts/sums are never exactly zero.
        self.eps = 1e-20

    def init_cache(self):
        """(Re)initialize the smoothed count table N and its total n.

        NOTE(review): self.combinations is never set in __init__ — it must be
        assigned externally before init_cache() is called, otherwise this
        raises AttributeError. Confirm the intended initialization path.
        """
        P = len(self.combinations)
        # Assumes every clustering shares the same centroid count — TODO confirm.
        C = self.clusterings[0].ncentroids
        N = torch.full((P, C), self.eps)
        # Grand total over both axes of the (P, C) table.
        n = N.sum(dim=[(- 1), (- 2)])
        self.cache = {'N': N, 'n': n}
def LF_nonunion(c):
    """Labeling function: vote -1 (negative) when the complication span reads
    'nonunion' (with or without hyphen), otherwise abstain with 0."""
    span_text = c.complication.get_span().lower()
    if span_text in ('nonunion', 'non-union'):
        return -1
    return 0
class TwoAFCDataset(BaseDataset):
    """Two-alternative forced-choice perceptual dataset.

    Each sample pairs a reference image with two distorted candidates (p0, p1)
    and a human judgement score loaded from a numpy file.
    """

    def initialize(self, dataroots, load_size=64):
        """Index ref/p0/p1/judge files under each root and build the transform."""
        if not isinstance(dataroots, list):
            dataroots = [dataroots]
        self.roots = dataroots
        self.load_size = load_size

        # Image file lists, sorted so that ref/p0/p1/judge stay index-aligned.
        self.dir_ref = [os.path.join(root, 'ref') for root in self.roots]
        self.ref_paths = sorted(make_dataset(self.dir_ref))
        self.dir_p0 = [os.path.join(root, 'p0') for root in self.roots]
        self.p0_paths = sorted(make_dataset(self.dir_p0))
        self.dir_p1 = [os.path.join(root, 'p1') for root in self.roots]
        self.p1_paths = sorted(make_dataset(self.dir_p1))

        # Resize, convert to tensor, normalize to [-1, 1].
        self.transform = transforms.Compose([
            transforms.Scale(load_size),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ])

        # Judgement scores stored as numpy files.
        self.dir_J = [os.path.join(root, 'judge') for root in self.roots]
        self.judge_paths = sorted(make_dataset(self.dir_J, mode='np'))

    def __getitem__(self, index):
        p0_path = self.p0_paths[index]
        p1_path = self.p1_paths[index]
        ref_path = self.ref_paths[index]
        judge_path = self.judge_paths[index]

        p0_img = self.transform(Image.open(p0_path).convert('RGB'))
        p1_img = self.transform(Image.open(p1_path).convert('RGB'))
        ref_img = self.transform(Image.open(ref_path).convert('RGB'))
        # Scalar judge score reshaped to (1, 1, 1) so it broadcasts like an image.
        judge_img = torch.FloatTensor(np.load(judge_path).reshape((1, 1, 1)))

        return {'p0': p0_img, 'p1': p1_img, 'ref': ref_img, 'judge': judge_img,
                'p0_path': p0_path, 'p1_path': p1_path, 'ref_path': ref_path,
                'judge_path': judge_path}

    def __len__(self):
        return len(self.p0_paths)
class Scanner():
def __init__(self):
self.done = False
self.flow_level = 0
self.tokens = []
self.fetch_stream_start()
self.tokens_taken = 0
self.indent = (- 1)
self.indents = []
self.allow_simple_key = True
self.possible_simple_keys = {}
def check_token(self, *choices):
while self.need_more_tokens():
self.fetch_more_tokens()
if self.tokens:
if (not choices):
return True
for choice in choices:
if isinstance(self.tokens[0], choice):
return True
return False
def peek_token(self):
while self.need_more_tokens():
self.fetch_more_tokens()
if self.tokens:
return self.tokens[0]
else:
return None
def get_token(self):
while self.need_more_tokens():
self.fetch_more_tokens()
if self.tokens:
self.tokens_taken += 1
return self.tokens.pop(0)
def need_more_tokens(self):
if self.done:
return False
if (not self.tokens):
return True
self.stale_possible_simple_keys()
if (self.next_possible_simple_key() == self.tokens_taken):
return True
def fetch_more_tokens(self):
self.scan_to_next_token()
self.stale_possible_simple_keys()
self.unwind_indent(self.column)
ch = self.peek()
if (ch == '\x00'):
return self.fetch_stream_end()
if ((ch == '%') and self.check_directive()):
return self.fetch_directive()
if ((ch == '-') and self.check_document_start()):
return self.fetch_document_start()
if ((ch == '.') and self.check_document_end()):
return self.fetch_document_end()
if (ch == '['):
return self.fetch_flow_sequence_start()
if (ch == '{'):
return self.fetch_flow_mapping_start()
if (ch == ']'):
return self.fetch_flow_sequence_end()
if (ch == '}'):
return self.fetch_flow_mapping_end()
if (ch == ','):
return self.fetch_flow_entry()
if ((ch == '-') and self.check_block_entry()):
return self.fetch_block_entry()
if ((ch == '?') and self.check_key()):
return self.fetch_key()
if ((ch == ':') and self.check_value()):
return self.fetch_value()
if (ch == '*'):
return self.fetch_alias()
if (ch == '&'):
return self.fetch_anchor()
if (ch == '!'):
return self.fetch_tag()
if ((ch == '|') and (not self.flow_level)):
return self.fetch_literal()
if ((ch == '>') and (not self.flow_level)):
return self.fetch_folded()
if (ch == "'"):
return self.fetch_single()
if (ch == '"'):
return self.fetch_double()
if self.check_plain():
return self.fetch_plain()
raise ScannerError('while scanning for the next token', None, ('found character %r that cannot start any token' % ch), self.get_mark())
def next_possible_simple_key(self):
min_token_number = None
for level in self.possible_simple_keys:
key = self.possible_simple_keys[level]
if ((min_token_number is None) or (key.token_number < min_token_number)):
min_token_number = key.token_number
return min_token_number
def stale_possible_simple_keys(self):
for level in list(self.possible_simple_keys):
key = self.possible_simple_keys[level]
if ((key.line != self.line) or ((self.index - key.index) > 1024)):
if key.required:
raise ScannerError('while scanning a simple key', key.mark, "could not find expected ':'", self.get_mark())
del self.possible_simple_keys[level]
def save_possible_simple_key(self):
required = ((not self.flow_level) and (self.indent == self.column))
if self.allow_simple_key:
self.remove_possible_simple_key()
token_number = (self.tokens_taken + len(self.tokens))
key = SimpleKey(token_number, required, self.index, self.line, self.column, self.get_mark())
self.possible_simple_keys[self.flow_level] = key
def remove_possible_simple_key(self):
if (self.flow_level in self.possible_simple_keys):
key = self.possible_simple_keys[self.flow_level]
if key.required:
raise ScannerError('while scanning a simple key', key.mark, "could not find expected ':'", self.get_mark())
del self.possible_simple_keys[self.flow_level]
def unwind_indent(self, column):
if self.flow_level:
return
while (self.indent > column):
mark = self.get_mark()
self.indent = self.indents.pop()
self.tokens.append(BlockEndToken(mark, mark))
def add_indent(self, column):
if (self.indent < column):
self.indents.append(self.indent)
self.indent = column
return True
return False
def fetch_stream_start(self):
mark = self.get_mark()
self.tokens.append(StreamStartToken(mark, mark, encoding=self.encoding))
def fetch_stream_end(self):
self.unwind_indent((- 1))
self.remove_possible_simple_key()
self.allow_simple_key = False
self.possible_simple_keys = {}
mark = self.get_mark()
self.tokens.append(StreamEndToken(mark, mark))
self.done = True
def fetch_directive(self):
self.unwind_indent((- 1))
self.remove_possible_simple_key()
self.allow_simple_key = False
self.tokens.append(self.scan_directive())
def fetch_document_start(self):
self.fetch_document_indicator(DocumentStartToken)
def fetch_document_end(self):
self.fetch_document_indicator(DocumentEndToken)
def fetch_document_indicator(self, TokenClass):
self.unwind_indent((- 1))
self.remove_possible_simple_key()
self.allow_simple_key = False
start_mark = self.get_mark()
self.forward(3)
end_mark = self.get_mark()
self.tokens.append(TokenClass(start_mark, end_mark))
def fetch_flow_sequence_start(self):
self.fetch_flow_collection_start(FlowSequenceStartToken)
def fetch_flow_mapping_start(self):
self.fetch_flow_collection_start(FlowMappingStartToken)
def fetch_flow_collection_start(self, TokenClass):
self.save_possible_simple_key()
self.flow_level += 1
self.allow_simple_key = True
start_mark = self.get_mark()
self.forward()
end_mark = self.get_mark()
self.tokens.append(TokenClass(start_mark, end_mark))
def fetch_flow_sequence_end(self):
self.fetch_flow_collection_end(FlowSequenceEndToken)
def fetch_flow_mapping_end(self):
self.fetch_flow_collection_end(FlowMappingEndToken)
def fetch_flow_collection_end(self, TokenClass):
self.remove_possible_simple_key()
self.flow_level -= 1
self.allow_simple_key = False
start_mark = self.get_mark()
self.forward()
end_mark = self.get_mark()
self.tokens.append(TokenClass(start_mark, end_mark))
def fetch_flow_entry(self):
self.allow_simple_key = True
self.remove_possible_simple_key()
start_mark = self.get_mark()
self.forward()
end_mark = self.get_mark()
self.tokens.append(FlowEntryToken(start_mark, end_mark))
def fetch_block_entry(self):
if (not self.flow_level):
if (not self.allow_simple_key):
raise ScannerError(None, None, 'sequence entries are not allowed here', self.get_mark())
if self.add_indent(self.column):
mark = self.get_mark()
self.tokens.append(BlockSequenceStartToken(mark, mark))
else:
pass
self.allow_simple_key = True
self.remove_possible_simple_key()
start_mark = self.get_mark()
self.forward()
end_mark = self.get_mark()
self.tokens.append(BlockEntryToken(start_mark, end_mark))
def fetch_key(self):
if (not self.flow_level):
if (not self.allow_simple_key):
raise ScannerError(None, None, 'mapping keys are not allowed here', self.get_mark())
if self.add_indent(self.column):
mark = self.get_mark()
self.tokens.append(BlockMappingStartToken(mark, mark))
self.allow_simple_key = (not self.flow_level)
self.remove_possible_simple_key()
start_mark = self.get_mark()
self.forward()
end_mark = self.get_mark()
self.tokens.append(KeyToken(start_mark, end_mark))
def fetch_value(self):
if (self.flow_level in self.possible_simple_keys):
key = self.possible_simple_keys[self.flow_level]
del self.possible_simple_keys[self.flow_level]
self.tokens.insert((key.token_number - self.tokens_taken), KeyToken(key.mark, key.mark))
if (not self.flow_level):
if self.add_indent(key.column):
self.tokens.insert((key.token_number - self.tokens_taken), BlockMappingStartToken(key.mark, key.mark))
self.allow_simple_key = False
else:
if (not self.flow_level):
if (not self.allow_simple_key):
raise ScannerError(None, None, 'mapping values are not allowed here', self.get_mark())
if (not self.flow_level):
if self.add_indent(self.column):
mark = self.get_mark()
self.tokens.append(BlockMappingStartToken(mark, mark))
self.allow_simple_key = (not self.flow_level)
self.remove_possible_simple_key()
start_mark = self.get_mark()
self.forward()
end_mark = self.get_mark()
self.tokens.append(ValueToken(start_mark, end_mark))
def fetch_alias(self):
self.save_possible_simple_key()
self.allow_simple_key = False
self.tokens.append(self.scan_anchor(AliasToken))
def fetch_anchor(self):
self.save_possible_simple_key()
self.allow_simple_key = False
self.tokens.append(self.scan_anchor(AnchorToken))
def fetch_tag(self):
self.save_possible_simple_key()
self.allow_simple_key = False
self.tokens.append(self.scan_tag())
def fetch_literal(self):
self.fetch_block_scalar(style='|')
def fetch_folded(self):
self.fetch_block_scalar(style='>')
def fetch_block_scalar(self, style):
self.allow_simple_key = True
self.remove_possible_simple_key()
self.tokens.append(self.scan_block_scalar(style))
def fetch_single(self):
self.fetch_flow_scalar(style="'")
def fetch_double(self):
self.fetch_flow_scalar(style='"')
def fetch_flow_scalar(self, style):
self.save_possible_simple_key()
self.allow_simple_key = False
self.tokens.append(self.scan_flow_scalar(style))
def fetch_plain(self):
self.save_possible_simple_key()
self.allow_simple_key = False
self.tokens.append(self.scan_plain())
def check_directive(self):
if (self.column == 0):
return True
def check_document_start(self):
if (self.column == 0):
if ((self.prefix(3) == '---') and (self.peek(3) in '\x00 \t\r\n\x85\u2028\u2029')):
return True
def check_document_end(self):
if (self.column == 0):
if ((self.prefix(3) == '...') and (self.peek(3) in '\x00 \t\r\n\x85\u2028\u2029')):
return True
def check_block_entry(self):
return (self.peek(1) in '\x00 \t\r\n\x85\u2028\u2029')
def check_key(self):
if self.flow_level:
return True
else:
return (self.peek(1) in '\x00 \t\r\n\x85\u2028\u2029')
def check_value(self):
if self.flow_level:
return True
else:
return (self.peek(1) in '\x00 \t\r\n\x85\u2028\u2029')
def check_plain(self):
ch = self.peek()
return ((ch not in '\x00 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'"%`') or ((self.peek(1) not in '\x00 \t\r\n\x85\u2028\u2029') and ((ch == '-') or ((not self.flow_level) and (ch in '?:')))))
def scan_to_next_token(self):
if ((self.index == 0) and (self.peek() == '\ufeff')):
self.forward()
found = False
while (not found):
while (self.peek() == ' '):
self.forward()
if (self.peek() == '#'):
while (self.peek() not in '\x00\r\n\x85\u2028\u2029'):
self.forward()
if self.scan_line_break():
if (not self.flow_level):
self.allow_simple_key = True
else:
found = True
def scan_directive(self):
start_mark = self.get_mark()
self.forward()
name = self.scan_directive_name(start_mark)
value = None
if (name == 'YAML'):
value = self.scan_yaml_directive_value(start_mark)
end_mark = self.get_mark()
elif (name == 'TAG'):
value = self.scan_tag_directive_value(start_mark)
end_mark = self.get_mark()
else:
end_mark = self.get_mark()
while (self.peek() not in '\x00\r\n\x85\u2028\u2029'):
self.forward()
self.scan_directive_ignored_line(start_mark)
return DirectiveToken(name, value, start_mark, end_mark)
def scan_directive_name(self, start_mark):
length = 0
ch = self.peek(length)
while (('0' <= ch <= '9') or ('A' <= ch <= 'Z') or ('a' <= ch <= 'z') or (ch in '-_')):
length += 1
ch = self.peek(length)
if (not length):
raise ScannerError('while scanning a directive', start_mark, ('expected alphabetic or numeric character, but found %r' % ch), self.get_mark())
value = self.prefix(length)
self.forward(length)
ch = self.peek()
if (ch not in '\x00 \r\n\x85\u2028\u2029'):
raise ScannerError('while scanning a directive', start_mark, ('expected alphabetic or numeric character, but found %r' % ch), self.get_mark())
return value
def scan_yaml_directive_value(self, start_mark):
while (self.peek() == ' '):
self.forward()
major = self.scan_yaml_directive_number(start_mark)
if (self.peek() != '.'):
raise ScannerError('while scanning a directive', start_mark, ("expected a digit or '.', but found %r" % self.peek()), self.get_mark())
self.forward()
minor = self.scan_yaml_directive_number(start_mark)
if (self.peek() not in '\x00 \r\n\x85\u2028\u2029'):
raise ScannerError('while scanning a directive', start_mark, ("expected a digit or ' ', but found %r" % self.peek()), self.get_mark())
return (major, minor)
def scan_yaml_directive_number(self, start_mark):
ch = self.peek()
if (not ('0' <= ch <= '9')):
raise ScannerError('while scanning a directive', start_mark, ('expected a digit, but found %r' % ch), self.get_mark())
length = 0
while ('0' <= self.peek(length) <= '9'):
length += 1
value = int(self.prefix(length))
self.forward(length)
return value
def scan_tag_directive_value(self, start_mark):
while (self.peek() == ' '):
self.forward()
handle = self.scan_tag_directive_handle(start_mark)
while (self.peek() == ' '):
self.forward()
prefix = self.scan_tag_directive_prefix(start_mark)
return (handle, prefix)
def scan_tag_directive_handle(self, start_mark):
value = self.scan_tag_handle('directive', start_mark)
ch = self.peek()
if (ch != ' '):
raise ScannerError('while scanning a directive', start_mark, ("expected ' ', but found %r" % ch), self.get_mark())
return value
def scan_tag_directive_prefix(self, start_mark):
value = self.scan_tag_uri('directive', start_mark)
ch = self.peek()
if (ch not in '\x00 \r\n\x85\u2028\u2029'):
raise ScannerError('while scanning a directive', start_mark, ("expected ' ', but found %r" % ch), self.get_mark())
return value
def scan_directive_ignored_line(self, start_mark):
while (self.peek() == ' '):
self.forward()
if (self.peek() == '#'):
while (self.peek() not in '\x00\r\n\x85\u2028\u2029'):
self.forward()
ch = self.peek()
if (ch not in '\x00\r\n\x85\u2028\u2029'):
raise ScannerError('while scanning a directive', start_mark, ('expected a comment or a line break, but found %r' % ch), self.get_mark())
self.scan_line_break()
def scan_anchor(self, TokenClass):
start_mark = self.get_mark()
indicator = self.peek()
if (indicator == '*'):
name = 'alias'
else:
name = 'anchor'
self.forward()
length = 0
ch = self.peek(length)
while (('0' <= ch <= '9') or ('A' <= ch <= 'Z') or ('a' <= ch <= 'z') or (ch in '-_')):
length += 1
ch = self.peek(length)
if (not length):
raise ScannerError(('while scanning an %s' % name), start_mark, ('expected alphabetic or numeric character, but found %r' % ch), self.get_mark())
value = self.prefix(length)
self.forward(length)
ch = self.peek()
if (ch not in '\x00 \t\r\n\x85\u2028\u2029?:,]}%`'):
raise ScannerError(('while scanning an %s' % name), start_mark, ('expected alphabetic or numeric character, but found %r' % ch), self.get_mark())
end_mark = self.get_mark()
return TokenClass(value, start_mark, end_mark)
def scan_tag(self):
start_mark = self.get_mark()
ch = self.peek(1)
if (ch == '<'):
handle = None
self.forward(2)
suffix = self.scan_tag_uri('tag', start_mark)
if (self.peek() != '>'):
raise ScannerError('while parsing a tag', start_mark, ("expected '>', but found %r" % self.peek()), self.get_mark())
self.forward()
elif (ch in '\x00 \t\r\n\x85\u2028\u2029'):
handle = None
suffix = '!'
self.forward()
else:
length = 1
use_handle = False
while (ch not in '\x00 \r\n\x85\u2028\u2029'):
if (ch == '!'):
use_handle = True
break
length += 1
ch = self.peek(length)
handle = '!'
if use_handle:
handle = self.scan_tag_handle('tag', start_mark)
else:
handle = '!'
self.forward()
suffix = self.scan_tag_uri('tag', start_mark)
ch = self.peek()
if (ch not in '\x00 \r\n\x85\u2028\u2029'):
raise ScannerError('while scanning a tag', start_mark, ("expected ' ', but found %r" % ch), self.get_mark())
value = (handle, suffix)
end_mark = self.get_mark()
return TagToken(value, start_mark, end_mark)
def scan_block_scalar(self, style):
if (style == '>'):
folded = True
else:
folded = False
chunks = []
start_mark = self.get_mark()
self.forward()
(chomping, increment) = self.scan_block_scalar_indicators(start_mark)
self.scan_block_scalar_ignored_line(start_mark)
min_indent = (self.indent + 1)
if (min_indent < 1):
min_indent = 1
if (increment is None):
(breaks, max_indent, end_mark) = self.scan_block_scalar_indentation()
indent = max(min_indent, max_indent)
else:
indent = ((min_indent + increment) - 1)
(breaks, end_mark) = self.scan_block_scalar_breaks(indent)
line_break = ''
while ((self.column == indent) and (self.peek() != '\x00')):
chunks.extend(breaks)
leading_non_space = (self.peek() not in ' \t')
length = 0
while (self.peek(length) not in '\x00\r\n\x85\u2028\u2029'):
length += 1
chunks.append(self.prefix(length))
self.forward(length)
line_break = self.scan_line_break()
(breaks, end_mark) = self.scan_block_scalar_breaks(indent)
if ((self.column == indent) and (self.peek() != '\x00')):
if (folded and (line_break == '\n') and leading_non_space and (self.peek() not in ' \t')):
if (not breaks):
chunks.append(' ')
else:
chunks.append(line_break)
else:
break
if (chomping is not False):
chunks.append(line_break)
if (chomping is True):
chunks.extend(breaks)
return ScalarToken(''.join(chunks), False, start_mark, end_mark, style)
def scan_block_scalar_indicators(self, start_mark):
    """Scan the header indicators of a block scalar ('|' / '>').

    Returns (chomping, increment): chomping is True for '+', False for '-',
    None if absent; increment is the explicit indentation indicator 1-9, or
    None. The two indicators may appear in either order.

    Bug fix: both digit character classes had been emptied (`ch in ''` is
    always False), so an explicit indentation indicator was never parsed and
    a header like '|2' raised instead. Restored '0123456789' (as in upstream
    PyYAML).
    """
    chomping = None
    increment = None
    ch = self.peek()
    if ch in '+-':
        if ch == '+':
            chomping = True
        else:
            chomping = False
        self.forward()
        ch = self.peek()
        if ch in '0123456789':
            increment = int(ch)
            if increment == 0:
                raise ScannerError('while scanning a block scalar', start_mark, 'expected indentation indicator in the range 1-9, but found 0', self.get_mark())
            self.forward()
    elif ch in '0123456789':
        increment = int(ch)
        if increment == 0:
            raise ScannerError('while scanning a block scalar', start_mark, 'expected indentation indicator in the range 1-9, but found 0', self.get_mark())
        self.forward()
        ch = self.peek()
        if ch in '+-':
            if ch == '+':
                chomping = True
            else:
                chomping = False
            self.forward()
    ch = self.peek()
    # After the indicators only whitespace, a comment, or a line break may follow.
    if ch not in '\x00 \r\n\x85\u2028\u2029':
        raise ScannerError('while scanning a block scalar', start_mark, ('expected chomping or indentation indicators, but found %r' % ch), self.get_mark())
    return (chomping, increment)
def scan_block_scalar_ignored_line(self, start_mark):
while (self.peek() == ' '):
self.forward()
if (self.peek() == '#'):
while (self.peek() not in '\x00\r\n\x85\u2028\u2029'):
self.forward()
ch = self.peek()
if (ch not in '\x00\r\n\x85\u2028\u2029'):
raise ScannerError('while scanning a block scalar', start_mark, ('expected a comment or a line break, but found %r' % ch), self.get_mark())
self.scan_line_break()
def scan_block_scalar_indentation(self):
chunks = []
max_indent = 0
end_mark = self.get_mark()
while (self.peek() in ' \r\n\x85\u2028\u2029'):
if (self.peek() != ' '):
chunks.append(self.scan_line_break())
end_mark = self.get_mark()
else:
self.forward()
if (self.column > max_indent):
max_indent = self.column
return (chunks, max_indent, end_mark)
def scan_block_scalar_breaks(self, indent):
chunks = []
end_mark = self.get_mark()
while ((self.column < indent) and (self.peek() == ' ')):
self.forward()
while (self.peek() in '\r\n\x85\u2028\u2029'):
chunks.append(self.scan_line_break())
end_mark = self.get_mark()
while ((self.column < indent) and (self.peek() == ' ')):
self.forward()
return (chunks, end_mark)
def scan_flow_scalar(self, style):
if (style == '"'):
double = True
else:
double = False
chunks = []
start_mark = self.get_mark()
quote = self.peek()
self.forward()
chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
while (self.peek() != quote):
chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
self.forward()
end_mark = self.get_mark()
return ScalarToken(''.join(chunks), False, start_mark, end_mark, style)
ESCAPE_REPLACEMENTS = {'0': '\x00', 'a': '\x07', 'b': '\x08', 't': '\t', '\t': '\t', 'n': '\n', 'v': '\x0b', 'f': '\x0c', 'r': '\r', 'e': '\x1b', ' ': ' ', '"': '"', '\\': '\\', '/': '/', 'N': '\x85', '_': '\xa0', 'L': '\u2028', 'P': '\u2029'}
ESCAPE_CODES = {'x': 2, 'u': 4, 'U': 8}
def scan_flow_scalar_non_spaces(self, double, start_mark):
    """Scan the non-whitespace portions of a quoted flow scalar.

    Handles the '' escape in single-quoted scalars and the backslash
    escape machinery in double-quoted ones.  Returns a list of string
    chunks; stops (and returns) at whitespace, the quote, or EOF.

    Raises ScannerError on a malformed or unknown escape sequence.
    """
    chunks = []
    while True:
        # Take the longest run of ordinary (non-special) characters.
        length = 0
        while (self.peek(length) not in '\'"\\\x00 \t\r\n\x85\u2028\u2029'):
            length += 1
        if length:
            chunks.append(self.prefix(length))
            self.forward(length)
        ch = self.peek()
        if ((not double) and (ch == "'") and (self.peek(1) == "'")):
            # '' inside a single-quoted scalar is an escaped quote.
            chunks.append("'")
            self.forward(2)
        elif ((double and (ch == "'")) or ((not double) and (ch in '"\\'))):
            # These characters are literal in the current quoting style.
            chunks.append(ch)
            self.forward()
        elif (double and (ch == '\\')):
            self.forward()
            ch = self.peek()
            if (ch in self.ESCAPE_REPLACEMENTS):
                chunks.append(self.ESCAPE_REPLACEMENTS[ch])
                self.forward()
            elif (ch in self.ESCAPE_CODES):
                # Fixed-width hex escape: \xXX, \uXXXX or \UXXXXXXXX.
                length = self.ESCAPE_CODES[ch]
                self.forward()
                for k in range(length):
                    # BUG FIX: the digit check previously omitted 0-9, so
                    # valid escapes such as \x41 raised a spurious error.
                    if (self.peek(k) not in '0123456789ABCDEFabcdef'):
                        raise ScannerError('while scanning a double-quoted scalar', start_mark, ('expected escape sequence of %d hexdecimal numbers, but found %r' % (length, self.peek(k))), self.get_mark())
                code = int(self.prefix(length), 16)
                chunks.append(chr(code))
                self.forward(length)
            elif (ch in '\r\n\x85\u2028\u2029'):
                # Escaped line break: the newline (and following breaks'
                # indentation) is folded away entirely.
                self.scan_line_break()
                chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
            else:
                raise ScannerError('while scanning a double-quoted scalar', start_mark, ('found unknown escape character %r' % ch), self.get_mark())
        else:
            return chunks
def scan_flow_scalar_spaces(self, double, start_mark):
    """Scan a run of spaces/tabs inside a quoted scalar, applying YAML
    line folding when the run ends at a line break.

    Raises ScannerError on end of stream inside the scalar.
    """
    length = 0
    while self.peek(length) in ' \t':
        length += 1
    whitespaces = self.prefix(length)
    self.forward(length)
    ch = self.peek()
    if ch == '\x00':
        raise ScannerError('while scanning a quoted scalar', start_mark, 'found unexpected end of stream', self.get_mark())
    if ch not in '\r\n\x85\u2028\u2029':
        # Plain whitespace between words: keep it verbatim.
        return [whitespaces]
    # Folding: a single '\n' becomes a space, other/multiple breaks are
    # kept (the first '\n' of a multi-break run is dropped).
    line_break = self.scan_line_break()
    breaks = self.scan_flow_scalar_breaks(double, start_mark)
    chunks = []
    if line_break != '\n':
        chunks.append(line_break)
    elif not breaks:
        chunks.append(' ')
    chunks.extend(breaks)
    return chunks
def scan_flow_scalar_breaks(self, double, start_mark):
    """Collect consecutive line breaks (skipping their indentation)
    inside a quoted scalar.

    Raises ScannerError if a document separator ('---' / '...') is
    found, since a quoted scalar cannot span documents.
    """
    chunks = []
    while True:
        head = self.prefix(3)
        if head in ('---', '...') and self.peek(3) in '\x00 \t\r\n\x85\u2028\u2029':
            raise ScannerError('while scanning a quoted scalar', start_mark, 'found unexpected document separator', self.get_mark())
        while self.peek() in ' \t':
            self.forward()
        if self.peek() not in '\r\n\x85\u2028\u2029':
            return chunks
        chunks.append(self.scan_line_break())
def scan_plain(self):
    """Scan a plain (unquoted) scalar and return a ScalarToken.

    Text chunks are accumulated together with the folded whitespace
    that separates them; `spaces` holds the pending whitespace from
    the previous round and is only committed once more text follows.
    """
    chunks = []
    start_mark = self.get_mark()
    end_mark = start_mark
    # A continuation line must be indented deeper than the current block.
    indent = (self.indent + 1)
    spaces = []
    while True:
        length = 0
        if (self.peek() == '#'):
            # A comment terminates the scalar.
            break
        while True:
            ch = self.peek(length)
            # Stop at whitespace/EOF; at ':' followed by a separator
            # (also by flow indicators when inside a flow collection);
            # or at any flow indicator inside a flow collection.
            if ((ch in '\x00 \t\r\n\x85\u2028\u2029') or ((ch == ':') and (self.peek((length + 1)) in ('\x00 \t\r\n\x85\u2028\u2029' + (u',[]{}' if self.flow_level else u'')))) or (self.flow_level and (ch in ',?[]{}'))):
                break
            length += 1
        if (length == 0):
            break
        # Once the scalar has content it can no longer start a simple key.
        self.allow_simple_key = False
        chunks.extend(spaces)
        chunks.append(self.prefix(length))
        self.forward(length)
        end_mark = self.get_mark()
        spaces = self.scan_plain_spaces(indent, start_mark)
        # Stop on end of whitespace scan (document separator), a comment,
        # or a dedent below `indent` in block context.
        if ((not spaces) or (self.peek() == '#') or ((not self.flow_level) and (self.column < indent))):
            break
    return ScalarToken(''.join(chunks), True, start_mark, end_mark)
def scan_plain_spaces(self, indent, start_mark):
    """Scan the whitespace (and possible folded line breaks) after a
    plain-scalar chunk.

    Returns the list of chunks to insert before the next piece of text,
    or None when a document separator ('---' / '...') is reached and
    the plain scalar must therefore end.
    """
    chunks = []
    length = 0
    while (self.peek(length) in ' '):
        length += 1
    whitespaces = self.prefix(length)
    self.forward(length)
    ch = self.peek()
    if (ch in '\r\n\x85\u2028\u2029'):
        line_break = self.scan_line_break()
        # After a line break a simple key becomes possible again.
        self.allow_simple_key = True
        prefix = self.prefix(3)
        if (((prefix == '---') or (prefix == '...')) and (self.peek(3) in '\x00 \t\r\n\x85\u2028\u2029')):
            # Document separator: signal the caller to stop (returns None).
            return
        breaks = []
        while (self.peek() in ' \r\n\x85\u2028\u2029'):
            if (self.peek() == ' '):
                self.forward()
            else:
                breaks.append(self.scan_line_break())
                # Re-check for a document separator after every break.
                prefix = self.prefix(3)
                if (((prefix == '---') or (prefix == '...')) and (self.peek(3) in '\x00 \t\r\n\x85\u2028\u2029')):
                    return
        # Folding: a non-'\n' break is kept verbatim, a single '\n'
        # becomes a space, multiple breaks keep the extra ones.
        if (line_break != '\n'):
            chunks.append(line_break)
        elif (not breaks):
            chunks.append(' ')
        chunks.extend(breaks)
    elif whitespaces:
        chunks.append(whitespaces)
    return chunks
def scan_tag_handle(self, name, start_mark):
    """Scan a tag handle: '!', '!!' or '!word!'.  Returns the handle text.

    Raises ScannerError when the handle does not start with '!' or a
    named handle is not closed by '!'.
    """
    def is_word_char(c):
        return ('0' <= c <= '9') or ('A' <= c <= 'Z') or ('a' <= c <= 'z') or c in '-_'

    if self.peek() != '!':
        raise ScannerError(('while scanning a %s' % name), start_mark, ("expected '!', but found %r" % self.peek()), self.get_mark())
    length = 1
    ch = self.peek(length)
    if ch != ' ':
        # Consume the word characters of a named handle.
        while is_word_char(ch):
            length += 1
            ch = self.peek(length)
        if ch != '!':
            # A named handle must be closed by a second '!'.
            self.forward(length)
            raise ScannerError(('while scanning a %s' % name), start_mark, ("expected '!', but found %r" % ch), self.get_mark())
        length += 1
    handle = self.prefix(length)
    self.forward(length)
    return handle
def scan_tag_uri(self, name, start_mark):
    """Scan a tag URI, decoding %xx escapes on the fly.

    Returns the URI string; raises ScannerError when no URI characters
    are present at the current position.
    """
    chunks = []
    length = 0
    ch = self.peek(length)
    # BUG FIX: '@' was missing from the accepted URI character set,
    # truncating tags such as 'tag:user@example.com,2002:'.  RFC 3986
    # (and upstream PyYAML) allow it in tag URIs.
    while (('0' <= ch <= '9') or ('A' <= ch <= 'Z') or ('a' <= ch <= 'z') or (ch in "-;/?:@&=+$,_.!~*'()[]%")):
        if (ch == '%'):
            # Flush the literal run, then decode one or more %xx escapes.
            chunks.append(self.prefix(length))
            self.forward(length)
            length = 0
            chunks.append(self.scan_uri_escapes(name, start_mark))
        else:
            length += 1
        ch = self.peek(length)
    if length:
        chunks.append(self.prefix(length))
        self.forward(length)
        length = 0
    if (not chunks):
        raise ScannerError(('while parsing a %s' % name), start_mark, ('expected URI, but found %r' % ch), self.get_mark())
    return ''.join(chunks)
def scan_uri_escapes(self, name, start_mark):
    """Decode a run of %xx escapes into UTF-8 text.

    Raises ScannerError on a malformed escape sequence or when the
    decoded bytes are not valid UTF-8.
    """
    codes = []
    mark = self.get_mark()
    while (self.peek() == '%'):
        self.forward()
        for k in range(2):
            # BUG FIX: the digit check previously omitted 0-9, so any
            # escape containing a decimal digit (e.g. %20) failed.
            if (self.peek(k) not in '0123456789ABCDEFabcdef'):
                raise ScannerError(('while scanning a %s' % name), start_mark, ('expected URI escape sequence of 2 hexdecimal numbers, but found %r' % self.peek(k)), self.get_mark())
        codes.append(int(self.prefix(2), 16))
        self.forward(2)
    try:
        value = bytes(codes).decode('utf-8')
    except UnicodeDecodeError as exc:
        raise ScannerError(('while scanning a %s' % name), start_mark, str(exc), mark)
    return value
def scan_line_break(self):
    """Consume one line break and return its normalized form.

    '\\r\\n', '\\r', '\\n' and NEL (\\x85) all normalize to '\\n'; the
    Unicode line/paragraph separators are returned unchanged.  Returns
    '' when the current character is not a line break.
    """
    ch = self.peek()
    if ch in '\u2028\u2029':
        self.forward()
        return ch
    if ch in '\r\n\x85':
        # '\r\n' counts as a single break.
        self.forward(2 if self.prefix(2) == '\r\n' else 1)
        return '\n'
    return ''
class ConvBN(nn.Sequential):
    """A Conv2d followed by BatchNorm2d, packaged as an nn.Sequential.

    Args:
        c1: input channels, c2: output channels,
        k: kernel size, s: stride, p: padding.
    """

    def __init__(self, c1, c2, k, s, p):
        layers = (nn.Conv2d(c1, c2, k, s, p), nn.BatchNorm2d(c2))
        super().__init__(*layers)
def endstate(state):
    """Append a tasklet to `state` that prints A[0] as a completion marker.

    Reads element 0 of array 'A' and pipes it into a printf tasklet.
    """
    read_a = state.add_read('A')
    printer = state.add_tasklet('endtask', {'a'}, {}, 'printf("done %f\\n", a)')
    state.add_edge(read_a, None, printer, 'a', dace.Memlet(data='A', subset='0'))
class LongT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    """ONNX export configuration for LongT5 (seq2seq, optional KV cache)."""

    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Return the dynamic-axis specification for every model input."""
        common_inputs = {
            'input_ids': {0: 'batch', 1: 'encoder_sequence'},
            'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
        }
        if self.use_past:
            # With a KV cache the decoder consumes one token at a time and
            # the encoder mask spans past + current tokens.
            common_inputs['attention_mask'][1] = 'past_encoder_sequence + sequence'
            common_inputs['decoder_input_ids'] = {0: 'batch'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
            self.fill_with_past_key_values_(common_inputs, direction='inputs')
        else:
            common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}
        return common_inputs

    def default_onnx_opset(self) -> int:
        """Minimum ONNX opset required for the exported graph."""
        return 13
def _get_generator(seed: int) -> torch.Generator:
rng = torch.Generator()
rng.manual_seed(seed)
return rng |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.