code stringlengths 281 23.7M |
|---|
import pytest


# NOTE(review): the decorator survived in this dump only as
# ".parametrize('attn_method', ATTN_METHODS)"; restored as
# @pytest.mark.parametrize, which matches the remnant exactly.
@pytest.mark.parametrize('attn_method', ATTN_METHODS)
def test_performer_freezes_during_inference_time(attn_method):
    """A fitted model must be deterministic at inference: two predict()
    calls on the same input return identical outputs."""
    kwargs = {'num_heads': 2, 'key_dim': 20, 'attention_method': attn_method, 'supports': 2}
    (model, (x, y)) = get_fitted_model(**kwargs)
    y1 = model.predict(x)
    y2 = model.predict(x)
    assert np.allclose(y1, y2)
def estimated_steps_in_epoch(dataloader: Iterable[object], *, num_steps_completed: int, max_steps: Optional[int], max_steps_per_epoch: Optional[int]) -> float:
    """Estimate how many steps the current epoch will run.

    The estimate is the minimum of the dataloader length (when it reports
    one), the per-epoch cap, and the remaining global step budget; it is
    ``float('inf')`` when nothing bounds the epoch.
    """
    bounds = [float('inf')]
    if isinstance(dataloader, Sized):
        try:
            bounds.append(len(dataloader))
        except (NotImplementedError, TypeError):
            # Some loaders declare Sized but cannot actually report a length.
            pass
    if max_steps:
        bounds.append(max_steps - num_steps_completed)
    if max_steps_per_epoch:
        bounds.append(max_steps_per_epoch)
    return min(bounds)
class QueryAndGroup(nn.Module):
    """Gather neighborhoods of points via ball query and group their features.

    Output is the grouped xyz offsets (relative to each query point),
    optionally concatenated with grouped per-point features along dim 1.
    """

    def __init__(self, radius, nsample, use_xyz=True):
        super(QueryAndGroup, self).__init__()
        self.radius = radius
        self.nsample = nsample
        self.use_xyz = use_xyz

    def forward(self, xyz, new_xyz, features=None):
        neighbor_idx = ball_query(self.radius, self.nsample, xyz, new_xyz)
        grouped_xyz = grouping_operation(xyz.transpose(1, 2).contiguous(), neighbor_idx)
        # Recenter each neighborhood on its query point.
        grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)
        if features is None:
            assert self.use_xyz, 'Cannot have not features and not use xyz as a feature!'
            return grouped_xyz
        grouped_features = grouping_operation(features, neighbor_idx)
        if self.use_xyz:
            return torch.cat([grouped_xyz, grouped_features], dim=1)
        return grouped_features
def calc_dino_div(dino, div, generated_images, split='train'):
    """Score generated images: DINO similarity to the module-level
    `reference_images` plus a diversity score, keyed by split prefix."""
    scores = {
        f'{split}_dino_score': dino.img_to_img_similarity(reference_images, generated_images).item(),
        f'{split}_div_score': div.get_score(generated_images).item(),
    }
    return scores
import pytest


# NOTE(review): the decorators in this dump were stripped, leaving remnants
# "(autouse=True)", ".parametrize(...)" and "_filter(option)".  They are
# restored below as @pytest.fixture(autouse=True), @pytest.mark.parametrize
# and @config.change_filter(...), which match the remnants exactly.
class TestChangeFilter():

    @pytest.fixture(autouse=True)
    def cleanup_globals(self, monkeypatch):
        # change_filter registers itself globally; reset so tests don't leak.
        monkeypatch.setattr(config, 'change_filters', [])

    @pytest.mark.parametrize('option', ['foobar', 'tab', 'tabss', 'tabs.'])
    def test_unknown_option(self, option):
        cf = config.change_filter(option)
        with pytest.raises(configexc.NoOptionError):
            cf.validate()

    @pytest.mark.parametrize('option', ['confirm_quit', 'tabs', 'tabs.show'])
    def test_validate(self, option):
        cf = config.change_filter(option)
        cf.validate()
        assert (cf in config.change_filters)

    @pytest.mark.parametrize('method', [True, False])
    @pytest.mark.parametrize('option, changed, matches', [('confirm_quit', 'confirm_quit', True), ('tabs', 'tabs.show', True), ('tabs.show', 'tabs.show', True), ('tabs', None, True), ('tabs', 'colors.tabs.bar.bg', False)])
    def test_call(self, method, option, changed, matches):
        """The filter only invokes the wrapped callable when the changed
        option matches the filtered one (None means "all changed")."""
        was_called = False
        if method:

            class Foo():

                @config.change_filter(option)
                def meth(self):
                    nonlocal was_called
                    was_called = True

            foo = Foo()
            foo.meth(changed)
        else:

            @config.change_filter(option, function=True)
            def func():
                nonlocal was_called
                was_called = True

            func(changed)
        assert (was_called == matches)
class SongsMenuPlugin(MenuItemPlugin):
    """Menu plugin operating on a selection of songs.

    Subclasses implement one (or more) of the plugin_* hooks; the class
    attributes below stay None for hooks a subclass does not provide.
    """

    plugin_single_song = None
    plugin_song = None
    plugin_songs = None
    plugin_single_album = None
    plugin_album = None
    plugin_albums = None

    def __init__(self, songs=None, library=None):
        super().__init__()
        self.__library = library
        self.__songs = songs if songs else []
        # Grey the menu item out when the plugin can't act on this selection.
        self.set_sensitive(bool(self.plugin_handles(songs)))

    def plugin_handles(self, songs):
        # Default: handle any selection; subclasses may narrow this.
        return True

    def handles_albums(self):
        """True when any album-level hook is implemented as a callable."""
        album_hooks = (self.plugin_single_album, self.plugin_album, self.plugin_albums)
        return any(callable(hook) for hook in album_hooks)

    def plugin_finish(self):
        check_wrapper_changed(self.__library, self.__songs)
class SwiGLUFFN(nn.Module):
    """SwiGLU feed-forward block: one fused linear producing gate and value
    halves, SiLU-gated product, then an output projection."""

    def __init__(self, in_features: int, hidden_features: Optional[int]=None, out_features: Optional[int]=None, act_layer: Callable[(..., nn.Module)]=None, drop: float=0.0, bias: bool=True) -> None:
        super().__init__()
        # Fall back to in_features when the optional sizes are unset/falsy.
        if not out_features:
            out_features = in_features
        if not hidden_features:
            hidden_features = in_features
        # w12 packs the gate and value projections into a single matmul.
        self.w12 = nn.Linear(in_features, 2 * hidden_features, bias=bias)
        self.w3 = nn.Linear(hidden_features, out_features, bias=bias)

    def forward(self, x: Tensor) -> Tensor:
        gate, value = self.w12(x).chunk(2, dim=-1)
        return self.w3(F.silu(gate) * value)
def build_description_from_identifier(identifier: str) -> str:
    """Render a build identifier like ``cp310-win_amd64`` as human-readable text.

    Raises ``Exception`` for an unknown interpreter tag or an unknown
    platform identifier.
    """
    python_identifier, _, platform_identifier = identifier.partition('-')
    interpreter_tag = python_identifier[0:2]
    version_tag = python_identifier[2:]
    interpreter_names = {'cp': 'CPython', 'pp': 'PyPy'}
    if interpreter_tag not in interpreter_names:
        msg = f'unknown python {interpreter_tag!r}'
        raise Exception(msg)
    parts = [interpreter_names[interpreter_tag]]
    # e.g. '310' -> ' 3.10 '
    parts.append(f' {version_tag[0]}.{version_tag[1:]} ')
    try:
        parts.append(PLATFORM_IDENTIFIER_DESCRIPTIONS[platform_identifier])
    except KeyError as e:
        msg = f'unknown platform {platform_identifier!r}'
        raise Exception(msg) from e
    return ''.join(parts)
class noop_progress_bar(progress_bar):
    """Progress bar that yields items untouched and reports nothing."""

    def __init__(self, iterable, epoch=None, prefix=None):
        super().__init__(iterable, epoch, prefix)

    def __iter__(self):
        yield from self.iterable

    def log(self, stats, tag=None, step=None):
        # Intentionally a no-op: this bar never emits stats.
        pass

    def print(self, stats, tag=None, step=None):
        # Intentionally a no-op.
        pass
def test_convert_dependencies() -> None:
    """convert_dependencies splits deps into PEP 508 main requirements and
    an extras mapping, pushing python-version markers into the extras keys.

    NOTE(review): several string literals below were truncated by the tool
    that produced this dump (the git URL in the VCSDependency line and in
    `main` is gone, breaking the syntax).  Restore the original URLs from
    upstream before running this test.
    """
    # Case 1: plain constraints (and a VCS dep) all land in `main`.
    package = ProjectPackage('foo', '1.2.3')
    result = SdistBuilder.convert_dependencies(package, [Dependency('A', '^1.0'), Dependency('B', '~1.0'), Dependency('C', '1.2.3'), VCSDependency('D', 'git', ' Dependency('E', '^1.0'), Dependency('F', '^1.0,!=1.3')])
    main = ['A>=1.0,<2.0', 'B>=1.0,<1.1', 'C==1.2.3', 'D git+ 'E>=1.0,<2.0', 'F>=1.0,<2.0,!=1.3']
    extras: dict[(str, Any)] = {}
    assert (result == (main, extras))
    # Case 2: an optional dep belonging to extra 'bar' is moved out of main.
    package = ProjectPackage('foo', '1.2.3')
    package.extras = {canonicalize_name('bar'): [Dependency('A', '*')]}
    result = SdistBuilder.convert_dependencies(package, [Dependency('A', '>=1.2', optional=True), Dependency('B', '~1.0'), Dependency('C', '1.2.3')])
    main = ['B>=1.0,<1.1', 'C==1.2.3']
    extras = {'bar': ['A>=1.2']}
    assert (result == (main, extras))
    # Case 3: python_versions constraints become environment-marker keys,
    # combined with the extra name for optional deps (e.g. 'baz:python_version...').
    c = Dependency('C', '1.2.3')
    c.python_versions = '~2.7 || ^3.6'
    d = Dependency('D', '3.4.5', optional=True)
    d.python_versions = '~2.7 || ^3.4'
    package.extras = {canonicalize_name('baz'): [Dependency('D', '*')]}
    result = SdistBuilder.convert_dependencies(package, [Dependency('A', '>=1.2', optional=True), Dependency('B', '~1.0'), c, d])
    main = ['B>=1.0,<1.1']
    extra_python = ':python_version >= "2.7" and python_version < "2.8" or python_version >= "3.6" and python_version < "4.0"'
    extra_d_dependency = 'baz:python_version >= "2.7" and python_version < "2.8" or python_version >= "3.4" and python_version < "4.0"'
    extras = {extra_python: ['C==1.2.3'], extra_d_dependency: ['D==3.4.5']}
    assert (result == (main, extras))
class ModuleWithRecordsAndDistance(ModuleWithRecords):
    """Record-keeping module that also carries a distance object, defaulting
    to Euclidean (LpDistance with p=2) when none is supplied."""

    def __init__(self, distance=None, **kwargs):
        super().__init__(**kwargs)
        if distance is None:
            distance = self.get_distance()
        self.distance = distance

    def get_default_distance(self):
        return LpDistance(p=2)

    def get_distance(self):
        # Indirection so subclasses can override just get_default_distance.
        return self.get_default_distance()
def _create_translator_gates_field(game: GameDescription, gate_assignment: dict[(NodeIdentifier, Requirement)]) -> list:
    """Serialize each gate assignment to its {gate_index, translator_index} record."""
    fields = []
    for identifier, requirement in gate_assignment.items():
        node = game.region_list.node_by_identifier(identifier)
        fields.append({
            'gate_index': node.extra['gate_index'],
            'translator_index': translator_index_for_requirement(requirement),
        })
    return fields
class Docker(Module):
    """Inspect docker containers through the ``docker`` CLI.

    NOTE(review): decorators were stripped from this dump.  ``@classmethod``
    is restored on the four methods that take ``cls`` (they call ``cls(...)``
    and ``cls.version``, so they cannot work as instance methods); the
    ``inspect()``-derived accessors are restored as read-only ``@property``
    attributes to match that convention — confirm against the original
    project before relying on attribute-style access.
    """

    def __init__(self, name):
        self._name = name
        super().__init__()

    def inspect(self):
        """Return the first record of ``docker inspect <name>`` as a dict."""
        output = self.check_output('docker inspect %s', self._name)
        return json.loads(output)[0]

    @property
    def is_running(self):
        return self.inspect()['State']['Running']

    @property
    def is_restarting(self):
        return self.inspect()['State']['Restarting']

    @property
    def id(self):
        return self.inspect()['Id']

    @property
    def name(self):
        # docker prefixes container names with '/'; strip it.
        return self.inspect()['Name'][1:]

    @classmethod
    def client_version(cls):
        return cls.version('{{.Client.Version}}')

    @classmethod
    def server_version(cls):
        return cls.version('{{.Server.Version}}')

    @classmethod
    def version(cls, format=None):
        """Return ``docker version`` output, optionally through a --format template."""
        cmd = 'docker version'
        if format:
            cmd = "{} --format '{}'".format(cmd, format)
        return cls.check_output(cmd)

    @classmethod
    def get_containers(cls, **filters):
        """Return all containers (as Docker instances) matching `filters`.

        Each keyword becomes a ``--filter key=value``; a list/tuple value
        expands to one --filter per element.
        """
        cmd = "docker ps --all --quiet --format '{{.Names}}'"
        args = []
        for (key, value) in filters.items():
            if isinstance(value, (list, tuple)):
                values = value
            else:
                values = [value]
            for v in values:
                cmd += ' --filter %s=%s'
                args += [key, v]
        result = []
        for docker_id in cls(None).check_output(cmd, *args).splitlines():
            result.append(cls(docker_id))
        return result

    def __repr__(self):
        return '<docker {}>'.format(self._name)
def _print_configs(exp_dir, set_name, model_conf, train_conf, dataset_conf):
    """Log the experiment setup via info().

    Raises ValueError when model and dataset disagree on n_bins.
    """
    if model_conf['n_bins'] != dataset_conf['n_bins']:
        raise ValueError('model and dataset n_bins not matched (%s != %s)' % (model_conf['n_bins'], dataset_conf['n_bins']))
    info('Experiment Directory:\n\t%s' % str(exp_dir))
    info('Set Name:\n\t%s' % str(set_name))
    sections = (
        ('Model Configurations:', model_conf),
        ('Training Configurations:', train_conf),
        ('Dataset Configurations:', dataset_conf),
    )
    for title, conf in sections:
        info(title)
        for key, val in sorted(conf.items()):
            info('\t%s : %s' % (key.ljust(20), val))
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract [error_line, error, failed_test, job_link] rows from one CI artifact zip.

    The zip is expected to pair each entry of failures_line.txt with one
    FAILED entry of summary_short.txt; a mismatch raises ValueError.
    """
    errors = []
    failed_tests = []
    job_name = None
    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if os.path.isdir(filename):
                continue
            if filename not in ['failures_line.txt', 'summary_short.txt', 'job_name.txt']:
                continue
            with z.open(filename) as f:
                for raw in f:
                    line = raw.decode('UTF-8').strip()
                    if filename == 'failures_line.txt':
                        try:
                            # "<error type>: <message>"
                            sep = line.index(': ')
                            errors.append([line[:sep], line[sep + len(': '):]])
                        except Exception:
                            # Line without the expected shape; skip it.
                            pass
                    elif filename == 'summary_short.txt' and line.startswith('FAILED '):
                        failed_tests.append(line[len('FAILED '):])
                    elif filename == 'job_name.txt':
                        job_name = line
    if len(errors) != len(failed_tests):
        raise ValueError(f'`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some problem.')
    job_link = job_links.get(job_name, None) if (job_name and job_links) else None
    return [error + [failed, job_link] for (error, failed) in zip(errors, failed_tests)]
def test_shared_dependencies_with_overlapping_constraints(root: ProjectPackage, provider: Provider, repo: Repository) -> None:
    """Solver must pick a 'shared' version inside the intersection of both ranges.

    'a' allows shared >=2.0.0 <4.0.0 and 'b' allows >=3.0.0 <5.0.0; the
    overlap is [3.0.0, 4.0.0), so the newest candidate in that window
    (3.6.9) must win.
    """
    root.add_dependency(Factory.create_dependency('a', '1.0.0'))
    root.add_dependency(Factory.create_dependency('b', '1.0.0'))
    add_to_repo(repo, 'a', '1.0.0', deps={'shared': '>=2.0.0 <4.0.0'})
    add_to_repo(repo, 'b', '1.0.0', deps={'shared': '>=3.0.0 <5.0.0'})
    # 2.0.0 violates 'b'; 4.0.0 and 5.0.0 violate 'a'.
    add_to_repo(repo, 'shared', '2.0.0')
    add_to_repo(repo, 'shared', '3.0.0')
    add_to_repo(repo, 'shared', '3.6.9')
    add_to_repo(repo, 'shared', '4.0.0')
    add_to_repo(repo, 'shared', '5.0.0')
    check_solver_result(root, provider, {'a': '1.0.0', 'b': '1.0.0', 'shared': '3.6.9'})
from dataclasses import dataclass


# NOTE(review): the decorator on this class survives in the dump only as
# "(frozen=True)"; it is restored here as @dataclass(frozen=True), which
# matches the bare `value: Any` field declaration -- confirm against the
# original project.
@dataclass(frozen=True)
class Ge(AnnotatedTypesCheck):
    """Check that a value is greater than or equal to ``self.value``."""

    value: Any

    def predicate(self, value: Any) -> bool:
        # True when the candidate satisfies the >= bound.
        return (value >= self.value)

    def is_compatible_metadata(self, metadata: AnnotatedTypesCheck) -> bool:
        """Return True when `metadata` already guarantees this Ge bound.

        Gt(m) implies value > m and Ge(m) implies value >= m; either
        guarantees value >= self.value whenever m >= self.value.
        """
        if isinstance(metadata, Gt):
            return (metadata.value >= self.value)
        elif isinstance(metadata, Ge):
            return (metadata.value >= self.value)
        else:
            return False
# NOTE(review): the decorator names were stripped in this dump to
# "_canonicalize", "_specialize" and "_rewriter([Elemwise])"; they are
# restored to PyTensor's register_canonicalize / register_specialize /
# node_rewriter, which match the remnants exactly.
@register_canonicalize
@register_specialize
@node_rewriter([Elemwise])
def local_exp_log(fgraph, node):
    """Simplify nested exp/log Elemwise graphs.

    Rewrites performed (visible in the branches below):
      log(exp(x))       -> x
      log1p(expm1(x))   -> x
      exp(softplus(x))  -> 1 + exp(x)
      expm1(softplus(x))-> exp(x)
    Returns None (implicitly) when the pattern does not match.
    """
    x = node.inputs[0]
    if (not isinstance(node.op, Elemwise)):
        return
    if ((not x.owner) or (not isinstance(x.owner.op, Elemwise))):
        return
    prev_op = x.owner.op.scalar_op
    node_op = node.op.scalar_op
    if (isinstance(prev_op, ps.Exp) and isinstance(node_op, ps.Log)):
        new_out = x.owner.inputs[0]
        old_out = node.outputs[0]
        # Keep the replacement dtype identical to the node being replaced.
        if (new_out.dtype != old_out.dtype):
            new_out = cast(new_out, old_out.dtype)
        return [new_out]
    if (isinstance(prev_op, ps.Expm1) and isinstance(node_op, ps.Log1p)):
        new_out = x.owner.inputs[0]
        old_out = node.outputs[0]
        if (new_out.dtype != old_out.dtype):
            new_out = cast(new_out, old_out.dtype)
        return [new_out]
    if (isinstance(prev_op, ps_math.Softplus) and isinstance(node_op, ps.Exp)):
        x = x.owner.inputs[0]
        return [add(1, exp(x))]
    if (isinstance(prev_op, ps_math.Softplus) and isinstance(node_op, ps.Expm1)):
        x = x.owner.inputs[0]
        return [exp(x)]
# NOTE(review): the decorator survives in this dump only as "_macro";
# restored as PyXLL's @xl_macro, which matches the remnant -- confirm
# against the original module's imports.
@xl_macro
def OpenJupyterNotebook(path=None, browser=False):
    """Open a Jupyter notebook (or a directory picker) from Excel.

    `path` may be absolute, or relative to the active workbook's folder;
    a directory opens the file browser, a file opens that notebook.
    Alerts and re-raises on any failure.
    """
    try:
        if path:
            if (not os.path.isabs(path)):
                # Resolve relative paths against the active workbook, if any.
                xl = xl_app(com_package='win32com')
                wb = xl.ActiveWorkbook
                if ((wb is not None) and wb.FullName and os.path.exists(wb.FullName)):
                    abs_path = os.path.join(os.path.dirname(wb.FullName), path)
                    if os.path.exists(abs_path):
                        path = abs_path
            if (not os.path.exists(path)):
                raise RuntimeError(f"Path '{path}' not found.")
        initial_path = None
        notebook_path = None
        if path:
            if os.path.isdir(path):
                initial_path = path
            elif os.path.isfile(path):
                notebook_path = path
            else:
                raise RuntimeError(f"Something is wrong with the path '{path}'.")
        open_jupyter = (open_jupyter_notebook_in_browser if browser else open_jupyter_notebook)
        # Defer the actual open to Excel's scheduler thread.
        schedule_call(partial(open_jupyter, initial_path=initial_path, notebook_path=notebook_path))
        return True
    except Exception as e:
        xlcAlert(f'Error opening Jupyter notebook: {e}')
        raise
class GroupBadgeManager(BadgeRenderMixin, CRUDMixin, RESTManager):
    """CRUD manager for GitLab group badges (/groups/:id/badges)."""

    _path = '/groups/{group_id}/badges'
    _obj_cls = GroupBadge
    _from_parent_attrs = {'group_id': 'id'}
    _create_attrs = RequiredOptional(required=('link_url', 'image_url'))
    _update_attrs = RequiredOptional(optional=('link_url', 'image_url'))

    def get(self, id: Union[(str, int)], lazy: bool=False, **kwargs: Any) -> GroupBadge:
        """Retrieve a single badge; the cast only narrows the static type."""
        return cast(GroupBadge, super().get(id=id, lazy=lazy, **kwargs))
class TestSessionReports():

    def test_collect_result(self, pytester: Pytester) -> None:
        """Collecting a module yields a passing report listing its two items."""
        collector = pytester.getmodulecol('\n def test_func1():\n pass\n class TestClass(object):\n pass\n ')
        report = runner.collect_one_node(collector)
        assert not report.failed
        assert not report.skipped
        assert report.passed
        location = report.location
        assert location is not None
        # location is (fspath, lineno, domain); a module points at itself.
        assert location[0] == collector.path.name
        assert not location[1]
        assert location[2] == collector.path.name
        collected = report.result
        assert len(collected) == 2
        assert collected[0].name == 'test_func1'
        assert collected[1].name == 'TestClass'
def test_model_policy_gradient():
    """The optimizer should drive a random 5-d start close to the minimum of
    sum-of-squares (the origin, objective value 0)."""
    x0 = np.random.randn(5)
    settings = dict(
        learning_rate=0.1,
        decay_rate=0.96,
        decay_steps=10,
        log_sigma_init=-6.0,
        max_iterations=120,
        batch_size=30,
        radius_coeff=3.0,
        warmup_steps=10,
        known_values=None,
    )
    result = model_policy_gradient(sum_of_squares, x0, **settings)
    np.testing.assert_allclose(result.x, np.zeros(len(result.x)), atol=0.01)
    np.testing.assert_allclose(result.fun, 0, atol=1e-07)
    assert isinstance(result.nfev, int)
def _initial_iv_params(ivcurves, ee, voc, isc, rsh, nnsvth):
    """Estimate initial single-diode parameters (iph, io, rs) per IV curve.

    For each curve j with positive rsh, io is derived from isc/voc/rsh, rs
    from the mean of the positive candidate values computed on the mid/high
    voltage region of the curve, and iph from the closed-form expression at
    short circuit.  Curves with rsh <= 0 get NaN for all three.  Returns
    (iph, io, rs, u) where u is the validity mask from _filter_params.

    NOTE(review): assumes ivcurves['v']/['i'] are per-curve arrays and that
    nnsvth is n*Ns*Vth -- confirm against the caller.
    """
    n = len(ivcurves['v_oc'])
    io = np.ones(n)
    iph = np.ones(n)
    rs = np.ones(n)
    for j in range(n):
        if (rsh[j] > 0):
            (volt, curr) = rectify_iv_curve(ivcurves['v'][j], ivcurves['i'][j])
            # Diode saturation current from the open-circuit condition.
            io[j] = ((isc[j] - (voc[j] / rsh[j])) * np.exp(((- voc[j]) / nnsvth[j])))
            [didv, d2id2v] = _numdiff(volt, curr)
            # Restrict to 0.5*Voc < V < 0.9*Voc with a valid, positive slope term.
            t3 = (volt > (0.5 * voc[j]))
            t4 = (volt < (0.9 * voc[j]))
            tmp = (((- rsh[j]) * didv) - 1.0)
            with np.errstate(invalid='ignore'):
                v = np.logical_and.reduce(np.array([t3, t4, (~ np.isnan(tmp)), np.greater(tmp, 0)]))
            if np.any(v):
                vtrs = ((nnsvth[j] / isc[j]) * (np.log(((tmp[v] * nnsvth[j]) / (rsh[j] * io[j]))) - (volt[v] / nnsvth[j])))
                # Average only the physically meaningful (positive) candidates.
                rs[j] = np.mean(vtrs[(vtrs > 0)], axis=0)
            else:
                rs[j] = 0.0
            # Photocurrent from the short-circuit condition.
            iph[j] = ((isc[j] + (io[j] * np.expm1((isc[j] / nnsvth[j])))) + ((isc[j] * rs[j]) / rsh[j]))
        else:
            io[j] = np.nan
            rs[j] = np.nan
            iph[j] = np.nan
    # Refine io on the curves that pass the filter, then recompute iph vectorized.
    u = _filter_params(ee, isc, io, rs, rsh)
    io[u] = _update_io(voc[u], iph[u], io[u], rs[u], rsh[u], nnsvth[u])
    iph = ((isc + (io * np.expm1(((rs * isc) / nnsvth)))) + ((isc * rs) / rsh))
    return (iph, io, rs, u)
def read_file():
    """Load `filename` into the module-level `list`, line by line.

    Side effects: appends every line (newline included) to the global
    `list`, sets `sequence_num` to the number of lines read, and prints it.
    Using ``with`` guarantees the file handle is closed even on error
    (the original leaked it on exceptions).
    """
    # NOTE(review): `list` shadows the builtin; kept because other module
    # code shares these globals.  `sigmasize` is declared but unused here.
    global list
    global sequence_num
    global sigmasize
    global filename
    sequence_num = 0
    with open(filename) as f:
        for line in f:
            list.append(line)
            sequence_num += 1
    print(sequence_num)
def main(args):
    """Build Kaldi-style data files from TextGrid annotations.

    Reads wav.scp and textgrid.flist under args.path, slices every TextGrid
    tier into Segment objects, merges/splits them with preposs_overlap, and
    writes segments_all, utt2spk_all, text_all_old, text_all and utt2spk_new.

    NOTE(review): the codecs file handles are not wrapped in try/finally or
    ``with``; an exception mid-way leaks the open handles.
    """
    wav_scp = codecs.open((Path(args.path) / 'wav.scp'), 'r', 'utf-8')
    textgrid_flist = codecs.open((Path(args.path) / 'textgrid.flist'), 'r', 'utf-8')
    # Map utterance id -> TextGrid path from the flist ("<uttid> <path>" lines).
    utt2textgrid = {}
    for line in textgrid_flist:
        lines = line.strip().split(' ')
        path = Path(lines[1])
        uttid = lines[0]
        utt2textgrid[uttid] = path
    all_segments = []
    for line in wav_scp:
        uttid = line.strip().split(' ')[0]
        uttid_part = uttid
        if (uttid_part not in utt2textgrid):
            print(("%s doesn't have transcription" % uttid))
            continue
        # Collect one Segment per non-empty interval, across all tiers.
        segments = []
        tg = textgrid.TextGrid.fromFile(utt2textgrid[uttid_part])
        for i in range(tg.__len__()):
            for j in range(tg[i].__len__()):
                if tg[i][j].mark:
                    segments.append(Segment(uttid, tg[i].name, tg[i][j].minTime, tg[i][j].maxTime, tg[i][j].mark.strip()))
        # Sort by start time, then merge/split overlapping regions.
        segments = sorted(segments, key=(lambda x: x.stime))
        segments = preposs_overlap(segments, args.max_length, args.overlap_length)
        all_segments += segments
    wav_scp.close()
    textgrid_flist.close()
    segments_file = codecs.open((Path(args.path) / 'segments_all'), 'w', 'utf-8')
    utt2spk_file = codecs.open((Path(args.path) / 'utt2spk_all'), 'w', 'utf-8')
    text_file = codecs.open((Path(args.path) / 'text_all_old'), 'w', 'utf-8')
    text_file_new = codecs.open((Path(args.path) / 'text_all'), 'w', 'utf-8')
    utt2spk_file_new = codecs.open((Path(args.path) / 'utt2spk_new'), 'w', 'utf-8')
    for i in range(len(all_segments)):
        # Utterance name encodes start/end times in centiseconds.
        utt_name = ('%s-%s-%07d-%07d' % (all_segments[i].uttid, all_segments[i].spkr, (all_segments[i].stime * 100), (all_segments[i].etime * 100)))
        segments_file.write(('%s %s %.2f %.2f\n' % (utt_name, all_segments[i].uttid, all_segments[i].stime, all_segments[i].etime)))
        utt2spk_file.write(('%s %s-%s\n' % (utt_name, all_segments[i].uttid, all_segments[i].spkr)))
        text_file.write(('%s %s\n' % (utt_name, all_segments[i].text)))
        # 'src' appears to act as a field separator between per-speaker
        # entries; the first (leading) one is stripped below -- TODO confirm.
        spk_array = ''
        text_array = ''
        for (key, value) in all_segments[i].spk_text.items():
            if (value != ''):
                spk_array += ('src' + key)
                text_array += ('src' + value)
        if (spk_array != ''):
            spk_array = spk_array[3:]
            text_array = text_array[3:]
        utt2spk_file_new.write(('%s %s\n' % (utt_name, spk_array)))
        text_file_new.write(('%s %s\n' % (utt_name, text_array)))
    segments_file.close()
    utt2spk_file.close()
    text_file.close()
    utt2spk_file_new.close()
    text_file_new.close()
class ACCESS_ALLOWED_ACE(ACE):
    """ACCESS_ALLOWED access-control entry: 4-byte header, 4-byte access
    mask, then the trustee SID."""

    def __init__(self):
        self.AceType = ACEType.ACCESS_ALLOWED_ACE_TYPE
        self.AceFlags = None
        self.AceSize = 0
        self.Mask = None
        self.Sid = None
        self.sd_object_type = None

    # NOTE(review): restored @staticmethod -- the method takes no self/cls
    # and builds a fresh instance, so it cannot work as an instance method.
    @staticmethod
    def from_buffer(buff, sd_object_type=None):
        """Parse one ACE from `buff` (all fields little-endian)."""
        ace = ACCESS_ALLOWED_ACE()
        ace.sd_object_type = (SE_OBJECT_TYPE(sd_object_type) if sd_object_type else None)
        ace.AceType = ACEType(int.from_bytes(buff.read(1), 'little', signed=False))
        ace.AceFlags = AceFlags(int.from_bytes(buff.read(1), 'little', signed=False))
        ace.AceSize = int.from_bytes(buff.read(2), 'little', signed=False)
        ace.Mask = int.from_bytes(buff.read(4), 'little', signed=False)
        ace.Sid = SID.from_buffer(buff)
        return ace

    def to_buffer(self, buff):
        """Serialize: mask+SID are padded first so AceSize (header 4 bytes +
        payload) can be written before the payload."""
        t = self.Mask.to_bytes(4, 'little', signed=False)
        t += self.Sid.to_bytes()
        t = ACE.add_padding(t)
        self.AceSize = (4 + len(t))
        buff.write(self.AceType.value.to_bytes(1, 'little', signed=False))
        buff.write(self.AceFlags.to_bytes(1, 'little', signed=False))
        buff.write(self.AceSize.to_bytes(2, 'little', signed=False))
        buff.write(t)

    def to_ssdl(self, sd_object_type=None):
        # SDDL ACE string: (type;flags;rights;object_guid;inherit_guid;sid)
        return ('(%s;%s;%s;%s;%s;%s)' % (SSDL_ACE_TYPE_MAPS_INV[self.AceType], aceflags_to_ssdl(self.AceFlags), accessmask_to_sddl(self.Mask, self.sd_object_type), '', '', self.Sid.to_ssdl()))

    def to_dict(self, sd_object_type=None):
        return {'ace_type': SSDL_ACE_TYPE_MAPS_INV[self.AceType], 'ace_flags': aceflags_to_ssdl(self.AceFlags), 'rights': accessmask_to_sddl(self.Mask, self.sd_object_type), 'object_guid': '', 'inherit_object_guid': '', 'account_sid': str(self.Sid)}

    def __str__(self):
        t = 'ACCESS_ALLOWED_ACE\r\n'
        t += ('Flags: %s\r\n' % str(self.AceFlags))
        t += ('Sid: %s\r\n' % self.Sid)
        t += ('Mask: %s\r\n' % mask_to_str(self.Mask, self.sd_object_type))
        return t
def get_md_entry(DB, entry, add_comments=True):
    """Render one BibTeX entry as a Markdown list item.

    NOTE(review): several string literals below were truncated by the tool
    that produced this dump (`tag = '`, `img_link = f'` and the URL inside
    `gs_link`), leaving invalid syntax.  Restore the original URL strings
    from upstream before using this function.
    """
    md_str = '\n'
    md_str += '- '
    # Venue tag: booktitle and/or journal, normalized for use in a link.
    venue = ''
    year = ''
    if ('booktitle' in entry.keys()):
        venue = entry['booktitle'].replace('Proceedings of ', '')
    if ('journal' in entry.keys()):
        venue += entry['journal'].replace('{', '').replace('}', '')
    venue = venue.replace(' ', '_').replace('-', '_')
    if ('year' in entry.keys()):
        year = entry['year']
    if ((venue != '') or (year != '')):
        tag = '
        if ('url' not in entry.keys()):
            print(entry['ID'])
        tag = '[{}]({})'.format(tag, entry['url'])
        md_str += '{}'.format(tag)
    else:
        md_str += ''
    # Clean the title of BibTeX braces.
    paper_title = entry['title'].replace('{', '')
    paper_title = paper_title.replace('}', '')
    paper_title = paper_title.strip()
    img_link = f'
    gs_link = (' + '+'.join(paper_title.split()))
    md_str += f'<a href="{gs_link}"><img src="{img_link}" height="18" align="bottom"></a>'
    if ('url' in entry.keys()):
        md_str += ((((' [**' + paper_title) + '**](') + entry['url']) + ') ')
    else:
        md_str += ((' **' + paper_title) + '**')
    md_str += ', <br>'
    md_str += ((' by *' + keep_last_and_only(entry['author'])) + '*')
    md_str += ((' [[bib]](' + create_bib_link(entry['ID'])) + ') ')
    if add_comments:
        # Optional collapsible comment block sourced from the DB @string entries.
        if (entry['ID'].lower() in DB.strings):
            md_str += '```'
            md_str += DB.strings[entry['ID'].lower()]
            md_str += '\n```'
            md_str += '</details>'
        img_link = os.path.join(base_link, 'scripts/svg/copy_icon.png')
        md_str += f'<details><summary><img src={img_link} height="20" align="bottom"></summary>'
        md_str += f"<pre>```{entry['ID']}```"
    return md_str
def set_tensor(module: 'torch.nn.Module', name: str, tensor: torch.Tensor) -> None:
    """Replace attribute `name` on `module`, preserving its role.

    Parameters become registered parameters, values replacing a buffer stay
    buffers, everything else lands in the instance ``__dict__``.
    """
    if name in module._parameters:
        del module._parameters[name]
    was_buffer = name in module._buffers
    if was_buffer:
        del module._buffers[name]
    if isinstance(tensor, nn.Parameter):
        # Drop any shadowing plain attribute before registering.
        module.__dict__.pop(name, None)
        _register_params(module, name, tensor)
        return
    if was_buffer and isinstance(tensor, Tensor):
        module._buffers[name] = tensor
        return
    module.__dict__[name] = tensor
def info(filepath: Union[(str, Path)]) -> Dict[(str, Union[(str, Number)])]:
    """Probe an audio file once per metadata field and collect the results."""
    probes = (
        ('channels', channels),
        ('sample_rate', sample_rate),
        ('bitdepth', bitdepth),
        ('bitrate', bitrate),
        ('duration', duration),
        ('num_samples', num_samples),
        ('encoding', encoding),
        ('silent', silent),
    )
    return {field: probe(filepath) for (field, probe) in probes}
class RealtimeHandler(EventsDemoHandler):
    """Fetches the realtime events feed and renders it as simple HTML."""

    # NOTE(review): the decorator survives in this dump only as ".coroutine";
    # restored as tornado's @gen.coroutine -- confirm the module imports
    # `from tornado import gen`.
    @gen.coroutine
    def get(self):
        url = f'{options.admin_endpoint_base}/events/1/events/realtime/get'
        params = {'company_id': options.company_id, 'cursor': self.next_cursor()}
        response = requests.request('GET', url, headers=self.authorized_headers(), params=params)
        raw_html_output = self.pretty_html_formatting(response)
        if options.enable_audit_for_realtime:
            # Optionally record every fetched event in the audit trail.
            for event in response.json()['events']:
                audit_event(self, event)
        self.write(raw_html_output)

    def pretty_html_formatting(self, response):
        """Render the JSON response with a next-page link and highlighted "event" keys."""
        response_json = response.json()
        next_events_url = f"{self.reverse_url('realtime')}?cursor={response_json['next_cursor']}"
        pretty_response = json.dumps(response_json, sort_keys=True, indent=4).replace('"event"', '<span style="background-color:#00FEFE">"event"</span>')
        return f'''
        <a href={next_events_url}>Fetch Next Events</a>
        </br>
        </br>
        <h3>Raw JSON response:</h3>
        <pre>{pretty_response}</pre>
        '''
def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module, data_loader: Iterable, optimizer: torch.optim.Optimizer, device: torch.device, epoch: int, loss_scaler, max_norm: float=0, model_ema: Optional[ModelEma]=None, mixup_fn: Optional[Mixup]=None, log_writer=None, wandb_logger=None, start_steps=None, lr_schedule_values=None, wd_schedule_values=None, num_training_steps_per_epoch=None, update_freq=None, use_amp=False):
    """Train `model` for one epoch with gradient accumulation and return the
    averaged metrics as {name: global_avg}.

    `update_freq` batches are accumulated per optimizer step; per-step LR and
    weight-decay schedules are indexed by the global step `start_steps + step`.
    """
    model.train(True)
    metric_logger = utils.MetricLogger(delimiter='  ')
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    metric_logger.add_meter('min_lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    header = 'Epoch: [{}]'.format(epoch)
    print_freq = 200
    optimizer.zero_grad()
    for (data_iter_step, (samples, targets)) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
        # One "step" = update_freq accumulated batches.
        step = (data_iter_step // update_freq)
        if (step >= num_training_steps_per_epoch):
            continue
        it = (start_steps + step)
        # NOTE(review): this condition groups as
        #   lr is not None  OR  (wd is not None AND iter % update_freq == 0)
        # so the LR branch runs on every accumulation micro-batch -- looks
        # like a precedence slip (harmless since `it` is constant within a
        # step); confirm against upstream before changing.
        if ((lr_schedule_values is not None) or ((wd_schedule_values is not None) and ((data_iter_step % update_freq) == 0))):
            for (i, param_group) in enumerate(optimizer.param_groups):
                if (lr_schedule_values is not None):
                    param_group['lr'] = (lr_schedule_values[it] * param_group['lr_scale'])
                if ((wd_schedule_values is not None) and (param_group['weight_decay'] > 0)):
                    param_group['weight_decay'] = wd_schedule_values[it]
        samples = samples.to(device, non_blocking=True)
        targets = targets.to(device, non_blocking=True)
        if (mixup_fn is not None):
            (samples, targets) = mixup_fn(samples, targets)
        if use_amp:
            with torch.cuda.amp.autocast():
                output = model(samples)
                loss = criterion(output, targets)
        else:
            output = model(samples)
            loss = criterion(output, targets)
        loss_value = loss.item()
        # Abort on NaN/inf loss rather than training on garbage.
        if (not math.isfinite(loss_value)):
            print('Loss is {}, stopping training'.format(loss_value))
            assert math.isfinite(loss_value)
        if use_amp:
            # AMP path: loss_scaler handles backward, clipping and stepping;
            # gradients are only applied every update_freq iterations.
            is_second_order = (hasattr(optimizer, 'is_second_order') and optimizer.is_second_order)
            loss /= update_freq
            grad_norm = loss_scaler(loss, optimizer, clip_grad=max_norm, parameters=model.parameters(), create_graph=is_second_order, update_grad=(((data_iter_step + 1) % update_freq) == 0))
            if (((data_iter_step + 1) % update_freq) == 0):
                optimizer.zero_grad()
                if (model_ema is not None):
                    model_ema.update(model)
        else:
            # Full-precision path: plain backward + step on accumulation boundary.
            loss /= update_freq
            loss.backward()
            if (((data_iter_step + 1) % update_freq) == 0):
                optimizer.step()
                optimizer.zero_grad()
                if (model_ema is not None):
                    model_ema.update(model)
        torch.cuda.synchronize()
        # Batch accuracy is only well-defined without mixup's soft targets.
        if (mixup_fn is None):
            class_acc = (output.max((- 1))[(- 1)] == targets).float().mean()
        else:
            class_acc = None
        metric_logger.update(loss=loss_value)
        metric_logger.update(class_acc=class_acc)
        min_lr = 10.0
        max_lr = 0.0
        for group in optimizer.param_groups:
            min_lr = min(min_lr, group['lr'])
            max_lr = max(max_lr, group['lr'])
        metric_logger.update(lr=max_lr)
        metric_logger.update(min_lr=min_lr)
        weight_decay_value = None
        for group in optimizer.param_groups:
            if (group['weight_decay'] > 0):
                weight_decay_value = group['weight_decay']
        metric_logger.update(weight_decay=weight_decay_value)
        if use_amp:
            metric_logger.update(grad_norm=grad_norm)
        if (log_writer is not None):
            log_writer.update(loss=loss_value, head='loss')
            log_writer.update(class_acc=class_acc, head='loss')
            log_writer.update(lr=max_lr, head='opt')
            log_writer.update(min_lr=min_lr, head='opt')
            log_writer.update(weight_decay=weight_decay_value, head='opt')
            if use_amp:
                log_writer.update(grad_norm=grad_norm, head='opt')
            log_writer.set_step()
        if wandb_logger:
            wandb_logger._wandb.log({'Rank-0 Batch Wise/train_loss': loss_value, 'Rank-0 Batch Wise/train_max_lr': max_lr, 'Rank-0 Batch Wise/train_min_lr': min_lr}, commit=False)
            if class_acc:
                wandb_logger._wandb.log({'Rank-0 Batch Wise/train_class_acc': class_acc}, commit=False)
            if use_amp:
                wandb_logger._wandb.log({'Rank-0 Batch Wise/train_grad_norm': grad_norm}, commit=False)
            wandb_logger._wandb.log({'Rank-0 Batch Wise/global_train_step': it})
    # Aggregate meters across distributed workers before reporting.
    metric_logger.synchronize_between_processes()
    print('Averaged stats:', metric_logger)
    return {k: meter.global_avg for (k, meter) in metric_logger.meters.items()}
def save_val_samples_funieGAN(samples_dir, gen_imgs, step, N_samples=3, N_ims=3):
    """Save a validation image grid as <samples_dir>/<step>.png.

    Columns are titled Input / Generated / Original; `gen_imgs` is consumed
    column by column, top to bottom.
    """
    titles = ['Input', 'Generated', 'Original']
    fig, axs = plt.subplots(N_samples, N_ims)
    idx = 0
    for col in range(N_ims):
        for row in range(N_samples):
            cell = axs[row, col]
            cell.imshow(gen_imgs[idx])
            cell.set_title(titles[col])
            cell.axis('off')
            idx += 1
    fig.savefig(os.path.join(samples_dir, '%d.png' % step))
    plt.close()
from typing import overload


class A():
    """Documentation/typing test target.

    NOTE(review): decorators were stripped from this dump.  @property,
    @overload and @classmethod are restored below by inference from the
    signatures (stub bodies of `...` followed by an implementation, and
    `cls` first parameters) -- confirm against the original file.
    """

    is_an_a: ClassVar[bool] = True
    not_assigned_to: ClassVar[str]

    def __init__(self):
        self.instance_var: bool = True

    async def async_method(self, wait: bool) -> int:
        if wait:
            (await asyncio.sleep(1))
        return 5

    @property
    def my_prop(self) -> str:
        return 'prop'

    def my_method(self) -> str:
        return 'method'

    @overload
    def overloaded_method(self, a: float) -> float:
        ...

    @overload
    def overloaded_method(self, a: str) -> str:
        ...

    def overloaded_method(self, a: Union[(float, str)]) -> Union[(float, str)]:
        return (a * 2)

    @overload
    def undoc_overloaded_method(self, a: float) -> float:
        ...

    def undoc_overloaded_method(self, a: float) -> float:
        return (a * 2)

    @overload
    @classmethod
    def overloaded_class_method(cls, a: float) -> float:
        ...

    @overload
    @classmethod
    def overloaded_class_method(cls, a: str) -> str:
        ...

    @classmethod
    def overloaded_class_method(cls, a: Union[(float, str)]) -> Union[(float, str)]:
        return (a * 2)
# NOTE(review): the decorator survives in this dump only as
# "('/api/conversations/get_conversation_list', methods=['POST'])";
# restored as Flask's @app.route, which matches the remnant.
@app.route('/api/conversations/get_conversation_list', methods=['POST'])
def get_conversation_list() -> Response:
    """Return the conversations belonging to the posted user_id as JSON.

    Falls back to DEFAULT_USER_ID when the request omits user_id; any
    storage failure yields a Response with an INTERNAL error status.
    """
    request_json = request.get_json()
    user_id = request_json.pop('user_id', DEFAULT_USER_ID)
    conversations = []
    try:
        db = get_user_conversation_storage()
        conversation_list = db.conversation.find({'user_id': user_id})
        for conversation in conversation_list:
            # Mongo's ObjectId is not JSON-serializable; stringify it.
            conversations.append({'id': str(conversation['_id']), 'name': conversation['name'], 'folderId': conversation['folder_id']})
    except Exception as e:
        return Response(response=None, status=f'{INTERNAL} error fetch conversation list')
    return jsonify(conversations)
def _setup_single_view_dispatcher_route(constructor: ComponentConstructor, options: Options) -> _RouteHandlerSpecs:
    """Route the stream path (with or without a trailing sub-path) to the
    model stream handler for the given component."""
    handler_kwargs = {'component_constructor': constructor, 'url_prefix': options.url_prefix}
    # Each route gets its own kwargs dict, matching the original behavior.
    return [
        (f'{STREAM_PATH}/(.*)', ModelStreamHandler, dict(handler_kwargs)),
        (str(STREAM_PATH), ModelStreamHandler, dict(handler_kwargs)),
    ]
class XppLexer(RegexLexer):
    """Pygments lexer for X++ (Dynamics 365 Finance & Operations).

    NOTE(review): this dump is damaged -- the `url` string literal was
    truncated (upstream Pygments points it at the Microsoft X++ language
    reference on learn.microsoft.com), and leading '@' characters appear to
    have been stripped inside regex strings (e.g. XPP_CHARS starting with
    "?(?:_|[^" where the C#-style lexers use "@?(?:_|[^", and the plain
    '"(""|[^"])*"' verbatim-string rule).  Restore from upstream Pygments
    before use.
    """

    name = 'X++'
    url = '
    aliases = ['xpp', 'x++']
    filenames = ['*.xpp']
    version_added = '2.15'
    flags = re.MULTILINE
    # Identifier pattern built from Unicode categories (letters, then
    # letters/digits/connector punctuation etc.).
    XPP_CHARS = ((((('?(?:_|[^' + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl')) + '])') + '[^') + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl', 'Nd', 'Pc', 'Cf', 'Mn', 'Mc')) + ']*')
    XPP_CHARS = XPP_CHARS.replace('\x00', '\x01')
    OPERATORS = ('<=', '>=', '+=', '-=', '*=', '/=', '!=', '==', '&&', '||', '>>', '<<', '++', '--', '+', '-', '*', '/', '%', '&', '|', '^', '<', '>', '?', '!', '~', '=')
    KEYWORDS = ('abstract', 'anytype', 'as', 'async', 'asc', 'at', 'avg', 'break', 'breakpoint', 'by', 'byref', 'case', 'catch', 'changecompany', 'client', 'container', 'continue', 'count', 'crosscompany', 'default', 'delegate', 'delete_from', 'desc', 'display', 'div', 'do', 'edit', 'else', 'element', 'eventhandler', 'exists', 'false', 'final', 'firstfast', 'firstonly', 'firstonly10', 'firstonly100', 'firstonly1000', 'flush', 'for', 'forceliterals', 'forcenestedloop', 'forceplaceholders', 'forceselectorder', 'forupdate', 'from', 'group', 'if', 'insert_recordset', 'interface', 'is', 'join', 'like', 'maxof', 'minof', 'mod', 'new', 'next', 'nofetch', 'notexists', 'null', 'optimisticlock', 'order', 'outer', 'pause', 'pessimisticlock', 'print', 'private', 'protected', 'public', 'repeatableread', 'retry', 'return', 'reverse', 'select', 'server', 'setting', 'static', 'sum', 'super', 'switch', 'tablelock', 'this', 'throw', 'true', 'try', 'ttsabort', 'ttsbegin', 'ttscommit', 'update_recordset', 'validtimestate', 'void', 'where', 'while', 'window')
    # Built-in functions evaluated at runtime vs. at compile time.
    RUNTIME_FUNCTIONS = ('_duration', 'abs', 'acos', 'any2Date', 'any2Enum', 'any2Guid', 'any2Int', 'any2Int64', 'any2Real', 'any2Str', 'anytodate', 'anytoenum', 'anytoguid', 'anytoint', 'anytoint64', 'anytoreal', 'anytostr', 'asin', 'atan', 'beep', 'cTerm', 'char2Num', 'classIdGet', 'corrFlagGet', 'corrFlagSet', 'cos', 'cosh', 'curExt', 'curUserId', 'date2Num', 'date2Str', 'datetime2Str', 'dayName', 'dayOfMth', 'dayOfWk', 'dayOfYr', 'ddb', 'decRound', 'dg', 'dimOf', 'endMth', 'enum2str', 'exp', 'exp10', 'fV', 'fieldId2Name', 'fieldId2PName', 'fieldName2Id', 'frac', 'funcName', 'getCurrentPartition', 'getCurrentPartitionRecId', 'getPrefix', 'guid2Str', 'idg', 'indexId2Name', 'indexName2Id', 'int2Str', 'int642Str', 'intvMax', 'intvName', 'intvNo', 'intvNorm', 'log10', 'logN', 'match', 'max', 'min', 'mkDate', 'mthName', 'mthOfYr', 'newGuid', 'nextMth', 'nextQtr', 'nextYr', 'num2Char', 'num2Date', 'num2Str', 'pmt', 'power', 'prevMth', 'prevQtr', 'prevYr', 'prmIsDefault', 'pt', 'pv', 'rate', 'refPrintAll', 'round', 'runAs', 'sessionId', 'setPrefix', 'sin', 'sinh', 'sleep', 'sln', 'str2Date', 'str2Datetime', 'str2Enum', 'str2Guid', 'str2Int', 'str2Int64', 'str2Num', 'str2Time', 'strAlpha', 'strCmp', 'strColSeq', 'strDel', 'strFind', 'strFmt', 'strIns', 'strKeep', 'strLTrim', 'strLen', 'strLine', 'strLwr', 'strNFind', 'strPoke', 'strPrompt', 'strRTrim', 'strRem', 'strRep', 'strScan', 'strUpr', 'subStr', 'syd', 'systemDateGet', 'systemDateSet', 'tableId2Name', 'tableId2PName', 'tableName2Id', 'tan', 'tanh', 'term', 'time2Str', 'timeNow', 'today', 'trunc', 'typeOf', 'uint2Str', 'wkOfYr', 'year')
    COMPILE_FUNCTIONS = ('attributeStr', 'classNum', 'classStr', 'configurationKeyNum', 'configurationKeyStr', 'dataEntityDataSourceStr', 'delegateStr', 'dimensionHierarchyLevelStr', 'dimensionHierarchyStr', 'dimensionReferenceStr', 'dutyStr', 'enumCnt', 'enumLiteralStr', 'enumNum', 'enumStr', 'extendedTypeNum', 'extendedTypeStr', 'fieldNum', 'fieldPName', 'fieldStr', 'formControlStr', 'formDataFieldStr', 'formDataSourceStr', 'formMethodStr', 'formStr', 'identifierStr', 'indexNum', 'indexStr', 'licenseCodeNum', 'licenseCodeStr', 'literalStr', 'maxDate', 'maxInt', 'measureStr', 'measurementStr', 'menuItemActionStr', 'menuItemDisplayStr', 'menuItemOutputStr', 'menuStr', 'methodStr', 'minInt', 'privilegeStr', 'queryDatasourceStr', 'queryMethodStr', 'queryStr', 'reportStr', 'resourceStr', 'roleStr', 'ssrsReportStr', 'staticDelegateStr', 'staticMethodStr', 'tableCollectionStr', 'tableFieldGroupStr', 'tableMethodStr', 'tableNum', 'tablePName', 'tableStaticMethodStr', 'tableStr', 'tileStr', 'varStr', 'webActionItemStr', 'webDisplayContentItemStr', 'webFormStr', 'webMenuStr', 'webOutputContentItemStr', 'webReportStr', 'webSiteTempStr', 'webStaticFileStr', 'webUrlItemStr', 'webWebPartStr', 'webletItemStr', 'webpageDefStr', 'websiteDefStr', 'workflowApprovalStr', 'workflowCategoryStr', 'workflowTaskStr', 'workflowTypeStr')
    # NOTE(review): this empty dict is immediately overwritten by the full
    # token table on the next statement; it is dead code.
    tokens = {}
    tokens = {'root': [('(\\s*)\\b(else|if)\\b([^\\n])', bygroups(Whitespace, Keyword, using(this))), ((((('^([ \\t]*)((?:' + XPP_CHARS) + '(?:\\[\\])?\\s+)+?)(') + XPP_CHARS) + ')(\\s*)(\\()'), bygroups(Whitespace, using(this), Name.Function, Whitespace, Punctuation)), ('^(\\s*)(\\[)([^\\n]*?)(\\])', bygroups(Whitespace, Name.Attribute, Name.Variable.Class, Name.Attribute)), ('[^\\S\\n]+', Whitespace), ('(\\\\)(\\n)', bygroups(Text, Whitespace)), ('//[^\\n]*?\\n', Comment.Single), ('/[*][^\\n]*?[*]/', Comment.Multiline), ('\\n', Whitespace), (words(OPERATORS), Operator), ('=~|!=|==|<<|>>|[-+/*%=<>&^|]', Operator), ('[()\\[\\];:,.#]', Punctuation), ('[{}]', Punctuation), ('"(""|[^"])*"', String), ('\\$?"(|\\\\[^\\\\]|[^"\\\\\\n])*["\\n]', String), ("'\\\\.'|'[^\\\\]'", String.Char), ('[0-9]+(\\.[0-9]*)?([eE][+-][0-9]+)?[flFLdD]?|0[xX][0-9a-fA-F]+[Ll]?', Number), (words(KEYWORDS, suffix='\\b'), Keyword), ('(boolean|int|int64|str|real|guid|date)\\b\\??', Keyword.Type), ('(class|struct|extends|implements)(\\s+)', bygroups(Keyword, Whitespace), 'class'), ((('(' + XPP_CHARS) + ')(::)'), bygroups(Name.Variable.Class, Punctuation)), ('(\\s*)(\\w+)(\\s+\\w+(,|=)?[^\\n]*;)', bygroups(Whitespace, Name.Variable.Class, using(this))), ((((('(fieldNum\\()(' + XPP_CHARS) + ')(\\s*,\\s*)(') + XPP_CHARS) + ')(\\s*\\))'), bygroups(using(this), Name.Variable.Class, using(this), Name.Property, using(this))), ((('(tableNum\\()(' + XPP_CHARS) + ')(\\s*\\))'), bygroups(using(this), Name.Variable.Class, using(this))), (words(RUNTIME_FUNCTIONS, suffix='(?=\\()'), Name.Function.Magic), (words(COMPILE_FUNCTIONS, suffix='(?=\\()'), Name.Function.Magic), (XPP_CHARS, Name)], 'class': [(XPP_CHARS, Name.Class, '#pop'), default('#pop')], 'namespace': [('(?=\\()', Text, '#pop'), ((('(' + XPP_CHARS) + '|\\.)+'), Name.Namespace, '#pop')]}
class WassersteinUpdater(WassersteinUpdaterFramework):
    """Updater implementing the WGAN critic and generator optimisation steps."""

    def __init__(self, *args, **kwargs):
        super(WassersteinUpdater, self).__init__(*args, **kwargs)

    def g_loss(self, errG):
        # Report the generator loss before handing it back to the optimizer.
        chainer.report({'loss': errG}, self.G)
        return errG

    def _sample_noise(self):
        # Draw a fresh latent batch and move it onto the training device.
        shape = (self.args.batch_size, self.args.nz, 1, 1)
        noise = Variable(np.asarray(np.random.normal(size=shape), dtype=np.float32))
        noise.to_device(self.device)
        return noise

    def update_d(self, optimizer):
        # Critic step: score one real batch and one freshly generated batch.
        real_batch = self.get_iterator('main').next()
        real_input = Variable(self.converter(real_batch, self.device))
        errD_real = self.D(real_input)
        errD_fake = self.D(self.G(self._sample_noise()))
        optimizer.update(self.d_loss, errD_real, errD_fake)

    def update_g(self, optimizer):
        # Generator step: maximise the critic score of generated samples.
        errG = self.D(self.G(self._sample_noise()))
        optimizer.update(self.g_loss, errG)
def test_poetry_with_supplemental_source(fixture_dir: FixtureDirGetter, with_simple_keyring: None) -> None:
    """A supplemental source must coexist with the default PyPI repository."""
    output = BufferedIO()
    poetry = Factory().create_poetry(fixture_dir('with_supplemental_source'), io=output)
    pool = poetry.pool
    # PyPI is still present with DEFAULT priority.
    assert pool.has_repository('PyPI')
    assert pool.get_priority('PyPI') is Priority.DEFAULT
    assert isinstance(pool.repository('PyPI'), PyPiRepository)
    # The supplemental source is registered with SUPPLEMENTAL priority.
    assert pool.has_repository('supplemental')
    assert pool.get_priority('supplemental') is Priority.SUPPLEMENTAL
    assert isinstance(pool.repository('supplemental'), LegacyRepository)
    assert {repo.name for repo in pool.repositories} == {'PyPI', 'supplemental'}
    # Building the pool must not have produced any error output.
    assert output.fetch_error() == ''
def import_modules_from_strings(imports, allow_failed_imports=False):
    """Import modules named by dotted-path strings.

    Args:
        imports (str | list[str] | None): module name(s) to import. A falsy
            value returns ``None`` immediately.
        allow_failed_imports (bool): when True, a module that fails to
            import is replaced by ``None`` and a ``UserWarning`` is emitted
            instead of raising.

    Returns:
        The imported module when ``imports`` is a single string, a list of
        modules (with ``None`` placeholders for allowed failures) when it
        is a list, or ``None`` when ``imports`` is falsy.

    Raises:
        TypeError: if ``imports`` (or one of its items) is not a string.
        ImportError: if a module fails to import and failures are not
            allowed.
    """
    if (not imports):
        return
    single_import = False
    if isinstance(imports, str):
        single_import = True
        imports = [imports]
    if (not isinstance(imports, list)):
        raise TypeError(f'custom_imports must be a list but got type {type(imports)}')
    imported = []
    for imp in imports:
        if (not isinstance(imp, str)):
            raise TypeError(f'{imp} is of type {type(imp)} and cannot be imported.')
        try:
            imported_tmp = import_module(imp)
        except ImportError:
            if allow_failed_imports:
                warnings.warn(f'{imp} failed to import and is ignored.', UserWarning)
                imported_tmp = None
            else:
                # Re-raise the original exception so the failing module's
                # name and traceback are preserved (the previous code raised
                # a new, message-less ImportError here).
                raise
        imported.append(imported_tmp)
    if single_import:
        imported = imported[0]
    return imported
class TestCombineValidSubsets(unittest.TestCase):
    """Logging behaviour of combined vs. separate validation subsets."""

    def _train(self, extra_flags):
        # Train a tiny LM with a duplicated validation split, capture logs.
        with self.assertLogs() as logs:
            with tempfile.TemporaryDirectory('test_transformer_lm') as data_dir:
                create_dummy_data(data_dir, num_examples=20)
                preprocess_lm_data(data_dir)
                # Duplicate the validation split as "valid1".
                for ext in ('bin', 'idx'):
                    shutil.copyfile(f'{data_dir}/valid.{ext}', f'{data_dir}/valid1.{ext}')
                train_language_model(
                    data_dir,
                    'transformer_lm',
                    ['--max-update', '0', '--log-format', 'json'] + extra_flags,
                    run_validation=False,
                )
        return [record.message for record in logs.records]

    def test_combined(self):
        messages = self._train(['--combine-valid-subsets'])
        # The extra subset is mentioned but not reported separately.
        assert any('valid1' in m for m in messages)
        assert not any('valid1_ppl' in m for m in messages)

    def test_subsets(self):
        messages = self._train(['--valid-subset', 'valid,valid1'])
        # Each subset gets its own perplexity entry.
        assert any('valid_ppl' in m for m in messages)
        assert any('valid1_ppl' in m for m in messages)
class TestCRLReason():
    """Behavioural tests for the x509.CRLReason extension wrapper."""

    def test_invalid_reason_flags(self):
        # Only ReasonFlags members are accepted by the constructor.
        with pytest.raises(TypeError):
            x509.CRLReason('notareason')

    def test_eq(self):
        first = x509.CRLReason(x509.ReasonFlags.unspecified)
        second = x509.CRLReason(x509.ReasonFlags.unspecified)
        assert first == second

    def test_ne(self):
        unspecified = x509.CRLReason(x509.ReasonFlags.unspecified)
        compromised = x509.CRLReason(x509.ReasonFlags.ca_compromise)
        assert unspecified != compromised
        assert unspecified != object()

    def test_hash(self):
        # Equal reasons hash equal; different reasons hash differently.
        a = x509.CRLReason(x509.ReasonFlags.unspecified)
        b = x509.CRLReason(x509.ReasonFlags.unspecified)
        c = x509.CRLReason(x509.ReasonFlags.ca_compromise)
        assert hash(a) == hash(b)
        assert hash(a) != hash(c)

    def test_repr(self):
        reason = x509.CRLReason(x509.ReasonFlags.unspecified)
        assert repr(reason) == '<CRLReason(reason=ReasonFlags.unspecified)>'

    def test_public_bytes(self):
        # DER encoding of an ENUMERATED ca_compromise (2).
        ext = x509.CRLReason(x509.ReasonFlags.ca_compromise)
        assert ext.public_bytes() == b'\n\x01\x02'
class Description(sa.Attributes):
    """Attribute bag describing a (SAGA-style) job submission request.

    NOTE(review): the bare ``('Description')`` / ``(rus.nothing)``
    expressions below look like docstring/decorator artifacts left by an
    automated transformation -- confirm against the original source.
    """
    ('Description')
    (rus.nothing)
    def __init__(self):
        # Allow attributes beyond the registered ones and expose them in
        # camelCase as well as underscore form.
        self._attributes_extensible(True)
        self._attributes_camelcasing(True)
        # Register every known job-description attribute with its default,
        # type, arity (SCALAR/VECTOR/DICT) and writability.
        self._attributes_register(EXECUTABLE, None, sa.STRING, sa.SCALAR, sa.WRITEABLE)
        self._attributes_register(PRE_EXEC, None, sa.STRING, sa.VECTOR, sa.WRITEABLE)
        self._attributes_register(POST_EXEC, None, sa.STRING, sa.VECTOR, sa.WRITEABLE)
        self._attributes_register(ARGUMENTS, None, sa.STRING, sa.VECTOR, sa.WRITEABLE)
        self._attributes_register(ENVIRONMENT, None, sa.STRING, sa.DICT, sa.WRITEABLE)
        self._attributes_register(TOTAL_CPU_COUNT, None, sa.INT, sa.SCALAR, sa.WRITEABLE)
        self._attributes_register(TOTAL_GPU_COUNT, None, sa.INT, sa.SCALAR, sa.WRITEABLE)
        self._attributes_register(NUMBER_OF_PROCESSES, None, sa.INT, sa.SCALAR, sa.WRITEABLE)
        self._attributes_register(PROCESSES_PER_HOST, None, sa.INT, sa.SCALAR, sa.WRITEABLE)
        self._attributes_register(THREADS_PER_PROCESS, None, sa.INT, sa.SCALAR, sa.WRITEABLE)
        self._attributes_register(WORKING_DIRECTORY, None, sa.STRING, sa.SCALAR, sa.WRITEABLE)
        self._attributes_register(INTERACTIVE, None, sa.BOOL, sa.SCALAR, sa.WRITEABLE)
        self._attributes_register(INPUT, None, sa.STRING, sa.SCALAR, sa.WRITEABLE)
        self._attributes_register(OUTPUT, None, sa.STRING, sa.SCALAR, sa.WRITEABLE)
        self._attributes_register(ERROR, None, sa.STRING, sa.SCALAR, sa.WRITEABLE)
        self._attributes_register(FILE_TRANSFER, None, sa.ANY, sa.VECTOR, sa.WRITEABLE)
        self._attributes_register(CLEANUP, None, sa.BOOL, sa.SCALAR, sa.WRITEABLE)
        self._attributes_register(JOB_START_TIME, None, sa.TIME, sa.SCALAR, sa.WRITEABLE)
        self._attributes_register(WALL_TIME_LIMIT, None, sa.INT, sa.SCALAR, sa.WRITEABLE)
        self._attributes_register(TOTAL_PHYSICAL_MEMORY, None, sa.INT, sa.SCALAR, sa.WRITEABLE)
        self._attributes_register(SYSTEM_ARCHITECTURE, {}, sa.STRING, sa.DICT, sa.WRITEABLE)
        self._attributes_register(OPERATING_SYSTEM_TYPE, None, sa.ENUM, sa.SCALAR, sa.WRITEABLE)
        self._attributes_register(CANDIDATE_HOSTS, None, sa.STRING, sa.VECTOR, sa.WRITEABLE)
        self._attributes_register(QUEUE, None, sa.STRING, sa.SCALAR, sa.WRITEABLE)
        self._attributes_register(NAME, None, sa.STRING, sa.SCALAR, sa.WRITEABLE)
        self._attributes_register(PROJECT, None, sa.STRING, sa.SCALAR, sa.WRITEABLE)
        self._attributes_register(JOB_CONTACT, None, sa.STRING, sa.VECTOR, sa.WRITEABLE)
        self._attributes_register(SPMD_VARIATION, None, sa.ENUM, sa.SCALAR, sa.WRITEABLE)
        self._attributes_set_enums(SPMD_VARIATION, ['MPI', 'OpenMP', 'MPICH-G'])
        # Track whether ENVIRONMENT was last assigned as a list so the
        # getter can mirror the caller's representation.
        self._env_is_list = False
        self._attributes_set_getter(ENVIRONMENT, self._get_env)
        self._attributes_set_setter(ENVIRONMENT, self._set_env)
    def _set_env(self, val):
        # Setter hook: only records the list-ness flag here; the value
        # itself is presumably stored by the attribute framework -- confirm.
        if isinstance(val, list):
            self._env_is_list = True
    def _get_env(self):
        # Getter hook: return "KEY=VAL" strings when the env was set as a
        # list; otherwise return the stored dict unchanged.
        env = self.get_attribute(ENVIRONMENT)
        if self._env_is_list:
            self._env_is_list = False
            return [('%s=%s' % (key, val)) for (key, val) in list(env.items())]
        return env
    ('Description', ('Description', dict))
    ('Description')
    def __deepcopy__(self, memo):
        # Deep copy delegates to clone() (attribute-level deep copy).
        other = Description()
        return self.clone(other)
    ('Description', 'Description')
    ('Description')
    def clone(self, other=None):
        # Copy all attributes into `other` (a fresh Description by default).
        if (not other):
            other = Description()
        return self._attributes_deep_copy(other)
def make_layers(cfg, batch_norm=False):
    """Build a VGG-style feature extractor from a layer configuration.

    Each integer entry adds a Dropout(P) + 3x3 conv (+ optional BatchNorm)
    + ReLU group; the literal 'M' adds a 2x2 max-pooling layer.
    """
    layers = []
    channels = 3
    for spec in cfg:
        if spec == 'M':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        group = [nn.Dropout(P), nn.Conv2d(channels, spec, kernel_size=3, padding=1)]
        if batch_norm:
            group.append(nn.BatchNorm2d(spec))
        group.append(nn.ReLU(inplace=True))
        layers.extend(group)
        channels = spec
    return nn.Sequential(*layers)
def get_rpt_sections_details(rpt_path):
    """Scan a SWMM ``.rpt`` file and return the sections found in it.

    Returns an OrderedDict mapping each detected section header to its
    known column configuration; unrecognised headers get a generic
    ``{'columns': ['blob']}`` entry.
    """
    from swmmio.defs import RPT_OBJECTS
    found_sects = OrderedDict()
    rpt_headers = RPT_OBJECTS.copy()
    # Overlay version-specific section definitions for every known SWMM
    # version not newer than the one that produced this report.
    meta_data = get_rpt_metadata(rpt_path)
    swmm_version = meta_data['swmm_version']
    for version in SWMM5_VERSION:
        version_value = float(version)
        rpt_version = float(f"{swmm_version['minor']}.{swmm_version['patch']}")
        if (rpt_version >= version_value):
            update_rpt = normalize_inp_config(SWMM5_VERSION[version]['rpt_sections'])
            rpt_headers.update(update_rpt)
    with open(rpt_path) as f:
        # Sliding 3-line window: a section header is a non-blank middle
        # line surrounded by delimiter lines.
        # NOTE(review): the ('' in ...) delimiter tests are always True --
        # the original delimiter characters appear to have been stripped
        # from the string literals; confirm against upstream swmmio.
        buff3line = deque()
        for line in f:
            buff3line.append(line)
            if (len(buff3line) > 3):
                buff3line.popleft()
            # BUGFIX: guard on window size first -- indexing buff3line[2]
            # while the deque holds fewer than 3 lines raised IndexError.
            if ((len(buff3line) == 3) and ('' in buff3line[0]) and ('' in buff3line[2]) and (len(buff3line[1].strip()) > 0)):
                header = buff3line[1].strip()
                if (header in rpt_headers):
                    found_sects[header] = rpt_headers[header]
                else:
                    found_sects[header] = OrderedDict(columns=['blob'])
    return found_sects
def test_out_bounds(zarr_dataset: ChunkedDataset, cfg: dict) -> None:
    """Future targets past the end of the scene must be flagged unavailable."""
    generator = get_partial(cfg, 0, 10, 0.1)
    sample = generator(
        state_index=0,
        frames=np.asarray(zarr_dataset.frames[90:96]),
        agents=zarr_dataset.agents,
        tl_faces=np.zeros(0),
        selected_track_id=None,
    )
    availabilities = sample['target_availabilities']
    # Only 5 future frames exist after index 90; the rest are unavailable.
    assert bool(np.all(availabilities[:5])) is True
    assert bool(np.all(availabilities[5:])) is False
def test_default_both(hatch, helpers, temp_dir, config_file):
    """`hatch dep show table --ascii` lists project and env dependencies."""
    config_file.model.template.plugins['default']['tests'] = False
    config_file.save()
    with temp_dir.as_cwd():
        result = hatch('new', 'My.App')
    assert (result.exit_code == 0), result.output
    project_path = (temp_dir / 'my-app')
    (temp_dir / 'data').mkdir()
    # Give the project one project-level and one env-level dependency.
    project = Project(project_path)
    config = dict(project.raw_config)
    config['project']['dependencies'] = ['foo-bar-baz']
    project.save_config(config)
    helpers.update_project_environment(project, 'default', {'dependencies': ['baz-bar-foo']})
    with project_path.as_cwd():
        result = hatch('dep', 'show', 'table', '--ascii')
        assert (result.exit_code == 0), result.output
        assert (helpers.remove_trailing_spaces(result.output) == helpers.dedent('\n Project\n ++\n | Name |\n ++\n | foo-bar-baz |\n ++\n Env: default\n ++\n | Name |\n ++\n | baz-bar-foo |\n ++\n '))
def wrap_function_to_error_out_if_called_directly(function: FixtureFunction, fixture_marker: 'FixtureFunctionMarker') -> FixtureFunction:
    """Return a wrapper that fails with a helpful message when a fixture
    function is called directly instead of being requested by a test.

    NOTE(review): the bare ``(function)`` expression below is a no-op and
    looks like a stripped ``@functools.wraps(function)`` decorator --
    confirm against the original source before relying on wrapper metadata.
    """
    message = 'Fixture "{name}" called directly. Fixtures are not meant to be called directly,\nbut are created automatically when test functions request them as parameters.\nSee for more information about fixtures, and\n about how to update your code.'.format(name=(fixture_marker.name or function.__name__))
    (function)
    def result(*args, **kwargs):
        # Calling a fixture directly is always an error.
        fail(message, pytrace=False)
    # Keep a reference to the wrapped function for pytest introspection.
    result.__pytest_wrapped__ = _PytestWrapper(function)
    return cast(FixtureFunction, result)
class DeiTFeatureExtractor(FeatureExtractionMixin, ImageFeatureExtractionMixin):
    """Feature extractor for DeiT: resize, center-crop and normalize images."""

    model_input_names = ['pixel_values']

    def __init__(self, do_resize=True, size=256, resample=Image.BICUBIC, do_center_crop=True, crop_size=224, do_normalize=True, image_mean=None, image_std=None, **kwargs):
        super().__init__(**kwargs)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        # Fall back to the standard ImageNet statistics when none given.
        self.image_mean = IMAGENET_DEFAULT_MEAN if image_mean is None else image_mean
        self.image_std = IMAGENET_DEFAULT_STD if image_std is None else image_std

    def __call__(self, images: ImageInput, return_tensors: Optional[Union[(str, TensorType)]]=None, **kwargs) -> BatchFeature:
        """Preprocess a single image or a batch of images into pixel values."""

        def _is_single(candidate):
            # A PIL image, numpy array or torch tensor is one example.
            return isinstance(candidate, (Image.Image, np.ndarray)) or is_torch_tensor(candidate)

        valid_images = _is_single(images) or (
            isinstance(images, (list, tuple))
            and (len(images) == 0 or _is_single(images[0]))
        )
        if not valid_images:
            raise ValueError('Images must of type `PIL.Image.Image`, `np.ndarray` or `torch.Tensor` (single example), `List[PIL.Image.Image]`, `List[np.ndarray]` or `List[torch.Tensor]` (batch of examples).')
        is_batched = bool(isinstance(images, (list, tuple)) and _is_single(images[0]))
        if not is_batched:
            images = [images]
        # Optional pipeline: resize -> center crop -> normalize.
        if self.do_resize and self.size is not None and self.resample is not None:
            images = [self.resize(image=img, size=self.size, resample=self.resample) for img in images]
        if self.do_center_crop and self.crop_size is not None:
            images = [self.center_crop(img, self.crop_size) for img in images]
        if self.do_normalize:
            images = [self.normalize(image=img, mean=self.image_mean, std=self.image_std) for img in images]
        return BatchFeature(data={'pixel_values': images}, tensor_type=return_tensors)
class SelfImportVisitor(ImportInfoVisitor):
    """Rewrites import statements inside ``resource`` that import
    ``resource`` itself, collecting names that must be de-qualified
    (``to_be_fixed``) or renamed back to their real names (``to_be_renamed``).
    """
    def __init__(self, project, current_folder, resource):
        self.project = project
        self.folder = current_folder
        self.resource = resource
        # Names imported from this module into itself (to de-qualify).
        self.to_be_fixed = set()
        # (alias, original-name) pairs that must be renamed afterwards.
        self.to_be_renamed = set()
        self.context = importinfo.ImportContext(project, current_folder)
    def visitNormalImport(self, import_stmt, import_info):
        # Drop `import <self>` entries, remembering the bound name.
        new_pairs = []
        for (name, alias) in import_info.names_and_aliases:
            resource = self.project.find_module(name, folder=self.folder)
            if ((resource is not None) and (resource == self.resource)):
                imported = name
                if (alias is not None):
                    imported = alias
                self.to_be_fixed.add(imported)
            else:
                new_pairs.append((name, alias))
        # Only rewrite the statement when something was actually removed.
        if (not import_info._are_name_and_alias_lists_equal(new_pairs, import_info.names_and_aliases)):
            import_stmt.import_info = importinfo.NormalImport(new_pairs)
    def visitFromImport(self, import_stmt, import_info):
        # `from <self> import ...` gets special handling; otherwise drop
        # any imported names that resolve back to this module.
        resource = import_info.get_imported_resource(self.context)
        if (resource is None):
            return
        if (resource == self.resource):
            self._importing_names_from_self(import_info, import_stmt)
            return
        pymodule = self.project.get_pymodule(resource)
        new_pairs = []
        for (name, alias) in import_info.names_and_aliases:
            try:
                result = pymodule[name].get_object()
                if (isinstance(result, pyobjects.PyModule) and (result.get_resource() == self.resource)):
                    imported = name
                    if (alias is not None):
                        imported = alias
                    self.to_be_fixed.add(imported)
                else:
                    new_pairs.append((name, alias))
            except exceptions.AttributeNotFoundError:
                # Unresolvable names are kept untouched.
                new_pairs.append((name, alias))
        if (not import_info._are_name_and_alias_lists_equal(new_pairs, import_info.names_and_aliases)):
            import_stmt.import_info = importinfo.FromImport(import_info.module_name, import_info.level, new_pairs)
    def _importing_names_from_self(self, import_info, import_stmt):
        # Aliased names imported from self must be renamed back to their
        # real names; the import statement itself becomes empty.
        if (not import_info.is_star_import()):
            for (name, alias) in import_info.names_and_aliases:
                if (alias is not None):
                    self.to_be_renamed.add((alias, name))
        import_stmt.empty_import()
class OpenWithFileDescriptorTest(FakeFileOpenTestBase):
    """open() accepting a raw file descriptor in the fake filesystem."""

    def test_open_with_file_descriptor(self):
        path = self.make_path('this', 'file')
        self.create_file(path)
        descriptor = self.os.open(path, os.O_CREAT)
        # Opening by descriptor keeps the same underlying fd.
        self.assertEqual(descriptor, self.open(descriptor, 'r').fileno())

    def test_closefd_with_file_descriptor(self):
        path = self.make_path('this', 'file')
        self.create_file(path)
        descriptor = self.os.open(path, os.O_CREAT)
        # closefd=False: the fd must stay open after closing the handle.
        handle = self.open(descriptor, 'r', closefd=False)
        handle.close()
        self.assertIsNotNone(self.filesystem.open_files[descriptor])
        # closefd=True: closing the handle also closes the fd.
        handle = self.open(descriptor, 'r', closefd=True)
        handle.close()
        self.assertIsNone(self.filesystem.open_files[descriptor])
# NOTE(review): the leading `.xfail(...)` below looks like a stripped
# `@pytest.mark.xfail` decorator -- restore the full decorator upstream.
.xfail(reason='BigQuery emulator does not support REQUIRED fields')
def test_required_types(client):
    """REQUIRED BigQuery columns must map to non-nullable Recap types."""
    client = BigQueryClient(client)
    recap_schema = client.schema('test_project', 'test_dataset', 'test_table_required')
    recap_fields = recap_schema.fields
    # One assertion per BigQuery column type, in declaration order.
    assert (recap_fields[0] == StringType(name='test_string'))
    assert (recap_fields[1] == BytesType(name='test_bytes'))
    assert (recap_fields[2] == IntType(bits=64, name='test_int64'))
    assert (recap_fields[3] == FloatType(bits=64, name='test_float64'))
    assert (recap_fields[4] == BoolType(name='test_boolean'))
    assert (recap_fields[5] == IntType(bits=64, logical='build.recap.Timestamp', unit='microsecond', name='test_timestamp'))
    assert (recap_fields[6] == IntType(bits=64, logical='build.recap.Timestamp', unit='microsecond', name='test_datetime'))
    assert (recap_fields[7] == IntType(bits=32, logical='build.recap.Date', unit='day', name='test_date'))
    assert (recap_fields[8] == IntType(bits=32, logical='build.recap.Time', unit='microsecond', name='test_time'))
    assert (recap_fields[9] == BytesType(bytes_=16, variable=False, logical='build.recap.Decimal', precision=38, scale=0, name='test_numeric'))
    assert (recap_fields[10] == BytesType(bytes_=32, variable=False, logical='build.recap.Decimal', precision=76, scale=0, name='test_bigdecimal'))
class MixStyle(nn.Module):
    """Randomly mixes per-instance feature statistics within a batch.

    During training (and when activated), with probability ``p`` the
    channel-wise mean/std of each instance are interpolated with those of
    another instance, chosen by a random or cross-domain permutation.
    """

    def __init__(self, p=0.5, alpha=0.1, eps=1e-06, mix='random'):
        super().__init__()
        self.p = p
        self.beta = torch.distributions.Beta(alpha, alpha)
        self.eps = eps
        self.alpha = alpha
        self.mix = mix
        self._activated = True

    def __repr__(self):
        return f'MixStyle(p={self.p}, alpha={self.alpha}, eps={self.eps}, mix={self.mix})'

    def set_activation_status(self, status=True):
        # Toggle MixStyle on/off independently of train/eval mode.
        self._activated = status

    def update_mix_method(self, mix='random'):
        self.mix = mix

    def forward(self, x):
        # Identity at eval time, when deactivated, or with probability 1-p.
        if not self.training or not self._activated:
            return x
        if random.random() > self.p:
            return x
        batch = x.size(0)
        # Per-instance channel statistics over the spatial dimensions.
        mu = x.mean(dim=[2, 3], keepdim=True)
        var = x.var(dim=[2, 3], keepdim=True)
        sig = (var + self.eps).sqrt()
        mu, sig = mu.detach(), sig.detach()
        x_normed = (x - mu) / sig
        # Mixing coefficient sampled from Beta(alpha, alpha).
        lmda = self.beta.sample((batch, 1, 1, 1))
        lmda = lmda.to(x.device)
        if self.mix == 'random':
            # Mix with a random permutation of the whole batch.
            perm = torch.randperm(batch)
        elif self.mix == 'crossdomain':
            # Assume the batch is [domain A | domain B]: swap the halves,
            # then shuffle within each half.
            perm = torch.arange(batch - 1, -1, -1)
            perm_b, perm_a = perm.chunk(2)
            perm_b = perm_b[torch.randperm(perm_b.shape[0])]
            perm_a = perm_a[torch.randperm(perm_a.shape[0])]
            perm = torch.cat([perm_b, perm_a], 0)
        else:
            raise NotImplementedError
        mu2, sig2 = mu[perm], sig[perm]
        mu_mix = mu * lmda + mu2 * (1 - lmda)
        sig_mix = sig * lmda + sig2 * (1 - lmda)
        return x_normed * sig_mix + mu_mix
def a1_a2_calculation(r, rdot, omega, D, M, eta):
    """Compute the A1/A2 acceleration series for an orbital trajectory.

    Args:
        r: list of radii (per sample).
        rdot: list of radial velocities (same length as ``r``).
        omega: list of angular velocities (same length as ``r``).
        D: distance in kpc (converted to km internally).
        M: mass scale factor.
        eta: symmetric mass-ratio factor.

    Returns:
        ``[A1, A2]`` -- two lists of floats, one entry per input sample.

    Raises:
        AssertionError: if any argument has the wrong type.
    """
    # isinstance (rather than type(...) == ...) also accepts subclasses,
    # which is backward compatible with the original exact-type checks.
    assert isinstance(r, list), 'r should be a list.'
    assert isinstance(rdot, list), 'rdot should be a list.'
    assert isinstance(omega, list), 'omega should be a list.'
    assert isinstance(D, float), 'D should be a float.'
    assert isinstance(M, float), 'M should be a float.'
    assert isinstance(eta, float), 'eta should be a float.'
    Dkm = (D * 3.086e+19)  # kpc -> km
    # Hoist the sample-independent prefactor out of the loop.
    coeff = (((- 2) * M) * eta) * (1 / Dkm)
    A1 = []
    A2 = []
    for (r_i, rdot_i, omega_i) in zip(r, rdot, omega):
        A1.append(coeff * (((rdot_i ** 2) + ((r_i * omega_i) ** 2)) + (1 / r_i)))
        A2.append(coeff * (((2 * r_i) * rdot_i) * omega_i))
    return [A1, A2]
class GroupAccumulator(Accumulator):
    """Accumulator that also collects a per-sample group-id tensor.

    NOTE(review): ``state`` reads ``super().state`` without calling it, and
    stores ``self.groups`` (here a bound method, not a tensor) -- in the
    original source ``state`` and ``groups`` were almost certainly
    ``@property``-decorated and the decorators were stripped.  Confirm
    before relying on ``state()``/``groups()`` as plain methods.
    """
    def __init__(self):
        super().__init__()
        # Buffers of collected group-id tensors, one entry per update().
        self._groups = []
    def state(self) -> Dict[(str, torch.Tensor)]:
        # Extend the parent state with the concatenated group ids.
        state = super().state
        state.update({'groups': self.groups})
        return state
    def update(self, embeddings: torch.Tensor, groups: torch.Tensor, device=None):
        # Detach both tensors and move them to `device` (defaults to the
        # embeddings' own device) before buffering.
        if (device is None):
            device = embeddings.device
        embeddings = embeddings.detach().to(device)
        groups = groups.detach().to(device)
        self._embeddings.append(embeddings)
        self._groups.append(groups)
    def reset(self):
        # Clear the parent buffers and the group buffer.
        super().reset()
        self._groups = []
    def groups(self):
        # Concatenate everything collected so far (empty tensor when none).
        return (torch.cat(self._groups) if len(self._groups) else torch.Tensor())
class BeforeClose(StatelessRule):
    """Triggers exactly once per session, ``offset`` before the market
    close (after the calendar's execution-time adjustment)."""

    def __init__(self, offset=None, **kwargs):
        self.offset = _build_offset(offset, kwargs, datetime.timedelta(minutes=1))
        self._period_start = None
        self._period_close = None
        self._period_end = None
        self._one_minute = datetime.timedelta(minutes=1)

    def calculate_dates(self, dt):
        # Recompute the trigger window for the session containing `dt`.
        session = self.cal.minute_to_session_label(dt)
        session_close = self.cal.open_and_close_for_session(session)[1]
        self._period_end = self.cal.execution_time_from_close(session_close)
        self._period_start = self._period_end - self.offset
        self._period_close = self._period_end

    def should_trigger(self, dt):
        # Lazily (re)compute the window when unset or already passed.
        if self._period_start is None or self._period_close <= dt:
            self.calculate_dates(dt)
        return dt == self._period_start
def generate_object_struct(cl: ClassIR, emitter: Emitter) -> None:
    """Emit the C struct declaration for instances of class `cl`.

    Walks the MRO from least- to most-derived so the attribute layout is
    compatible with the bases, emitting presence-bitmap words and one
    struct member per attribute.
    """
    seen_attrs: set[tuple[(str, RType)]] = set()
    lines: list[str] = []
    lines += ['typedef struct {', 'PyObject_HEAD', 'CPyVTableItem *vtable;']
    # Reserve a vectorcall slot when the class is callable and the
    # vectorcall protocol is in use.
    if (cl.has_method('__call__') and emitter.use_vectorcall()):
        lines.append('vectorcallfunc vectorcall;')
    bitmap_attrs = []
    for base in reversed(cl.base_mro):
        if (not base.is_trait):
            if base.bitmap_attrs:
                # Declare deduplicated bitmap words covering this base's
                # tracked attributes, BITMAP_BITS attributes per word.
                if (emitter.bitmap_field((len(base.bitmap_attrs) - 1)) not in bitmap_attrs):
                    for i in range(0, len(base.bitmap_attrs), BITMAP_BITS):
                        attr = emitter.bitmap_field(i)
                        if (attr not in bitmap_attrs):
                            lines.append(f'{BITMAP_TYPE} {attr};')
                            bitmap_attrs.append(attr)
            for (attr, rtype) in base.attributes.items():
                # Skip attributes already declared by a more-derived class.
                if ((attr, rtype) not in seen_attrs):
                    lines.append(f'{emitter.ctype_spaced(rtype)}{emitter.attr(attr)};')
                    seen_attrs.add((attr, rtype))
                    if isinstance(rtype, RTuple):
                        emitter.declare_tuple_struct(rtype)
    lines.append(f'}} {cl.struct_name(emitter.names)};')
    lines.append('')
    emitter.context.declarations[cl.struct_name(emitter.names)] = HeaderDeclaration(lines, is_type=True)
def parse_args():
    """Parse the command-line arguments for detector training."""
    parser = argparse.ArgumentParser(description='Train a detector')
    parser.add_argument('config', help='train config file path')
    parser.add_argument('--work-dir', help='the dir to save logs and models')
    parser.add_argument('--resume-from', help='the checkpoint file to resume from')
    parser.add_argument('--no-validate', action='store_true', help='whether not to evaluate the checkpoint during training')
    # --gpus and --gpu-ids are mutually exclusive ways to pick devices.
    gpu_group = parser.add_mutually_exclusive_group()
    gpu_group.add_argument('--gpus', type=int, help='number of gpus to use (only applicable to non-distributed training)')
    gpu_group.add_argument('--gpu-ids', type=int, nargs='+', help='ids of gpus to use (only applicable to non-distributed training)')
    parser.add_argument('--seed', type=int, default=None, help='random seed')
    parser.add_argument('--deterministic', action='store_true', help='whether to set deterministic options for CUDNN backend.')
    parser.add_argument('--options', nargs='+', action=DictAction, help='override some settings in the used config, the key-value pair in xxx=yyy format will be merged into config file (deprecate), change to --cfg-options instead.')
    parser.add_argument('--cfg-options', nargs='+', action=DictAction, help='override some settings in the used config, the key-value pair in xxx=yyy format will be merged into config file. If the value to be overwritten is a list, it should be like key="[a,b]" or key=a,b It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" Note that the quotation marks are necessary and that no white space is allowed.')
    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'], default='none', help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    # Make LOCAL_RANK visible to torch.distributed launch helpers.
    os.environ.setdefault('LOCAL_RANK', str(args.local_rank))
    if args.options and args.cfg_options:
        raise ValueError('--options and --cfg-options cannot be both specified, --options is deprecated in favor of --cfg-options')
    if args.options:
        warnings.warn('--options is deprecated in favor of --cfg-options')
        args.cfg_options = args.options
    return args
def fix_cache_order(item: nodes.Item, argkeys_cache: Dict[(Scope, Dict[(nodes.Item, Dict[(FixtureArgKey, None)])])], items_by_argkey: Dict[(Scope, Dict[(FixtureArgKey, 'Deque[nodes.Item]')])]) -> None:
    """Move `item` to the front of every argkey deque it participates in,
    for all high scopes, so reordering keeps fixture-sharing items adjacent."""
    for scope in HIGH_SCOPES:
        item_argkeys = argkeys_cache[scope].get(item, [])
        scope_deques = items_by_argkey[scope]
        for argkey in item_argkeys:
            scope_deques[argkey].appendleft(item)
def is_transaction_expired(transaction: ContractSendEvent, block_number: BlockNumber) -> bool:
    """A transaction is expired once its on-chain deadline has passed.

    Only channel-update and secret-reveal transactions carry an
    expiration block; every other transaction type never expires.
    """
    expirable = (ContractSendChannelUpdateTransfer, ContractSendSecretReveal)
    if isinstance(transaction, expirable):
        return transaction.expiration < block_number
    return False
def find_locales(name, dir='locale'):
    """Collect compiled gettext catalogs below ``./<name>/<dir>``.

    Args:
        name: package directory name (relative to the current directory).
        dir: locale subdirectory inside the package.

    Returns:
        Paths of every ``.mo`` file, relative to the package directory
        ``./<name>/`` (i.e. starting with ``<dir>/``), suitable for use as
        package data in a setup script.
    """
    locale_files = []
    root = os.path.join('.', name, dir)
    # Length of the './<name>/' prefix stripped from each walked path.
    prefix_len = len(name) + 3
    for (path, dirs, files) in os.walk(root):
        rel_path = path[prefix_len:]
        for file in files:
            # endswith is clearer than the original file[-3:] slicing.
            if file.endswith('.mo'):
                locale_files.append(os.path.join(rel_path, file))
    return locale_files
def will_change(*, reason, version):
    """Decorator factory marking an API whose behaviour will change.

    The decorated callable emits a ``SKCriteriaFutureWarning`` (once) and
    gains a Sphinx ``deprecated`` directive in its docstring.
    """
    warn = _deprecated(reason=reason, version=version, category=SKCriteriaFutureWarning, action='once')

    def decorator(func):
        wrapped = warn(func)
        wrapped.__doc__ = add_sphinx_deprecated_directive(func.__doc__, reason=reason, version=version)
        return wrapped

    return decorator
# NOTE(review): the leading `.parametrize(...)` below looks like a stripped
# `@pytest.mark.parametrize` decorator -- restore the full decorator upstream.
.parametrize('case', [CaseBits32ClosureConstruct, CaseBits32ArrayClosureConstruct, CaseTwoUpblksSliceComp, CaseTwoUpblksFreevarsComp])
def test_generic_behavioral_L1(case):
    """L1 behavioral translation must reproduce the case's reference
    update-block source and free-variable declarations."""
    m = case.DUT()
    m.elaborate()
    # Translate the elaborated DUT with the L1 behavioral translator.
    tr = mk_TestBehavioralTranslator(BehavioralTranslatorL1)(m)
    tr.clear(m)
    tr.translate_behavioral(m)
    upblk_src = tr.behavioral.upblk_srcs[m]
    decl_freevars = tr.behavioral.decl_freevars[m]
    assert (upblk_src == case.REF_UPBLK)
    assert (decl_freevars == case.REF_FREEVAR)
class FairseqMultiModel(BaseFairseqModel):
    """Base class for combining multiple encoder-decoder models.

    NOTE(review): ``build_shared_embeddings`` takes no ``self`` and the
    argument-less ``encoder``/``decoder`` accessors read like properties --
    they appear to have lost ``@staticmethod``/``@property`` decorators in
    a transformation; confirm against upstream fairseq.
    """

    def __init__(self, encoders, decoders):
        super().__init__()
        assert (encoders.keys() == decoders.keys())
        self.keys = list(encoders.keys())
        # Validate component types up front so failures are clear.
        for key in self.keys:
            check_type(encoders[key], FairseqEncoder)
            check_type(decoders[key], FairseqDecoder)
        # One encoder-decoder pair per model key.
        self.models = nn.ModuleDict({key: FairseqEncoderDecoderModel(encoders[key], decoders[key]) for key in self.keys})

    def build_shared_embeddings(dicts: Dict[(str, Dictionary)], langs: List[str], embed_dim: int, build_embedding: callable, pretrained_embed_path: Optional[str]=None):
        """Build one embedding table shared by all of `langs`; requires
        every language to use the same joined dictionary."""
        shared_dict = dicts[langs[0]]
        if any(((dicts[lang] != shared_dict) for lang in langs)):
            raise ValueError('--share-*-embeddings requires a joined dictionary: --share-encoder-embeddings requires a joined source dictionary, --share-decoder-embeddings requires a joined target dictionary, and --share-all-embeddings requires a joint source + target dictionary.')
        # BUGFIX: removed a stray debug statement (print of 200 'b'
        # characters) left in the original implementation.
        return build_embedding(shared_dict, embed_dim, pretrained_embed_path)

    def forward(self, src_tokens, src_lengths, prev_output_tokens, **kwargs):
        raise NotImplementedError

    def max_positions(self):
        """Per-key (encoder, decoder) maximum supported input lengths."""
        return {key: (self.models[key].encoder.max_positions(), self.models[key].decoder.max_positions()) for key in self.keys}

    def max_decoder_positions(self):
        """Smallest maximum decoder length across all component models."""
        return min((model.decoder.max_positions() for model in self.models.values()))

    def encoder(self):
        # First model's encoder (see class NOTE about stripped @property).
        return self.models[self.keys[0]].encoder

    def decoder(self):
        # First model's decoder (see class NOTE about stripped @property).
        return self.models[self.keys[0]].decoder

    def forward_decoder(self, prev_output_tokens, **kwargs):
        return self.decoder(prev_output_tokens, **kwargs)

    def load_state_dict(self, state_dict, strict=True, model_cfg=None, args: Optional[Namespace]=None):
        """Load a state dict after pruning parameters per `model_cfg`."""
        if ((model_cfg is None) and (args is not None)):
            # logger.warn is a deprecated alias for logger.warning.
            logger.warning("using 'args' is deprecated, please update your code to use dataclass config")
            model_cfg = convert_namespace_to_omegaconf(args).model
        self.upgrade_state_dict(state_dict)
        from fairseq.checkpoint_utils import prune_state_dict
        new_state_dict = prune_state_dict(state_dict, model_cfg)
        return super().load_state_dict(new_state_dict, strict)
class EarthMoverDistanceFunction(torch.autograd.Function):
    """Autograd wrapper around the CUDA approximate Earth Mover's Distance.

    NOTE(review): ``forward``/``backward`` take ``ctx`` as their first
    argument but carry no ``@staticmethod`` decorator here -- the
    decorators were most likely stripped; confirm against the original.
    """
    def forward(ctx, xyz1, xyz2):
        # The CUDA kernels require contiguous tensors on the GPU.
        xyz1 = xyz1.contiguous()
        xyz2 = xyz2.contiguous()
        assert (xyz1.is_cuda and xyz2.is_cuda), 'Only support cuda currently.'
        # Approximate matching between the point sets, then its cost.
        match = emd_cuda.approxmatch_forward(xyz1, xyz2)
        cost = emd_cuda.matchcost_forward(xyz1, xyz2, match)
        # Save inputs and the matching for the backward pass.
        ctx.save_for_backward(xyz1, xyz2, match)
        return cost
    def backward(ctx, grad_cost):
        (xyz1, xyz2, match) = ctx.saved_tensors
        grad_cost = grad_cost.contiguous()
        # Gradients w.r.t. both point clouds; the matching gets none.
        (grad_xyz1, grad_xyz2) = emd_cuda.matchcost_backward(grad_cost, xyz1, xyz2, match)
        return (grad_xyz1, grad_xyz2)
class SNMPRawCollector(parent_SNMPCollector):
    """Diamond collector that publishes raw SNMP OID values as gauges."""

    def process_config(self):
        super(SNMPRawCollector, self).process_config()
        # (device, oid) pairs that repeatedly failed and were muted.
        self.skip_list = []

    def get_default_config(self):
        default_config = super(SNMPRawCollector, self).get_default_config()
        default_config.update({'oids': {}, 'path_prefix': 'servers', 'path_suffix': 'snmp'})
        return default_config

    def _precision(self, value):
        """Number of digits after the decimal point in `value`'s string form."""
        value = str(value)
        decimal = value.rfind('.')
        if (decimal == (- 1)):
            return 0
        return ((len(value) - decimal) - 1)

    def _skip(self, device, oid, reason=None):
        """Mute `oid` on `device`, optionally logging why."""
        self.skip_list.append((device, oid))
        if (reason is not None):
            self.log.warn("Muted '{}' on '{}', because: {}".format(oid, device, reason))

    def _get_value_walk(self, device, oid, host, port, community):
        """Fetch a value via SNMP WALK; expects exactly one result entry."""
        data = self.walk(oid, host, port, community)
        if (data is None):
            self._skip(device, oid, 'device down (#2)')
            return
        self.log.debug("Data received from WALK '{}': [{}]".format(device, data))
        if (len(data) != 1):
            self._skip(device, oid, 'unexpected response, data has {} entries'.format(len(data)))
            return
        # BUGFIX: dict views are not indexable on Python 3, so the original
        # `data.items()[0][1]` raised TypeError there.  Take the single
        # value in a way that works on both Python 2 and 3.
        return next(iter(data.values()))

    def _get_value(self, device, oid, host, port, community):
        """Fetch a value via SNMP GET, falling back to WALK when the OID
        names a subtree instead of a scalar instance."""
        data = self.get(oid, host, port, community)
        if (data is None):
            self._skip(device, oid, 'device down (#1)')
            return
        self.log.debug("Data received from GET '{}': [{}]".format(device, data))
        if (len(data) == 0):
            self._skip(device, oid, 'empty response, device down?')
            return
        if (oid not in data):
            self._skip(device, oid, 'no object at OID (#1)')
            return
        value = data[oid]
        if (value == 'No Such Object currently exists at this OID'):
            self._skip(device, oid, 'no object at OID (#2)')
            return
        if (value == 'No Such Instance currently exists at this OID'):
            # The OID names a subtree; retry as a WALK.
            return self._get_value_walk(device, oid, host, port, community)
        return value

    def collect_snmp(self, device, host, port, community):
        """Collect and publish every configured OID for one device."""
        self.log.debug("Collecting raw SNMP statistics from device '{}'".format(device))
        dev_config = self.config['devices'][device]
        if ('oids' in dev_config):
            for (oid, metricName) in dev_config['oids'].items():
                if ((device, oid) in self.skip_list):
                    self.log.debug("Skipping OID '{}' ({}) on device '{}'".format(oid, metricName, device))
                    continue
                timestamp = time.time()
                value = self._get_value(device, oid, host, port, community)
                if (value is None):
                    continue
                self.log.debug("'{}' ({}) on device '{}' - value=[{}]".format(oid, metricName, device, value))
                path = '.'.join([self.config['path_prefix'], device, self.config['path_suffix'], metricName])
                metric = Metric(path=path, value=value, timestamp=timestamp, precision=self._precision(value), metric_type='GAUGE')
                self.publish_metric(metric)
_fixtures(WebFixture, LayoutScenarios)
def test_navbar_can_have_layout(web_fixture, layout_scenarios):
    """The layout's positioning option adds exactly its own CSS class to the navbar."""
    scenario = layout_scenarios
    widget = Navbar(web_fixture.view).use_layout(scenario.layout)
    [navbar] = widget.children
    positioning_classes = ['fixed-bottom', 'fixed-top', 'sticky-top']
    applied_classes = navbar.get_attribute('class').split(' ')
    if scenario.expected_css_class:
        assert scenario.expected_css_class in applied_classes
    for other_class in positioning_classes:
        if other_class != scenario.expected_css_class:
            assert other_class not in applied_classes
def get_interpreters(minimumVersion=None):
    """Discover all Python interpreters on this machine.

    Combines platform-specific, conda, relative and pipenv discovery, then
    filters the deduplicated result by *minimumVersion* (default: accept all).
    """
    if sys.platform.startswith('win'):
        discovered = _get_interpreters_win()
    else:
        discovered = _get_interpreters_posix()
    sources = (discovered, _get_interpreters_conda(), _get_interpreters_relative(), _get_interpreters_pipenv())
    # A set deduplicates interpreters found by more than one source.
    interpreters = {PythonInterpreter(path) for source in sources for path in source}
    return _select_interpreters(interpreters, minimumVersion or '0')
class Logger():
    """CI-aware build logger with fold groups, colors, symbols and timing.

    Detects the CI provider once at construction and adapts output folding
    (azure/github/travis/disabled) and color support accordingly. Builds and
    steps are timed; steps are wrapped in collapsible fold groups.
    """

    fold_mode: str
    colors_enabled: bool
    unicode_enabled: bool
    active_build_identifier: (str | None) = None
    build_start_time: (float | None) = None
    step_start_time: (float | None) = None
    active_fold_group_name: (str | None) = None

    def __init__(self) -> None:
        if ((sys.platform == 'win32') and hasattr(sys.stdout, 'reconfigure')):
            # Windows consoles may default to a legacy codepage.
            sys.stdout.reconfigure(encoding='utf8')
        self.unicode_enabled = file_supports_unicode(sys.stdout)
        ci_provider = detect_ci_provider()
        if (ci_provider == CIProvider.azure_pipelines):
            self.fold_mode = 'azure'
            self.colors_enabled = True
        elif (ci_provider == CIProvider.github_actions):
            self.fold_mode = 'github'
            self.colors_enabled = True
        elif (ci_provider == CIProvider.travis_ci):
            self.fold_mode = 'travis'
            self.colors_enabled = True
        elif (ci_provider == CIProvider.appveyor):
            self.fold_mode = 'disabled'
            self.colors_enabled = True
        else:
            self.fold_mode = 'disabled'
            self.colors_enabled = file_supports_color(sys.stdout)

    def build_start(self, identifier: str) -> None:
        """Print a build header and start the build timer."""
        self.step_end()
        c = self.colors
        description = build_description_from_identifier(identifier)
        print()
        print(f'{c.bold}{c.blue}Building {identifier} wheel{c.end}')
        print(f'{description}')
        print()
        self.build_start_time = time.time()
        self.active_build_identifier = identifier

    def build_end(self) -> None:
        """Close the current build, printing its total duration."""
        assert (self.build_start_time is not None)
        assert (self.active_build_identifier is not None)
        self.step_end()
        c = self.colors
        s = self.symbols
        duration = (time.time() - self.build_start_time)
        print()
        print(f'{c.green}{s.done} {c.end}{self.active_build_identifier} finished in {duration:.2f}s')
        self.build_start_time = None
        self.active_build_identifier = None

    def step(self, step_description: str) -> None:
        """End any running step and open a new fold group for this one."""
        self.step_end()
        self.step_start_time = time.time()
        self._start_fold_group(step_description)

    def step_end(self, success: bool=True) -> None:
        """Close the current step (no-op if none), printing its duration."""
        if (self.step_start_time is not None):
            self._end_fold_group()
            c = self.colors
            s = self.symbols
            duration = (time.time() - self.step_start_time)
            if success:
                print(f'{c.green}{s.done} {c.end}{duration:.2f}s'.rjust(78))
            else:
                print(f'{c.red}{s.error} {c.end}{duration:.2f}s'.rjust(78))
            self.step_start_time = None

    def step_end_with_error(self, error: (BaseException | str)) -> None:
        """Close the current step as failed and report *error*."""
        self.step_end(success=False)
        self.error(error)

    def quiet(self, message: str) -> None:
        """Print a low-priority (gray) message to stderr."""
        c = self.colors
        print(f'{c.gray}{message}{c.end}', file=sys.stderr)

    def notice(self, message: str) -> None:
        """Emit a notice, using GitHub Actions annotations when available."""
        if (self.fold_mode == 'github'):
            print(f'''::notice::{message}
''', file=sys.stderr)
        else:
            c = self.colors
            print(f'''{c.bold}Note{c.end}: {message}
''', file=sys.stderr)

    def warning(self, message: str) -> None:
        """Emit a warning, using GitHub Actions annotations when available."""
        if (self.fold_mode == 'github'):
            print(f'''::warning::{message}
''', file=sys.stderr)
        else:
            c = self.colors
            print(f'''{c.yellow}Warning{c.end}: {message}
''', file=sys.stderr)

    def error(self, error: (BaseException | str)) -> None:
        """Emit an error, using GitHub Actions annotations when available."""
        if (self.fold_mode == 'github'):
            print(f'''::error::{error}
''', file=sys.stderr)
        else:
            c = self.colors
            print(f'''{c.bright_red}Error{c.end}: {error}
''', file=sys.stderr)

    def _start_fold_group(self, name: str) -> None:
        self._end_fold_group()
        self.active_fold_group_name = name
        fold_start_pattern = FOLD_PATTERNS.get(self.fold_mode, DEFAULT_FOLD_PATTERN)[0]
        identifier = self._fold_group_identifier(name)
        print(fold_start_pattern.format(name=self.active_fold_group_name, identifier=identifier))
        print()
        sys.stdout.flush()

    def _end_fold_group(self) -> None:
        if self.active_fold_group_name:
            fold_start_pattern = FOLD_PATTERNS.get(self.fold_mode, DEFAULT_FOLD_PATTERN)[1]
            identifier = self._fold_group_identifier(self.active_fold_group_name)
            print(fold_start_pattern.format(name=self.active_fold_group_name, identifier=identifier))
            sys.stdout.flush()
            self.active_fold_group_name = None

    # Must be a staticmethod: it takes no self, yet is invoked as
    # self._fold_group_identifier(name) above.
    @staticmethod
    def _fold_group_identifier(name: str) -> str:
        """Slugify *name* into a short, [a-z0-9_]-only fold identifier."""
        identifier = re.sub('\\s+', '_', name)
        identifier = re.sub('[^A-Za-z\\d_]+', '', identifier)
        identifier = identifier.strip('_')
        return identifier.lower()[:20]

    # These must be properties: callers use them as attributes
    # (e.g. ``c = self.colors`` then ``c.bold``).
    @property
    def colors(self) -> Colors:
        return Colors(enabled=self.colors_enabled)

    @property
    def symbols(self) -> Symbols:
        return Symbols(unicode=self.unicode_enabled)
(name='plugin.vote', signature=['array', 'integer', 'integer'], login_required=False)
def plugin_vote(plugin_id, vote, **kwargs):
try:
request = kwargs.get('request')
except:
msg = _('Invalid request.')
raise ValidationError(msg)
try:
plugin = Plugin.objects.get(pk=plugin_id)
except Plugin.DoesNotExist:
msg = (_('Plugin with id %s does not exists.') % plugin_id)
raise ValidationError(msg)
if (not (int(vote) in range(1, 6))):
msg = (_('%s is not a valid vote (1-5).') % vote)
raise ValidationError(msg)
cookies = request.COOKIES
if request.user.is_anonymous:
cookie_name = ('vote-%s.%s.%s' % (ContentType.objects.get(app_label='plugins', model='plugin').pk, plugin_id, plugin.rating.field.key[:6]))
if (not request.COOKIES.get(cookie_name, False)):
ip_address = request.META['REMOTE_ADDR']
rating = plugin.rating.get_ratings().filter(cookie__isnull=False, ip_address=ip_address, date_changed__gte=(datetime.datetime.now() - datetime.timedelta(days=10))).order_by('-date_changed')
if len(rating):
cookies = {cookie_name: rating[0].cookie}
return [plugin.rating.add(score=int(vote), user=request.user, ip_address=request.META['REMOTE_ADDR'], cookies=cookies)] |
class TestBuildingMenu(CommandTest):
    """Tests for BuildingMenu: open/quit, attribute editing, automatic key
    generation, per-choice callbacks, nested multi-level keys and submenus."""

    def setUp(self):
        super(TestBuildingMenu, self).setUp()
        # Menu on room1 with a single 't' (title) choice editing the 'key' attribute.
        self.menu = BuildingMenu(caller=self.char1, obj=self.room1, title='test')
        self.menu.add_choice('title', key='t', attr='key')

    def test_quit(self):
        """Opening the menu installs its cmdset; 'q' removes it."""
        self.assertFalse(self.char1.cmdset.has('building_menu'))
        self.menu.open()
        self.assertTrue(self.char1.cmdset.has('building_menu'))
        self.call(CmdNoMatch(building_menu=self.menu), 'q')
        self.assertFalse(self.char1.cmdset.has('building_menu'))

    def test_setattr(self):
        """Entering the 't' choice and typing text sets room1.key."""
        key = self.room1.key
        self.menu.open()
        self.call(CmdNoMatch(building_menu=self.menu), 't')
        self.assertIsNotNone(self.menu.current_choice)
        self.call(CmdNoMatch(building_menu=self.menu), 'some new title')
        # Empty input leaves the choice and returns to the menu root.
        self.call(CmdNoMatch(building_menu=self.menu), '')
        self.assertIsNone(self.menu.current_choice)
        self.assertEqual(self.room1.key, 'some new title')
        self.call(CmdNoMatch(building_menu=self.menu), 'q')

    def test_add_choice_without_key(self):
        """Keys are auto-derived from the title; exhausting all substrings raises."""
        choices = []
        for i in range(20):
            choices.append(self.menu.add_choice('choice', attr='test'))
        self.menu._add_keys_choice()
        # Expected deterministic order: single letters, then longer substrings.
        keys = ['c', 'h', 'o', 'i', 'e', 'ch', 'ho', 'oi', 'ic', 'ce', 'cho', 'hoi', 'oic', 'ice', 'choi', 'hoic', 'oice', 'choic', 'hoice', 'choice']
        for i in range(20):
            self.assertEqual(choices[i].key, keys[i])
        # A 21st identical title has no unused substring left.
        self.menu.add_choice('choice', attr='test')
        with self.assertRaises(ValueError):
            self.menu._add_keys_choice()

    def test_callbacks(self):
        """on_enter/on_nomatch/on_leave callbacks fire with the documented args."""
        self.room1.key = 'room1'

        def on_enter(caller, menu):
            caller.msg('on_enter:{}'.format(menu.title))

        def on_nomatch(caller, string, choice):
            caller.msg('on_nomatch:{},{}'.format(string, choice.key))

        def on_leave(caller, obj):
            caller.msg('on_leave:{}'.format(obj.key))
        self.menu.add_choice('test', key='e', on_enter=on_enter, on_nomatch=on_nomatch, on_leave=on_leave)
        self.call(CmdNoMatch(building_menu=self.menu), 'e', 'on_enter:test')
        self.call(CmdNoMatch(building_menu=self.menu), 'ok', 'on_nomatch:ok,e')
        self.call(CmdNoMatch(building_menu=self.menu), '', 'on_leave:room1')
        self.call(CmdNoMatch(building_menu=self.menu), 'q')

    def test_multi_level(self):
        """Dotted/wildcard keys expose only the choices of the current level;
        empty input walks back up one level at a time."""

        def on_nomatch_t1(caller, menu):
            menu.move('whatever')

        def on_nomatch_t2(caller, menu):
            menu.move('t3')
        t1 = self.menu.add_choice('what', key='t1', on_nomatch=on_nomatch_t1)
        t2 = self.menu.add_choice('and', key='t1.*', on_nomatch=on_nomatch_t2)
        t3 = self.menu.add_choice('why', key='t1.*.t3')
        self.menu.open()
        # Root level: only t1 is reachable.
        self.assertIn(t1, self.menu.relevant_choices)
        self.assertNotIn(t2, self.menu.relevant_choices)
        self.assertNotIn(t3, self.menu.relevant_choices)
        self.assertIsNone(self.menu.current_choice)
        self.call(CmdNoMatch(building_menu=self.menu), 't1')
        self.assertEqual(self.menu.current_choice, t1)
        self.assertNotIn(t1, self.menu.relevant_choices)
        self.assertIn(t2, self.menu.relevant_choices)
        self.assertNotIn(t3, self.menu.relevant_choices)
        self.call(CmdNoMatch(building_menu=self.menu), 't2')
        self.assertEqual(self.menu.current_choice, t2)
        self.assertNotIn(t1, self.menu.relevant_choices)
        self.assertNotIn(t2, self.menu.relevant_choices)
        self.assertIn(t3, self.menu.relevant_choices)
        self.call(CmdNoMatch(building_menu=self.menu), 't3')
        self.assertEqual(self.menu.current_choice, t3)
        self.assertNotIn(t1, self.menu.relevant_choices)
        self.assertNotIn(t2, self.menu.relevant_choices)
        self.assertNotIn(t3, self.menu.relevant_choices)
        # Empty input pops one level each time, back to the root.
        self.call(CmdNoMatch(building_menu=self.menu), '')
        self.assertEqual(self.menu.current_choice, t2)
        self.assertNotIn(t1, self.menu.relevant_choices)
        self.assertNotIn(t2, self.menu.relevant_choices)
        self.assertIn(t3, self.menu.relevant_choices)
        self.call(CmdNoMatch(building_menu=self.menu), '')
        self.assertEqual(self.menu.current_choice, t1)
        self.assertNotIn(t1, self.menu.relevant_choices)
        self.assertIn(t2, self.menu.relevant_choices)
        self.assertNotIn(t3, self.menu.relevant_choices)
        self.call(CmdNoMatch(building_menu=self.menu), '')
        self.assertIn(t1, self.menu.relevant_choices)
        self.assertNotIn(t2, self.menu.relevant_choices)
        self.assertNotIn(t3, self.menu.relevant_choices)
        self.assertIsNone(self.menu.current_choice)
        self.call(CmdNoMatch(building_menu=self.menu), 'q')

    def test_submenu(self):
        """Opening a submenu swaps the active menu; leaving returns to the parent."""

        def open_exit(menu):
            menu.open_submenu('evennia.contrib.tests.Submenu', self.exit)
            return False
        self.menu.add_choice('exit', key='x', on_enter=open_exit)
        self.menu.open()
        self.call(CmdNoMatch(building_menu=self.menu), 'x')
        # The active menu is now the submenu stored on the character.
        self.menu = self.char1.ndb._building_menu
        self.call(CmdNoMatch(building_menu=self.menu), 't')
        self.call(CmdNoMatch(building_menu=self.menu), 'in')
        self.call(CmdNoMatch(building_menu=self.menu), '')
        self.call(CmdNoMatch(building_menu=self.menu), '')
        # Back in the parent menu, editing room1 again.
        self.menu = self.char1.ndb._building_menu
        self.assertEqual(self.char1.ndb._building_menu.obj, self.room1)
        self.call(CmdNoMatch(building_menu=self.menu), 'q')
        self.assertEqual(self.exit.key, 'in')
def test_jsonifability():
    """Twitter response wrappers must survive a JSON round-trip intact."""
    dict_response = TwitterDictResponse({'a': 'b'})
    decoded = json.loads(json.dumps(dict_response))
    assert (dict_response == decoded)
    assert (decoded['a'] == 'b')
    list_response = TwitterListResponse([1, 2, 3])
    decoded = json.loads(json.dumps(list_response))
    assert (list_response == decoded)
    assert (decoded[2] == 3)
def get_articulation_state(art):
    """Snapshot an articulation's full state.

    Returns a tuple of (base position, base orientation, base linear velocity,
    base angular velocity, joint positions, joint velocities), reading the
    base state from the articulation's first (root) link.
    """
    root = art.get_links()[0]
    pose = root.get_pose()
    return (
        pose.p,
        pose.q,
        root.get_velocity(),
        root.get_angular_velocity(),
        art.get_qpos(),
        art.get_qvel(),
    )
class TrainTest(unittest.TestCase):
    """Smoke test launching the example train() entry point via torch elastic."""

    def _run_train(cls) -> None:
        # Worker entry point run in each elastic subprocess.
        # NOTE(review): takes ``cls`` and is passed as ``self._run_train`` below —
        # presumably decorated @classmethod in the original source; confirm.
        train(embedding_dim=16, num_iterations=10)
    _if_asan
    def test_train_function(self) -> None:
        """Run train() on one node with two processes over a file-based c10d rendezvous."""
        with tempfile.TemporaryDirectory() as tmpdir:
            # max_restarts=0: fail fast instead of re-spawning crashed workers.
            lc = LaunchConfig(min_nodes=1, max_nodes=1, nproc_per_node=2, run_id=str(uuid.uuid4()), rdzv_backend='c10d', rdzv_endpoint=os.path.join(tmpdir, 'rdzv'), rdzv_configs={'store_type': 'file'}, start_method='spawn', monitor_interval=1, max_restarts=0)
            elastic_launch(config=lc, entrypoint=self._run_train)()
class TestOCSPEdDSA():
    """OCSP response signing with EdDSA keys (Ed25519/Ed448).

    EdDSA signatures take no separate hash algorithm, so sign() must reject an
    explicit algorithm and accept None.
    """

    .supported(only_if=(lambda backend: backend.ed25519_supported()), skip_message='Requires OpenSSL with Ed25519 support / OCSP')
    def test_invalid_algorithm(self, backend):
        """Passing a hash algorithm with an Ed25519 key must raise ValueError."""
        builder = ocsp.OCSPResponseBuilder()
        (cert, issuer) = _cert_and_issuer()
        private_key = ed25519.Ed25519PrivateKey.generate()
        (root_cert, _) = _generate_root(private_key, None)
        # Truncate to whole seconds: ASN.1 times carry no microseconds.
        current_time = datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None).replace(microsecond=0)
        this_update = (current_time - datetime.timedelta(days=1))
        next_update = (this_update + datetime.timedelta(days=7))
        revoked_date = (this_update - datetime.timedelta(days=300))
        builder = builder.responder_id(ocsp.OCSPResponderEncoding.NAME, root_cert).add_response(cert, issuer, hashes.SHA1(), ocsp.OCSPCertStatus.REVOKED, this_update, next_update, revoked_date, x509.ReasonFlags.key_compromise)
        with pytest.raises(ValueError):
            builder.sign(private_key, hashes.SHA256())
    .supported(only_if=(lambda backend: backend.ed25519_supported()), skip_message='Requires OpenSSL with Ed25519 support / OCSP')
    def test_sign_ed25519(self, backend):
        """Signing with Ed25519 and algorithm=None produces a verifiable response."""
        builder = ocsp.OCSPResponseBuilder()
        (cert, issuer) = _cert_and_issuer()
        private_key = ed25519.Ed25519PrivateKey.generate()
        (root_cert, _) = _generate_root(private_key, None)
        current_time = datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None).replace(microsecond=0)
        this_update = (current_time - datetime.timedelta(days=1))
        next_update = (this_update + datetime.timedelta(days=7))
        revoked_date = (this_update - datetime.timedelta(days=300))
        builder = builder.responder_id(ocsp.OCSPResponderEncoding.NAME, root_cert).add_response(cert, issuer, hashes.SHA1(), ocsp.OCSPCertStatus.REVOKED, this_update, next_update, revoked_date, x509.ReasonFlags.key_compromise)
        resp = builder.sign(private_key, None)
        assert (resp.certificate_status == ocsp.OCSPCertStatus.REVOKED)
        assert (resp.revocation_time == revoked_date)
        assert (resp.revocation_reason is x509.ReasonFlags.key_compromise)
        assert (resp.this_update == this_update)
        assert (resp.next_update == next_update)
        # EdDSA has no separate digest, hence no hash algorithm on the response.
        assert (resp.signature_hash_algorithm is None)
        assert (resp.signature_algorithm_oid == x509.SignatureAlgorithmOID.ED25519)
        private_key.public_key().verify(resp.signature, resp.tbs_response_bytes)
    .supported(only_if=(lambda backend: backend.ed448_supported()), skip_message='Requires OpenSSL with Ed448 support / OCSP')
    def test_sign_ed448(self, backend):
        """Same as test_sign_ed25519 but with an Ed448 key."""
        builder = ocsp.OCSPResponseBuilder()
        (cert, issuer) = _cert_and_issuer()
        private_key = ed448.Ed448PrivateKey.generate()
        (root_cert, _) = _generate_root(private_key, None)
        current_time = datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None).replace(microsecond=0)
        this_update = (current_time - datetime.timedelta(days=1))
        next_update = (this_update + datetime.timedelta(days=7))
        revoked_date = (this_update - datetime.timedelta(days=300))
        builder = builder.responder_id(ocsp.OCSPResponderEncoding.NAME, root_cert).add_response(cert, issuer, hashes.SHA1(), ocsp.OCSPCertStatus.REVOKED, this_update, next_update, revoked_date, x509.ReasonFlags.key_compromise)
        resp = builder.sign(private_key, None)
        assert (resp.certificate_status == ocsp.OCSPCertStatus.REVOKED)
        assert (resp.revocation_time == revoked_date)
        assert (resp.revocation_reason is x509.ReasonFlags.key_compromise)
        assert (resp.this_update == this_update)
        assert (resp.next_update == next_update)
        assert (resp.signature_hash_algorithm is None)
        assert (resp.signature_algorithm_oid == x509.SignatureAlgorithmOID.ED448)
        private_key.public_key().verify(resp.signature, resp.tbs_response_bytes)
def extract_hyperparameters_from_trainer(trainer):
    """Build a dict describing the training hyperparameters recorded on *trainer*.

    Copies the fields listed in ``_TRAINING_ARGS_KEYS`` from ``trainer.args``
    and derives aggregate values (distributed setup, total batch sizes,
    optimizer description, scheduler and precision settings).
    """
    args = trainer.args
    hyperparameters = {key: getattr(args, key) for key in _TRAINING_ARGS_KEYS}
    if args.parallel_mode not in [ParallelMode.NOT_PARALLEL, ParallelMode.NOT_DISTRIBUTED]:
        if args.parallel_mode == ParallelMode.DISTRIBUTED:
            hyperparameters['distributed_type'] = 'multi-GPU'
        else:
            hyperparameters['distributed_type'] = args.parallel_mode.value
    if args.world_size > 1:
        hyperparameters['num_devices'] = args.world_size
    if args.gradient_accumulation_steps > 1:
        hyperparameters['gradient_accumulation_steps'] = args.gradient_accumulation_steps
    # Report effective batch sizes only when they differ from the per-device ones.
    total_train_batch_size = args.train_batch_size * args.world_size * args.gradient_accumulation_steps
    if total_train_batch_size != hyperparameters['train_batch_size']:
        hyperparameters['total_train_batch_size'] = total_train_batch_size
    total_eval_batch_size = args.eval_batch_size * args.world_size
    if total_eval_batch_size != hyperparameters['eval_batch_size']:
        hyperparameters['total_eval_batch_size'] = total_eval_batch_size
    if args.adafactor:
        hyperparameters['optimizer'] = 'Adafactor'
    else:
        hyperparameters['optimizer'] = f'Adam with betas=({args.adam_beta1},{args.adam_beta2}) and epsilon={args.adam_epsilon}'
    hyperparameters['lr_scheduler_type'] = args.lr_scheduler_type.value
    if args.warmup_ratio != 0.0:
        hyperparameters['lr_scheduler_warmup_ratio'] = args.warmup_ratio
    if args.warmup_steps != 0.0:
        hyperparameters['lr_scheduler_warmup_steps'] = args.warmup_steps
    if args.max_steps != -1:
        hyperparameters['training_steps'] = args.max_steps
    else:
        hyperparameters['num_epochs'] = args.num_train_epochs
    if args.fp16:
        if trainer.use_amp:
            hyperparameters['mixed_precision_training'] = 'Native AMP'
        elif trainer.use_apex:
            hyperparameters['mixed_precision_training'] = f'Apex, opt level {args.fp16_opt_level}'
    if args.label_smoothing_factor != 0.0:
        hyperparameters['label_smoothing_factor'] = args.label_smoothing_factor
    return hyperparameters
def build_custom_optimizer(cfg: CfgNode, model: torch.nn.Module) -> torch.optim.Optimizer:
    """Build an SGD or AdamW optimizer with per-parameter learning rates.

    Backbone parameters and parameters matching CUSTOM_MULTIPLIER_NAME get
    their own LR multipliers; optional full-model gradient clipping is added
    by wrapping the optimizer class.

    Raises NotImplementedError for an unknown cfg.SOLVER.OPTIMIZER value.
    """
    params: List[Dict[(str, Any)]] = []
    # Guard against registering a shared parameter twice.
    memo: Set[torch.nn.parameter.Parameter] = set()
    custom_multiplier_name = cfg.SOLVER.CUSTOM_MULTIPLIER_NAME
    optimizer_type = cfg.SOLVER.OPTIMIZER
    for (key, value) in model.named_parameters(recurse=True):
        if (not value.requires_grad):
            continue
        if (value in memo):
            continue
        memo.add(value)
        lr = cfg.SOLVER.BASE_LR
        weight_decay = cfg.SOLVER.WEIGHT_DECAY
        if ('backbone' in key):
            lr = (lr * cfg.SOLVER.BACKBONE_MULTIPLIER)
        if match_name_keywords(key, custom_multiplier_name):
            lr = (lr * cfg.SOLVER.CUSTOM_MULTIPLIER)
        param = {'params': [value], 'lr': lr}
        # NOTE(review): per-group weight decay is only set for non-ADAMW
        # optimizers here; AdamW falls back to its constructor-level
        # weight_decay below — confirm this asymmetry is intended.
        if (optimizer_type != 'ADAMW'):
            param['weight_decay'] = weight_decay
        params += [param]

    def maybe_add_full_model_gradient_clipping(optim):
        # Wrap the optimizer class so step() first clips the global grad norm.
        clip_norm_val = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE
        enable = (cfg.SOLVER.CLIP_GRADIENTS.ENABLED and (cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == 'full_model') and (clip_norm_val > 0.0))

        class FullModelGradientClippingOptimizer(optim):
            def step(self, closure=None):
                all_params = itertools.chain(*[x['params'] for x in self.param_groups])
                torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val)
                super().step(closure=closure)
        return (FullModelGradientClippingOptimizer if enable else optim)
    if (optimizer_type == 'SGD'):
        optimizer = maybe_add_full_model_gradient_clipping(torch.optim.SGD)(params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM, nesterov=cfg.SOLVER.NESTEROV)
    elif (optimizer_type == 'ADAMW'):
        optimizer = maybe_add_full_model_gradient_clipping(torch.optim.AdamW)(params, cfg.SOLVER.BASE_LR, weight_decay=cfg.SOLVER.WEIGHT_DECAY)
    else:
        raise NotImplementedError(f'no optimizer type {optimizer_type}')
    # Per-parameter clipping (if configured) is handled by the generic helper.
    if (not (cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == 'full_model')):
        optimizer = maybe_add_gradient_clipping(cfg, optimizer)
    return optimizer
class Gamma(Distribution):
    """Gamma distribution.

    With the default ``input_type=None`` the constructor interprets
    (mean, stdv) as the distribution's moments and converts them to shape and
    rate parameters; otherwise they are taken directly as rate and shape.
    """

    def __init__(self, name, mean, stdv, input_type=None, startpoint=None):
        if input_type is None:
            # Moment matching: shape = mean^2 / var, rate = mean / var.
            variance = stdv ** 2
            shape = mean ** 2 / variance
            rate = mean / variance
        else:
            rate, shape = mean, stdv
        self.dist_obj = gamma(a=shape, scale=1 / rate)
        super().__init__(name=name, dist_obj=self.dist_obj, startpoint=startpoint)
        self.dist_type = 'Gamma'
def import_CUHK03(dataset_dir, detected=False):
    """Index the CUHK03 dataset into per-identity image path lists.

    Scans ``<dataset_dir>/CUHK03/<labeled|detected>/<campair>/cam{1,2}`` and
    returns a dict mapping identity id (``'<campair suffix>-<person id>'``)
    to ``[cam1_paths, cam2_paths]``.
    """
    cuhk03_dir = os.path.join(dataset_dir, 'CUHK03')
    if not os.path.exists(cuhk03_dir):
        # Fixed NameError: original called undefined 'Print'.
        print('Please Download the CUHK03 Dataset')
    variant = 'detected' if detected else 'labeled'
    cuhk03_dir = os.path.join(cuhk03_dir, variant)
    name_dict = {}
    for campair in os.listdir(cuhk03_dir):
        for cam_index, cam_name in enumerate(('cam1', 'cam2')):
            cam_dir = os.path.join(cuhk03_dir, campair, cam_name)
            for file in os.listdir(cam_dir):
                # Identity is the campair suffix plus the leading file id,
                # e.g. campair 'P1' + file '3-01.png' -> '1-3'.
                person_id = ((campair[1:] + '-') + file.split('-')[0])
                if person_id not in name_dict:
                    name_dict[person_id] = [[], []]
                name_dict[person_id][cam_index].append(os.path.join(cam_dir, file))
    return name_dict
def idx2data(batchgroup, x_data, x_char_data, answerData, lengthData):
    """Gather a minibatch of examples selected by the indices in *batchgroup*.

    Returns (inputs, answers, lengths, char_inputs). Sequence entries are
    shallow-copied so callers may mutate them without touching the originals.
    """
    x_minibatch = [x_data[idx][:] for idx in batchgroup]
    y_minibatch = [answerData[idx][:] for idx in batchgroup]
    x_char_minibatch = [x_char_data[idx][:] for idx in batchgroup]
    xlen_minibatch = [lengthData[idx] for idx in batchgroup]
    return (x_minibatch, y_minibatch, xlen_minibatch, x_char_minibatch)
def build_cpd_dawg(morph, cpd, min_word_freq):
    """Build a ConditionalProbDistDAWG of tag probabilities for frequent words.

    Words below *min_word_freq* are dropped, as are words whose tags all share
    the same probability (they carry no disambiguating information).
    """
    frequent_words = [word for (word, fd) in cpd.items() if fd.freqdist().N() >= min_word_freq]
    word_probs = ((word, _tag_probabilities(morph, word, cpd)) for word in frequent_words)
    informative = ((word, probs) for (word, probs) in word_probs if not _all_the_same(probs))
    dawg_data = (((word, tag), prob) for (word, probs) in informative for (tag, prob) in probs.items())
    return ConditionalProbDistDAWG(dawg_data)
def write_file_to_zookeeper(zookeeper: KazooClient, source_file: BinaryIO, dest_path: str) -> bool:
    """Write *source_file*'s contents to the ZooKeeper node *dest_path*.

    Returns True if the node was updated, False if its contents already match.
    Logs a unified diff when both old and new contents are valid UTF-8.

    Raises NodeDoesNotExistError when the node is absent and
    UnexpectedChangeError when it was modified concurrently (version mismatch).
    """
    logger.info('Writing to %s in ZooKeeper...', dest_path)
    try:
        (current_data, stat) = zookeeper.get(dest_path)
    except NoNodeError:
        raise NodeDoesNotExistError
    new_data = source_file.read()
    if current_data == new_data:
        logger.info('No changes detected. Not writing.')
        return False
    try:
        old_text = current_data.decode('utf8')
        new_text = new_data.decode('utf8')
    except UnicodeDecodeError:
        logger.info('Skipping diff, data appears to be binary.')
    else:
        for diff_line in difflib.unified_diff(old_text.splitlines(), new_text.splitlines()):
            logger.info(diff_line)
    try:
        # Conditional write: fails if anyone changed the node since our read.
        zookeeper.set(dest_path, new_data, version=stat.version)
    except BadVersionError:
        raise UnexpectedChangeError
    logger.info('Wrote data to Zookeeper.')
    return True
def and_conditional_maps(m1: TypeMap, m2: TypeMap, use_meet: bool=False) -> TypeMap:
    """Combine two type maps for expressions joined by 'and'.

    Entries from m2 win over m1, except that m1 entries are kept when m2 has
    no matching expression or when m1's narrowing is AnyType. With *use_meet*,
    expressions present in both maps get the meet of the two types instead.
    A None input (unreachable branch) yields None.
    """
    if m1 is None or m2 is None:
        return None
    result = m2.copy()
    m2_hashes = {literal_hash(expr) for expr in m2}
    for expr in m1:
        narrowed = m1[expr]
        if literal_hash(expr) not in m2_hashes or isinstance(get_proper_type(narrowed), AnyType):
            result[expr] = narrowed
    if use_meet:
        for expr1 in m1:
            for expr2 in m2:
                if literal_hash(expr1) == literal_hash(expr2):
                    result[expr1] = meet_types(m1[expr1], m2[expr2])
    return result
def get_config_from_root(root):
    """Read versioneer configuration from the ``[versioneer]`` section of
    ``setup.cfg`` under *root* and return it as a VersioneerConfig."""
    setup_cfg = os.path.join(root, 'setup.cfg')
    parser = configparser.ConfigParser()
    with open(setup_cfg, 'r') as cfg_file:
        parser.read_file(cfg_file)
    # Query VCS via parser.get first so a missing section/option raises the
    # configparser error callers expect.
    VCS = parser.get('versioneer', 'VCS')
    section = parser['versioneer']
    cfg = VersioneerConfig()
    cfg.VCS = VCS
    cfg.style = section.get('style', '')
    cfg.versionfile_source = section.get('versionfile_source')
    cfg.versionfile_build = section.get('versionfile_build')
    tag_prefix = section.get('tag_prefix')
    # An explicitly-quoted empty prefix (or no value) means "no prefix".
    cfg.tag_prefix = '' if tag_prefix in ("''", '""', None) else tag_prefix
    cfg.parentdir_prefix = section.get('parentdir_prefix')
    cfg.verbose = section.get('verbose')
    return cfg
def qt_message_handler(msg_type: qtcore.QtMsgType, context: qtcore.QMessageLogContext, msg: Optional[str]) -> None:
    """Forward Qt log messages into Python's logging system.

    Maps Qt severities to logging levels, demotes a list of known-harmless Qt
    messages to DEBUG, and emits the record via the ``log.qt`` logger with the
    original file/line/function context attached.
    """
    # Qt severity -> Python logging level.
    qt_to_logging = {qtcore.QtMsgType.QtDebugMsg: logging.DEBUG, qtcore.QtMsgType.QtWarningMsg: logging.WARNING, qtcore.QtMsgType.QtCriticalMsg: logging.ERROR, qtcore.QtMsgType.QtFatalMsg: logging.CRITICAL, qtcore.QtMsgType.QtInfoMsg: logging.INFO}
    # Known-noisy Qt messages (matched by prefix) that are demoted to DEBUG.
    suppressed_msgs = ['libpng warning: iCCP: Not recognizing known sRGB profile that has been edited', 'libpng warning: iCCP: known incorrect sRGB profile', 'OpenType support missing for script ', 'QNetworkReplyImplPrivate::error: Internal problem, this method must only be called once.', 'load glyph failed ', 'content-type missing in HTTP POST, defaulting to application/x-www-form-urlencoded. Use QNetworkRequest::setHeader() to fix this problem.', 'Using blocking call!', '"Method "GetAll" with signature "s" on interface "org.freedesktop.DBus.Properties" doesn\'t exist', '"Method \\"GetAll\\" with signature \\"s\\" on interface \\"org.freedesktop.DBus.Properties\\" doesn\'t exist\\n"', 'WOFF support requires QtWebKit to be built with zlib support.', 'QXcbWindow: Unhandled client message: "_E_', 'QXcbWindow: Unhandled client message: "_ECORE_', 'QXcbWindow: Unhandled client message: "_GTK_', 'SetProcessDpiAwareness failed:', 'QObject::connect: Cannot connect (null)::stateChanged(QNetworkSession::State) to QNetworkReplyHttpImpl::_q_networkSessionStateChanged(QNetworkSession::State)', "Image of format '' blocked because it is not considered safe. If you are sure it is safe to do so, you can white-list the format by setting the environment variable QTWEBKIT_IMAGEFORMAT_WHITELIST=", 'QSslSocket: cannot resolve ', 'QSslSocket: cannot call unresolved function ', 'Remote debugging server started successfully. Try pointing a Chromium-based browser to ', 'QXcbClipboard: SelectionRequest too old', 'QXcbWindow: Unhandled client message: ""', 'QObject::disconnect: Unexpected null parameter', 'Attribute Qt::AA_ShareOpenGLContexts must be set before QCoreApplication is created.', 'GL format 0 is not supported']
    if (sys.platform == 'darwin'):
        suppressed_msgs += ['virtual void QSslSocketBackendPrivate::transmit() SSLRead failed with: -9805']
    if (not msg):
        msg = 'Logged empty message!'
    if any((msg.strip().startswith(pattern) for pattern in suppressed_msgs)):
        level = logging.DEBUG
    elif ((context.category == 'qt.webenginecontext') and (msg.strip().startswith('GL Type: ') or msg.strip().startswith('GLImplementation:'))):
        # WebEngine GL diagnostics are informative only.
        level = logging.DEBUG
    else:
        level = qt_to_logging[msg_type]
    if (context.line is None):
        lineno = (- 1)
    else:
        lineno = context.line
    if (context.function is None):
        func = 'none'
    elif (':' in context.function):
        # Quote C++ signatures that contain scope separators.
        func = '"{}"'.format(context.function)
    else:
        func = context.function
    if ((context.category is None) or (context.category == 'default')):
        name = 'qt'
    else:
        name = ('qt-' + context.category)
    if (msg.splitlines()[0] == 'This application failed to start because it could not find or load the Qt platform plugin "xcb".'):
        msg += '\n\nOn Archlinux, this should fix the problem:\n pacman -S libxkbcommon-x11'
        # A fatal Qt error follows; let Qt abort without a faulthandler dump.
        faulthandler.disable()
    assert (_args is not None)
    if _args.debug:
        stack: Optional[str] = ''.join(traceback.format_stack())
    else:
        stack = None
    record = log.qt.makeRecord(name=name, level=level, fn=context.file, lno=lineno, msg=msg, args=(), exc_info=None, func=func, sinfo=stack)
    log.qt.handle(record)
class TemplateTagsTest(unittest.TestCase):
    """Tests for the cms template tag library."""

    def test_iso_time_tag(self):
        """iso_time_tag renders a <time> element with an ISO-8601 datetime."""
        timestamp = datetime.datetime(2014, 1, 1, 12, 0)
        template = Template('{% load cms %}{% iso_time_tag now %}')
        rendered = template.render(Context({'now': timestamp}))
        expected = '<time datetime="2014-01-01T12:00:00"><span class="say-no-more">2014-</span>01-01</time>'
        self.assertIn(expected, rendered)
def str_q2b(text):
    """Convert full-width (zenkaku) characters in *text* to half-width.

    The ideographic space (U+3000) becomes an ASCII space and the fullwidth
    ASCII variants (U+FF01..U+FF5E) are shifted down to their ASCII
    counterparts; all other characters are passed through unchanged.
    """
    def _to_halfwidth(char):
        code = ord(char)
        if code == 12288:  # U+3000 ideographic space -> ' '
            return ' '
        if 65281 <= code <= 65374:  # fullwidth ASCII block -> ASCII
            return chr(code - 65248)
        return char
    # join() avoids the quadratic cost of repeated string concatenation.
    return ''.join(_to_halfwidth(char) for char in text)
.skip(reason='web RTC is disabled')
.parametrize('matrix_server_count', [1])
.parametrize('number_of_transports', [2])
.parametrize('capabilities', [CapabilitiesConfig(web_rtc=True)])
def test_web_rtc_message_sync(matrix_transports):
    """Messages queued on transport0 must arrive at transport1 over WebRTC.

    Starts two transports, waits for the WebRTC channel to become ready in
    both directions, sends five signed Processed messages, and waits until
    transport1's message callback has received all of them.
    """
    (transport0, transport1) = matrix_transports
    transport1_messages = set()
    raiden_service0 = MockRaidenService()
    raiden_service1 = MockRaidenService()

    def mock_handle_web_rtc_messages(message_data, partner_address):
        # Capture parsed messages instead of processing them.
        messages = validate_and_parse_message(message_data, partner_address)
        transport1_messages.update(messages)
    transport1._web_rtc_manager._handle_message_callback = mock_handle_web_rtc_messages
    transport0.start(raiden_service0, None)
    transport1.start(raiden_service1, None)
    # Wait until the WebRTC channel is ready in both directions.
    with Timeout(TIMEOUT_WEB_RTC_CONNECTION):
        while (not transport0._web_rtc_manager.has_ready_channel(raiden_service1.address)):
            gevent.sleep(1)
        while (not transport1._web_rtc_manager.has_ready_channel(raiden_service0.address)):
            gevent.sleep(1)
    queue_identifier = QueueIdentifier(recipient=transport1._raiden_service.address, canonical_identifier=factories.UNIT_CANONICAL_ID)
    raiden0_queues = views.get_all_messagequeues(views.state_from_raiden(raiden_service0))
    raiden0_queues[queue_identifier] = []
    for i in range(5):
        message = Processed(message_identifier=MessageID(i), signature=EMPTY_SIGNATURE)
        raiden0_queues[queue_identifier].append(message)
        transport0._raiden_service.sign(message)
        transport0.send_async([MessagesQueue(queue_identifier, [(message, None)])])
    # All five messages must arrive within the receive timeout.
    with Timeout(TIMEOUT_MESSAGE_RECEIVE):
        while (not (len(transport1_messages) == 5)):
            gevent.sleep(0.1)
class TestApi():
    """Smoke tests driving the API through APIExerciser."""

    def __init__(self):
        self.apiex = APIExerciser.APIExerciser(None, True, 'TestUser', 'pwhere')

    def test_login(self):
        """A login attempt with bad credentials must be rejected."""
        assert (self.apiex.api.login('crapp', 'wrong pw') is False)

    def test_random(self):
        """Exercise 100 random API calls without raising."""
        for _ in range(100):
            self.apiex.testAPI()
def main(args):
    """Train a MAGNA graph-attention model on a DGL dataset and report accuracy.

    Loads the dataset selected by ``args``, builds the graph (optionally with
    self-loops), trains with Adam + cosine-annealing LR + gradient clipping,
    checkpoints the best-validation model, and logs the final test accuracy.

    Args:
        args: parsed CLI namespace; fields used include cuda, self_loop,
            num_layers, num_heads, num_hidden, epochs, lr, weight_decay,
            patience, early_stop, fastmode, clip, and MAGNA hyper-parameters.

    Side effects: writes model checkpoints under ``preprocess(args)``'s path,
    logs progress, and removes non-best checkpoints at the end.
    """
    model_save_path = preprocess(args)
    data = load_data(args)
    features = torch.FloatTensor(data.features)
    labels = torch.LongTensor(data.labels)
    # BoolTensor masks exist only on newer torch; fall back to ByteTensor.
    if hasattr(torch, 'BoolTensor'):
        train_mask = torch.BoolTensor(data.train_mask)
        val_mask = torch.BoolTensor(data.val_mask)
        test_mask = torch.BoolTensor(data.test_mask)
    else:
        train_mask = torch.ByteTensor(data.train_mask)
        val_mask = torch.ByteTensor(data.val_mask)
        test_mask = torch.ByteTensor(data.test_mask)
    # If any node has an all-zero feature row, shift EVERY feature by a tiny
    # epsilon — presumably to avoid divide-by-zero in downstream feature
    # normalization; TODO confirm intent (the shift is global, not per-row).
    all_zero_indexes = (features.sum(dim=(- 1)) == 0)
    num_zero = all_zero_indexes.sum()
    if (num_zero > 0):
        features = (features + 1e-15)
    num_feats = features.shape[1]
    n_classes = data.num_labels
    n_edges = data.graph.number_of_edges()
    logging.info(("----Data statistics------'\n #Edges %d\n #Classes %d \n #Train samples %d\n #Val samples %d\n #Test samples %d" % (n_edges, n_classes, train_mask.int().sum().item(), val_mask.int().sum().item(), test_mask.int().sum().item())))
    cuda = args.cuda
    if cuda:
        features = features.cuda()
        labels = labels.cuda()
        train_mask = train_mask.cuda()
        val_mask = val_mask.cuda()
        test_mask = test_mask.cuda()
    # Graph construction: with self_loop==1, strip any existing self-loops and
    # add exactly one per node; otherwise only patch zero-in-degree nodes
    # (which would otherwise receive no messages) with a self-edge.
    g = data.graph
    if (args.self_loop == 1):
        g.remove_edges_from(nx.selfloop_edges(g))
        g = DGLGraph(g)
        g.add_edges(g.nodes(), g.nodes())
    else:
        g = DGLGraph(g)
        zero_degree_idxes = (g.in_degrees(np.arange(0, g.number_of_nodes())) == 0)
        num_zero_degree = zero_degree_idxes.sum()
        if (num_zero_degree > 0):
            zero_degree_nodes = torch.arange(0, g.number_of_nodes(), dtype=torch.long)[zero_degree_idxes]
            g.add_edges(zero_degree_nodes, zero_degree_nodes)
    (g, self_loop_number) = reorginize_self_loop_edges(graph=g)
    n_edges = g.number_of_edges()
    # Attach a consecutive id to every edge via edata['e_id'].
    edge_id = torch.arange(0, n_edges, dtype=torch.long)
    g.edata.update({'e_id': edge_id})
    if cuda:
        for (key, value) in g.ndata.items():
            g.ndata[key] = value.cuda()
        for (key, value) in g.edata.items():
            g.edata[key] = value.cuda()
    heads = ([args.num_heads] * args.num_layers)
    model = MAGNA(g=g, num_layers=args.num_layers, input_dim=num_feats, project_dim=args.project_dim, hidden_dim=args.num_hidden, num_classes=n_classes, heads=heads, feat_drop=args.in_drop, attn_drop=args.attn_drop, alpha=args.alpha, hop_num=args.hop_num, top_k=args.top_k, topk_type=args.topk_type, edge_drop=args.edge_drop, layer_norm=args.layer_norm, feed_forward=args.feed_forward, self_loop_number=self_loop_number, self_loop=(args.self_loop == 1), head_tail_shared=(args.head_tail_shared == 1), negative_slope=args.negative_slope)
    if cuda:
        model = model.cuda()
    logging.info(model)
    if args.early_stop:
        stopper = EarlyStopping(patience=100)
    if cuda:
        model.cuda()
    loss_fcn = torch.nn.CrossEntropyLoss()
    weight_decay = args.weight_decay
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=weight_decay)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer=optimizer, T_max=args.epochs, eta_min=1e-08)
    dur = []
    best_valid_acc = 0.0
    test_acc = 0.0
    patience_count = 0
    best_model_name = None
    for epoch in range(args.epochs):
        model.train()
        # Exclude the first 3 epochs from per-epoch timing (warm-up).
        if (epoch >= 3):
            t0 = time.time()
        logits = model(features)
        loss = loss_fcn(logits[train_mask], labels[train_mask])
        optimizer.zero_grad()
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
        optimizer.step()
        scheduler.step()
        if (epoch >= 3):
            dur.append((time.time() - t0))
        train_acc = accuracy(logits[train_mask], labels[train_mask])
        if args.fastmode:
            # fastmode reuses the training-pass logits for validation accuracy.
            val_acc = accuracy(logits[val_mask], labels[val_mask])
        else:
            # Full evaluation re-runs the model (eval mode); note this
            # overwrites `logits`, so the test accuracy below is eval-mode.
            (val_acc, logits) = evaluate(model, features, labels, val_mask)
        if args.early_stop:
            if stopper.step(val_acc, model):
                break
        if (val_acc >= best_valid_acc):
            best_valid_acc = val_acc
            acc = accuracy(logits[test_mask], labels[test_mask])
            model_name = (((((str(epoch) + '_vacc_') + str(best_valid_acc)) + '_tacc_') + str(acc)) + '.pt')
            # NOTE(review): checkpoints are only written when NOT running on
            # CUDA — confirm whether GPU runs are intentionally not saved.
            if (not cuda):
                model_path_name = os.path.join(model_save_path, model_name)
                save_model(model, model_save_path=model_path_name, step=epoch)
            best_model_name = model_name
            test_acc = acc
            patience_count = 0
        else:
            patience_count = (patience_count + 1)
        logging.info('Epoch {:05d} | Time(s) {:.4f} | Loss {:.4f} | TrainAcc {:.4f} | ValAcc {:.4f} | ETputs(KTEPS) {:.2f}'.format(epoch, np.mean(dur), loss.item(), train_acc, val_acc, ((n_edges / np.mean(dur)) / 1000)))
        # Manual patience-based early exit (independent of EarlyStopping above).
        if (patience_count >= args.patience):
            break
    logging.info('\n')
    if args.early_stop:
        # Reload the EarlyStopping checkpoint before the final evaluation.
        model.load_state_dict(torch.load('es_checkpoint.pt'))
    (final_test_acc, _) = evaluate(model, features, labels, test_mask)
    logging.info('Best validation acc: {}\nBest test acc: {} \nFinal test acc: {}'.format(best_valid_acc, test_acc, final_test_acc))
    logging.info('Best model name: {}'.format(best_model_name))
    remove_models(model_save_path, best_model_name=best_model_name)
def subsample_labels(labels: torch.Tensor, num_samples: int, positive_fraction: float, bg_label: int):
    """Randomly pick up to ``num_samples`` indices from *labels*, split into
    positives and negatives.

    Positives are entries that are neither ``-1`` (ignore) nor ``bg_label``;
    negatives are entries equal to ``bg_label``.  At most
    ``num_samples * positive_fraction`` positives are taken, and negatives
    fill the remainder (each capped by availability).

    Returns:
        (pos_idx, neg_idx): index tensors into *labels*.
    """
    fg_candidates = nonzero_tuple((labels != (- 1)) & (labels != bg_label))[0]
    bg_candidates = nonzero_tuple(labels == bg_label)[0]
    # Cap each side by both the requested budget and availability.
    fg_count = min(fg_candidates.numel(), int(num_samples * positive_fraction))
    bg_count = min(bg_candidates.numel(), num_samples - fg_count)
    # Random permutation, then take a prefix of the desired size.
    fg_perm = torch.randperm(fg_candidates.numel(), device=fg_candidates.device)[:fg_count]
    bg_perm = torch.randperm(bg_candidates.numel(), device=bg_candidates.device)[:bg_count]
    return (fg_candidates[fg_perm], bg_candidates[bg_perm])
class EchoesHintDistributor(HintDistributor):
    """Hint-distribution rules specific to this game (Echoes)."""
    def num_joke_hints(self) -> int:
        """Number of joke hints to place."""
        return 2
    async def get_guaranteed_hints(self, patches: GamePatches, prefill: PreFillParams) -> list[HintTargetPrecision]:
        """Return the always-present hints: one LIGHT_SUIT_LOCATION hint and
        three GUARDIAN hints, each with detailed item precision and no owner."""
        # Helper: pair a pickup index with a detailed-precision hint.
        def g(index, loc):
            return (PickupIndex(index), PrecisionPair(loc, HintItemPrecision.DETAILED, include_owner=False))
        # NOTE(review): indices 24/43/79/115 are hard-coded pickup locations —
        # confirm they match the intended light-suit/guardian pickups.
        return [g(24, HintLocationPrecision.LIGHT_SUIT_LOCATION), g(43, HintLocationPrecision.GUARDIAN), g(79, HintLocationPrecision.GUARDIAN), g(115, HintLocationPrecision.GUARDIAN)]
    async def assign_other_hints(self, patches: GamePatches, identifiers: list[NodeIdentifier], prefill: PreFillParams) -> GamePatches:
        """Assign one RED_TEMPLE_KEY_SET hint per dark temple to randomly
        chosen unused hint locations.

        NOTE: mutates the caller's ``identifiers`` list by removing each
        identifier that gets used.
        """
        all_hint_identifiers = [identifier for identifier in identifiers if (identifier not in patches.hints)]
        prefill.rng.shuffle(all_hint_identifiers)
        temple_hints = list(enum_lib.iterate_enum(HintDarkTemple))
        # Stop when either free locations or temples run out.
        while (all_hint_identifiers and temple_hints):
            identifier = all_hint_identifiers.pop()
            patches = patches.assign_hint(identifier, Hint(HintType.RED_TEMPLE_KEY_SET, None, dark_temple=temple_hints.pop(0)))
            identifiers.remove(identifier)
        return patches
    def precision_pair_weighted_list(self) -> list[PrecisionPair]:
        """Build the weighted pool of precision pairs: each (location, item,
        include_owner) tier is repeated ``quantity`` times."""
        tiers = {(HintLocationPrecision.DETAILED, HintItemPrecision.DETAILED, False): 3, (HintLocationPrecision.DETAILED, HintItemPrecision.DETAILED, True): 2, (HintLocationPrecision.DETAILED, HintItemPrecision.PRECISE_CATEGORY, True): 2, (HintLocationPrecision.DETAILED, HintItemPrecision.GENERAL_CATEGORY, True): 1, (HintLocationPrecision.REGION_ONLY, HintItemPrecision.DETAILED, False): 2, (HintLocationPrecision.REGION_ONLY, HintItemPrecision.PRECISE_CATEGORY, True): 1}
        hints = []
        for (params, quantity) in tiers.items():
            hints.extend(([PrecisionPair(*params)] * quantity))
        return hints
    def _get_relative_hint_providers(self):
        """Relative-hint providers: two area-relative (with/without distance
        offset) and one index-relative provider."""
        return [self._relative(HintLocationPrecision.RELATIVE_TO_AREA, True, HintRelativeAreaName.NAME, 4), self._relative(HintLocationPrecision.RELATIVE_TO_AREA, False, HintRelativeAreaName.NAME, 3), self._relative(HintLocationPrecision.RELATIVE_TO_INDEX, True, HintItemPrecision.DETAILED, 4)]
    async def assign_precision_to_hints(self, patches: GamePatches, rng: Random, player_pool: PlayerPool, player_state: PlayerState) -> GamePatches:
        """Attach precision to hints when item hints are enabled; otherwise
        replace precision-less hints with jokes."""
        assert isinstance(player_pool.configuration, EchoesConfiguration)
        if player_pool.configuration.hints.item_hints:
            return self.add_hints_precision(player_state, patches, rng)
        else:
            return self.replace_hints_without_precision_with_jokes(patches)
def clean_folder(folder):
    """Remove every entry directly inside *folder* (the folder itself is kept).

    Regular files and symlinks are unlinked; real directories are removed
    recursively.  Failures are reported to stdout and do not stop the sweep.
    """
    for entry in os.listdir(folder):
        path = os.path.join(folder, entry)
        try:
            if (os.path.isdir(path) and (not os.path.islink(path))):
                # Real (non-symlink) directory: remove the whole tree.
                shutil.rmtree(path)
            elif (os.path.isfile(path) or os.path.islink(path)):
                # File, or symlink of any kind (including symlink-to-dir).
                os.unlink(path)
        except Exception as e:
            # Best-effort: report and continue with the remaining entries.
            print(f'Failed to delete {path}. Reason: {e}')
class SAPM():
    """Benchmark harness for pvlib's SAPM cell-temperature model.

    ``setup`` builds ``self.sapm_cell_wrapper`` with a uniform
    ``(poa_global, temp_air, wind_speed)`` signature across pvlib versions;
    ``time_sapm_cell`` is the timed body (asv-style naming — TODO confirm).
    """
    def setup(self):
        # Populates self.poa, self.tamb, self.wind_speed (defined elsewhere).
        set_weather_data(self)
        # pvlib >= 0.7.0 moved the model to pvlib.temperature.sapm_cell with
        # explicit model parameters.
        if (Version(pvlib.__version__) >= Version('0.7.0')):
            kwargs = pvlib.temperature.TEMPERATURE_MODEL_PARAMETERS['sapm']
            kwargs = kwargs['open_rack_glass_glass']
            self.sapm_cell_wrapper = partial(pvlib.temperature.sapm_cell, **kwargs)
        else:
            # Older API: sapm_celltemp(poa_global, wind_speed, temp_air) —
            # note the argument-order difference the wrapper hides.
            sapm_celltemp = pvlib.pvsystem.sapm_celltemp
            def sapm_cell_wrapper(poa_global, temp_air, wind_speed):
                return sapm_celltemp(poa_global, wind_speed, temp_air)
            self.sapm_cell_wrapper = sapm_cell_wrapper
    def time_sapm_cell(self):
        # Timed benchmark: one call to the wrapped cell-temperature model.
        self.sapm_cell_wrapper(self.poa, self.tamb, self.wind_speed)
class example_result(object):
    """Thrift result struct for the ``example`` RPC method.

    Fields (by thrift field id):
        0: success (BOOL)   — the call's return value.
        1: exc (STRUCT)     — ExpectedException raised by the handler.
        2: err (STRUCT)     — baseplate Error raised by the handler.

    NOTE(review): this follows the shape of Thrift-generated code; if it is
    in fact generated, prefer regenerating over hand-editing.
    """
    __slots__ = ('success', 'exc', 'err')
    def __init__(self, success=None, exc=None, err=None):
        self.success = success
        self.exc = exc
        self.err = err
    def read(self, iprot):
        """Deserialize this struct from *iprot*.

        Uses the accelerated C decoder when available, otherwise walks the
        wire format field-by-field, skipping unknown ids/types.
        """
        if ((iprot._fast_decode is not None) and isinstance(iprot.trans, TTransport.CReadableTransport) and (self.thrift_spec is not None)):
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if (ftype == TType.STOP):
                break
            if (fid == 0):
                if (ftype == TType.BOOL):
                    self.success = iprot.readBool()
                else:
                    # Type mismatch: skip the value to stay in sync.
                    iprot.skip(ftype)
            elif (fid == 1):
                if (ftype == TType.STRUCT):
                    self.exc = ExpectedException.read(iprot)
                else:
                    iprot.skip(ftype)
            elif (fid == 2):
                if (ftype == TType.STRUCT):
                    self.err = baseplate.thrift.ttypes.Error.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                # Unknown field id: skip for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        """Serialize this struct to *oprot*; unset (None) fields are omitted."""
        if ((oprot._fast_encode is not None) and (self.thrift_spec is not None)):
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('example_result')
        if (self.success is not None):
            oprot.writeFieldBegin('success', TType.BOOL, 0)
            oprot.writeBool(self.success)
            oprot.writeFieldEnd()
        if (self.exc is not None):
            oprot.writeFieldBegin('exc', TType.STRUCT, 1)
            self.exc.write(oprot)
            oprot.writeFieldEnd()
        if (self.err is not None):
            oprot.writeFieldBegin('err', TType.STRUCT, 2)
            self.err.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # No validation constraints on this struct.
        return
    def __repr__(self):
        L = [('%s=%r' % (key, getattr(self, key))) for key in self.__slots__]
        return ('%s(%s)' % (self.__class__.__name__, ', '.join(L)))
    def __eq__(self, other):
        # Slot-wise equality against instances of the same class only.
        if (not isinstance(other, self.__class__)):
            return False
        for attr in self.__slots__:
            my_val = getattr(self, attr)
            other_val = getattr(other, attr)
            if (my_val != other_val):
                return False
        return True
    def __ne__(self, other):
        return (not (self == other))
# NOTE(review): the decorator prefix appears stripped by extraction — likely
# "@with_fixtures(WebFixture, DhtmlFixture)"; confirm against the original.
_fixtures(WebFixture, DhtmlFixture)
def test_i18n_dhtml(web_fixture, dhtml_fixture):
    """A static .d.html file served through DhtmlUI is rendered according to
    the locale of the current request (here stubbed to Afrikaans, 'af')."""
    class MainUI(UserInterface):
        def assemble(self):
            self.define_page(HTML5Page).use_layout(BasicPageLayout())
            # Serve the static div 'astatic' of the dhtml file in 'main'.
            self.define_user_interface('/dhtml_ui', DhtmlUI, {'main_slot': 'main'}, name='test_ui', static_div_name='astatic')
    fixture = dhtml_fixture
    # Point the web app's static root at the fixture's temp directory.
    web_fixture.config.web.static_root = fixture.static_dir.name
    wsgi_app = web_fixture.new_wsgi_app(site_root=MainUI)
    browser = Browser(wsgi_app)
    # Force the request locale to Afrikaans via a stubbed context.
    def stubbed_create_context_for_request():
        return LocaleContextStub(locale='af')
    with replaced(wsgi_app.create_context_for_request, stubbed_create_context_for_request):
        browser.open('/dhtml_ui/correctfile.d.html')
    # The Afrikaans title from the dhtml file must be served.
    assert (browser.title == 'Afrikaans bo!')
class Stream(ctypes.Structure):
    """Emulated DMA stream register block (STM32-style layout).

    Exposes the CR/NDTR/PAR/M0AR/M1AR/FCR registers as 32-bit fields and
    performs transfers one beat at a time via ``step``.
    """
    _fields_ = [('CR', ctypes.c_uint32), ('NDTR', ctypes.c_uint32), ('PAR', ctypes.c_uint32), ('M0AR', ctypes.c_uint32), ('M1AR', ctypes.c_uint32), ('FCR', ctypes.c_uint32)]
    def enable(self):
        """Return nonzero when the stream-enable (EN) bit is set in CR."""
        return (self.CR & DMA_SxCR.EN)
    def transfer_direction(self):
        """Return the DIR field of CR (transfer direction encoding)."""
        return (self.CR & DMA_SxCR.DIR)
    def transfer_peripheral_size(self):
        """Peripheral-side beat width in bytes (1/2/4).

        Returns None for the reserved PSIZE encoding.
        """
        PSIZE = (self.CR & DMA_SxCR.PSIZE)
        if (PSIZE == DMA.PDATAALIGN_BYTE):
            return 1
        if (PSIZE == DMA.PDATAALIGN_HALFWORD):
            return 2
        if (PSIZE == DMA.PDATAALIGN_WORD):
            return 4
    def transfer_memory_size(self):
        """Memory-side beat width in bytes (1/2/4).

        Returns None for the reserved MSIZE encoding.
        """
        MSIZE = (self.CR & DMA_SxCR.MSIZE)
        if (MSIZE == DMA.MDATAALIGN_BYTE):
            return 1
        if (MSIZE == DMA.MDATAALIGN_HALFWORD):
            return 2
        if (MSIZE == DMA.MDATAALIGN_WORD):
            return 4
    def step(self, mem):
        """Execute one DMA beat against *mem*.

        Reads one source-sized beat, pads/truncates it to the destination
        beat size, writes it, decrements NDTR, applies the MINC/PINC address
        increments, and clears EN when the transfer completes.  Returns True
        on the completing beat, otherwise None.
        """
        if (self.NDTR == 0):
            return
        dir_flag = (self.transfer_direction() == DMA.MEMORY_TO_PERIPH)
        psize = self.transfer_peripheral_size()
        msize = self.transfer_memory_size()
        (src, dst) = ((self.M0AR, self.PAR) if dir_flag else (self.PAR, self.M0AR))
        (src_size, dst_size) = ((msize, psize) if dir_flag else (psize, msize))
        # BUGFIX: bytes.ljust's default fill character is the ASCII space
        # (0x20), which would inject 0x20 bytes when widening a narrow read
        # to a wider destination beat; pad with NUL bytes instead.
        data = bytes(mem.read(src, src_size)).ljust(dst_size, b'\x00')[:dst_size]
        mem.write(dst, data)
        self.NDTR -= 1
        if (self.CR & DMA_SxCR.MINC):
            self.M0AR += msize
        if (self.CR & DMA_SxCR.PINC):
            self.PAR += psize
        if (self.NDTR == 0):
            # Transfer complete: hardware clears the enable bit.
            self.CR &= (~ DMA_SxCR.EN)
            return True
class SparseManifestList(ManifestListInterface):
    """A manifest list parsed "sparsely" from raw JSON bytes.

    Only the accessors needed for basic inspection (schema version, digest,
    child manifest references, amd64/linux digest) are implemented; the rest
    of the interface is stubbed out to return None / do nothing.

    NOTE(review): sibling implementations of this interface usually expose
    these accessors as @property — the decorators may have been stripped by
    extraction; confirm against the interface definition.
    """
    def __init__(self, manifest_bytes: Bytes, media_type, validate=False):
        """Parse *manifest_bytes* as JSON.

        Raises ManifestException when the payload is not valid JSON.  The
        ``validate`` flag is currently accepted but unused.
        """
        assert isinstance(manifest_bytes, Bytes)
        self._payload = manifest_bytes
        self._media_type = media_type
        try:
            self._parsed = json.loads(self._payload.as_unicode())
        except ValueError as e:
            raise ManifestException(f'malformed manifest data: {e}')
    def schema_version(self):
        return self._parsed['schemaVersion']
    def digest(self):
        # Content digest of the raw payload bytes.
        return digest_tools.sha256_digest(self._payload.as_encoded_str())
    def media_type(self):
        return self._media_type
    def is_manifest_list(self):
        return True
    def child_manifests(self, content_retriever):
        """Yield a ManifestReference for each entry in the 'manifests' array."""
        for manifest in self._parsed['manifests']:
            mbytes = json.dumps(manifest)
            (yield ManifestReference(Bytes.for_string_or_unicode(mbytes)))
    def amd64_linux_manifest_digest(self):
        """Digest of the first amd64/linux child manifest, or None if absent."""
        digest = None
        for manifest in self._parsed['manifests']:
            platform = manifest['platform']
            if ((platform['architecture'] == 'amd64') and (platform['os'] == 'linux')):
                digest = manifest['digest']
                break
        return digest
    def manifest_dict(self):
        return self._parsed
    def bytes(self):
        return self._payload
    def layers_compressed_size(self):
        return None
    def config(self):
        return None
    # --- Unimplemented interface methods below: no-op / None stubs. ---
    def config_media_type(self):
        pass
    def validate(self, content_retriever):
        pass
    def filesystem_layers(self):
        return None
    def get_layers(self, content_retriever):
        pass
    def get_leaf_layer_v1_image_id(self, content_retriever):
        pass
    def get_legacy_image_ids(self, content_retriever):
        pass
    def blob_digests(self):
        pass
    def get_blob_digests_for_translation(self):
        pass
    def local_blob_digests(self):
        pass
    def get_manifest_labels(self, content_retriever):
        return None
    def get_requires_empty_layer_blob(self, content_retriever):
        return None
    def unsigned(self):
        pass
    def has_legacy_image(self):
        pass
    def generate_legacy_layers(self, images_map, content_retriever):
        pass
    def get_schema1_manifest(self, namespace_name, repo_name, tag_name, content_retriever):
        pass
    def convert_manifest(self, allowed_mediatypes, namespace_name, repo_name, tag_name, content_retriever):
        pass
def is_training_over_time_limit(extra_state: Dict[str, Any], stop_time: float) -> bool:
    """Return True when cumulative training time exceeds *stop_time* hours.

    Total time = (wall clock since ``extra_state['start_time']``) plus any
    ``extra_state['previous_training_time']`` carried over from earlier runs,
    converted to hours.  A negative *stop_time* disables the limit.  Prints a
    notice when the limit is hit.
    """
    seconds_trained = (time.time() - extra_state['start_time']) + extra_state['previous_training_time']
    elapsed_hr = seconds_trained / (60 * 60)
    # Guard clause: limit disabled, or still under budget.
    if (stop_time < 0) or (elapsed_hr <= stop_time):
        return False
    print(f"Stopping training due to stop time limit of {stop_time} hours - we've trained for {elapsed_hr} hours.")
    return True
def convertRadisToJSON(config_path_json, config_path_old=CONFIG_PATH_OLD):
    """Convert the legacy INI-style config at *config_path_old* into the JSON
    layout and write it to *config_path_json*.

    Each INI section becomes an entry under the top-level 'database' key.
    A 'path' value containing newlines is split into a list of non-empty
    paths (collapsed back to a single string when only one remains).
    """
    legacy = get_user_config_configformat(config_path_old)
    databases = {}
    for section in legacy.sections():
        entry = {}
        for key in legacy[section]:
            value = legacy[section][key]
            if (key == 'path') and ('\n' in value):
                # Multi-line path entry: drop empty lines from the split.
                paths = [p for p in value.split('\n') if p != '']
                entry[key] = paths[0] if (len(paths) == 1) else paths
            else:
                entry[key] = value
        databases[section] = entry
    with open(config_path_json, 'w') as outfile:
        json.dump({'database': databases}, outfile, indent=3)
    return
class Config(NamedTuple):
    """Immutable bundle of the CLI arguments and benchmark callbacks.

    NOTE(review): the ``Callable[([...], ...)]`` subscript form looks like
    decompiler output; it is runtime-equivalent to ``Callable[[...], ...]``.
    """
    # Parsed command-line arguments for the run.
    args: Namespace
    # Runs one benchmark iteration — presumably (client, args, optional
    # label) -> result; confirm at call sites.
    bench_once: Callable[([Client, Namespace, Optional[str]], Any)]
    # Turns raw results into tidy form — (args, array, extras) ->
    # (DataFrame, array); confirm at call sites.
    create_tidy_results: Callable[([Namespace, np.ndarray, List[Any]], Tuple[(pd.DataFrame, np.ndarray)])]
    # Prints a human-readable summary of the results.
    pretty_print_results: Callable[([Namespace, Mapping[(str, int)], np.ndarray, List[Any]], None)]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.