code stringlengths 281 23.7M |
|---|
@pytest.mark.mosaiqdb
def test_session_offsets_for_site(connection):
    """Exercise session/offset queries against a mocked Mosaiq site.

    Builds mock patient/site/field/session rows, then checks that session
    numbers are strictly increasing and that every offset (per-session, mean,
    and localization) equals the mocked (-1.0, 0.0, 1.0) displacement.

    Restored decorator: the transcription left a bare ``.mosaiqdb`` marker,
    which is a syntax error; the mark gates this test on a Mosaiq test DB.
    """
    mock_patient_ident_df = mocks.create_mock_patients()
    mock_site_df = mocks.create_mock_treatment_sites(mock_patient_ident_df)
    mock_txfield_df = mocks.create_mock_treatment_fields(mock_site_df)
    mocks.create_mock_treatment_sessions(mock_site_df, mock_txfield_df)
    sit_set_id = 1
    sessions_for_one_site = sessions_for_site(connection, sit_set_id)
    sessions_for_one_site = list(sessions_for_one_site)
    print(sessions_for_one_site)
    assert (len(sessions_for_one_site) >= 3)
    previous_session_number = None
    for (session_number, session_offset) in session_offsets_for_site(connection, sit_set_id):
        if previous_session_number:
            # Session numbers must be returned in strictly increasing order.
            assert (session_number > previous_session_number)
        if (session_offset is not None):
            assert (session_offset[0] == (- 1.0))
            assert (session_offset[1] == 0.0)
            assert (session_offset[2] == 1.0)
        previous_session_number = session_number
    mean_session_offset = mean_session_offset_for_site(connection, sit_set_id)
    assert (mean_session_offset[0] == (- 1.0))
    assert (mean_session_offset[1] == 0.0)
    assert (mean_session_offset[2] == 1.0)
    localization_offset = localization_offset_for_site(connection, sit_set_id)
    assert (localization_offset[0] == (- 1.0))
    assert (localization_offset[1] == 0.0)
    assert (localization_offset[2] == 1.0)
class EmptyCudaCache(Callback):
    """Callback that periodically releases cached CUDA allocator memory.

    Calls ``torch.cuda.empty_cache()`` once every ``step_interval`` completed
    steps. During FIT, train and eval step counts are summed so the interval
    spans the whole fit loop rather than each phase independently.
    """

    def __init__(self, step_interval: int) -> None:
        # Number of completed steps between consecutive cache flushes.
        self._step_interval = step_interval

    def _maybe_empty_cache(self, total_num_steps_completed: int) -> None:
        # Shared helper (was duplicated in the train/eval/predict hooks):
        # flush the CUDA caching allocator on interval boundaries only.
        if (total_num_steps_completed % self._step_interval) == 0:
            torch.cuda.empty_cache()

    def on_train_step_end(self, state: State, unit: TTrainUnit) -> None:
        total_num_steps_completed = unit.train_progress.num_steps_completed
        if state.entry_point == EntryPoint.FIT:
            # In FIT the same unit also tracks eval progress; count both.
            unit_as_eval_unit = cast(TEvalUnit, unit)
            total_num_steps_completed += unit_as_eval_unit.eval_progress.num_steps_completed
        self._maybe_empty_cache(total_num_steps_completed)

    def on_eval_step_end(self, state: State, unit: TEvalUnit) -> None:
        total_num_steps_completed = unit.eval_progress.num_steps_completed
        if state.entry_point == EntryPoint.FIT:
            unit_as_train_unit = cast(TTrainUnit, unit)
            total_num_steps_completed += unit_as_train_unit.train_progress.num_steps_completed
        self._maybe_empty_cache(total_num_steps_completed)

    def on_predict_step_end(self, state: State, unit: TPredictUnit) -> None:
        self._maybe_empty_cache(unit.predict_progress.num_steps_completed)
class TestOverrideIniArgs():
    """Tests for -o/--override-ini: precedence over ini files, repeated use,
    error handling, and interaction with PYTEST_ADDOPTS / addopts.

    Restored decorators: the transcription dropped the ``@pytest.mark.``
    prefix from the two parametrize markers, leaving bare ``.parametrize``
    lines (syntax errors).
    """

    @pytest.mark.parametrize('name', 'setup.cfg tox.ini pytest.ini'.split())
    def test_override_ini_names(self, pytester: Pytester, name: str) -> None:
        # setup.cfg uses the [tool:pytest] section header instead of [pytest].
        section = ('[pytest]' if (name != 'setup.cfg') else '[tool:pytest]')
        pytester.path.joinpath(name).write_text(textwrap.dedent('\n {section}\n custom = 1.0'.format(section=section)), encoding='utf-8')
        pytester.makeconftest('\n def pytest_addoption(parser):\n parser.addini("custom", "")')
        pytester.makepyfile('\n def test_pass(pytestconfig):\n ini_val = pytestconfig.getini("custom")\n print(\'\\ncustom_option:%s\\n\' % ini_val)')
        result = pytester.runpytest('--override-ini', 'custom=2.0', '-s')
        assert (result.ret == 0)
        result.stdout.fnmatch_lines(['custom_option:2.0'])
        # When -o is given twice for the same key, the last occurrence wins.
        result = pytester.runpytest('--override-ini', 'custom=2.0', '--override-ini=custom=3.0', '-s')
        assert (result.ret == 0)
        result.stdout.fnmatch_lines(['custom_option:3.0'])

    def test_override_ini_paths(self, pytester: Pytester) -> None:
        pytester.makeconftest('\n def pytest_addoption(parser):\n parser.addini("paths", "my new ini value", type="paths")')
        pytester.makeini('\n [pytest]\n paths=blah.py')
        pytester.makepyfile('\n def test_overriden(pytestconfig):\n config_paths = pytestconfig.getini("paths")\n print(config_paths)\n for cpf in config_paths:\n print(\'\\nuser_path:%s\' % cpf.name)\n ')
        result = pytester.runpytest('--override-ini', 'paths=foo/bar1.py foo/bar2.py', '-s')
        result.stdout.fnmatch_lines(['user_path:bar1.py', 'user_path:bar2.py'])

    def test_override_multiple_and_default(self, pytester: Pytester) -> None:
        pytester.makeconftest('\n def pytest_addoption(parser):\n addini = parser.addini\n addini("custom_option_1", "", default="o1")\n addini("custom_option_2", "", default="o2")\n addini("custom_option_3", "", default=False, type="bool")\n addini("custom_option_4", "", default=True, type="bool")')
        pytester.makeini('\n [pytest]\n custom_option_1=custom_option_1\n custom_option_2=custom_option_2\n ')
        pytester.makepyfile('\n def test_multiple_options(pytestconfig):\n prefix = "custom_option"\n for x in range(1, 5):\n ini_value=pytestconfig.getini("%s_%d" % (prefix, x))\n print(\'\\nini%d:%s\' % (x, ini_value))\n ')
        # Values containing '=' and '?' must survive the override parsing.
        result = pytester.runpytest('--override-ini', 'custom_option_1=fulldir=/tmp/user1', '-o', 'custom_option_2=url=/tmp/user2?a=b&d=e', '-o', 'custom_option_3=True', '-o', 'custom_option_4=no', '-s')
        result.stdout.fnmatch_lines(['ini1:fulldir=/tmp/user1', 'ini2:url=/tmp/user2?a=b&d=e', 'ini3:True', 'ini4:False'])

    def test_override_ini_usage_error_bad_style(self, pytester: Pytester) -> None:
        pytester.makeini('\n [pytest]\n xdist_strict=False\n ')
        # Missing '=' between key and value is a usage error.
        result = pytester.runpytest('--override-ini', 'xdist_strict', 'True')
        result.stderr.fnmatch_lines(["ERROR: -o/--override-ini expects option=value style (got: 'xdist_strict')."])

    @pytest.mark.parametrize('with_ini', [True, False])
    def test_override_ini_handled_asap(self, pytester: Pytester, with_ini: bool) -> None:
        # python_files must be overridden early enough to affect collection.
        if with_ini:
            pytester.makeini('\n [pytest]\n python_files=test_*.py\n ')
        pytester.makepyfile(unittest_ini_handle='\n def test():\n pass\n ')
        result = pytester.runpytest('--override-ini', 'python_files=unittest_*.py')
        result.stdout.fnmatch_lines(['*1 passed in*'])

    def test_addopts_before_initini(self, monkeypatch: MonkeyPatch, _config_for_test, _sys_snapshot) -> None:
        cache_dir = '.custom_cache'
        monkeypatch.setenv('PYTEST_ADDOPTS', ('-o cache_dir=%s' % cache_dir))
        config = _config_for_test
        config._preparse([], addopts=True)
        assert (config._override_ini == [('cache_dir=%s' % cache_dir)])

    def test_addopts_from_env_not_concatenated(self, monkeypatch: MonkeyPatch, _config_for_test) -> None:
        # '-o' from PYTEST_ADDOPTS must not consume a positional argument.
        monkeypatch.setenv('PYTEST_ADDOPTS', '-o')
        config = _config_for_test
        with pytest.raises(UsageError) as excinfo:
            config._preparse(['cache_dir=ignored'], addopts=True)
        assert ('error: argument -o/--override-ini: expected one argument (via PYTEST_ADDOPTS)' in excinfo.value.args[0])

    def test_addopts_from_ini_not_concatenated(self, pytester: Pytester) -> None:
        pytester.makeini('\n [pytest]\n addopts=-o\n ')
        result = pytester.runpytest('cache_dir=ignored')
        result.stderr.fnmatch_lines([('%s: error: argument -o/--override-ini: expected one argument (via addopts config)' % (pytester._request.config._parser.optparser.prog,))])
        assert (result.ret == _pytest.config.ExitCode.USAGE_ERROR)

    def test_override_ini_does_not_contain_paths(self, _config_for_test, _sys_snapshot) -> None:
        """Check that -o no longer swallows all the paths."""
        config = _config_for_test
        config._preparse(['-o', 'cache_dir=/cache', '/some/test/path'])
        assert (config._override_ini == ['cache_dir=/cache'])

    def test_multiple_override_ini_options(self, pytester: Pytester) -> None:
        """Ensure a file not collected by an overridden option is never run."""
        pytester.makepyfile(**{'conftest.py': "\n def pytest_addoption(parser):\n parser.addini('foo', default=None, help='some option')\n parser.addini('bar', default=None, help='some option')\n ", 'test_foo.py': "\n def test(pytestconfig):\n assert pytestconfig.getini('foo') == '1'\n assert pytestconfig.getini('bar') == '0'\n ", 'test_bar.py': '\n def test():\n assert False\n '})
        result = pytester.runpytest('-o', 'foo=1', '-o', 'bar=0', 'test_foo.py')
        assert ('ERROR:' not in result.stderr.str())
        result.stdout.fnmatch_lines(['collected 1 item', '*= 1 passed in *='])
class InferenceContext():
    """Provide context to the inference engine.

    Tracks the path of (node, lookup name) pairs already visited to detect
    cycles, plus call/bound context, extra per-node contexts, and branch
    constraints.

    Restored decorators: the transcription dropped ``@property`` from
    ``nodes_inferred``/``inferred`` and mangled the setter decorator into a
    bare ``_inferred.setter`` expression (a NameError at class creation).
    """

    __slots__ = ('path', 'lookupname', 'callcontext', 'boundnode', 'extra_context', 'constraints', '_nodes_inferred')

    # Hard cap used by callers to stop runaway inference.
    max_inferred = 100

    def __init__(self, path: (set[tuple[(nodes.NodeNG, (str | None))]] | None)=None, nodes_inferred: (list[int] | None)=None) -> None:
        if (nodes_inferred is None):
            # Boxed in a one-element list so clones share the same counter.
            self._nodes_inferred = [0]
        else:
            self._nodes_inferred = nodes_inferred
        self.path = (path or set())
        self.lookupname: (str | None) = None
        self.callcontext: (CallContext | None) = None
        self.boundnode: (SuccessfulInferenceResult | None) = None
        self.extra_context: dict[(SuccessfulInferenceResult, InferenceContext)] = {}
        self.constraints: dict[(str, dict[(nodes.If, set[constraint.Constraint])])] = {}

    @property
    def nodes_inferred(self) -> int:
        """Number of nodes inferred in this context and all its clones/descendents."""
        return self._nodes_inferred[0]

    @nodes_inferred.setter
    def nodes_inferred(self, value: int) -> None:
        self._nodes_inferred[0] = value

    @property
    def inferred(self) -> _InferenceCache:
        """The process-wide inference results cache."""
        return _INFERENCE_CACHE

    def push(self, node: nodes.NodeNG) -> bool:
        """Push node into inference path; return True if it was already there (a cycle)."""
        name = self.lookupname
        if ((node, name) in self.path):
            return True
        self.path.add((node, name))
        return False

    def clone(self) -> InferenceContext:
        """Return a clone with an independent path copy but a shared inferred counter."""
        clone = InferenceContext(self.path.copy(), nodes_inferred=self._nodes_inferred)
        clone.callcontext = self.callcontext
        clone.boundnode = self.boundnode
        clone.extra_context = self.extra_context
        clone.constraints = self.constraints.copy()
        return clone

    def restore_path(self) -> Iterator[None]:
        # NOTE(review): this generator is meant to be used as a context
        # manager that snapshots and restores self.path; upstream wraps it
        # with @contextlib.contextmanager — confirm the decorator was lost
        # in transcription before calling it via `with`.
        path = set(self.path)
        (yield)
        self.path = path

    def is_empty(self) -> bool:
        """True when no inference state has been recorded in this context."""
        # The original expression tested `callcontext` twice; the duplicate
        # was redundant and has been removed.
        return ((not self.path) and (not self.nodes_inferred) and (not self.callcontext) and (not self.boundnode) and (not self.lookupname) and (not self.extra_context) and (not self.constraints))

    def __str__(self) -> str:
        state = (f'{field}={pprint.pformat(getattr(self, field), width=(80 - len(field)))}' for field in self.__slots__)
        return '{}({})'.format(type(self).__name__, ',\n '.join(state))
@pytest.hookimpl(optionalhook=True)
def pytest_selenium_runtest_makereport(item, report, summary, extra):
    """Attach the BrowserStack job URL to the report and push pass/fail status
    back to the BrowserStack REST API.

    Restored decorator: the transcription left a bare ``(optionalhook=True)``
    expression, which is a syntax error.
    """
    provider = BrowserStack()
    if (not provider.uses_driver(item.config.getoption('driver'))):
        return
    # xfail-marked failures count as passes for status-reporting purposes.
    passed = (report.passed or (report.failed and hasattr(report, 'wasxfail')))
    session_id = item._driver.session_id
    api_url = provider.API.format(session=session_id)
    import requests
    job_info = None
    try:
        job_info = requests.get(api_url, auth=provider.auth, timeout=10).json()
        job_url = job_info['automation_session'][provider.job_access]
        summary.append('{0} Job: {1}'.format(provider.name, job_url))
        pytest_html = item.config.pluginmanager.getplugin('html')
        extra.append(pytest_html.extras.url(job_url, '{0} Job'.format(provider.name)))
    except Exception as e:
        summary.append('WARNING: Failed to determine {0} job URL: {1}'.format(provider.name, e))
    if job_info is None:
        # Without job info there is no status to compare against; the original
        # fell through to a NameError that was silently swallowed below.
        return
    try:
        job_status = job_info['automation_session']['status']
        status = ('running' if passed else 'error')
        if ((report.when == 'teardown') and passed):
            status = 'completed'
        if (job_status not in ('error', status)):
            # Only PUT when the remote status would actually change.
            requests.put(api_url, headers={'Content-Type': 'application/json'}, params={'status': status}, auth=provider.auth, timeout=10)
    except Exception as e:
        summary.append('WARNING: Failed to update job status: {0}'.format(e))
class ResNet(nn.Module):
    """Torchvision-style ResNet backbone with configurable input channels and
    a dropout rate forwarded to the residual blocks.

    Architecture: 7x7/s2 conv stem -> norm -> ReLU -> 3x3/s2 maxpool, four
    residual stages (64/128/256/512 base planes), global average pool, and a
    fully-connected classifier head.
    """

    def __init__(self, block, layers, n_channels=3, num_classes=1000, zero_init_residual=False, groups=1, width_per_group=64, replace_stride_with_dilation=None, norm_layer=None, drop_rate=0.0):
        """Build the network.

        Args:
            block: residual block class; must expose a class attribute
                ``expansion`` and accept the constructor arguments used in
                ``_make_layer`` (presumably BasicBlock/Bottleneck — confirm).
            layers: four ints, number of blocks per stage.
            n_channels: number of input image channels.
            num_classes: classifier output size.
            zero_init_residual: zero-init the last norm layer of each block so
                the residual branch starts as identity.
            groups, width_per_group: grouped-convolution configuration
                forwarded to the blocks.
            replace_stride_with_dilation: three bools; replace the stage's
                stride-2 with dilation (for dense prediction tasks).
            norm_layer: normalization layer factory; defaults to BatchNorm2d.
            drop_rate: dropout rate forwarded to the blocks.
        """
        super(ResNet, self).__init__()
        self.drop_rate = drop_rate
        if (norm_layer is None):
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.inplanes = 64
        self.dilation = 1
        if (replace_stride_with_dilation is None):
            # One flag per strided stage (layer2..layer4).
            replace_stride_with_dilation = [False, False, False]
        if (len(replace_stride_with_dilation) != 3):
            raise ValueError('replace_stride_with_dilation should be None or a 3-element tuple, got {}'.format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        self.conv1 = nn.Conv2d(n_channels, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2])
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear((512 * block.expansion), num_classes)
        # He init for convolutions; unit weight / zero bias for norm layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        if zero_init_residual:
            # Zero the last norm per block so each residual branch starts as
            # identity; reported to help training deeper networks.
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        """Stack ``blocks`` residual blocks for one stage.

        The first block may downsample via stride and a projection shortcut;
        subsequent blocks keep resolution. When ``dilate`` is set, the stride
        is converted into dilation instead.
        """
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            self.dilation *= stride
            stride = 1
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            # Projection shortcut to match spatial size and channel count.
            downsample = nn.Sequential(conv1x1(self.inplanes, (planes * block.expansion), stride), norm_layer((planes * block.expansion)))
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups, self.base_width, previous_dilation, norm_layer))
        self.inplanes = (planes * block.expansion)
        for _ in range(1, blocks):
            # NOTE(review): drop_rate is only forwarded to the non-first
            # blocks of each stage — confirm this is intentional.
            layers.append(block(self.inplanes, planes, groups=self.groups, base_width=self.base_width, dilation=self.dilation, norm_layer=norm_layer, drop_rate=self.drop_rate))
        return nn.Sequential(*layers)

    def _forward_impl(self, x):
        """Run the stem, the four stages, and the classifier head."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)
        return x

    def forward(self, x):
        return self._forward_impl(x)
class PoseEstimationEvaluator(chainer.training.extensions.Evaluator):
    """Evaluator that aggregates pose-estimation metrics (ADD / ADD-S) across
    processes and derives per-class AUC and <2cm accuracy.

    Restored decorators: the transcription dropped ``@property`` /
    ``@comm.setter`` from the ``comm`` accessors, leaving two plain methods
    with the same name (the getter was silently shadowed).
    """

    @property
    def comm(self):
        # Lazily default to None so the attribute is safe to read before
        # a communicator has been assigned.
        if (not hasattr(self, '_comm')):
            self._comm = None
        return self._comm

    @comm.setter
    def comm(self, value):
        self._comm = value

    def evaluate(self):
        """Run the evaluation loop and return a dict of averaged metrics.

        In distributed mode, observations are gathered to rank 0; other ranks
        return an empty dict.
        """
        iterator = self._iterators['main']
        eval_func = (self.eval_func or self._targets['main'])
        if self.eval_hook:
            self.eval_hook(self)
        if hasattr(iterator, 'reset'):
            iterator.reset()
            it = iterator
        else:
            warnings.warn('This iterator does not have the reset method. Evaluator copies the iterator instead of resetting. This behavior is deprecated. Please implement the reset method.', DeprecationWarning)
            it = copy.copy(iterator)
        # Show a progress bar only when requested AND we are either not
        # distributed or on rank 0. The original expression
        # `(pb and comm is None) or comm.rank == 0` dereferenced comm.rank
        # when comm was None, and showed the bar on rank 0 even with
        # _progress_bar disabled.
        show_progress = self._progress_bar and (self.comm is None or self.comm.rank == 0)
        if show_progress:
            pbar = _IteratorProgressBar(iterator=it)
        observations = []
        for batch in it:
            observation = {}
            with reporter_module.report_scope(observation):
                in_arrays = convert_module._call_converter(self.converter, batch, self.device)
                with function.no_backprop_mode():
                    if isinstance(in_arrays, tuple):
                        eval_func(*in_arrays)
                    elif isinstance(in_arrays, dict):
                        eval_func(**in_arrays)
                    else:
                        eval_func(in_arrays)
            # Pull reported values off the device and unbox 0-d arrays to
            # plain Python scalars so they survive DataFrame aggregation.
            for (k, v) in list(observation.items()):
                if hasattr(v, 'array'):
                    v = chainer.cuda.to_cpu(v.array)
                if hasattr(v, 'item'):
                    v = v.item()
                observation[k] = v
            observations.append(observation)
            if show_progress:
                pbar.update()
        if show_progress:
            pbar.close()
        local_df = pandas.DataFrame(observations)
        if self.comm:
            # Gather every rank's observations; only rank 0 aggregates.
            dfs = self.comm.gather_obj(local_df)
            if (self.comm.rank == 0):
                global_df = pandas.concat(dfs, sort=True)
            else:
                return {}
        else:
            global_df = local_df
        summary = reporter_module.DictSummary()
        adds = collections.defaultdict(list)
        for (_, row) in global_df.iterrows():
            observation = row.dropna().to_dict()
            observation_processed = {}
            add_types = ['add', 'add_s', 'add_or_add_s']
            for (key, value) in observation.items():
                # Collapse per-instance keys (…/<add_type>/<class_id>/<inst>)
                # down to the class level and collect values for AUC.
                for add_type in add_types:
                    pattern = f'validation/main/{add_type}/([0-9]+)/.+'
                    match = re.match(pattern, key)
                    if (not match):
                        continue
                    class_id = match.groups()[0]
                    key = f'validation/main/{add_type}/{class_id}'
                    adds[f'{add_type}/{class_id}'].append(value)
                    break
                observation_processed[key] = value
            summary.add(observation_processed)
        result = summary.compute_mean()
        for (add_type_and_class_id, values) in adds.items():
            auc = metrics.ycb_video_add_auc(values, max_value=0.1)
            result[f'validation/main/auc/{add_type_and_class_id}'] = auc
            lt_2cm = ((np.array(values) < 0.02).sum() / len(values))
            result[f'validation/main/<2cm/{add_type_and_class_id}'] = lt_2cm
        # Average the per-class entries up to their parent keys when the
        # parent was not reported directly.
        parent_keys = ['validation/main/loss', 'validation/main/loss_quaternion', 'validation/main/loss_translation', 'validation/main/add', 'validation/main/add_s', 'validation/main/add_or_add_s', 'validation/main/auc/add', 'validation/main/auc/add_s', 'validation/main/auc/add_or_add_s', 'validation/main/<2cm/add', 'validation/main/<2cm/add_s', 'validation/main/<2cm/add_or_add_s']
        summary = reporter_module.DictSummary()
        for parent_key in parent_keys:
            if (parent_key in result):
                continue
            for (key, value) in result.items():
                if (osp.dirname(key) == parent_key):
                    summary.add({parent_key: value})
        result.update(summary.compute_mean())
        return result
def get_args(driver=None, download_dir=None, download_ftypes=None, firefox_pref=None, firefox_prof_dir=None, remote_url=None, executable=None, headless=False, driver_kwargs=None):
    """Build the keyword arguments used to instantiate a browser driver.

    Args:
        driver: driver name ('firefox', 'remote', 'chrome', ...).
        download_dir: directory Firefox downloads into without prompting.
        download_ftypes: MIME types Firefox saves to disk automatically.
        firefox_pref: extra Firefox preferences overriding the defaults.
        firefox_prof_dir: existing Firefox profile directory to load.
        remote_url: command-executor URL for remote drivers.
        executable: driver executable path (chrome only).
        headless: run without a display where supported.
        driver_kwargs: extra keyword arguments merged into the result.

    Returns:
        dict of keyword arguments for the driver constructor.
    """
    kwargs = {}
    # Base download/profile preferences; caller-supplied entries override.
    firefox_profile_preferences = dict({'browser.download.folderList': 2, 'browser.download.manager.showWhenStarting': False, 'browser.download.dir': download_dir, 'browser.helperApps.neverAsk.saveToDisk': download_ftypes, 'browser.helperApps.alwaysAsk.force': False, 'pdfjs.disabled': True}, **(firefox_pref or {}))
    # Fix: the remote branch called driver_kwargs.get(...) directly, which
    # raised AttributeError when driver_kwargs was None (its default).
    extra_kwargs = driver_kwargs or {}
    if (driver == 'firefox'):
        kwargs['profile_preferences'] = firefox_profile_preferences
        kwargs['profile'] = firefox_prof_dir
        if headless:
            kwargs['headless'] = headless
    elif (driver == 'remote'):
        if remote_url:
            kwargs['command_executor'] = remote_url
        kwargs['keep_alive'] = True
        # Remote Firefox needs the profile encoded into the capabilities.
        profile = FirefoxProfile(firefox_prof_dir)
        for (key, value) in firefox_profile_preferences.items():
            profile.set_preference(key, value)
        kwargs['desired_capabilities'] = extra_kwargs.get('desired_capabilities', {})
        kwargs['desired_capabilities']['firefox_profile'] = profile.encoded
        kwargs['desired_capabilities']['moz:firefoxOptions'] = extra_kwargs.get('moz:firefoxOptions', {})
        kwargs['desired_capabilities']['moz:firefoxOptions']['profile'] = profile.encoded
    elif (driver in ('chrome',)):
        if executable:
            kwargs['executable_path'] = executable
        if headless:
            kwargs['headless'] = headless
    if driver_kwargs:
        kwargs.update(driver_kwargs)
    return kwargs
def get_parameter_groups(model):
    """Split a model's parameters into two optimizer groups.

    Parameters whose names contain 'bias', 'normalization', or
    'label_embeddings' go into a group with weight_decay=0; everything else
    goes into the default (decayed) group.
    """
    skip_keywords = ['bias', 'normalization', 'label_embeddings']

    def _skip_decay(param_name):
        # True when the parameter should be excluded from weight decay.
        return any(keyword in param_name for keyword in skip_keywords)

    decayed = []
    undecayed = []
    for name, param in model.named_parameters():
        (undecayed if _skip_decay(name) else decayed).append(param)
    return [{'params': decayed}, {'params': undecayed, 'weight_decay': 0}]
def get_baseline_dict_entry(tag):
    """Look up *tag* in the baseline DICOM dictionary.

    Non-private tags that miss the main dictionary are retried against the
    repeaters dictionary via their mask. Raises NotInBaselineError when the
    tag is in neither.
    """
    if not isinstance(tag, pydicom.tag.BaseTag):
        # Normalise raw representations (int / tuple / str) to a Tag.
        tag = pydicom.tag.Tag(tag)
    try:
        return get_baseline_dicom_dict()[tag]
    except KeyError:
        pass
    if not tag.is_private:
        mask_match = pydicom.datadict.mask_match(tag)
        if mask_match:
            return get_baseline_dicom_repeaters_dict()[mask_match]
    raise NotInBaselineError('pydicom.tag.Tag {} not found in DICOM dictionary'.format(tag))
class SwaggerMaskHeaderTest(object):
    """Swagger-spec tests for exposure of the field-mask header.

    Restored decorators: the transcription stripped the ``@api.route``
    prefixes down to bare ``('/test/')`` tuples and ``@api.marshal_with`` down
    to bare ``_with(model)`` calls, both of which break the tests.
    """

    def test_marshal_with_expose_mask_header(self, app, client):
        api = Api(app)
        model = api.model('Test', {'name': fields.String, 'age': fields.Integer, 'boolean': fields.Boolean})

        @api.route('/test/')
        class TestResource(Resource):
            @api.marshal_with(model)
            def get(self):
                return {'name': 'John Doe', 'age': 42, 'boolean': True}

        specs = client.get_specs()
        op = specs['paths']['/test/']['get']
        assert ('parameters' in op)
        assert (len(op['parameters']) == 1)
        param = op['parameters'][0]
        assert (param['name'] == 'X-Fields')
        assert (param['type'] == 'string')
        assert (param['format'] == 'mask')
        assert (param['in'] == 'header')
        assert ('required' not in param)
        assert ('default' not in param)

    def test_marshal_with_expose_custom_mask_header(self, app, client):
        api = Api(app)
        model = api.model('Test', {'name': fields.String, 'age': fields.Integer, 'boolean': fields.Boolean})

        @api.route('/test/')
        class TestResource(Resource):
            @api.marshal_with(model)
            def get(self):
                return {'name': 'John Doe', 'age': 42, 'boolean': True}

        # Renaming the header via config must be reflected in the spec.
        app.config['RESTX_MASK_HEADER'] = 'X-Mask'
        specs = client.get_specs()
        op = specs['paths']['/test/']['get']
        assert ('parameters' in op)
        assert (len(op['parameters']) == 1)
        param = op['parameters'][0]
        assert (param['name'] == 'X-Mask')

    def test_marshal_with_disabling_mask_header(self, app, client):
        api = Api(app)
        model = api.model('Test', {'name': fields.String, 'age': fields.Integer, 'boolean': fields.Boolean})

        @api.route('/test/')
        class TestResource(Resource):
            @api.marshal_with(model)
            def get(self):
                return {'name': 'John Doe', 'age': 42, 'boolean': True}

        # Disabling mask documentation removes the header parameter entirely.
        app.config['RESTX_MASK_SWAGGER'] = False
        specs = client.get_specs()
        op = specs['paths']['/test/']['get']
        assert ('parameters' not in op)

    def test_is_only_exposed_on_marshal_with(self, app, client):
        api = Api(app)
        model = api.model('Test', {'name': fields.String, 'age': fields.Integer, 'boolean': fields.Boolean})

        @api.route('/test/')
        class TestResource(Resource):
            def get(self):
                # Manual marshalling does not advertise the mask header.
                return api.marshal({'name': 'John Doe', 'age': 42, 'boolean': True}, model)

        specs = client.get_specs()
        op = specs['paths']['/test/']['get']
        assert ('parameters' not in op)

    def test_marshal_with_expose_default_mask_header(self, app, client):
        api = Api(app)
        model = api.model('Test', {'name': fields.String, 'age': fields.Integer, 'boolean': fields.Boolean})

        @api.route('/test/')
        class TestResource(Resource):
            @api.marshal_with(model, mask='{name,age}')
            def get(self):
                pass

        specs = client.get_specs()
        op = specs['paths']['/test/']['get']
        assert ('parameters' in op)
        assert (len(op['parameters']) == 1)
        param = op['parameters'][0]
        assert (param['name'] == 'X-Fields')
        assert (param['type'] == 'string')
        assert (param['format'] == 'mask')
        assert (param['default'] == '{name,age}')
        assert (param['in'] == 'header')
        assert ('required' not in param)

    def test_marshal_with_expose_default_model_mask_header(self, app, client):
        api = Api(app)
        model = api.model('Test', {'name': fields.String, 'age': fields.Integer, 'boolean': fields.Boolean}, mask='{name,age}')

        @api.route('/test/')
        class TestResource(Resource):
            @api.marshal_with(model)
            def get(self):
                pass

        specs = client.get_specs()
        definition = specs['definitions']['Test']
        assert ('x-mask' in definition)
        assert (definition['x-mask'] == '{name,age}')
class ResNet(nn.Module):
    """Torchvision-style ResNet backbone for 3-channel input.

    Architecture: 7x7/s2 conv stem -> norm -> ReLU -> 3x3/s2 maxpool, four
    residual stages (64/128/256/512 base planes), global average pool, and a
    fully-connected classifier head.
    """

    def __init__(self, block, layers, num_classes=1000, zero_init_residual=False, groups=1, width_per_group=64, replace_stride_with_dilation=None, norm_layer=None):
        """Build the network.

        Args:
            block: residual block class; must expose ``expansion`` and accept
                the arguments used in ``_make_layer`` (presumably
                BasicBlock/Bottleneck — confirm).
            layers: four ints, number of blocks per stage.
            num_classes: classifier output size.
            zero_init_residual: zero-init the last norm layer of each block so
                the residual branch starts as identity.
            groups, width_per_group: grouped-convolution configuration.
            replace_stride_with_dilation: three bools; replace the stage's
                stride-2 with dilation (for dense prediction tasks).
            norm_layer: normalization layer factory; defaults to BatchNorm2d.
        """
        super(ResNet, self).__init__()
        if (norm_layer is None):
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.inplanes = 64
        self.dilation = 1
        if (replace_stride_with_dilation is None):
            # One flag per strided stage (layer2..layer4).
            replace_stride_with_dilation = [False, False, False]
        if (len(replace_stride_with_dilation) != 3):
            raise ValueError('replace_stride_with_dilation should be None or a 3-element tuple, got {}'.format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2])
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear((512 * block.expansion), num_classes)
        # He init for convolutions; unit weight / zero bias for norm layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        if zero_init_residual:
            # Zero the last norm per block so each residual branch starts as
            # identity; reported to help training deeper networks.
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        """Stack ``blocks`` residual blocks for one stage.

        The first block may downsample via stride and a projection shortcut;
        subsequent blocks keep resolution. When ``dilate`` is set, the stride
        is converted into dilation instead.
        """
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            self.dilation *= stride
            stride = 1
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            # Projection shortcut to match spatial size and channel count.
            downsample = nn.Sequential(conv1x1(self.inplanes, (planes * block.expansion), stride), norm_layer((planes * block.expansion)))
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups, self.base_width, previous_dilation, norm_layer))
        self.inplanes = (planes * block.expansion)
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups, base_width=self.base_width, dilation=self.dilation, norm_layer=norm_layer))
        return nn.Sequential(*layers)

    def _forward(self, x):
        """Run the stem, the four stages, and the classifier head."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)
        return x

    # Alias kept so subclasses can override _forward while callers use
    # forward (torchvision's TorchScript-compat pattern — presumably).
    forward = _forward
def _install_one(requirement, cmd, pkgname, modulename):
cmd.args = [requirement]
cmd.ensure_finalized()
cmd.run()
target = cmd.install_dir
dest_path = glob.glob(os.path.join(target, (pkgname + '*.egg')))
assert dest_path
assert os.path.exists(os.path.join(dest_path[0], pkgname, modulename)) |
def grid_partition(x, grid_size: List[int]):
    """Partition an NHWC tensor into non-overlapping (grid_h, grid_w) grids.

    Args:
        x: tensor of shape (B, H, W, C); H and W must be divisible by the
            corresponding grid dimension.
        grid_size: (grid_h, grid_w).

    Returns:
        Tensor of shape (B * H//grid_h * W//grid_w, grid_h, grid_w, C).
    """
    (B, H, W, C) = x.shape
    _assert((H % grid_size[0]) == 0, f'height {H} must be divisible by grid {grid_size[0]}')
    # Fix: the width check previously failed with an empty message; give it
    # the same diagnostic shape as the height check.
    _assert((W % grid_size[1]) == 0, f'width {W} must be divisible by grid {grid_size[1]}')
    x = x.view(B, grid_size[0], (H // grid_size[0]), grid_size[1], (W // grid_size[1]), C)
    windows = x.permute(0, 2, 4, 1, 3, 5).contiguous().view((- 1), grid_size[0], grid_size[1], C)
    return windows
def fetch_data_table(api_key, show_progress, retries):
    """Fetch the Quandl WIKI Prices table, retrying up to *retries* times.

    Downloads the metadata CSV to find the table's download link, fetches the
    table (with or without a progress bar), and parses it. Raises ValueError
    when every attempt fails.
    """
    for _attempt in range(retries):
        try:
            if show_progress:
                log.info('Downloading WIKI metadata.')
            metadata = pd.read_csv(format_metadata_url(api_key))
            # The metadata's first row carries the link to the full table dump.
            table_url = metadata.loc[0, 'file.link']
            if show_progress:
                raw_file = download_with_progress(table_url, chunk_size=ONE_MEGABYTE, label='Downloading WIKI Prices table from Quandl')
            else:
                raw_file = download_without_progress(table_url)
            return load_data_table(file=raw_file, index_col=None, show_progress=show_progress)
        except Exception:
            log.exception('Exception raised reading Quandl data. Retrying.')
    raise ValueError('Failed to download Quandl data after %d attempts.' % retries)
def replacePassword(actionData, password):
    """Substitute every VM_PASSWORD placeholder in a COMMANDS action.

    Mutates ``actionData`` in place; actions of any other TYPE are untouched.

    Fix: the original re-ran ``str.replace`` in a ``while 'VM_PASSWORD' in …``
    loop, which never terminates when *password* itself contains the
    placeholder text. A single ``replace`` already substitutes every
    occurrence.
    """
    if actionData['TYPE'] != 'COMMANDS':
        return
    for command_group in actionData['COMMANDS']:
        for index, command in enumerate(command_group):
            command_group[index] = command.replace('VM_PASSWORD', password)
class FortuneThread(QThread):
    """Worker thread that fetches 'fortune' strings from a TCP server.

    Results are delivered through the newFortune/error signals. Requests are
    handed over under a mutex; between requests the thread sleeps on a wait
    condition.
    """

    # Emitted with the fortune text on a successful fetch.
    newFortune = pyqtSignal(str)
    # Emitted with (socket error code, human-readable message) on failure.
    error = pyqtSignal(int, str)

    def __init__(self, parent=None):
        super(FortuneThread, self).__init__(parent)
        # Set True to make run() exit its loop; guarded by self.mutex.
        self.quit = False
        # Target endpoint, written under self.mutex by requestNewFortune().
        self.hostName = ''
        self.cond = QWaitCondition()
        self.mutex = QMutex()
        self.port = 0

    def __del__(self):
        # Wake the worker so it can observe quit=True, then join it.
        # NOTE(review): relying on __del__ for thread shutdown is fragile —
        # confirm callers keep a reference until shutdown is intended.
        self.mutex.lock()
        self.quit = True
        self.cond.wakeOne()
        self.mutex.unlock()
        self.wait()

    def requestNewFortune(self, hostname, port):
        """Record the target endpoint and wake (or start) the worker thread."""
        locker = QMutexLocker(self.mutex)
        self.hostName = hostname
        self.port = port
        if (not self.isRunning()):
            self.start()
        else:
            # Thread is parked in cond.wait(); wake it for the new request.
            self.cond.wakeOne()

    def run(self):
        """Thread body: fetch one fortune per request until quit is set."""
        self.mutex.lock()
        serverName = self.hostName
        serverPort = self.port
        self.mutex.unlock()
        while (not self.quit):
            # Per-operation socket timeout in milliseconds.
            Timeout = (5 * 1000)
            socket = QTcpSocket()
            socket.connectToHost(serverName, serverPort)
            if (not socket.waitForConnected(Timeout)):
                self.error.emit(socket.error(), socket.errorString())
                return
            # Wait for the 16-bit length prefix of the framed message...
            while (socket.bytesAvailable() < 2):
                if (not socket.waitForReadyRead(Timeout)):
                    self.error.emit(socket.error(), socket.errorString())
                    return
            instr = QDataStream(socket)
            instr.setVersion(QDataStream.Qt_4_0)
            blockSize = instr.readUInt16()
            # ...then for the full payload before decoding the string.
            while (socket.bytesAvailable() < blockSize):
                if (not socket.waitForReadyRead(Timeout)):
                    self.error.emit(socket.error(), socket.errorString())
                    return
            self.mutex.lock()
            fortune = instr.readQString()
            self.newFortune.emit(fortune)
            # Park until the next request (or quit) wakes us, then re-read
            # the possibly-updated target endpoint under the lock.
            self.cond.wait(self.mutex)
            serverName = self.hostName
            serverPort = self.port
            self.mutex.unlock()
class FakeResponse(web.Response):
    """Test double for an HTTP response that serves a canned payload.

    json()/text() return the stored object as-is; read() returns its JSON
    encoding as UTF-8 bytes.
    """

    # Canned headers advertising JSON content plus rate-limit fields
    # (presumably mimicking a rate-limited API — confirm against the tests).
    headers = CIMultiDict({'content-type': 'application/json; charset=utf-8', 'x-ratelimit-limit': '10', 'x-ratelimit-remaining': '5', 'x-ratelimit-reset': '1'})
    # Placeholder URL so code that logs/inspects response.url has a value.
    url = 'test URL'

    def __init__(self, data=None, **kwargs):
        """Store *data* as the payload; remaining kwargs go to web.Response."""
        super().__init__(**kwargs)
        self._data = data

    async def json(self):
        # Payload is returned unparsed — callers get the original object.
        return self._data

    async def text(self):
        # NOTE(review): returns the raw object, not necessarily a str —
        # presumably tests always pass string data; confirm.
        return self._data

    async def read(self):
        # Serialize the payload the way a real body would arrive on the wire.
        return json.dumps(self._data).encode('utf-8')
def test_colorscheme_gentoo_workaround(config_stub, gentoo_versions):
    """A 'dark' preferred color scheme maps to blink setting '0' on
    Gentoo-patched version strings."""
    config_stub.val.colors.webpage.preferred_color_scheme = 'dark'
    settings = darkmode.settings(versions=gentoo_versions, special_flags=[])
    expected = [('preferredColorScheme', '0')]
    assert settings['blink-settings'] == expected
class ExampleDataset(Dataset):
    """Minimal one-item dataset used to exercise evaluation hooks in tests."""

    def __init__(self):
        # Running counter used by the test harness across evaluate() calls.
        self.index = 0
        # Canned metric values returned/consumed one per evaluation round.
        self.eval_result = [1, 4, 3, 7, 2, (- 3), 4, 6]

    def __getitem__(self, idx):
        # Every index yields the same constant tensor sample.
        results = dict(x=torch.tensor([1]))
        return results

    def __len__(self):
        return 1

    # NOTE(review): '_autospec' looks like a mangled decorator (e.g. a mock
    # patch with autospec) lost in transcription — confirm against upstream.
    _autospec
    def evaluate(self, results, logger=None):
        """No-op evaluation stub; tests typically patch or spy on this."""
        pass
@pytest.hookimpl(hookwrapper=True)
def pytest_fixture_setup(fixturedef: FixtureDef, request: SubRequest) -> Optional[object]:
    """Wrap fixture setup to special-case user-defined 'event_loop' fixtures.

    Restored decorator: the transcription left a bare ``(hookwrapper=True)``
    expression, which is a syntax error.

    For an 'event_loop' fixture: register cleanup finalizers, warn when the
    fixture was redefined outside pytest-asyncio, close any previous
    non-pytest-asyncio loop, and install the produced loop on the policy.
    """
    if (fixturedef.argname == 'event_loop'):
        # Finalizers run in reverse registration order after fixture teardown.
        _add_finalizers(fixturedef, _close_event_loop, _restore_event_loop_policy(asyncio.get_event_loop_policy()), _provide_clean_event_loop)
        outcome = (yield)
        loop = outcome.get_result()
        fixture_filename = inspect.getsourcefile(fixturedef.func)
        if (not getattr(loop, '__original_fixture_loop', False)):
            # The fixture was overridden by user code — point at its location.
            (_, fixture_line_number) = inspect.getsourcelines(fixturedef.func)
            warnings.warn((_REDEFINED_EVENT_LOOP_FIXTURE_WARNING % (fixture_filename, fixture_line_number)), DeprecationWarning)
        policy = asyncio.get_event_loop_policy()
        try:
            with warnings.catch_warnings():
                # get_event_loop() warns when no loop is set; we only want to
                # peek at (and possibly close) the previous loop.
                warnings.simplefilter('ignore', DeprecationWarning)
                old_loop = policy.get_event_loop()
            is_pytest_asyncio_loop = getattr(old_loop, '__pytest_asyncio', False)
            if ((old_loop is not loop) and (not is_pytest_asyncio_loop)):
                old_loop.close()
        except RuntimeError:
            # No previous loop to close.
            pass
        policy.set_event_loop(loop)
        return
    (yield)
@_REGISTRY.register()
class CIFAR10C(DatasetBase):
    """CIFAR-10 -> CIFAR-10-C dataset for robustness/domain-adaptation runs.

    The source domain is clean CIFAR-10; the target domain is one corruption
    type at one severity level from CIFAR-10-C. Both are expected on disk as
    per-class image directories.

    Fix: the registration call was a bare ``_REGISTRY.register()`` statement,
    which discards the returned decorator so the class was never registered;
    it must be applied as a decorator.
    """

    dataset_dir = ''
    domains = ['cifar10', 'cifar10_c']

    def __init__(self, cfg):
        root = osp.abspath(osp.expanduser(cfg.DATASET.ROOT))
        self.dataset_dir = root
        self.check_input_domains(cfg.DATASET.SOURCE_DOMAINS, cfg.DATASET.TARGET_DOMAINS)
        source_domain = cfg.DATASET.SOURCE_DOMAINS[0]
        target_domain = cfg.DATASET.TARGET_DOMAINS[0]
        # Only the cifar10 -> cifar10_c direction is supported.
        assert (source_domain == self.domains[0])
        assert (target_domain == self.domains[1])
        c_type = cfg.DATASET.CIFAR_C_TYPE
        c_level = cfg.DATASET.CIFAR_C_LEVEL
        if (not c_type):
            raise ValueError('Please specify DATASET.CIFAR_C_TYPE in the config file')
        assert (c_type in AVAI_C_TYPES), f'C_TYPE is expected to belong to {AVAI_C_TYPES}, but got "{c_type}"'
        # CIFAR-10-C defines severity levels 1..5.
        assert (1 <= c_level <= 5)
        train_dir = osp.join(self.dataset_dir, source_domain, 'train')
        test_dir = osp.join(self.dataset_dir, target_domain, c_type, str(c_level))
        if (not osp.exists(test_dir)):
            raise ValueError
        train = self._read_data(train_dir)
        test = self._read_data(test_dir)
        super().__init__(train_x=train, test=test)

    def _read_data(self, data_dir):
        """Read one split: each subdirectory of *data_dir* is one class."""
        class_names = listdir_nohidden(data_dir)
        class_names.sort()
        items = []
        for (label, class_name) in enumerate(class_names):
            class_dir = osp.join(data_dir, class_name)
            imnames = listdir_nohidden(class_dir)
            for imname in imnames:
                impath = osp.join(class_dir, imname)
                item = Datum(impath=impath, label=label, domain=0)
                items.append(item)
        return items
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedModelZoo(TestCasePlus):
    """End-to-end check that zero_to_fp32.py reconstructs an fp32 model from
    a DeepSpeed ZeRO checkpoint produced by each task's example script.

    Restored decorators: the transcription reduced ``@require_deepspeed`` /
    ``@require_torch_gpu`` to bare ``_deepspeed`` / ``_torch_gpu`` names and
    dropped ``@parameterized.expand`` down to a bare argument tuple.
    """

    def get_task_cmd(self, task, stage):
        """Return (command list, output_dir) for the given task and ZeRO stage."""
        if (task not in task_cmds):
            raise ValueError(f"don't know of task {task}, have {task_cmds.keys()}")
        cmd = task_cmds[task]
        args_ds = f'--deepspeed {self.test_file_dir_str}/ds_config_{stage}.json'.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args_out = f'--output_dir {output_dir}'.split()
        cmd += (args_ds + args_out)
        return (cmd, output_dir)

    @parameterized.expand(params, name_func=parameterized_custom_name_func)
    def test_zero_to_fp32(self, stage, task):
        # Train for a single step so checkpoint-1 exists, then run the
        # generated zero_to_fp32.py to consolidate it into a plain .bin file.
        (cmd, output_dir) = self.get_task_cmd(task, stage)
        cmd += '--save_steps 1'.split()
        execute_subprocess_async(cmd, env=self.get_env())
        chkpt_dir = f'{output_dir}/checkpoint-1'
        recovered_model_path = f'{chkpt_dir}/out.bin'
        cmd = f'{chkpt_dir}/zero_to_fp32.py {chkpt_dir} {recovered_model_path}'
        subprocess.check_call(cmd, shell=True)
        assert os.path.exists(recovered_model_path), f'{recovered_model_path} was not found'
def test_set_inf_nodata(tmpdir):
    """A float('inf') nodata value survives dataset creation and is reported back."""
    out_path = str(tmpdir.join('lol.tif'))
    with rasterio.open('tests/data/RGB.byte.tif') as src:
        profile = src.meta
    profile['dtype'] = 'float32'
    profile['nodata'] = float('inf')
    with rasterio.open(out_path, 'w', **profile) as dst:
        # The value must round-trip through every nodata accessor.
        for reported in (dst.nodata, dst.meta['nodata']):
            assert numpy.isinf(reported)
        assert numpy.isinf(dst.nodatavals).all()
class TestPruningLRUProxiedImagesToAllowBlobUpload():
    """Tests for LRU auto-pruning of proxy-cached images so that incoming
    blob uploads can fit inside a namespace quota."""

    upstream_registry = 'docker.io'
    upstream_repository = 'library/busybox'
    orgname = 'proxy-cache'
    repository = f'{orgname}/{upstream_repository}'
    tag = '1.35.0'

    # NOTE(review): the bare '(autouse=True)' below looks like a stripped
    # '@pytest.fixture(autouse=True)' decorator -- confirm against upstream.
    (autouse=True)
    def setup(self, app):
        # Fresh org with a proxy-cache config pointing at the upstream registry.
        self.user = get_user('devtable')
        # NOTE(review): '{self.orgname}' is a plain string (no f-prefix), so the
        # literal braces are stored -- presumably unintended; verify.
        self.org = create_organization(self.orgname, '{self.orgname}', self.user)
        self.org.save()
        self.config = create_proxy_cache_config(org_name=self.orgname, upstream_registry=self.upstream_registry, expiration_s=3600)

    # NOTE(review): the bare tuple lines before each test look like stripped
    # '@patch("data.registry_model.registry_proxy_model.Proxy", MagicMock())'
    # decorators -- confirm against upstream.
    ('data.registry_model.registry_proxy_model.Proxy', MagicMock())
    def test_auto_pruning_skipped_if_no_quota_set(self, create_repo):
        # With no quota configured, the pre-upload check must be a no-op.
        repo_ref = create_repo(self.orgname, self.upstream_repository, self.user)
        input_manifest = parse_manifest_from_bytes(Bytes.for_string_or_unicode(UBI8_8_4_MANIFEST_SCHEMA2), DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE)
        proxy_model = ProxyModel(self.orgname, self.upstream_repository, self.user)
        try:
            proxy_model._check_image_upload_possible_or_prune(repo_ref, input_manifest)
        except QuotaExceededException:
            assert False, 'No exception should be raised here'

    ('data.registry_model.registry_proxy_model.Proxy', MagicMock())
    def test_auto_pruning_skipped_for_manifest_list(self, create_repo, initialized_db):
        # Manifest lists carry no blobs of their own: no pruning happens and
        # the namespace size stays at zero.
        repo_ref = create_repo(self.orgname, self.upstream_repository, self.user)
        input_manifest = parse_manifest_from_bytes(Bytes.for_string_or_unicode(testdata.PYTHON_LATEST['manifest']), DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE)
        proxy_model = ProxyModel(self.orgname, self.upstream_repository, self.user)
        try:
            proxy_model._check_image_upload_possible_or_prune(repo_ref, input_manifest)
        except QuotaExceededException:
            assert False, 'No exception should be raised here'
        assert (namespacequota.get_namespace_size(self.orgname) == 0)

    ('data.registry_model.registry_proxy_model.Proxy', MagicMock())
    def test_raises_quota_exceed_when_blob_is_bigger_than_max_quota(self, create_repo):
        # A manifest larger than the entire quota can never fit, even after
        # pruning, so the check must raise.
        repo_ref = create_repo(self.orgname, self.upstream_repository, self.user)
        input_manifest = parse_manifest_from_bytes(Bytes.for_string_or_unicode(testdata.PYTHON_LINUX_AMD64['manifest']), DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE)
        proxy_model = ProxyModel(self.orgname, self.upstream_repository, self.user)
        limit_bytes = 2048
        namespace = user.get_user_or_org(self.orgname)
        namespacequota.create_namespace_quota(namespace, limit_bytes)
        with pytest.raises(QuotaExceededException):
            proxy_model._check_image_upload_possible_or_prune(repo_ref, input_manifest)

    ('data.registry_model.registry_proxy_model.Proxy', MagicMock())
    def test_auto_pruning_when_quota_limit_reached(self, create_repo, proxy_manifest_response, initialized_db):
        # Pull one tag to fill the namespace, then verify that checking a
        # second manifest LRU-prunes the first tag instead of failing.
        repo_ref = create_repo(self.orgname, self.upstream_repository, self.user)
        # NOTE(review): the quota value was lost in extraction -- the
        # right-hand side below is missing in the original text.
        limit_bytes =
        namespace = user.get_user_or_org(self.orgname)
        namespacequota.create_namespace_quota(namespace, limit_bytes)
        proxy_mock = proxy_manifest_response('8.4', UBI8_8_4_MANIFEST_SCHEMA2, DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE)
        with patch('data.registry_model.registry_proxy_model.Proxy', MagicMock(return_value=proxy_mock)):
            proxy_model = ProxyModel(self.orgname, self.upstream_repository, self.user)
            first_manifest = proxy_model.get_repo_tag(repo_ref, '8.4')
            assert (first_manifest is not None)
            first_tag = oci.tag.get_tag(repo_ref.id, '8.4')
            assert (first_tag is not None)
            # NOTE(review): the expected size constant after '==' was lost in
            # extraction -- this line is incomplete in the original text.
            assert (namespacequota.get_namespace_size(self.orgname) == )
            input_manifest = parse_manifest_from_bytes(Bytes.for_string_or_unicode(testdata.UBI8_LINUX_AMD64['manifest']), DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE)
            # The check prunes and returns None rather than raising.
            assert (proxy_model._check_image_upload_possible_or_prune(repo_ref, input_manifest) is None)
            # The least-recently-used tag pulled first must be gone now.
            first_tag = oci.tag.get_tag(repo_ref.id, '8.4')
            assert (first_tag is None)
def test_mouse_press_event_small_item_inside_handle_free_center(view, item):
    """A left press in the handle-free center of a small selected item falls
    through to the default QGraphicsPixmapItem handler without being accepted."""
    view.scene.addItem(item)
    item.setSelected(True)
    press = MagicMock()
    press.pos.return_value = QtCore.QPointF(10, 10)
    press.button.return_value = Qt.MouseButton.LeftButton
    small_rect = QtCore.QRectF(0, 0, 20, 20)
    with patch('PyQt6.QtWidgets.QGraphicsPixmapItem.mousePressEvent') as base_handler:
        with patch.object(item, 'bounding_rect_unselected', return_value=small_rect):
            item.mousePressEvent(press)
        press.accept.assert_not_called()
        base_handler.assert_called_once_with(press)
def accuracy(output, target, topk=(1,)):
    """Compute top-k precision (in percent) for each k in ``topk``.

    ``output`` is a (batch, num_classes) score tensor and ``target`` a
    (batch,) tensor of class indices. Returns a list with one 1-element
    tensor per requested k.
    """
    k_max = max(topk)
    num_samples = target.size(0)
    # Indices of the k_max highest scores per sample, laid out (k_max, batch).
    _, top_indices = output.topk(k_max, 1, True, True)
    top_indices = top_indices.t()
    hits = top_indices.eq(target.view(1, -1).expand_as(top_indices))
    scores = []
    for k in topk:
        # A sample counts as correct if the target appears in its first k guesses.
        hits_k = hits[:k].reshape(-1).float().sum(0, keepdim=True)
        scores.append(hits_k.mul_(100.0 / num_samples))
    return scores
@pytest.mark.parametrize('repeat', [1, 2])
@pytest.mark.parametrize('preset_name', [None, 'Starter Preset'])
@pytest.mark.parametrize('no_retry', [False, True])
def test_generate_logic(no_retry: bool, preset_name: (str | None), repeat: int, mocker: pytest_mock.MockerFixture, preset_manager):
    """generate_from_permalink_logic / generate_from_preset_logic drive the
    generator `repeat` times and save the resulting layout exactly once.

    NOTE(review): the three decorators were reconstructed -- the original
    lines lost their '@pytest.mark' prefix during extraction; confirm.
    """
    layout_description = MagicMock()
    mock_run = mocker.patch('asyncio.run', return_value=layout_description)
    mock_generate = mocker.patch('randovania.generator.generator.generate_and_validate_description', new_callable=MagicMock)
    mock_from_str: MagicMock = mocker.patch('randovania.layout.permalink.Permalink.from_str', autospec=True)
    args = MagicMock()
    args.output_file = Path('asdfasdf/qwerqwerqwer/zxcvzxcv.json')
    args.no_retry = no_retry
    args.repeat = repeat
    if preset_name is None:
        # Permalink flow: the permalink string supplies the generator params.
        args.permalink = '<the permalink>'
        mock_from_str.return_value.seed_hash = b'12345'
    else:
        # Preset flow: game + preset name supply the generator params.
        args.game = RandovaniaGame.METROID_PRIME_ECHOES.value
        args.preset_name = [preset_name]
        args.seed_number = 0
        args.race = False
        args.development = False
    extra_args = {}
    if no_retry:
        extra_args['attempts'] = 0
    if preset_name is None:
        generator_params: GeneratorParameters = mock_from_str.return_value.parameters
    else:
        args.permalink = None
        preset = preset_manager.included_preset_with(RandovaniaGame.METROID_PRIME_ECHOES, preset_name).get_preset()
        generator_params = GeneratorParameters(0, True, [preset])
    # Run
    if preset_name is None:
        randovania.cli.commands.generate.generate_from_permalink_logic(args)
    else:
        randovania.cli.commands.generate.generate_from_preset_logic(args)
    # Assert
    if preset_name is None:
        mock_from_str.assert_called_once_with(args.permalink)
    else:
        mock_from_str.assert_not_called()
    mock_generate.assert_has_calls(([call(generator_params=generator_params, status_update=ANY, validate_after_generation=args.validate, timeout=None, **extra_args)] * repeat))
    mock_run.assert_has_calls(([call(mock_generate.return_value)] * repeat))
    save_file_mock: MagicMock = layout_description.save_to_file
    save_file_mock.assert_called_once_with(args.output_file)
def test_mlp_grad(test, device):
    """Check Warp's MLP forward/backward against golden values from a
    reference PyTorch run (loaded via load_golden())."""
    results = load_golden()
    # Reference tensors. x/y (and their grads) are transposed -- the kernel
    # apparently works on a (features, batch) layout; see the reshape(n, b)
    # and reshape(m, b) checks below.
    torch_weights = results['weights']
    torch_weights_grad = results['weights_grad']
    torch_bias = results['bias']
    torch_bias_grad = results['bias_grad']
    torch_x = results['x'].T
    torch_x_grad = results['x_grad'].T
    torch_y = results['y'].T
    torch_y_grad = results['y_grad'].T
    torch_loss = results['loss'].T
    # Warp inputs/outputs with gradients enabled so the tape can backprop.
    weights = wp.array(torch_weights, dtype=float, device=device, requires_grad=True)
    bias = wp.array(torch_bias, dtype=float, device=device, requires_grad=True)
    x = wp.array(torch_x, dtype=float, device=device, requires_grad=True)
    y = wp.array(torch_y, dtype=float, device=device, requires_grad=True)
    y.zero_()
    loss = wp.zeros(1, dtype=float, device=device, requires_grad=True)
    m = torch_weights.shape[0]  # output features
    n = torch_weights.shape[1]  # input features
    b = torch_x.shape[1]  # batch size
    tape = wp.Tape()
    with tape:
        wp.launch(mlp_kernel, dim=b, inputs=[weights, bias, x, y], device=device)
        wp.launch(loss_kernel, dim=y.shape, inputs=[y, loss], device=device)
    tape.backward(loss=loss)
    # Loose tolerance (0.1): values come from a different math path/device.
    assert_np_equal(y.numpy().reshape((- 1), b), torch_y, tol=0.1)
    assert_np_equal(loss.numpy(), torch_loss, tol=0.1)
    assert_np_equal(tape.gradients[weights].numpy().reshape(m, n), torch_weights_grad, tol=0.1)
    assert_np_equal(tape.gradients[bias].numpy(), torch_bias_grad, tol=0.1)
    assert_np_equal(tape.gradients[x].numpy().reshape(n, b), torch_x_grad, tol=0.1)
    assert_np_equal(tape.gradients[y].numpy().reshape(m, b), torch_y_grad, tol=0.1)
def test_models():
    """Cached SGD wrappers keep passing the generic checks on repeated runs."""
    for _attempt in range(3):
        cached_clf = CacheClassifier('clf', SGDClassifier(loss='log'))
        check_classifier(cached_clf, has_staged_pp=False, has_importances=False)
        cached_reg = CacheRegressor('reg', SGDRegressor())
        check_regression(cached_reg, has_staged_predictions=False, has_importances=False)
    # Leave no cache entries behind for other tests.
    cache_helper.clear_cache()
def _lru_cache_with_config_path(func: Callable):
    """Wrap ``func(sensor_name)`` with an LRU cache keyed on both the sensor
    name and satpy's current ``config_path`` setting, so a config-path change
    does not serve stale results."""
    @lru_cache()  # NOTE(review): decorator reconstructed -- the original line lost its '@lru' prefix during extraction
    def _call_without_config_path_wrapper(sensor_name, _):
        # The second argument exists only to participate in the cache key;
        # the wrapped function never sees it.
        return func(sensor_name)

    def _add_config_path_wrapper(sensor_name: str):
        config_path = satpy.config.get('config_path')
        # Lists are unhashable; freeze to a tuple for use in the cache key.
        config_path = tuple(config_path)
        return _call_without_config_path_wrapper(sensor_name, config_path)

    wrapper = update_wrapper(_add_config_path_wrapper, func)
    wrapper = _update_cached_wrapper(wrapper, _call_without_config_path_wrapper)
    return wrapper
def update_best_score(new_score, old_score, is_higher_better):
    """Return ``(best_score, updated)`` after comparing ``new_score`` with ``old_score``.

    ``old_score`` of None means "no previous score", in which case the new
    score is always adopted. ``updated`` is True only when ``new_score``
    strictly beats ``old_score`` in the requested direction.

    Fix: the original used a truthiness test (``if not old_score``), which
    wrongly treated a legitimate previous score of 0 (or 0.0) as missing.
    """
    if old_score is None:
        return (new_score, True)
    if is_higher_better:
        return (max(new_score, old_score), new_score > old_score)
    return (min(new_score, old_score), new_score < old_score)
class SponsorshipPackageTests(TestCase):
    """Tests for SponsorshipPackage.has_user_customization /
    get_user_customization and for cloning a package into another year."""

    def setUp(self):
        # A package seeded with three benefits of its own.
        self.package = baker.make('sponsors.SponsorshipPackage')
        self.package_benefits = baker.make(SponsorshipBenefit, _quantity=3)
        self.package.benefits.add(*self.package_benefits)

    def test_has_user_customization_if_benefit_from_other_package(self):
        # An extra benefit not in the package counts as 'added_by_user'.
        extra = baker.make(SponsorshipBenefit)
        benefits = ([extra] + self.package_benefits)
        has_customization = self.package.has_user_customization(benefits)
        customization = {'added_by_user': {extra}, 'removed_by_user': set()}
        self.assertTrue(has_customization)
        self.assertEqual(customization, self.package.get_user_customization(benefits))

    def test_no_user_customization_if_all_benefits_from_package(self):
        # Exactly the package's own benefits -> no customization either way.
        has_customization = self.package.has_user_customization(self.package_benefits)
        customization = {'added_by_user': set(), 'removed_by_user': set()}
        self.assertFalse(has_customization)
        self.assertEqual(customization, self.package.get_user_customization(self.package_benefits))

    def test_has_user_customization_if_missing_package_benefit(self):
        # Dropping one of the package's benefits counts as 'removed_by_user'.
        removed_benefit = self.package_benefits.pop()
        has_customization = self.package.has_user_customization(self.package_benefits)
        customization = {'added_by_user': set(), 'removed_by_user': {removed_benefit}}
        self.assertTrue(has_customization)
        self.assertEqual(customization, self.package.get_user_customization(self.package_benefits))

    def test_no_user_customization_if_at_least_one_of_conflicts_is_passed(self):
        # Mutually-conflicting package benefits: passing one member of the
        # conflict set is enough to avoid being flagged as a customization.
        benefits = baker.make(SponsorshipBenefit, _quantity=3)
        benefits[0].conflicts.add(benefits[1])
        benefits[0].conflicts.add(benefits[2])
        benefits[1].conflicts.add(benefits[2])
        self.package.benefits.add(*benefits)
        customization = self.package.has_user_customization((self.package_benefits + benefits[:1]))
        self.assertFalse(customization)

    def test_user_customization_if_missing_benefit_with_conflict(self):
        # None of the conflicting benefits is passed at all -> customization.
        benefits = baker.make(SponsorshipBenefit, _quantity=3)
        benefits[0].conflicts.add(benefits[1])
        benefits[0].conflicts.add(benefits[2])
        benefits[1].conflicts.add(benefits[2])
        self.package.benefits.add(*benefits)
        customization = self.package.has_user_customization(self.package_benefits)
        self.assertTrue(customization)

    def test_user_customization_if_missing_benefit_with_conflict_from_one_or_more_conflicts_set(self):
        # Two independent conflict groups; covering only one of them still
        # leaves the other unsatisfied -> customization.
        benefits = baker.make(SponsorshipBenefit, _quantity=4)
        benefits[0].conflicts.add(benefits[1])
        benefits[2].conflicts.add(benefits[3])
        self.package.benefits.add(*benefits)
        benefits = (self.package_benefits + [benefits[0]])
        customization = self.package.has_user_customization(benefits)
        self.assertTrue(customization)

    def test_clone_package_to_next_year(self):
        # Clone copies the package's fields but stamps the requested year.
        pkg = baker.make(SponsorshipPackage, year=2022, advertise=True, logo_dimension=300)
        (pkg_2023, created) = pkg.clone(year=2023)
        self.assertTrue(created)
        self.assertTrue(pkg_2023.pk)
        self.assertEqual(2023, pkg_2023.year)
        self.assertEqual(pkg.name, pkg_2023.name)
        self.assertEqual(pkg.order, pkg_2023.order)
        self.assertEqual(pkg.sponsorship_amount, pkg_2023.sponsorship_amount)
        self.assertEqual(True, pkg_2023.advertise)
        self.assertEqual(300, pkg_2023.logo_dimension)
        self.assertEqual(pkg.slug, pkg_2023.slug)

    def test_clone_does_not_repeate_already_cloned_package(self):
        # Cloning twice into the same year reuses the first clone.
        (pkg_2023, created) = self.package.clone(year=2023)
        (repeated_pkg_2023, created) = self.package.clone(year=2023)
        self.assertFalse(created)
        self.assertEqual(pkg_2023.pk, repeated_pkg_2023.pk)
class ImageNet100(data.Dataset):
    """ImageNet-100 dataset partitioned across federated clients.

    ``dataidxs`` selects the local shard: None -> the full dataset, an int ->
    that single client's shard, a list of ints -> the union of those shards.
    When ``alpha`` is set, shards come from Dirichlet sampling (explicit index
    arrays); otherwise they are contiguous ``(begin, end)`` ranges.
    """

    def __init__(self, data_dir, dataidxs=None, train=True, transform=None, target_transform=None, download=False, client_num=100, alpha=None):
        self.dataidxs = dataidxs
        self.client_num = client_num
        self.train = train
        self.transform = transform
        self.target_transform = target_transform
        self.download = download
        self.loader = default_loader
        if self.train:
            self.data_dir = os.path.join(data_dir, 'train')
        else:
            self.data_dir = os.path.join(data_dir, 'val')
        self.alpha = alpha
        (self.all_data, self.data_local_num_dict, self.net_dataidx_map) = self.__getdatasets__()
        self.initial_local_data()

    def initial_local_data(self):
        """Materialize self.local_data from self.dataidxs (see class docstring)."""
        # Fix: use `is None` instead of `== None`, and `type(...) is int`
        # instead of `type(...) == int` (same behavior, idiomatic form).
        if self.dataidxs is None:
            self.local_data = self.all_data
        elif type(self.dataidxs) is int:
            if self.alpha is not None:
                # Dirichlet map stores an explicit index array per client.
                self.local_data = self.all_data[self.net_dataidx_map[self.dataidxs]]
            else:
                (begin, end) = self.net_dataidx_map[self.dataidxs]
                self.local_data = self.all_data[begin:end]
        else:
            # A list of client ids is only supported for contiguous shards.
            assert self.alpha is None
            self.local_data = []
            for idxs in self.dataidxs:
                (begin, end) = self.net_dataidx_map[idxs]
                self.local_data += self.all_data[begin:end]

    def get_local_data(self):
        return self.local_data

    def get_net_dataidx_map(self):
        return self.net_dataidx_map

    def get_data_local_num_dict(self):
        return self.data_local_num_dict

    def __getdatasets__(self):
        """Scan the class folders and build (all_data, per-client counts, client->indices map)."""
        (classes, class_to_idx) = find_classes(self.data_dir)
        IMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif']
        if self.alpha is not None:
            (all_data, data_local_num_dict, net_dataidx_map) = make_dataset_with_dirichlet_sampling(self.data_dir, class_to_idx, IMG_EXTENSIONS, self.client_num, num_classes=100, alpha=self.alpha)
        else:
            (all_data, data_local_num_dict, net_dataidx_map) = make_dataset(self.data_dir, class_to_idx, IMG_EXTENSIONS, num_classes=100)
        if len(all_data) == 0:
            raise RuntimeError(((('Found 0 files in subfolders of: ' + self.data_dir) + '\nSupported extensions are: ') + ','.join(IMG_EXTENSIONS)))
        return (all_data, data_local_num_dict, net_dataidx_map)

    def __getitem__(self, index):
        """Return (transformed image, transformed target) for a local-shard index."""
        (path, target) = self.local_data[index]
        img = self.loader(path)
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return (img, target)

    def __len__(self):
        return len(self.local_data)
class WideResNet1(nn.Module):
    """Wide ResNet for 32x32 inputs.

    ``depth`` must satisfy (depth - 4) % 6 == 0; ``widen_factor`` scales the
    channel counts; ``dropRate`` is forwarded to the residual blocks.
    """

    def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0):
        super(WideResNet1, self).__init__()
        nChannels = [16, (16 * widen_factor), (32 * widen_factor), (64 * widen_factor)]
        assert (((depth - 4) % 6) == 0)
        # Fix: integer division -- n is a per-group block count, not a float.
        n = (depth - 4) // 6
        block = BasicBlock
        self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1, padding=1, bias=False)
        self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
        self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
        self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
        self.bn1 = nn.BatchNorm2d(nChannels[3])
        self.relu = nn.ReLU(inplace=True)
        self.fc = nn.Linear(nChannels[3], num_classes)
        # Final feature width; an int, not the list (see forward()).
        self.nChannels = nChannels[3]
        # He-style init for convs; BN to identity; linear biases to zero.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                fan_out = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
                m.weight.data.normal_(0, math.sqrt((2.0 / fan_out)))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()

    def forward(self, x):
        out = self.conv1(x)
        out = self.block1(out)
        out = self.block2(out)
        out = self.block3(out)
        out = self.relu(self.bn1(out))
        out = F.avg_pool2d(out, 8)
        # Fix: self.nChannels is already the final channel count (an int);
        # the original `self.nChannels[3]` would raise TypeError at runtime.
        out = out.view(-1, self.nChannels)
        return self.fc(out)
class FastAIMixedOptim(OptimWrapper):
    """OptimWrapper for mixed-precision training: keeps fp32 master params,
    un-scales gradients by ``loss_scale`` on each step, and syncs weights
    back into the model."""

    @classmethod  # NOTE(review): reconstructed -- `create` takes `cls` but its decorator line was lost during extraction
    def create(cls, opt_func, lr, layer_groups, model, flat_master=False, loss_scale=512.0, **kwargs):
        """Build the wrapped optimizer over fp32 master copies of the model params."""
        opt = OptimWrapper.create(opt_func, lr, layer_groups, **kwargs)
        (opt.model_params, opt.master_params) = get_master(layer_groups, flat_master)
        opt.flat_master = flat_master
        opt.loss_scale = loss_scale
        opt.model = model
        # Re-creating opt.opt resets the hyper-params; save and restore them.
        (mom, wd, beta) = (opt.mom, opt.wd, opt.beta)
        lrs = [lr for lr in opt._lr for _ in range(2)]
        opt_params = [{'params': mp, 'lr': lr} for (mp, lr) in zip(opt.master_params, lrs)]
        opt.opt = opt_func(opt_params)
        (opt.mom, opt.wd, opt.beta) = (mom, wd, beta)
        return opt

    def step(self):
        """Copy model grads to master, un-scale, step, zero model grads, sync weights back."""
        model_g2master_g(self.model_params, self.master_params, self.flat_master)
        for group in self.master_params:
            for param in group:
                # Undo the loss scaling before the optimizer consumes the grads.
                param.grad.div_(self.loss_scale)
        super(FastAIMixedOptim, self).step()
        self.model.zero_grad()
        # Push the updated fp32 master weights back into the model.
        master2model(self.model_params, self.master_params, self.flat_master)
def majority_vote(nsqls: List, pred_answer_list: List, allow_none_and_empty_answer: bool=False, allow_error_answer: bool=False, answer_placeholder: Union[(str, int)]='<error|empty>', vote_method: str='prob', answer_biased: Union[(str, int)]=None, answer_biased_weight: float=None):
    """Pick the final answer by majority vote over (nsql, logprob) candidates.

    nsqls: list of (nsql, logprob) pairs, aligned with pred_answer_list.
    pred_answer_list: the executed answer for each nsql (a list, or '<error>').
    vote_method: 'simple' (vote count, logprob tiebreak), 'prob' (sum of
    exp(logprob) mass), 'answer_biased' (scale one answer's count by
    answer_biased_weight), or 'lf_biased' (weight votes by markers in the
    logical form). Returns (pred_answer, nsqls_that_voted_for_it).
    """

    def _compare_answer_vote_simple(a, b):
        # Higher vote count wins; ties broken by the first nsql's logprob.
        if (a[1]['count'] > b[1]['count']):
            return 1
        elif (a[1]['count'] < b[1]['count']):
            return (- 1)
        elif (a[1]['nsqls'][0][1] > b[1]['nsqls'][0][1]):
            return 1
        elif (a[1]['nsqls'][0][1] == b[1]['nsqls'][0][1]):
            return 0
        else:
            return (- 1)

    def _compare_answer_vote_with_prob(a, b):
        # Compare total probability mass (exp of logprobs) behind each answer.
        return (1 if (sum([math.exp(nsql[1]) for nsql in a[1]['nsqls']]) > sum([math.exp(nsql[1]) for nsql in b[1]['nsqls']])) else (- 1))

    # Tally answers; keys are tuples so list-valued answers are hashable.
    candi_answer_dict = dict()
    for ((nsql, logprob), pred_answer) in zip(nsqls, pred_answer_list):
        if allow_none_and_empty_answer:
            if ((pred_answer == [None]) or (pred_answer == [])):
                pred_answer = [answer_placeholder]
        if allow_error_answer:
            if (pred_answer == '<error>'):
                pred_answer = [answer_placeholder]
        # Skip invalid answers that were not converted to placeholders above.
        if ((pred_answer == '<error>') or (pred_answer == [None]) or (pred_answer == [])):
            continue
        if (candi_answer_dict.get(tuple(pred_answer), None) is None):
            candi_answer_dict[tuple(pred_answer)] = {'count': 0, 'nsqls': []}
        answer_info = candi_answer_dict.get(tuple(pred_answer), None)
        answer_info['count'] += 1
        answer_info['nsqls'].append([nsql, logprob])
    # No valid answer at all: fall back to the placeholder and the first nsql.
    if (len(candi_answer_dict) == 0):
        return (answer_placeholder, [(nsqls[0][0], nsqls[0][(- 1)])])
    if (vote_method == 'simple'):
        sorted_candi_answer_list = sorted(list(candi_answer_dict.items()), key=cmp_to_key(_compare_answer_vote_simple), reverse=True)
    elif (vote_method == 'prob'):
        sorted_candi_answer_list = sorted(list(candi_answer_dict.items()), key=cmp_to_key(_compare_answer_vote_with_prob), reverse=True)
    elif (vote_method == 'answer_biased'):
        # Inflate the count of one specific answer before the simple vote.
        assert ((answer_biased_weight is not None) and (answer_biased_weight > 0))
        for (answer, answer_dict) in candi_answer_dict.items():
            if (answer == (answer_biased,)):
                answer_dict['count'] *= answer_biased_weight
        sorted_candi_answer_list = sorted(list(candi_answer_dict.items()), key=cmp_to_key(_compare_answer_vote_simple), reverse=True)
    elif (vote_method == 'lf_biased'):
        # Weight votes by markers present in the logical-form text.
        # NOTE(review): both membership tests below check for '' which is
        # always True for any string -- the marker literals appear to have
        # been lost (extraction damage); recover them from the upstream source.
        for (answer, answer_dict) in candi_answer_dict.items():
            count = 0
            for (nsql, _) in answer_dict['nsqls']:
                if ('' in nsql):
                    count += 10
                elif ('' in nsql):
                    count += 10
                else:
                    count += 1
            answer_dict['count'] = count
        sorted_candi_answer_list = sorted(list(candi_answer_dict.items()), key=cmp_to_key(_compare_answer_vote_simple), reverse=True)
    else:
        raise ValueError(f'Vote method {vote_method} is not supported.')
    pred_answer_info = sorted_candi_answer_list[0]
    (pred_answer, pred_answer_nsqls) = (list(pred_answer_info[0]), pred_answer_info[1]['nsqls'])
    return (pred_answer, pred_answer_nsqls)
class Bottleneck(nn.Module):
    """ResNet bottleneck block (1x1 -> 3x3 -> 1x1 convolutions) with optional
    DropBlock regularization after each activation."""

    # Output channels are `expansion * planes`.
    expansion = 4

    def __init__(self, in_planes, planes, stride=1, drop=False, block_size=4):
        super(Bottleneck, self).__init__()
        self.drop = drop
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        if drop:
            self.drop1 = DropBlock2d(block_size=block_size)
        # Only the 3x3 conv carries the spatial stride.
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        if drop:
            self.drop2 = DropBlock2d(block_size=block_size)
        self.conv3 = nn.Conv2d(planes, (self.expansion * planes), kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d((self.expansion * planes))
        if drop:
            self.drop3 = DropBlock2d(block_size=block_size)
        # Identity shortcut, or a 1x1 projection when shape/stride changes.
        self.shortcut = nn.Sequential()
        if ((stride != 1) or (in_planes != (self.expansion * planes))):
            self.shortcut = nn.Sequential(nn.Conv2d(in_planes, (self.expansion * planes), kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d((self.expansion * planes)))

    def forward(self, x):
        if self.drop:
            # Same computation as below, but with DropBlock applied after the
            # first two activations and after the post-residual activation.
            out = self.drop1(F.relu(self.bn1(self.conv1(x))))
            out = self.drop2(F.relu(self.bn2(self.conv2(out))))
            out = self.bn3(self.conv3(out))
            out += self.shortcut(x)
            out = self.drop3(F.relu(out))
        else:
            out = F.relu(self.bn1(self.conv1(x)))
            out = F.relu(self.bn2(self.conv2(out)))
            out = self.bn3(self.conv3(out))
            out += self.shortcut(x)
            out = F.relu(out)
        return out
def test_new_project_does_not_fail_pre_commit(cwd, pre_commit, putup):
    """A freshly scaffolded project passes its own pre-commit hooks."""
    name = 'my_project'
    run(f'{putup} --pre-commit --dsproject -p my_package --namespace my.ns {name}')
    with cwd.join(name).as_cwd():
        try:
            run(f'{pre_commit} install')
            run(f'{pre_commit} run --all')
        except CalledProcessError as ex:
            combined_output = (ex.stdout or '') + (ex.stderr or '')
            # Windows path-length limits can break the deeply nested layout;
            # treat that as an environment issue, not a scaffold bug.
            if os.name == 'nt' and 'filename or extension is too long' in combined_output:
                pytest.skip('Sometimes Windows have problems with nested files')
            raise
@register_infer_shape
@register_useless
@register_canonicalize
@register_specialize
@node_rewriter([Subtensor])
def local_subtensor_of_alloc(fgraph, node):
    """Rewrite ``Subtensor(Alloc(val, *dims), *slices)`` into
    ``Alloc(val[...], *new_dims)``.

    Slicing the broadcast *value* instead of the allocated result shrinks the
    allocation. Returns the replacement list, or False when not applicable.

    NOTE(review): the five decorators were reconstructed -- the original
    lines lost their '@register'/'@node' prefixes during extraction; confirm
    against the upstream rewrite registrations.
    """
    if not isinstance(node.op, Subtensor):
        return False
    u = node.inputs[0]
    if u.owner is None:
        return False
    if not isinstance(u.owner.op, Alloc):
        return False
    slices = get_idx_list(node.inputs, node.op.idx_list)
    val = u.owner.inputs[0]
    dims = u.owner.inputs[1:]
    assert len(slices) <= len(dims)
    # Alloc may prepend broadcast dimensions that `val` itself lacks.
    n_added_dims = u.ndim - val.ndim
    nw_dims = []
    val_slices = []
    for i, (sl, dim) in enumerate(zip(slices, dims)):
        # Dimensions val actually has: broadcastable ones are kept whole
        # (they contain a single element), others get the original slice.
        if i >= n_added_dims:
            if (val.type.ndim > (i - n_added_dims)) and val.type.broadcastable[(i - n_added_dims)]:
                val_slices.append(slice(None))
            else:
                val_slices.append(sl)
        (csl, _) = get_canonical_form_slice(sl, dim)
        if type(csl) is not slice:
            # Scalar index: this dimension disappears from the output shape.
            pass
        else:
            nw_dim = csl.stop - csl.start
            if csl.step != 1:
                # Ceil-divide so a partial final stride still counts.
                nw_dim = ceil_intdiv(nw_dim, csl.step)
            nw_dims += [nw_dim]
    nw_val = val[tuple(val_slices)]
    nw_dims += dims[len(slices):]
    if nw_val.ndim > len(nw_dims):
        # Indexing removed a broadcastable dim of val that the new Alloc
        # would still need; bail out rather than build an invalid Alloc.
        return False
    rval = alloc(nw_val, *nw_dims)
    if not isinstance(rval, (list, tuple)):
        rval = [rval]
    return rval
def load_model(model_version=None):
    """Load a pretrained heteroencoder by version name ('chembl', 'moses', 'new');
    unknown names fall back to the pretrained MOSES model."""
    known_models = {
        'chembl': 'chembl_pretrained',
        'moses': 'moses_pretrained',
        'new': 'new_model',
    }
    model_name = known_models.get(model_version)
    if model_name is None:
        print('No predefined model of that name found. using the default pre-trained MOSES heteroencoder')
        model_name = 'moses_pretrained'
    currentDirectory = os.getcwd()
    path = '{}/moses/latentgan/heteroencoder_models/{}'.format(currentDirectory, model_name)
    print('Loading heteroencoder model titled {}'.format(model_version))
    print('Path to model file: {}'.format(path))
    model = ddc.DDC(model_name=path)
    sys.stdout.flush()
    return model
class MDConfig(dict):
    """Editor options dict: defaults overlaid with the named entry from
    ``settings.MDEDITOR_CONFIGS`` (when that setting exists)."""

    def __init__(self, config_name='default'):
        self.update(DEFAULT_CONFIG)
        self.set_configs(config_name)

    def set_configs(self, config_name='default'):
        """Overlay the named configuration, validating the setting's shape.

        Raises ImproperlyConfigured when MDEDITOR_CONFIGS (or the selected
        entry) is not a dict, or when config_name is not present.
        """
        configs = getattr(settings, 'MDEDITOR_CONFIGS', None)
        if not configs:
            # Setting absent: keep the defaults.
            return
        if not isinstance(configs, dict):
            raise ImproperlyConfigured('MDEDITOR_CONFIGS setting must be a dictionary type.')
        if config_name not in configs:
            # Fix: the message previously named CKEDITOR_CONFIGS, which is a
            # different setting -- it must refer to MDEDITOR_CONFIGS.
            raise ImproperlyConfigured(("No configuration named '%s' found in your MDEDITOR_CONFIGS setting." % config_name))
        config = configs[config_name]
        if not isinstance(config, dict):
            raise ImproperlyConfigured(('MDEDITOR_CONFIGS["%s"] setting must be a dictionary type.' % config_name))
        self.update(config)
def add_instructions(opts: ScaffoldOpts, content: AbstractContent, file_op: FileOp) -> ResolvedLeaf:
    """Inject the README note immediately after the INSERT_AFTER marker in the
    rendered template, leaving absent content untouched."""
    text = structure.reify_content(content, opts)
    if text is not None:
        marker_pos = text.find(INSERT_AFTER)
        # NOTE(review): `> 0` also rejects a marker at position 0 -- presumably
        # the template never starts with it; confirm before changing.
        assert (marker_pos > 0), f'''{INSERT_AFTER!r} not found in README template:
{text}'''
        insert_at = marker_pos + len(INSERT_AFTER)
        text = text[:insert_at] + README_NOTE.format(**opts) + text[insert_at:]
    return (text, file_op)
class SsoCharacterMgmt(AuxiliaryFrame):
    """Management window listing the SSO characters known to the app, with
    buttons to add one (via an SSO login) or remove the selected one."""

    def __init__(self, parent):
        super().__init__(parent, id=wx.ID_ANY, title=_t('SSO Character Management'), pos=wx.DefaultPosition, size=wx.Size(550, 250), resizeable=True)
        self.mainFrame = parent
        mainSizer = wx.BoxSizer(wx.HORIZONTAL)
        # Two-column list: character name and numeric character ID.
        self.lcCharacters = wx.ListCtrl(self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.LC_REPORT)
        self.lcCharacters.InsertColumn(0, heading=_t('Character'))
        self.lcCharacters.InsertColumn(1, heading=_t('Character ID'))
        self.popCharList()
        mainSizer.Add(self.lcCharacters, 1, (wx.ALL | wx.EXPAND), 5)
        btnSizer = wx.BoxSizer(wx.VERTICAL)
        self.addBtn = wx.Button(self, wx.ID_ANY, _t('Add Character'), wx.DefaultPosition, wx.DefaultSize, 0)
        btnSizer.Add(self.addBtn, 0, (wx.ALL | wx.EXPAND), 5)
        self.deleteBtn = wx.Button(self, wx.ID_ANY, _t('Remove Character'), wx.DefaultPosition, wx.DefaultSize, 0)
        btnSizer.Add(self.deleteBtn, 0, (wx.ALL | wx.EXPAND), 5)
        mainSizer.Add(btnSizer, 0, wx.EXPAND, 5)
        self.addBtn.Bind(wx.EVT_BUTTON, self.addChar)
        self.deleteBtn.Bind(wx.EVT_BUTTON, self.delChar)
        # Refresh whenever an SSO login completes anywhere in the app.
        self.mainFrame.Bind(GE.EVT_SSO_LOGIN, self.ssoLogin)
        self.Bind(wx.EVT_CLOSE, self.OnClose)
        self.Bind(wx.EVT_CHAR_HOOK, self.kbEvent)
        self.SetSizer(mainSizer)
        self.Layout()
        self.SetMinSize(self.GetSize())
        self.Centre(wx.BOTH)

    def ssoLogin(self, event):
        """Handle a completed SSO login: refresh the list, attach the SSO
        character to a (possibly new) local character, then fetch API data."""
        self.popCharList()
        sChar = Character.getInstance()
        char = sChar.getCharacter(event.character.characterName)
        newChar = False
        if (char is None):
            char = sChar.new(event.character.characterName)
            newChar = True
        char.setSsoCharacter(event.character, config.getClientSecret())
        sChar.apiFetch(char.ID, APIView.fetchCallback)
        wx.PostEvent(self.mainFrame, GE.CharListUpdated())
        event.Skip()

    def kbEvent(self, event):
        # Close the window on a bare Escape press; pass everything else on.
        if ((event.GetKeyCode() == wx.WXK_ESCAPE) and (event.GetModifiers() == wx.MOD_NONE)):
            self.Close()
            return
        event.Skip()

    def OnClose(self, event):
        # Stop listening for SSO logins once the window goes away.
        self.mainFrame.Unbind(GE.EVT_SSO_LOGIN, handler=self.ssoLogin)
        event.Skip()

    def popCharList(self):
        """Repopulate the list control from the stored SSO characters."""
        sEsi = Esi.getInstance()
        chars = sEsi.getSsoCharacters()
        self.lcCharacters.DeleteAllItems()
        for (index, char) in enumerate(chars):
            self.lcCharacters.InsertItem(index, char.characterName)
            self.lcCharacters.SetItem(index, 1, str(char.characterID))
            # Stash the DB row id so delChar can find the record later.
            self.lcCharacters.SetItemData(index, char.ID)
        self.lcCharacters.SetColumnWidth(0, wx.LIST_AUTOSIZE)
        self.lcCharacters.SetColumnWidth(1, wx.LIST_AUTOSIZE)

    def addChar(self, event):
        """Kick off an SSO login; server errors go to the ESI error handler."""
        try:
            sEsi = Esi.getInstance()
            sEsi.login()
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception as ex:
            ESIServerExceptionHandler(self, ex)

    def delChar(self, event):
        # Remove the selected character (if any) and refresh the list.
        item = self.lcCharacters.GetFirstSelected()
        if (item > (- 1)):
            charID = self.lcCharacters.GetItemData(item)
            sEsi = Esi.getInstance()
            sEsi.delSsoCharacter(charID)
            self.popCharList()
class SCPIndexDataset(torch.utils.data.Dataset):
    """Dataset of feature matrices loaded from scp entries via an ArkLoader,
    with optional concatenation of ``concat`` adjacent frames.

    Each item is a float tensor; frames are zero-padded to a multiple of
    ``concat`` and then merged along the feature dimension.
    """

    def __init__(self, scp_path_list, concat=4, shared_object=None):
        self.scp_path_list = scp_path_list
        self._sizes = len(self.scp_path_list)
        self._dtype = torch.float32
        self.concat = concat
        # Reuse an existing reader (and its open archives) when one is shared.
        if shared_object is not None:
            self.reader = shared_object.reader
        else:
            self.reader = ArkLoader()

    def dtype(self):
        return self._dtype

    def sizes(self):
        # Fix: the original defined a second `sizes` further down that
        # shadowed this one and returned undefined `self._index.sizes`,
        # so calling sizes() raised AttributeError. The duplicate is removed.
        return self._sizes

    def __len__(self):
        return self._sizes

    @lru_cache(maxsize=8)  # NOTE(review): reconstructed from the corrupted '_cache(maxsize=8)' line; caching on a method keys on `self`
    def __getitem__(self, i):
        scp_path = self.scp_path_list[i]
        mat = self.reader.load_mat(scp_path)
        feature_vector = torch.from_numpy(mat)
        concat = self.concat
        if concat > 1:
            # Zero-pad the time axis so the frame count divides evenly.
            add = (concat - (feature_vector.size()[0] % concat)) % concat
            z = torch.FloatTensor(add, feature_vector.size()[1]).zero_()
            feature_vector = torch.cat((feature_vector, z), 0)
            # Fold `concat` consecutive frames into one wider feature row.
            feature_vector = feature_vector.reshape((int(feature_vector.size()[0] / concat), feature_vector.size()[1] * concat))
        return feature_vector
def test_keep_alive_return_value(capture):
    """Without keep_alive a returned child is destroyed immediately; with
    keep_alive it survives until the parent itself is released."""
    n_inst = ConstructorStats.detail_reg_inst()

    # Plain return: the child dies as soon as the call returns.
    with capture:
        parent = m.Parent()
    assert capture == 'Allocating parent.'
    with capture:
        parent.returnChild()
        assert ConstructorStats.detail_reg_inst() == n_inst + 1
    assert capture == '\n Allocating child.\n Releasing child.\n '
    with capture:
        del parent
        assert ConstructorStats.detail_reg_inst() == n_inst
    assert capture == 'Releasing parent.'

    # keep_alive return: the child outlives the call and dies with the parent.
    with capture:
        parent = m.Parent()
    assert capture == 'Allocating parent.'
    with capture:
        parent.returnChildKeepAlive()
        assert ConstructorStats.detail_reg_inst() == n_inst + 2
    assert capture == 'Allocating child.'
    with capture:
        del parent
        assert ConstructorStats.detail_reg_inst() == n_inst
    assert capture == '\n Releasing parent.\n Releasing child.\n '
def test_fermi_hubbard_2x2_spinful_phs():
    """2x2 spinful Fermi-Hubbard with chemical potential, magnetic field, and
    particle-hole symmetry matches a fixed reference operator string."""
    hubbard_model = fermi_hubbard(2, 2, 1.0, 4.0, chemical_potential=0.5, magnetic_field=0.3, spinless=False, particle_hole_symmetry=True)
    # Compare against the exact normal-ordered term listing.
    assert (str(hubbard_model).strip() == '\n4.0 [] +\n-2.8 [0^ 0] +\n4.0 [0^ 0 1^ 1] +\n-1.0 [0^ 2] +\n-1.0 [0^ 4] +\n-2.2 [1^ 1] +\n-1.0 [1^ 3] +\n-1.0 [1^ 5] +\n-1.0 [2^ 0] +\n-2.8 [2^ 2] +\n4.0 [2^ 2 3^ 3] +\n-1.0 [2^ 6] +\n-1.0 [3^ 1] +\n-2.2 [3^ 3] +\n-1.0 [3^ 7] +\n-1.0 [4^ 0] +\n-2.8 [4^ 4] +\n4.0 [4^ 4 5^ 5] +\n-1.0 [4^ 6] +\n-1.0 [5^ 1] +\n-2.2 [5^ 5] +\n-1.0 [5^ 7] +\n-1.0 [6^ 2] +\n-1.0 [6^ 4] +\n-2.8 [6^ 6] +\n4.0 [6^ 6 7^ 7] +\n-1.0 [7^ 3] +\n-1.0 [7^ 5] +\n-2.2 [7^ 7]\n'.strip())
def training_loop(run_dir='.', dataset_kwargs={}, data_loader_kwargs={}, network_kwargs={}, loss_kwargs={}, optimizer_kwargs={}, augment_kwargs=None, seed=0, batch_size=512, batch_gpu=None, total_kimg=200000, ema_halflife_kimg=500, ema_rampup_ratio=0.05, lr_rampup_kimg=10000, loss_scaling=1, kimg_per_tick=50, snapshot_ticks=50, state_dump_ticks=500, resume_pkl=None, resume_state_dump=None, resume_kimg=0, cudnn_benchmark=True, device=torch.device('cuda')):
    """Distributed training loop with gradient accumulation and EMA weights.

    Progress is measured in images seen (nimg / kimg).  Every `kimg_per_tick`
    kimg the loop logs a status line, and on configured tick multiples it
    writes a network snapshot (pickle of the EMA model) and/or a full training
    state dump (torch.save of net + optimizer state).

    NOTE(review): the mutable default arguments ({}) are shared across calls;
    they appear to be only read here, but None-defaults would be safer.
    """
    # Initialize: per-rank RNG seeding and deterministic matmul settings.
    start_time = time.time()
    np.random.seed((((seed * dist.get_world_size()) + dist.get_rank()) % (1 << 31)))
    torch.manual_seed(np.random.randint((1 << 31)))
    torch.backends.cudnn.benchmark = cudnn_benchmark
    torch.backends.cudnn.allow_tf32 = False
    torch.backends.cuda.matmul.allow_tf32 = False
    torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
    # Split the global batch across ranks and gradient-accumulation rounds.
    batch_gpu_total = (batch_size // dist.get_world_size())
    if ((batch_gpu is None) or (batch_gpu > batch_gpu_total)):
        batch_gpu = batch_gpu_total
    num_accumulation_rounds = (batch_gpu_total // batch_gpu)
    assert (batch_size == ((batch_gpu * num_accumulation_rounds) * dist.get_world_size()))
    # Dataset: an infinite sampler so next(dataset_iterator) never exhausts.
    dist.print0('Loading dataset...')
    dataset_obj = dnnlib.util.construct_class_by_name(**dataset_kwargs)
    dataset_sampler = misc.InfiniteSampler(dataset=dataset_obj, rank=dist.get_rank(), num_replicas=dist.get_world_size(), seed=seed)
    dataset_iterator = iter(torch.utils.data.DataLoader(dataset=dataset_obj, sampler=dataset_sampler, batch_size=batch_gpu, **data_loader_kwargs))
    # Network: resolution/channels/labels are taken from the dataset.
    dist.print0('Constructing network...')
    interface_kwargs = dict(img_resolution=dataset_obj.resolution, img_channels=dataset_obj.num_channels, label_dim=dataset_obj.label_dim)
    net = dnnlib.util.construct_class_by_name(**network_kwargs, **interface_kwargs)
    net.train().requires_grad_(True).to(device)
    if (dist.get_rank() == 0):
        # One-time module summary using a dummy forward pass (rank 0 only).
        with torch.no_grad():
            images = torch.zeros([batch_gpu, net.img_channels, net.img_resolution, net.img_resolution], device=device)
            sigma = torch.ones([batch_gpu], device=device)
            labels = torch.zeros([batch_gpu, net.label_dim], device=device)
            misc.print_module_summary(net, [images, sigma, labels], max_nesting=2)
    # Loss, optimizer, optional augmentation pipe, DDP wrapper, EMA copy.
    dist.print0('Setting up optimizer...')
    loss_fn = dnnlib.util.construct_class_by_name(**loss_kwargs)
    optimizer = dnnlib.util.construct_class_by_name(params=net.parameters(), **optimizer_kwargs)
    augment_pipe = (dnnlib.util.construct_class_by_name(**augment_kwargs) if (augment_kwargs is not None) else None)
    ddp = torch.nn.parallel.DistributedDataParallel(net, device_ids=[device], broadcast_buffers=False)
    ema = copy.deepcopy(net).eval().requires_grad_(False)
    if (resume_pkl is not None):
        # Resume weights from a snapshot pickle.  The barriers serialize the
        # download so rank 0 fetches/caches the file before the others read it.
        dist.print0(f'Loading network weights from "{resume_pkl}"...')
        if (dist.get_rank() != 0):
            torch.distributed.barrier()
        with dnnlib.util.open_url(resume_pkl, verbose=(dist.get_rank() == 0)) as f:
            data = pickle.load(f)
        if (dist.get_rank() == 0):
            torch.distributed.barrier()
        misc.copy_params_and_buffers(src_module=data['ema'], dst_module=net, require_all=False)
        misc.copy_params_and_buffers(src_module=data['ema'], dst_module=ema, require_all=False)
        del data
    if resume_state_dump:
        # Full resume: restore the raw (non-EMA) weights and optimizer state.
        dist.print0(f'Loading training state from "{resume_state_dump}"...')
        data = torch.load(resume_state_dump, map_location=torch.device('cpu'))
        misc.copy_params_and_buffers(src_module=data['net'], dst_module=net, require_all=True)
        optimizer.load_state_dict(data['optimizer_state'])
        del data
    # Main-loop bookkeeping.
    dist.print0(f'Training for {total_kimg} kimg...')
    dist.print0()
    cur_nimg = (resume_kimg * 1000)
    cur_tick = 0
    tick_start_nimg = cur_nimg
    tick_start_time = time.time()
    maintenance_time = (tick_start_time - start_time)
    dist.update_progress((cur_nimg // 1000), total_kimg)
    stats_jsonl = None
    while True:
        # Accumulate gradients over several rounds; DDP gradient all-reduce is
        # deferred to the last round via misc.ddp_sync.
        optimizer.zero_grad(set_to_none=True)
        for round_idx in range(num_accumulation_rounds):
            with misc.ddp_sync(ddp, (round_idx == (num_accumulation_rounds - 1))):
                (images, labels) = next(dataset_iterator)
                images = ((images.to(device).to(torch.float32) / 127.5) - 1)  # uint8 [0,255] -> float [-1,1]
                labels = labels.to(device)
                loss = loss_fn(net=ddp, images=images, labels=labels, augment_pipe=augment_pipe)
                training_stats.report('Loss/loss', loss)
                loss.sum().mul((loss_scaling / batch_gpu_total)).backward()
        # Linear LR warmup over the first lr_rampup_kimg kimg.
        for g in optimizer.param_groups:
            g['lr'] = (optimizer_kwargs['lr'] * min((cur_nimg / max((lr_rampup_kimg * 1000), 1e-08)), 1))
        # Sanitize gradients in-place before stepping (NaN -> 0, clamp infs).
        for param in net.parameters():
            if (param.grad is not None):
                torch.nan_to_num(param.grad, nan=0, posinf=100000.0, neginf=(- 100000.0), out=param.grad)
        optimizer.step()
        # Update EMA weights; the half-life ramps up early in training.
        ema_halflife_nimg = (ema_halflife_kimg * 1000)
        if (ema_rampup_ratio is not None):
            ema_halflife_nimg = min(ema_halflife_nimg, (cur_nimg * ema_rampup_ratio))
        ema_beta = (0.5 ** (batch_size / max(ema_halflife_nimg, 1e-08)))
        for (p_ema, p_net) in zip(ema.parameters(), net.parameters()):
            p_ema.copy_(p_net.detach().lerp(p_ema, ema_beta))
        # Advance progress; skip the maintenance block until a tick boundary.
        cur_nimg += batch_size
        done = (cur_nimg >= (total_kimg * 1000))
        if ((not done) and (cur_tick != 0) and (cur_nimg < (tick_start_nimg + (kimg_per_tick * 1000)))):
            continue
        # Per-tick status line: timing and memory statistics.
        tick_end_time = time.time()
        fields = []
        fields += [f"tick {training_stats.report0('Progress/tick', cur_tick):<5d}"]
        fields += [f"kimg {training_stats.report0('Progress/kimg', (cur_nimg / 1000.0)):<9.1f}"]
        fields += [f"time {dnnlib.util.format_time(training_stats.report0('Timing/total_sec', (tick_end_time - start_time))):<12s}"]
        fields += [f"sec/tick {training_stats.report0('Timing/sec_per_tick', (tick_end_time - tick_start_time)):<7.1f}"]
        fields += [f"sec/kimg {training_stats.report0('Timing/sec_per_kimg', (((tick_end_time - tick_start_time) / (cur_nimg - tick_start_nimg)) * 1000.0)):<7.2f}"]
        fields += [f"maintenance {training_stats.report0('Timing/maintenance_sec', maintenance_time):<6.1f}"]
        fields += [f"cpumem {training_stats.report0('Resources/cpu_mem_gb', (psutil.Process(os.getpid()).memory_info().rss / (2 ** 30))):<6.2f}"]
        fields += [f"gpumem {training_stats.report0('Resources/peak_gpu_mem_gb', (torch.cuda.max_memory_allocated(device) / (2 ** 30))):<6.2f}"]
        fields += [f"reserved {training_stats.report0('Resources/peak_gpu_mem_reserved_gb', (torch.cuda.max_memory_reserved(device) / (2 ** 30))):<6.2f}"]
        torch.cuda.reset_peak_memory_stats()
        dist.print0(' '.join(fields))
        # External stop request (e.g. cluster preemption hook).
        if ((not done) and dist.should_stop()):
            done = True
            dist.print0()
            dist.print0('Aborting...')
        # Network snapshot: deep-copied, CPU-resident, DDP-consistency-checked.
        if ((snapshot_ticks is not None) and (done or ((cur_tick % snapshot_ticks) == 0))):
            data = dict(ema=ema, loss_fn=loss_fn, augment_pipe=augment_pipe, dataset_kwargs=dict(dataset_kwargs))
            for (key, value) in data.items():
                if isinstance(value, torch.nn.Module):
                    value = copy.deepcopy(value).eval().requires_grad_(False)
                    misc.check_ddp_consistency(value)
                    data[key] = value.cpu()
                del value  # conserve memory
            if (dist.get_rank() == 0):
                with open(os.path.join(run_dir, f'network-snapshot-{(cur_nimg // 1000):06d}.pkl'), 'wb') as f:
                    pickle.dump(data, f)
            del data  # conserve memory
        # Full training-state dump for exact resumption (rank 0 only).
        if ((state_dump_ticks is not None) and (done or ((cur_tick % state_dump_ticks) == 0)) and (cur_tick != 0) and (dist.get_rank() == 0)):
            torch.save(dict(net=net, optimizer_state=optimizer.state_dict()), os.path.join(run_dir, f'training-state-{(cur_nimg // 1000):06d}.pt'))
        # Collect and persist per-tick training statistics (stats.jsonl).
        training_stats.default_collector.update()
        if (dist.get_rank() == 0):
            if (stats_jsonl is None):
                stats_jsonl = open(os.path.join(run_dir, 'stats.jsonl'), 'at')
            stats_jsonl.write((json.dumps(dict(training_stats.default_collector.as_dict(), timestamp=time.time())) + '\n'))
            stats_jsonl.flush()
        dist.update_progress((cur_nimg // 1000), total_kimg)
        # Start the next tick.
        cur_tick += 1
        tick_start_nimg = cur_nimg
        tick_start_time = time.time()
        maintenance_time = (tick_start_time - tick_end_time)
        if done:
            break
    dist.print0()
    dist.print0('Exiting...')
def test_alternation_ab(a: FixtureA, b: FixtureB) -> None:
    """The alternation a|b accepts exactly the single symbols 'a' and 'b'."""
    union = a | b
    for accepted in ('a', 'b'):
        assert union.accepts(accepted)
    for rejected in ('', 'aa', 'ab', 'ba', 'bb'):
        assert not union.accepts(rejected)
class KnownValues(unittest.TestCase):
    """Regression energies for the H2 cell using range-separated density fitting."""

    def test_h2_gamma(self):
        # Gamma-point KRHF with RSDF integrals.
        scf_obj = pscf.KRHF(cell).rs_density_fit()
        scf_obj.kernel()
        self.assertAlmostEqual(scf_obj.e_tot, -1.0, 7)

    def test_h2_kpt1_shiftedcenter(self):
        # Single shifted k-point.
        kpts = cell.make_kpts([1, 1, 1], scaled_center=scaled_center)
        scf_obj = pscf.KRHF(cell, kpts).rs_density_fit()
        scf_obj.kernel()
        self.assertAlmostEqual(scf_obj.e_tot, -0.0, 7)

    def test_h2_jonly_k211(self):
        # KRKS/PBE needs only the Coulomb (J) build.
        kpts = cell.make_kpts([2, 1, 1])
        scf_obj = pscf.KRKS(cell, kpts).rs_density_fit()
        scf_obj.xc = 'pbe'
        scf_obj.kernel()
        self.assertAlmostEqual(scf_obj.e_tot, -1.0, 5)

    def test_h2_jonly_k211_shiftedcenter(self):
        kpts = cell.make_kpts([2, 1, 1], scaled_center=scaled_center)
        scf_obj = pscf.KRKS(cell, kpts).rs_density_fit()
        scf_obj.xc = 'pbe'
        scf_obj.kernel()
        self.assertAlmostEqual(scf_obj.e_tot, -1.0, 5)

    def test_h2_jk_k211(self):
        # KRHF exercises both J and K builds.
        kpts = cell.make_kpts([2, 1, 1])
        scf_obj = pscf.KRHF(cell, kpts).rs_density_fit()
        scf_obj.kernel()
        self.assertAlmostEqual(scf_obj.e_tot, -0.0, 5)

    def test_h2_jk_k211_shiftedcenter(self):
        kpts = cell.make_kpts([2, 1, 1], scaled_center=scaled_center)
        scf_obj = pscf.KRHF(cell, kpts).rs_density_fit()
        scf_obj.kernel()
        self.assertAlmostEqual(scf_obj.e_tot, -0.0, 5)
def Increment(new, mirror, incpref, inc_time=None):
    """Create the increment for *mirror* under *incpref*, dispatching on the
    file types of *new* and *mirror*, record it in the statistics, and return
    the increment rpath."""
    log.Log('Incrementing mirror file {mf}'.format(mf=mirror), log.INFO)
    # Make sure the increment prefix directory exists when either side is a dir.
    if ((new and new.isdir()) or mirror.isdir()) and not incpref.lstat():
        incpref.mkdir()
    if not mirror.lstat():
        # Mirror side does not exist: record a "missing" marker.
        incrp = _make_missing_increment(incpref, inc_time)
    elif mirror.isdir():
        incrp = _make_dir_increment(mirror, incpref, inc_time)
    elif new.isreg() and mirror.isreg():
        # Both regular files: store a reverse diff.
        incrp = _make_diff_increment(new, mirror, incpref, inc_time)
    else:
        # Type change (e.g. symlink/special): fall back to a full snapshot.
        incrp = _make_snapshot_increment(mirror, incpref, inc_time)
    statistics.process_increment(incrp)
    return incrp
class EvoNorm2dS1(nn.Module):
    """EvoNorm-S1 for NCHW inputs: activation divided by grouped std, followed
    by a per-channel affine transform."""

    def __init__(self, num_features, groups=32, group_size=None, apply_act=True, act_layer=None, eps=1e-05, **_):
        super().__init__()
        act_layer = act_layer or nn.SiLU
        self.apply_act = apply_act
        # Activation is only instantiated when it will actually be applied.
        if act_layer is not None and apply_act:
            self.act = create_act_layer(act_layer)
        else:
            self.act = nn.Identity()
        # Either a fixed group count or a fixed group size (must divide evenly).
        if group_size:
            assert num_features % group_size == 0
            self.groups = num_features // group_size
        else:
            self.groups = groups
        self.eps = eps
        self.pre_act_norm = False
        self.weight = nn.Parameter(torch.ones(num_features))
        self.bias = nn.Parameter(torch.zeros(num_features))
        self.reset_parameters()

    def reset_parameters(self):
        """Reset the affine parameters to identity (weight=1, bias=0)."""
        nn.init.ones_(self.weight)
        nn.init.zeros_(self.bias)

    def forward(self, x):
        _assert(x.dim() == 4, 'expected 4D input')
        x_dtype = x.dtype
        v_shape = (1, -1, 1, 1)
        if self.apply_act:
            # act(x) / group_std(x) is the S1 normalization.
            x = self.act(x) / group_std(x, self.groups, self.eps)
        scale = self.weight.view(v_shape).to(x_dtype)
        shift = self.bias.view(v_shape).to(x_dtype)
        return x * scale + shift
def _get_weight_tensor_transpose_reshape(conv_linear: LayerType) -> libpymo.TensorParams:
    """Build a libpymo TensorParams from a Keras layer's kernel, transposed to
    channels-first (out_channels, in_channels, kh, kw) ordering.

    Fix: the return annotation previously read ``libpymo.TensorParams()``,
    which instantiated a TensorParams object at function-definition time; it
    now references the type itself.

    :param conv_linear: Conv2D / DepthwiseConv2D / Conv2DTranspose / Dense layer
    :return: TensorParams with flattened ``data`` and channels-first ``shape``
    """
    weight_tensor = libpymo.TensorParams()
    weight = conv_linear.get_weights()[0]
    shape = weight.shape
    # DepthwiseConv2D subclasses Conv2D, so it must be checked first.
    if isinstance(conv_linear, tf.keras.layers.DepthwiseConv2D):
        # (kh, kw, in, multiplier) -> (in, multiplier, kh, kw)
        weight = np.transpose(weight, (2, 3, 0, 1))
        shape = np.array([shape[2], shape[3], shape[0], shape[1]])
    elif isinstance(conv_linear, tf.keras.layers.Dense):
        # Treat a Dense kernel (in, out) as a 1x1 convolution: (out, in, 1, 1).
        shape = np.concatenate((np.array([1, 1]), shape))
        weight = np.transpose(weight, (1, 0))
        shape = np.array([shape[3], shape[2], shape[0], shape[1]])
    elif isinstance(conv_linear, tf.keras.layers.Conv2DTranspose):
        # (kh, kw, out, in) -> (out, in, kh, kw)
        weight = np.transpose(weight, (2, 3, 0, 1))
        shape = np.array([shape[2], shape[3], shape[0], shape[1]])
    elif isinstance(conv_linear, tf.keras.layers.Conv2D):
        # (kh, kw, in, out) -> (out, in, kh, kw)
        weight = np.transpose(weight, (3, 2, 0, 1))
        shape = np.array([shape[3], shape[2], shape[0], shape[1]])
    else:
        # Best-effort: unsupported layer types are logged and passed through unreordered.
        _logger.error('_get_weight_tensor_transpose_reshape(): Operation type unsupported')
    weight_tensor.data = weight.reshape((- 1))
    weight_tensor.shape = shape
    return weight_tensor
class SPSA(Optimizer):
    """Simultaneous Perturbation Stochastic Approximation optimizer.

    Approximates the gradient from just two objective evaluations per step
    using a random +/-1 perturbation, so the per-iteration cost is independent
    of the number of parameters.

    Fix: in ``_calibration`` the "delta_obj is 0, not calibrating" debug
    message was logged unconditionally; it is now emitted only when the
    calibration is actually skipped.
    """

    # Default control parameter c0 (initial update magnitude scale).
    _C0 = ((2 * np.pi) * 0.1)
    # Constructor arguments that are copied into self._options.
    _OPTIONS = ['save_steps', 'last_avg']

    def __init__(self, maxiter: int=1000, save_steps: int=1, last_avg: int=1, c0: float=_C0, c1: float=0.1, c2: float=0.602, c3: float=0.101, c4: float=0, skip_calibration: bool=False, max_trials: Optional[int]=None) -> None:
        """
        Args:
            maxiter: Maximum number of iterations.
            save_steps: Save intermediate info every ``save_steps`` steps.
            last_avg: Average the last ``last_avg`` iterates as the solution.
            c0: Initial a; step-size control.
            c1: Initial c; perturbation-size control.
            c2: alpha from the SPSA paper; a decay exponent.
            c3: gamma from the SPSA paper; c decay exponent.
            c4: Stability constant added to the a decay denominator.
            skip_calibration: Skip the automatic calibration of c0.
            max_trials: Deprecated, use maxiter.
        """
        validate_min('save_steps', save_steps, 1)
        validate_min('last_avg', last_avg, 1)
        super().__init__()
        if (max_trials is not None):
            warnings.warn('The max_trials parameter is deprecated as of 0.8.0 and will be removed no sooner than 3 months after the release. You should use maxiter instead.', DeprecationWarning)
            maxiter = max_trials
        # Copy recognized constructor arguments into the options dict.
        for (k, v) in list(locals().items()):
            if (k in self._OPTIONS):
                self._options[k] = v
        self._maxiter = maxiter
        self._parameters = np.array([c0, c1, c2, c3, c4])
        self._skip_calibration = skip_calibration

    def get_support_level(self):
        """Return support levels: gradients/bounds ignored, initial point required."""
        return {'gradient': OptimizerSupportLevel.ignored, 'bounds': OptimizerSupportLevel.ignored, 'initial_point': OptimizerSupportLevel.required}

    def optimize(self, num_vars, objective_function, gradient_function=None, variable_bounds=None, initial_point=None):
        """Run SPSA: optional c0 calibration followed by the main optimization.

        Returns:
            Tuple of (solution point, objective value, None).
        """
        super().optimize(num_vars, objective_function, gradient_function, variable_bounds, initial_point)
        if (not isinstance(initial_point, np.ndarray)):
            initial_point = np.asarray(initial_point)
        logger.debug('Parameters: %s', self._parameters)
        if (not self._skip_calibration):
            # At most 25 calibration steps, roughly maxiter/5.
            num_steps_calibration = min(25, max(1, (self._maxiter // 5)))
            self._calibration(objective_function, initial_point, num_steps_calibration)
        else:
            logger.debug('Skipping calibration, parameters used as provided.')
        (opt, sol, _, _, _, _) = self._optimization(objective_function, initial_point, maxiter=self._maxiter, **self._options)
        return (sol, opt, None)

    def _optimization(self, obj_fun: Callable, initial_theta: np.ndarray, maxiter: int, save_steps: int=1, last_avg: int=1) -> List:
        """Core SPSA iteration.

        Returns:
            [cost_final, theta_best, cost_plus_save, cost_minus_save,
             theta_plus_save, theta_minus_save]
        """
        theta_plus_save = []
        theta_minus_save = []
        cost_plus_save = []
        cost_minus_save = []
        theta = initial_theta
        theta_best = np.zeros(initial_theta.shape)
        for k in range(maxiter):
            # Gain sequences a_k and c_k (decaying step and perturbation sizes).
            a_spsa = (float(self._parameters[0]) / np.power(((k + 1) + self._parameters[4]), self._parameters[2]))
            c_spsa = (float(self._parameters[1]) / np.power((k + 1), self._parameters[3]))
            # Random +/-1 perturbation direction.
            delta = ((2 * aqua_globals.random.integers(2, size=np.shape(initial_theta)[0])) - 1)
            theta_plus = (theta + (c_spsa * delta))
            theta_minus = (theta - (c_spsa * delta))
            if (self._max_evals_grouped > 1):
                # Evaluate both perturbed points in a single grouped call.
                (cost_plus, cost_minus) = obj_fun(np.concatenate((theta_plus, theta_minus)))
            else:
                cost_plus = obj_fun(theta_plus)
                cost_minus = obj_fun(theta_minus)
            # Two-point gradient estimate and parameter update.
            g_spsa = (((cost_plus - cost_minus) * delta) / (2.0 * c_spsa))
            theta = (theta - (a_spsa * g_spsa))
            if ((k % save_steps) == 0):
                logger.debug('Objective function at theta+ for step # %s: %1.7f', k, cost_plus)
                logger.debug('Objective function at theta- for step # %s: %1.7f', k, cost_minus)
                theta_plus_save.append(theta_plus)
                theta_minus_save.append(theta_minus)
                cost_plus_save.append(cost_plus)
                cost_minus_save.append(cost_minus)
            if (k >= (maxiter - last_avg)):
                # Average the last `last_avg` iterates into the final solution.
                theta_best += (theta / last_avg)
        cost_final = obj_fun(theta_best)
        logger.debug('Final objective function is: %.7f', cost_final)
        return [cost_final, theta_best, cost_plus_save, cost_minus_save, theta_plus_save, theta_minus_save]

    def _calibration(self, obj_fun: Callable, initial_theta: np.ndarray, stat: int):
        """Calibrate c0 so that the first update step has a sensible magnitude,
        estimated from `stat` random two-point probes around initial_theta."""
        target_update = self._parameters[0]
        initial_c = self._parameters[1]
        delta_obj = 0
        logger.debug('Calibration...')
        for i in range(stat):
            if ((i % 5) == 0):
                logger.debug('calibration step # %s of %s', str(i), str(stat))
            delta = ((2 * aqua_globals.random.integers(2, size=np.shape(initial_theta)[0])) - 1)
            theta_plus = (initial_theta + (initial_c * delta))
            theta_minus = (initial_theta - (initial_c * delta))
            if (self._max_evals_grouped > 1):
                (obj_plus, obj_minus) = obj_fun(np.concatenate((theta_plus, theta_minus)))
            else:
                obj_plus = obj_fun(theta_plus)
                obj_minus = obj_fun(theta_minus)
            delta_obj += (np.absolute((obj_plus - obj_minus)) / stat)
        if (delta_obj > 0):
            self._parameters[0] = ((((target_update * 2) / delta_obj) * self._parameters[1]) * (self._parameters[4] + 1))
        else:
            # Fixed: this message previously logged unconditionally, even when
            # calibration had just been performed.
            logger.debug('delta_obj is 0, not calibrating (since this would set c0 to inf)')
        logger.debug('Calibrated SPSA parameter c0 is %.7f', self._parameters[0])
def log_events(klass: Type[QObject]) -> Type[QObject]:
    """Class decorator that logs every QEvent delivered to *klass* before
    delegating to the original ``event`` handler.

    Fix: the original contained a stray no-op expression ``(old_event)``,
    apparently a stripped ``@functools.wraps(old_event)`` decorator; the
    wraps decorator is restored so the patched handler keeps the original's
    metadata.
    """
    import functools  # local import keeps this debug helper self-contained

    old_event = klass.event

    @functools.wraps(old_event)
    def new_event(self: Any, e: QEvent) -> bool:
        # Log the event type before dispatching to the original handler.
        log.misc.debug('Event in {}: {}'.format(utils.qualname(klass), qenum_key(QEvent, e.type(), klass=QEvent.Type)))
        return old_event(self, e)

    klass.event = new_event
    return klass
class Project(MPTTModel, Model):
    """Tree-structured project that binds users (via Membership roles), a
    catalog, tasks and views together.

    Fix: the role/member accessors below were preceded by bare ``_property``
    expressions (a NameError at class-creation time) — evidently stripped
    ``@property`` decorators, which are restored here.
    """

    objects = ProjectManager()
    parent = TreeForeignKey('self', null=True, blank=True, on_delete=models.DO_NOTHING, related_name='children', db_index=True, verbose_name=_('Parent project'), help_text=_('The parent project of this project.'))
    user = models.ManyToManyField(settings.AUTH_USER_MODEL, through='Membership', related_name='projects', verbose_name=_('User'), help_text=_('The list of users for this project.'))
    site = models.ForeignKey(Site, on_delete=models.SET_NULL, null=True, verbose_name=_('Site'), help_text=_('The site this project belongs to (in a multi site setup).'))
    title = models.CharField(max_length=256, verbose_name=_('Title'), help_text=_('The title for this project.'))
    description = models.TextField(blank=True, verbose_name=_('Description'), help_text=_('A description for this project (optional).'))
    catalog = models.ForeignKey(Catalog, related_name='projects', on_delete=models.SET_NULL, null=True, verbose_name=_('Catalog'), help_text=_('The catalog which will be used for this project.'))
    tasks = models.ManyToManyField(Task, blank=True, through='Issue', related_name='projects', verbose_name=_('Tasks'), help_text=_('The tasks that will be used for this project.'))
    views = models.ManyToManyField(View, blank=True, related_name='projects', verbose_name=_('Views'), help_text=_('The views that will be used for this project.'))
    progress_total = models.IntegerField(null=True, verbose_name=_('Progress total'), help_text=_('The total number of expected values for the progress bar.'))
    progress_count = models.IntegerField(null=True, verbose_name=_('Progress count'), help_text=_('The number of values for the progress bar.'))

    class Meta():
        ordering = ('tree_id', 'level', 'title')
        verbose_name = _('Project')
        verbose_name_plural = _('Projects')

    class MPTTMeta():
        order_insertion_by = ('title',)

    def __str__(self):
        return self.title

    def get_absolute_url(self):
        return reverse('project', kwargs={'pk': self.pk})

    def clean(self):
        # Refuse re-parenting a project under itself or any of its descendants.
        if (self.id and (self.parent in self.get_descendants(include_self=True))):
            raise ValidationError({'parent': [_('A project may not be moved to be a child of itself or one of its descendants.')]})

    def catalog_uri(self):
        # Returns None implicitly when no catalog is assigned.
        if (self.catalog is not None):
            return self.catalog.uri

    @property
    def member(self):
        """All users associated with this project, regardless of role."""
        return self.user.all()

    @property
    def owners_str(self):
        """Comma-separated string of the project owners."""
        # NOTE(review): this filter uses 'membership__role' while the accessors
        # below use 'memberships__role' — one of the two is likely a typo;
        # verify against the Membership model's related_name.
        return ', '.join([('' if (x is None) else str(x)) for x in self.user.filter(membership__role='owner')])

    @property
    def owners(self):
        return self.user.filter(memberships__role='owner')

    @property
    def managers(self):
        return self.user.filter(memberships__role='manager')

    @property
    def authors(self):
        return self.user.filter(memberships__role='author')

    @property
    def guests(self):
        return self.user.filter(memberships__role='guest')

    def file_size(self):
        """Total size of all files attached to current (non-snapshot) values."""
        queryset = self.values.filter(snapshot=None).exclude((models.Q(file='') | models.Q(file=None)))
        return sum([value.file.size for value in queryset])
def collate_fn_all_des(batch):
    """Collate dataset items into flat batch tensors.

    Each item is indexable as (points, _, obj_labels, rel_labels,
    edge_indices, descriptor); point clouds are stacked along dim 0 and the
    edge indices are shifted by the running node count so they keep pointing
    at the right rows in the stacked tensor.
    """
    points, obj_labels, rel_labels, edges, descriptors = [], [], [], [], []
    node_offset = 0
    for item in batch:
        points.append(item[0])
        obj_labels.append(item[2])
        rel_labels.append(item[3])
        # Shift this sample's edge indices into the stacked node numbering.
        edges.append(item[4] + node_offset)
        descriptors.append(item[5])
        node_offset += item[0].shape[0]
    return (
        torch.cat(points, dim=0),
        torch.cat(obj_labels, dim=0),
        torch.cat(rel_labels, dim=0),
        torch.cat(edges, dim=0),
        torch.cat(descriptors, dim=0),
    )
class TestGaussianProcess(GaussianProcessTestCase):
    """Cross-checks the C++ GaussianProcess wrapper against the Python
    reference implementation: sampling, singular-covariance handling, and
    agreement of mean/variance/cholesky-variance and their gradients."""

    # Ask the base class to precompute the shared GP test environments.
    precompute_gaussian_process_data = True

    # NOTE(review): the bare tuple below looks like a stripped decorator,
    # probably "@pytest.fixture(autouse=True, scope='class')" (possibly with
    # @classmethod) — confirm against upstream.
    (autouse=True, scope='class')
    def base_setup(cls):
        """Seed numpy's RNG for reproducibility, then build the environments."""
        numpy.random.seed(8794)
        super(TestGaussianProcess, cls).base_setup()

    def test_sample_point_from_gp(self):
        """Sampling is reproducible after a seed reset, and a noiseless sample
        at a training point reproduces its training value."""
        point_one = SamplePoint([0.0, 1.0], (- 1.0), 0.0)
        point_two = SamplePoint([2.0, 2.5], 1.0, 0.1)
        covariance = SquareExponential([1.0, 1.0, 1.0])
        historical_data = HistoricalData(len(point_one.point), [point_one, point_two])
        gaussian_process = GaussianProcess(covariance, historical_data)
        out_values = numpy.zeros(3)
        for i in xrange(3):
            out_values[i] = gaussian_process.sample_point_from_gp(point_two.point, 0.001)
        # Rewind the RNG: the same three samples must come out again.
        gaussian_process._gaussian_process.reset_to_most_recent_seed()
        out_values_test = numpy.ones(3)
        for i in xrange(3):
            out_values_test[i] = gaussian_process.sample_point_from_gp(point_two.point, 0.001)
        self.assert_vector_within_relative(out_values_test, out_values, 0.0)
        # With zero noise, sampling at an exactly-known point returns its value.
        value = gaussian_process.sample_point_from_gp(point_one.point, 0.0)
        self.assert_scalar_within_relative(value, point_one.value, numpy.finfo(numpy.float64).eps)

    def test_gp_construction_singular_covariance_matrix(self):
        """Constructing a GP from duplicated zero-noise points must raise."""
        index = numpy.argmax(numpy.greater_equal(self.num_sampled_list, 1))
        (domain, gaussian_process) = self.gp_test_environments[index]
        point_one = SamplePoint(([0.0] * domain.dim), 1.0, 0.0)
        point_two = SamplePoint(([1.0] * domain.dim), 1.0, 0.0)
        point_three = point_two  # duplicate point -> singular covariance
        historical_data = HistoricalData(len(point_one.point), [point_one, point_two, point_three])
        with pytest.raises(C_GP.SingularMatrixException):
            GaussianProcess(gaussian_process.get_covariance_copy(), historical_data)

    def test_gp_add_sampled_points_singular_covariance_matrix(self):
        """Adding a duplicate of an existing zero-noise point must raise."""
        test_environment_input = copy.copy(self.gp_test_environment_input)
        test_environment_input.num_sampled = 1
        test_environment_input.gaussian_process_class = GaussianProcess
        (_, gaussian_process) = self._build_gaussian_process_test_data(test_environment_input)
        point_one = SamplePoint(([0.5] * gaussian_process.dim), 1.0, 0.0)
        point_two = SamplePoint(([1.0] * gaussian_process.dim), (- 1.0), 0.0)
        point_three = point_one  # duplicate of an already-added point
        gaussian_process.add_sampled_points([point_one, point_two])
        with pytest.raises(C_GP.SingularMatrixException):
            gaussian_process.add_sampled_points([point_three])

    def test_python_and_cpp_return_same_mu_and_gradient(self):
        """C++ and Python GP means and mean-gradients agree within tolerance."""
        num_tests_per_case = 4
        mu_tolerance = 3e-13
        grad_mu_tolerance = 3e-12
        for test_case in self.gp_test_environments:
            (domain, python_gp) = test_case
            (python_cov, historical_data) = python_gp.get_core_data_copy()
            # Build an equivalent C++-backed GP from the same core data.
            cpp_cov = SquareExponential(python_cov.hyperparameters)
            cpp_gp = GaussianProcess(cpp_cov, historical_data)
            for num_to_sample in self.num_to_sample_list:
                for _ in xrange(num_tests_per_case):
                    points_to_sample = domain.generate_uniform_random_points_in_domain(num_to_sample)
                    cpp_mu = cpp_gp.compute_mean_of_points(points_to_sample)
                    python_mu = python_gp.compute_mean_of_points(points_to_sample)
                    self.assert_vector_within_relative(python_mu, cpp_mu, mu_tolerance)
                    cpp_grad_mu = cpp_gp.compute_grad_mean_of_points(points_to_sample)
                    python_grad_mu = python_gp.compute_grad_mean_of_points(points_to_sample)
                    self.assert_vector_within_relative(python_grad_mu, cpp_grad_mu, grad_mu_tolerance)

    def test_python_and_cpp_return_same_variance_and_gradient(self):
        """C++ and Python GP variances and variance-gradients agree."""
        num_tests_per_case = 2
        var_tolerance = 3e-13
        grad_var_tolerance = 3e-12
        for test_case in self.gp_test_environments:
            (domain, python_gp) = test_case
            (python_cov, historical_data) = python_gp.get_core_data_copy()
            cpp_cov = SquareExponential(python_cov.hyperparameters)
            cpp_gp = GaussianProcess(cpp_cov, historical_data)
            for num_to_sample in self.num_to_sample_list:
                for _ in xrange(num_tests_per_case):
                    points_to_sample = domain.generate_uniform_random_points_in_domain(num_to_sample)
                    cpp_var = cpp_gp.compute_variance_of_points(points_to_sample)
                    python_var = python_gp.compute_variance_of_points(points_to_sample)
                    self.assert_vector_within_relative(python_var, cpp_var, var_tolerance)
                    cpp_grad_var = cpp_gp.compute_grad_variance_of_points(points_to_sample)
                    python_grad_var = python_gp.compute_grad_variance_of_points(points_to_sample)
                    self.assert_vector_within_relative(python_grad_var, cpp_grad_var, grad_var_tolerance)

    def test_python_and_cpp_return_same_cholesky_variance_and_gradient(self):
        """C++ and Python Cholesky factors of the variance (and gradients) agree;
        tolerances are looser since the factorization amplifies error."""
        num_tests_per_case = 2
        var_tolerance = 3e-12
        grad_var_tolerance = 3e-10
        for test_case in self.gp_test_environments:
            (domain, python_gp) = test_case
            (python_cov, historical_data) = python_gp.get_core_data_copy()
            cpp_cov = SquareExponential(python_cov.hyperparameters)
            cpp_gp = GaussianProcess(cpp_cov, historical_data)
            for num_to_sample in self.num_to_sample_list:
                for _ in xrange(num_tests_per_case):
                    points_to_sample = domain.generate_uniform_random_points_in_domain(num_to_sample)
                    cpp_var = cpp_gp.compute_cholesky_variance_of_points(points_to_sample)
                    python_var = python_gp.compute_cholesky_variance_of_points(points_to_sample)
                    self.assert_vector_within_relative(python_var, cpp_var, var_tolerance)
                    cpp_grad_var = cpp_gp.compute_grad_cholesky_variance_of_points(points_to_sample)
                    python_grad_var = python_gp.compute_grad_cholesky_variance_of_points(points_to_sample)
                    self.assert_vector_within_relative(python_grad_var, cpp_grad_var, grad_var_tolerance)
def test_interactive_with_dependencies_and_no_selection(tester: CommandTester, repo: TestRepository) -> None:
    """Interactive `init` where dependency searches are started but no match is
    selected: the generated pyproject must contain no dependency entries."""
    repo.add_package(get_package('django-pendulum', '0.1.6-pre4'))
    repo.add_package(get_package('pendulum', '2.0.0'))
    repo.add_package(get_package('pytest', '3.6.0'))
    # Answers to the interactive prompts; empty strings decline the offered
    # search results so nothing gets added.
    inputs = ['my-package', '1.2.3', 'This is a description', 'n', 'MIT', '~2.7 || ^3.6', '', 'pendulu', '', '', '', 'pytest', '', '', '', '\n']
    tester.execute(inputs='\n'.join(inputs))
    expected = '[tool.poetry]\nname = "my-package"\nversion = "1.2.3"\ndescription = "This is a description"\nauthors = ["Your Name <>"]\nlicense = "MIT"\nreadme = "README.md"\n\n[tool.poetry.dependencies]\npython = "~2.7 || ^3.6"\n'
    assert (expected in tester.io.fetch_output())
def _label_nodes_by_identity(intralayer_graphs, interlayer_edges, layer_vec):
namedict = {}
backedges = {}
for e in interlayer_edges:
(ei, ej) = (e[0], e[1])
if (ei < ej):
backedges[ej] = (backedges.get(ej, []) + [ei])
else:
backedges[ei] = (backedges.get(ei, []) + [ej])
offset = 0
for (i, lay) in enumerate(layer_vec):
if (i not in backedges):
namedict[i] = (i - offset)
else:
pred = backedges[i][0]
namedict[i] = namedict[pred]
offset += 1
for graph in intralayer_graphs:
graph.vs['shared_id'] = list(map((lambda x: namedict[x]), graph.vs['nid']))
assert (len(set(graph.vs['shared_id'])) == len(graph.vs['shared_id'])), 'IDs within a slice must all be unique' |
class ControlTabs(QtWidgets.QTabWidget):
    """Tab widget holding the Compare / Edit / Data control panes.

    Drag-and-drop events are forwarded to the "Data" (open-file) tab, and
    switching tabs refreshes the newly shown pane.
    """

    def __init__(self, *args, m=None, **kwargs):
        # m: the map/controller object the tabs operate on — passed through to
        # each child tab (assumption: an EOmaps-style Maps object; confirm).
        super().__init__(*args, **kwargs)
        self.m = m
        self.tab_compare = CompareTab(m=self.m)
        self.tab_open = OpenFileTabs(m=self.m)
        self.tab_edit = ArtistEditor(m=self.m)
        self.addTab(self.tab_compare, 'Compare')
        self.addTab(self.tab_edit, 'Edit')
        self.addTab(self.tab_open, 'Data')
        # Refresh a tab's contents whenever it becomes the active one.
        self.currentChanged.connect(self.tabchanged)
        self.setAcceptDrops(True)
        self.setStyleSheet('\n ControlTabs {\n font-size: 10pt;\n font-weight: bold;\n }\n\n QTabWidget::pane {\n border: 0px;\n top:0px;\n background: rgb(240, 240, 240);\n border-radius: 10px;\n }\n\n QTabBar::tab {\n background: rgb(240, 240, 240);\n border: 0px;\n padding: 3px;\n padding-bottom: 6px;\n padding-left: 6px;\n padding-right: 6px;\n margin-left: 10px;\n margin-bottom: -2px;\n border-radius: 4px;\n }\n\n QTabBar::tab:selected {\n background: rgb(200, 200, 200);\n border:1px solid rgb(150, 150, 150);\n margin-bottom: 2px;\n }\n ')

    # NOTE(review): the bare "()" below looks like a stripped decorator
    # (possibly "@pyqtSlot()" or a project-specific wrapper) — confirm upstream.
    ()
    def tabchanged(self):
        # Repopulate whichever pane just became visible so it reflects the
        # current state of the figure.
        if (self.currentWidget() == self.tab_compare):
            self.tab_compare.layer_tabs.repopulate_and_activate_current()
        elif (self.currentWidget() == self.tab_edit):
            self.tab_edit.artist_tabs.repopulate_and_activate_current()

    def dragEnterEvent(self, e):
        # Delegate all drag/drop handling to the Data tab.
        self.tab_open.dragEnterEvent(e)

    def dragLeaveEvent(self, e):
        self.tab_open.dragLeaveEvent(e)

    def dropEvent(self, e):
        self.tab_open.dropEvent(e)
class TestHuffman():
    """Round-trip checks for the HPACK request Huffman tables, using the
    example strings from the HPACK specification."""

    def test_request_huffman_decoder(self):
        # (encoded bytes, expected plaintext) pairs.
        cases = [
            (b'\xf1\xe3\xc2\xe5\xf2:k\xa0\xab\x90\xf4\xff', b'www.example.com'),
            (b'\xa8\xeb\x10d\x9c\xbf', b'no-cache'),
            (b'%\xa8I\xe9[\xa9}\x7f', b'custom-key'),
            (b'%\xa8I\xe9[\xb8\xe8\xb4\xbf', b'custom-value'),
        ]
        for encoded, plain in cases:
            assert decode_huffman(encoded) == plain

    def test_request_huffman_encode(self):
        encoder = HuffmanEncoder(REQUEST_CODES, REQUEST_CODES_LENGTH)
        # (plaintext, expected encoded bytes) pairs.
        cases = [
            (b'www.example.com', b'\xf1\xe3\xc2\xe5\xf2:k\xa0\xab\x90\xf4\xff'),
            (b'no-cache', b'\xa8\xeb\x10d\x9c\xbf'),
            (b'custom-key', b'%\xa8I\xe9[\xa9}\x7f'),
            (b'custom-value', b'%\xa8I\xe9[\xb8\xe8\xb4\xbf'),
        ]
        for plain, encoded in cases:
            assert encoder.encode(plain) == encoded
def _parse_path(path):
    """Normalize *path* into a parsed-path object.

    Accepts an already-parsed path, a pathlib path, or a string; URL-like
    strings with known schemes become _ParsedPath via from_uri, everything
    else falls back to _UnparsedPath.  Raises PathError for other types.
    """
    if isinstance(path, _Path):
        return path
    if pathlib and isinstance(path, pathlib.PurePath):
        return _ParsedPath(path.as_posix(), None, None)
    if not isinstance(path, str):
        raise PathError("invalid path '{!r}'".format(path))
    # Windows drive-letter paths ("C:...") are filesystem paths, not URLs.
    if (sys.platform == 'win32') and re.match('^[a-zA-Z]\\:', path):
        if pathlib:
            return _ParsedPath(pathlib.Path(path).as_posix(), None, None)
        return _UnparsedPath(path)
    # GDAL VSI paths are passed through untouched.
    if path.startswith('/vsi'):
        return _UnparsedPath(path)
    parts = urlparse(path)
    # Only parse URLs whose every (possibly chained) scheme is recognized.
    if parts.scheme and all((p in SCHEMES) for p in parts.scheme.split('+')):
        return _ParsedPath.from_uri(path)
    return _UnparsedPath(path)
def hydra_init(cfg_name='config') -> None:
    """Register FairseqConfig — and each of its top-level groups — with
    Hydra's ConfigStore so they can be composed by name."""
    store = ConfigStore.instance()
    store.store(name=cfg_name, node=FairseqConfig)
    for field_name in FairseqConfig.__dataclass_fields__:
        node = FairseqConfig.__dataclass_fields__[field_name].default
        try:
            store.store(name=field_name, node=node)
        except BaseException:
            # Surface which group failed to register before re-raising.
            logger.error(f'{field_name} - {node}')
            raise
def init_eigenstate_visualization(eigenstates):
    """Return the visualization object matching ``eigenstates.type``.

    Fix: unknown types previously fell off the end and returned ``None``
    silently, deferring the failure to a confusing AttributeError at the call
    site; an explicit NotImplementedError is raised instead.

    :param eigenstates: eigenstates container with a ``type`` attribute
    :raises NotImplementedError: for an unrecognized eigenstates type
    """
    if (eigenstates.type == 'SingleParticle1D'):
        return VisualizationSingleParticle1D(eigenstates)
    elif (eigenstates.type == 'SingleParticle2D'):
        return VisualizationSingleParticle2D(eigenstates)
    elif (eigenstates.type == 'SingleParticle3D'):
        # Imported lazily: the 3D backend pulls in extra dependencies.
        from .single_particle_3D import VisualizationSingleParticle3D
        return VisualizationSingleParticle3D(eigenstates)
    elif (eigenstates.type == 'TwoIdenticalParticles1D'):
        return VisualizationIdenticalParticles1D(eigenstates)
    raise NotImplementedError(
        'visualization for eigenstates type {!r} is not implemented'.format(eigenstates.type))
class JobList(ListView):
    """Paginated job listing with an optional ?q= filter matching the title
    or description (case-insensitive)."""
    template_name = 'jobs_list.html'
    context_object_name = 'jobs'
    paginate_by = 20
    paginator_class = DiggPaginator
    model = JobItem

    def get_queryset(self):
        queryset = super().get_queryset()
        query = self.request.GET.get('q')
        if query:
            # Match either title or description, case-insensitively.
            queryset = queryset.filter(Q(title__icontains=query) | Q(description__icontains=query))
        return queryset.order_by('-created_at')

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Highlight the "jobs" entry in the navigation menu.
        context['active_menu_item'] = 'jobs'
        return context
class FragDB():
    """Context-managed handle around a fragment database connection/cursor pair."""

    def __init__(self, metadata, options, db, c):
        self.metadata = metadata
        self.db = db
        self.c = c
        self.options = options

    def get(self, id):
        """Look up a fragment record by title, falling back to the error table."""
        record = select_fragment_record_by_title(self.c, id)
        if record is not None:
            return record
        return select_fragment_error_record_by_title(self.c, id)

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()

    def close(self):
        """Close the cursor and roll back the connection; safe to call twice."""
        cursor, connection = self.c, self.db
        if cursor is not None:
            # Clear the references first so the handle reads as closed even if
            # the close/rollback below raises.
            self.c = self.db = None
            cursor.close()
            connection.rollback()

    def __iter__(self):
        return iter_fragment_records(self.db.cursor(), self.db.cursor())

    def iter_error_records(self):
        return iter_fragment_error_records(self.db.cursor(), self.db.cursor())

    def cursor(self):
        """Return a fresh cursor on the underlying connection."""
        return self.db.cursor()
def multiplicative_rlattention(queries, keys, values, bias, sample, keep_prob=None, name=None, epsilon=1e-06):
    """Multiplicative attention with a sample mask applied before the softmax
    normalization.

    Fix: removed a leftover debug line ``weights = tf.Print(weights,
    [weights], summarize=)`` which was a SyntaxError (``summarize=`` had no
    value) and would have spammed logs even if completed.

    :param queries: query tensor, matmul'd against keys (transposed)
    :param bias: optional additive attention bias (e.g. masking), or None
    :param sample: multiplicative mask zeroing out unsampled positions
    :param keep_prob: optional dropout keep probability for the weights
    :param epsilon: guards against division by zero on fully-masked rows
    :return: dict with 'weights' (masked softmax) and 'outputs' (weighted values)
    """
    with tf.name_scope(name, default_name='multiplicative_rlattention', values=[queries, keys, values, bias]):
        logits = tf.matmul(queries, keys, transpose_b=True)
        if (bias is not None):
            logits += bias
        # Masked softmax: zero out unsampled positions before normalizing.
        logits_exp = tf.exp(logits)
        logits_exp *= sample
        logits_sum = tf.reduce_sum(logits_exp, axis=(- 1), keepdims=True)
        weights = (logits_exp / (logits_sum + epsilon))
        if ((keep_prob is not None) and (keep_prob < 1.0)):
            weights = tf.nn.dropout(weights, keep_prob)
        outputs = tf.matmul(weights, values)
        return {'weights': weights, 'outputs': outputs}
class SmartBulb(SmartDevice):
    """Representation of a TP-Link smart bulb.

    Exposes bulb state (on/off, brightness, color temperature, HSV color,
    presets) through the 'lightingservice' protocol namespace and registers
    the common schedule/usage/antitheft/time/emeter/countdown/cloud modules.

    NOTE(review): the bare `_update` lines preceding many methods look like
    decorators whose '@' prefix was lost in transcription (other code reads
    e.g. `self.is_color` without calling it, suggesting property-style
    decorators) -- confirm against the original source.
    """
    # Protocol namespace and method used for light-state queries/updates.
    LIGHT_SERVICE = 'smartlife.iot.smartbulb.lightingservice'
    SET_LIGHT_METHOD = 'transition_light_state'
    emeter_type = 'smartlife.iot.common.emeter'
    def __init__(self, host: str, *, config: Optional[DeviceConfig]=None, protocol: Optional[TPLinkProtocol]=None) -> None:
        super().__init__(host=host, config=config, protocol=protocol)
        self._device_type = DeviceType.Bulb
        # Register the common bulb modules under their protocol namespaces.
        self.add_module('schedule', Schedule(self, 'smartlife.iot.common.schedule'))
        self.add_module('usage', Usage(self, 'smartlife.iot.common.schedule'))
        self.add_module('antitheft', Antitheft(self, 'smartlife.iot.common.anti_theft'))
        self.add_module('time', Time(self, 'smartlife.iot.common.timesetting'))
        self.add_module('emeter', Emeter(self, self.emeter_type))
        self.add_module('countdown', Countdown(self, 'countdown'))
        self.add_module('cloud', Cloud(self, 'smartlife.iot.common.cloud'))
    _update
    def is_color(self) -> bool:
        """Whether the bulb supports color (per cached sys_info)."""
        sys_info = self.sys_info
        return bool(sys_info['is_color'])
    _update
    def is_dimmable(self) -> bool:
        """Whether the bulb supports dimming."""
        sys_info = self.sys_info
        return bool(sys_info['is_dimmable'])
    _update
    def is_variable_color_temp(self) -> bool:
        """Whether the bulb supports adjustable color temperature."""
        sys_info = self.sys_info
        return bool(sys_info['is_variable_color_temp'])
    _update
    def valid_temperature_range(self) -> ColorTempRange:
        """Color temperature range supported by this model.

        Looked up by regex match of the model name in TPLINK_KELVIN; falls
        back to 2700-5000 K with a warning for unknown models. Raises
        SmartDeviceException when the bulb has no variable color temp.
        """
        if (not self.is_variable_color_temp):
            raise SmartDeviceException('Color temperature not supported')
        for (model, temp_range) in TPLINK_KELVIN.items():
            sys_info = self.sys_info
            if re.match(model, sys_info['model']):
                return temp_range
        _LOGGER.warning('Unknown color temperature range, fallback to 2700-5000')
        return ColorTempRange(2700, 5000)
    _update
    def light_state(self) -> Dict[(str, str)]:
        """Current light state; when the bulb is off, the stored default
        on-state is returned with 'on_off' forced to the actual value."""
        light_state = self.sys_info['light_state']
        if (light_state is None):
            raise SmartDeviceException('The device has no light_state or you have not called update()')
        is_on = light_state['on_off']
        if (not is_on):
            off_state = {**light_state['dft_on_state'], 'on_off': is_on}
            return cast(dict, off_state)
        return light_state
    _update
    def has_effects(self) -> bool:
        """Whether the bulb reports a lighting-effect state."""
        return ('lighting_effect_state' in self.sys_info)
    async def get_light_details(self) -> Dict[(str, int)]:
        """Query detailed light information from the device."""
        return (await self._query_helper(self.LIGHT_SERVICE, 'get_light_details'))
    async def get_turn_on_behavior(self) -> TurnOnBehaviors:
        """Query the configured default turn-on behaviors."""
        return TurnOnBehaviors.parse_obj((await self._query_helper(self.LIGHT_SERVICE, 'get_default_behavior')))
    async def set_turn_on_behavior(self, behavior: TurnOnBehaviors):
        """Set the default turn-on behaviors."""
        return (await self._query_helper(self.LIGHT_SERVICE, 'set_default_behavior', behavior.dict(by_alias=True)))
    async def get_light_state(self) -> Dict[(str, Dict)]:
        """Query the current light state directly from the device."""
        return (await self._query_helper(self.LIGHT_SERVICE, 'get_light_state'))
    async def set_light_state(self, state: Dict, *, transition: Optional[int]=None) -> Dict:
        """Send a new light state, optionally with a transition period.

        'on_off' defaults to 1. 'ignore_default' is 0 (reuse the stored
        default state) only when turning on with nothing but
        NON_COLOR_MODE_FLAGS keys; any explicit color/brightness change
        sets it to 1 so the new values take effect.
        """
        if (transition is not None):
            state['transition_period'] = transition
        if ('on_off' not in state):
            state['on_off'] = 1
        if (state['on_off'] and NON_COLOR_MODE_FLAGS.issuperset(state)):
            state['ignore_default'] = 0
        else:
            state['ignore_default'] = 1
        light_state = (await self._query_helper(self.LIGHT_SERVICE, self.SET_LIGHT_METHOD, state))
        return light_state
    _update
    def hsv(self) -> HSV:
        """Current color as an HSV tuple; requires a color bulb."""
        if (not self.is_color):
            raise SmartDeviceException('Bulb does not support color.')
        light_state = cast(dict, self.light_state)
        hue = light_state['hue']
        saturation = light_state['saturation']
        value = light_state['brightness']
        return HSV(hue, saturation, value)
    def _raise_for_invalid_brightness(self, value):
        # Brightness is a percentage: only ints in [0, 100] are accepted.
        if ((not isinstance(value, int)) or (not (0 <= value <= 100))):
            raise ValueError(f'Invalid brightness value: {value} (valid range: 0-100%)')
    _update
    async def set_hsv(self, hue: int, saturation: int, value: Optional[int]=None, *, transition: Optional[int]=None) -> Dict:
        """Set hue (0-360), saturation (0-100) and optional brightness.

        color_temp is zeroed to switch the bulb into color mode.
        """
        if (not self.is_color):
            raise SmartDeviceException('Bulb does not support color.')
        if ((not isinstance(hue, int)) or (not (0 <= hue <= 360))):
            raise ValueError(f'Invalid hue value: {hue} (valid range: 0-360)')
        if ((not isinstance(saturation, int)) or (not (0 <= saturation <= 100))):
            raise ValueError(f'Invalid saturation value: {saturation} (valid range: 0-100%)')
        light_state = {'hue': hue, 'saturation': saturation, 'color_temp': 0}
        if (value is not None):
            self._raise_for_invalid_brightness(value)
            light_state['brightness'] = value
        return (await self.set_light_state(light_state, transition=transition))
    _update
    def color_temp(self) -> int:
        """Current color temperature in Kelvin."""
        if (not self.is_variable_color_temp):
            raise SmartDeviceException('Bulb does not support colortemp.')
        light_state = self.light_state
        return int(light_state['color_temp'])
    _update
    async def set_color_temp(self, temp: int, *, brightness=None, transition: Optional[int]=None) -> Dict:
        """Set color temperature (validated against the model's range)."""
        if (not self.is_variable_color_temp):
            raise SmartDeviceException('Bulb does not support colortemp.')
        valid_temperature_range = self.valid_temperature_range
        if ((temp < valid_temperature_range[0]) or (temp > valid_temperature_range[1])):
            raise ValueError('Temperature should be between {} and {}, was {}'.format(*valid_temperature_range, temp))
        light_state = {'color_temp': temp}
        if (brightness is not None):
            light_state['brightness'] = brightness
        return (await self.set_light_state(light_state, transition=transition))
    _update
    def brightness(self) -> int:
        """Current brightness in percent."""
        if (not self.is_dimmable):
            raise SmartDeviceException('Bulb is not dimmable.')
        light_state = self.light_state
        return int(light_state['brightness'])
    _update
    async def set_brightness(self, brightness: int, *, transition: Optional[int]=None) -> Dict:
        """Set brightness in percent (0-100)."""
        if (not self.is_dimmable):
            raise SmartDeviceException('Bulb is not dimmable.')
        self._raise_for_invalid_brightness(brightness)
        light_state = {'brightness': brightness}
        return (await self.set_light_state(light_state, transition=transition))
    _update
    def state_information(self) -> Dict[(str, Any)]:
        """Human-readable summary of capabilities and current state."""
        info: Dict[(str, Any)] = {'Brightness': self.brightness, 'Is dimmable': self.is_dimmable}
        if self.is_variable_color_temp:
            info['Color temperature'] = self.color_temp
            info['Valid temperature range'] = self.valid_temperature_range
        if self.is_color:
            info['HSV'] = self.hsv
        info['Presets'] = self.presets
        return info
    _update
    def is_on(self) -> bool:
        """Whether the light is currently on."""
        light_state = self.light_state
        return bool(light_state['on_off'])
    async def turn_off(self, *, transition: Optional[int]=None, **kwargs) -> Dict:
        """Turn the bulb off, optionally with a transition."""
        return (await self.set_light_state({'on_off': 0}, transition=transition))
    async def turn_on(self, *, transition: Optional[int]=None, **kwargs) -> Dict:
        """Turn the bulb on, optionally with a transition."""
        return (await self.set_light_state({'on_off': 1}, transition=transition))
    _update
    def has_emeter(self) -> bool:
        """Bulbs always report energy-meter support."""
        return True
    async def set_alias(self, alias: str) -> None:
        """Set the device alias (bulbs use the common system namespace)."""
        return (await self._query_helper('smartlife.iot.common.system', 'set_dev_alias', {'alias': alias}))
    _update
    def presets(self) -> List[SmartBulbPreset]:
        """Stored light-state presets from sys_info."""
        return [SmartBulbPreset(**vals) for vals in self.sys_info['preferred_state']]
    async def save_preset(self, preset: SmartBulbPreset):
        """Overwrite an existing preset slot; the index must already exist."""
        if (len(self.presets) == 0):
            raise SmartDeviceException('Device does not supported saving presets')
        if (preset.index >= len(self.presets)):
            raise SmartDeviceException('Invalid preset index')
        return (await self._query_helper(self.LIGHT_SERVICE, 'set_preferred_state', preset.dict(exclude_none=True)))
    def max_device_response_size(self) -> int:
        """Maximum response payload size (bytes) expected from a bulb."""
        return 4096
def load_and_covnert_case(input_image: str, input_seg: str, output_image: str, output_seg: str, min_component_size: int=50):
    """Load an image/segmentation pair and re-save the segmentation binarized.

    The segmentation must use background label 0 and a foreground label of
    either 1 or 255; a 255 foreground is remapped to 1 before saving.

    Args:
        input_image: path of the source image (copied through unchanged).
        input_seg: path of the source segmentation.
        output_image: destination path for the image.
        output_seg: destination path for the binarized mask.
        min_component_size: currently unused; kept for interface
            compatibility with callers.
    """
    seg = io.imread(input_seg)
    # Compute the label set once instead of calling np.unique three times,
    # and fail with an informative message on unexpected labelings.
    labels = np.unique(seg)
    assert labels[0] == 0 and labels[1] in (255, 1), \
        f'expected foreground label 1 or 255 on background 0, got labels {labels}'
    seg[seg == 255] = 1
    image = io.imread(input_image)
    io.imsave(output_seg, seg, check_contrast=False)
    io.imsave(output_image, image)
def transform_import(builder: IRBuilder, node: Import) -> None:
    """Generate IR for an `import` statement.

    Grouped top-level imports are compiled into a single bulk import call;
    non-top-level imports fall back to importing each module individually
    and binding its name into the module globals dict.
    """
    if node.is_mypy_only:
        return
    if (not node.is_top_level):
        # Function-scope import: import each module and store the bound
        # name into globals via __setitem__.
        globals = builder.load_globals_dict()
        for (mod_id, as_name) in node.ids:
            builder.gen_import(mod_id, node.line)
            (globals_id, globals_name) = import_globals_id_and_name(mod_id, as_name)
            builder.gen_method_call(globals, '__setitem__', [builder.load_str(globals_name), builder.get_module(globals_id, node.line)], result_type=None, line=node.line)
        return
    if (node not in builder.module_import_groups):
        # Only the representative node of each import group emits code; the
        # other statements are covered by that group's bulk import below.
        return
    modules = []
    static_ptrs = []
    mod_lines = []
    for import_node in builder.module_import_groups[node]:
        for (mod_id, as_name) in import_node.ids:
            builder.imports[mod_id] = None
            modules.append((mod_id, *import_globals_id_and_name(mod_id, as_name)))
            # Address of the static slot caching the imported module object.
            mod_static = LoadStatic(object_rprimitive, mod_id, namespace=NAMESPACE_MODULE)
            static_ptrs.append(builder.add(LoadAddress(object_pointer_rprimitive, mod_static)))
            mod_lines.append(Integer(import_node.line, c_pyssize_t_rprimitive))
    static_array_ptr = builder.builder.setup_rarray(object_pointer_rprimitive, static_ptrs)
    import_line_ptr = builder.builder.setup_rarray(c_pyssize_t_rprimitive, mod_lines)
    builder.call_c(import_many_op, [builder.add(LoadLiteral(tuple(modules), object_rprimitive)), static_array_ptr, builder.load_globals_dict(), builder.load_str(builder.module_path), builder.load_str(builder.fn_info.name), import_line_ptr], NO_TRACEBACK_LINE_NO)
class NLLModel(nn.Module):
    """Ensemble of `args.n_model` sequence classifiers with co-regularization.

    Each sub-model is placed on its own GPU (round-robin over args.n_gpu);
    outputs are gathered on device 0. When labels are provided, the loss is
    the mean cross-entropy over sub-models plus `args.alpha_t` times a KL
    agreement term pulling each model's distribution towards the ensemble
    average.
    """
    def __init__(self, args, config):
        super().__init__()
        self.args = args
        self.models = nn.ModuleList()
        # Round-robin device assignment: model i lives on GPU (i % n_gpu).
        self.device = [(i % args.n_gpu) for i in range(args.n_model)]
        self.loss_fnt = nn.CrossEntropyLoss()
        for i in range(args.n_model):
            model = AutoModelForSequenceClassification.from_pretrained(args.model_name_or_path, config=config)
            model.to(self.device[i])
            self.models.append(model)
    def forward(self, input_ids, attention_mask, labels=None):
        """Run all sub-models; return the last model's output tuple.

        With labels, position 0 is replaced by the combined loss and the raw
        regularization loss is appended at the end.
        """
        num_models = len(self.models)
        outputs = []
        for i in range(num_models):
            output = self.models[i](input_ids=input_ids.to(self.device[i]), attention_mask=attention_mask.to(self.device[i]), labels=(labels.to(self.device[i]) if (labels is not None) else None), return_dict=False)
            # Move every model's outputs to device 0 for aggregation.
            output = tuple([o.to(0) for o in output])
            outputs.append(output)
        model_output = outputs[(- 1)]
        if (labels is not None):
            loss = (sum([output[0] for output in outputs]) / num_models)
            logits = [output[1] for output in outputs]
            probs = [F.softmax(logit, dim=(- 1)) for logit in logits]
            avg_prob = torch.stack(probs, dim=0).mean(0)
            # KL between each model's distribution and the ensemble mean.
            reg_loss = (sum([kl_div(avg_prob, prob) for prob in probs]) / num_models)
            loss = (loss + (self.args.alpha_t * reg_loss.mean()))
            model_output = (((loss,) + model_output[1:]) + (reg_loss,))
        return model_output
    def resize_token_embeddings(self, n):
        """Resize every sub-model's token embedding matrix to n entries."""
        for i in range(len(self.models)):
            self.models[i].resize_token_embeddings(n)
def total_ordering(cls):
    """Class decorator deriving ``__le__``, ``__gt__``, ``__ge__`` and
    ``__ne__`` from a class's own ``__eq__`` and ``__lt__``.

    The class must define both ``__eq__`` and ``__lt__`` directly (not
    merely inherit them); every derived comparison is expressed purely in
    terms of those two.
    """
    assert '__eq__' in cls.__dict__
    assert '__lt__' in cls.__dict__

    def le(self, other):
        return self == other or self < other

    def gt(self, other):
        return not (self == other or self < other)

    def ge(self, other):
        return not self < other

    def ne(self, other):
        return not self.__eq__(other)

    cls.__le__ = le
    cls.__gt__ = gt
    cls.__ge__ = ge
    cls.__ne__ = ne
    return cls
class IndentLoggerAdapter(logging.LoggerAdapter):
    """LoggerAdapter that indents every line of a message by the adapter's
    ``extra['indent']`` level while DEBUG logging is active."""

    def process(self, msg, kwargs):
        """Return the (possibly indented) message and kwargs.

        Bug fix: the original only returned the tuple when DEBUG logging
        was enabled, so the adapter returned None (breaking logging calls)
        otherwise. The ``(msg, kwargs)`` tuple is now returned on every
        path, as LoggerAdapter.process requires.
        """
        if get_yf_logger().isEnabledFor(logging.DEBUG):
            indent = ' ' * self.extra['indent']
            if not isinstance(msg, str):
                msg = str(msg)
            msg = '\n'.join(indent + line for line in msg.split('\n'))
        return msg, kwargs
def is_protocol_implementation(left: Instance, right: Instance, proper_subtype: bool=False, class_obj: bool=False, skip: (list[str] | None)=None, options: (Options | None)=None) -> bool:
    """Check whether 'left' structurally implements the protocol 'right'.

    With proper_subtype=True a proper-subtype relation is required instead
    of ordinary subtyping; class_obj indicates 'left' is being checked as a
    class object. Members named in 'skip' (plus __init__/__new__) are not
    checked. A positive result is recorded in the subtype cache.
    """
    assert right.type.is_protocol
    if (skip is None):
        skip = []
    type_state.record_protocol_subtype_check(left.type, right.type)
    # Constructors are never part of a protocol's interface.
    members_not_to_check = {'__init__', '__new__'}
    members_not_to_check.update(skip)
    if left.type.is_protocol:
        # Quick pre-check: a protocol can only implement another protocol
        # if it declares at least the same member names.
        members_right = (set(right.type.protocol_members) - members_not_to_check)
        members_left = (set(left.type.protocol_members) - members_not_to_check)
        if (not members_right.issubset(members_left)):
            return False
    # Assume the relation holds while it is being checked, so checks of
    # recursive protocols terminate.
    assuming = (right.type.assuming_proper if proper_subtype else right.type.assuming)
    for (l, r) in reversed(assuming):
        if ((l == left) and (r == right)):
            return True
    with pop_on_exit(assuming, left, right):
        for member in right.type.protocol_members:
            if (member in members_not_to_check):
                continue
            # Positional argument names only matter for __call__.
            ignore_names = (member != '__call__')
            supertype = get_proper_type(find_member(member, right, left))
            assert (supertype is not None)
            subtype = mypy.typeops.get_protocol_member(left, member, class_obj)
            if (not subtype):
                return False
            if isinstance(subtype, PartialType):
                # Treat a partially inferred member as None or as the type
                # filled with Any type arguments.
                subtype = (NoneType() if (subtype.type is None) else Instance(subtype.type, ([AnyType(TypeOfAny.unannotated)] * len(subtype.type.type_vars))))
            if (not proper_subtype):
                is_compat = is_subtype(subtype, supertype, ignore_pos_arg_names=ignore_names, options=options)
            else:
                is_compat = is_proper_subtype(subtype, supertype)
            if (not is_compat):
                return False
            if (isinstance(subtype, NoneType) and isinstance(supertype, CallableType)):
                # A None member cannot satisfy a callable protocol member.
                return False
            subflags = get_member_flags(member, left, class_obj=class_obj)
            superflags = get_member_flags(member, right)
            if (IS_SETTABLE in superflags):
                # Settable members must be invariant: the reverse direction
                # has to hold as well.
                if (not is_subtype(supertype, subtype, options=options)):
                    return False
            if (not class_obj):
                if (IS_SETTABLE not in superflags):
                    if ((IS_CLASSVAR in superflags) and (IS_CLASSVAR not in subflags)):
                        return False
                elif ((IS_CLASSVAR in subflags) != (IS_CLASSVAR in superflags)):
                    return False
            else:
                if ((IS_VAR in superflags) and (IS_CLASSVAR not in subflags)):
                    return False
                if (IS_CLASSVAR in superflags):
                    return False
            if ((IS_SETTABLE in superflags) and (IS_SETTABLE not in subflags)):
                return False
            if ((IS_CLASS_OR_STATIC in superflags) and (IS_CLASS_OR_STATIC not in subflags)):
                return False
    if (not proper_subtype):
        ignore_names = (right.type.protocol_members != ['__call__'])
    else:
        ignore_names = False
    # Cache the successful check under the matching subtype kind.
    subtype_kind = SubtypeVisitor.build_subtype_kind(subtype_context=SubtypeContext(ignore_pos_arg_names=ignore_names), proper_subtype=proper_subtype)
    type_state.record_subtype_cache_entry(subtype_kind, left, right)
    return True
def parse_args():
    """Parse command-line arguments for prototype calculation.

    Returns:
        argparse.Namespace with `config`, `checkpoint`, `round`, `postfix`,
        `epochs` and `gpu_id` attributes.
    """
    parser = argparse.ArgumentParser(description='Calculate the prototype for trained model')
    parser.add_argument('config', help='trained model config file path')
    parser.add_argument('checkpoint', help='checkpoint file path')
    # Help text fixed: the original said 'save dir for the prototypes',
    # which described a different option; --round is an integer round index.
    parser.add_argument('--round', type=int, default=1, help='round index used when saving the prototypes')
    parser.add_argument('--postfix', default=None, help='postfix for saved file name')
    parser.add_argument('--epochs', default=4, type=int, help='epochs for calculating the prototypes')
    parser.add_argument('--gpu-id', type=int, default=0, help='id of gpu to use (only applicable to non-distributed training)')
    return parser.parse_args()
class TestPDFJSVersion():
    """Tests for version._pdfjs_version()."""
    def test_not_found(self, mocker):
        # A missing pdf.js resource is reported as 'no'.
        mocker.patch('qutebrowser.utils.version.pdfjs.get_pdfjs_res_and_path', side_effect=pdfjs.PDFJSNotFound('/build/pdf.js'))
        assert (version._pdfjs_version() == 'no')
    def test_unknown(self, monkeypatch):
        # Source without a recognizable version string, and no path.
        monkeypatch.setattr('qutebrowser.utils.version.pdfjs.get_pdfjs_res_and_path', (lambda path: (b'foobar', None)))
        assert (version._pdfjs_version() == 'unknown (bundled)')
    # NOTE(review): the bare `.parametrize(...)` below appears to be a
    # `@pytest.mark.parametrize` decorator whose prefix was lost in
    # transcription -- confirm against the original source.
    .parametrize('varname', ['PDFJS.version', 'var pdfjsVersion', 'const pdfjsVersion'])
    def test_known(self, monkeypatch, varname):
        # Each supported version-variable spelling should be picked up.
        pdfjs_code = textwrap.dedent("\n // Initializing PDFJS global object (if still undefined)\n if (typeof PDFJS === 'undefined') {\n (typeof window !== 'undefined' ? window : this).PDFJS = {};\n }\n\n VARNAME = '1.2.109';\n PDFJS.build = '875588d';\n\n (function pdfjsWrapper() {\n // Use strict in our context only - users might not want it\n 'use strict';\n ".replace('VARNAME', varname)).strip().encode('utf-8')
        monkeypatch.setattr('qutebrowser.utils.version.pdfjs.get_pdfjs_res_and_path', (lambda path: (pdfjs_code, '/foo/bar/pdf.js')))
        assert (version._pdfjs_version() == '1.2.109 (/foo/bar/pdf.js)')
    def test_real_file(self, data_tmpdir):
        # Smoke test against an actually installed pdf.js, if present.
        if (not pdfjs.is_available()):
            pytest.skip('No pdfjs found')
        ver = version._pdfjs_version()
        assert (ver.split()[0] not in ['no', 'unknown']), ver
def _serialize(value: Any, memo: Optional[SerializeMemoizer]) -> Any:
    """Recursively convert *value* into plain, JSON-friendly data.

    Serialize instances delegate to their own serialize(); lists and dict
    values are serialized element-wise; frozensets become plain lists
    (elements left untouched); everything else passes through unchanged.
    """
    if isinstance(value, Serialize):
        return value.serialize(memo)
    if isinstance(value, list):
        return [_serialize(item, memo) for item in value]
    if isinstance(value, frozenset):
        return list(value)
    if isinstance(value, dict):
        return {k: _serialize(v, memo) for k, v in value.items()}
    return value
def test_unmeargeable_dimshuffles():
    """Dimshuffles on both sides of a cumsum cannot be merged away, so
    deriving the conditional logp must fail with a clear error."""
    base = pt.random.dirichlet(np.ones((3,)), size=(4, 2))
    transposed = base.dimshuffle((0, 2, 1))
    summed = pt.cumsum(transposed, axis=-2)
    reordered = summed.dimshuffle((1, 0, 2))
    value_var = reordered.clone()
    with pytest.raises(RuntimeError, match='could not be derived'):
        conditional_logp({reordered: value_var})
# NOTE(review): the bare `_tag()` below looks like a decorator whose '@'
# prefix was lost in transcription (e.g. a template-tag registration) --
# confirm against the original source.
_tag()
def render_lang_template(template_name, escape_html=False):
    """Render the best-matching language variant of a template.

    Tries, in order: '<name>_<current locale>.html', '<name>_<first
    configured language>.html', '<name>_en.html', then '<name>.html'.
    Returns the first variant that exists (HTML-escaped when escape_html
    is set), or '' when none can be loaded.
    """
    loc = to_locale(get_language())
    lst = [(((template_name + '_') + loc) + '.html'), (((template_name + '_') + settings.LANGUAGES[0][0]) + '.html'), (template_name + '_en.html'), (template_name + '.html')]
    for el in lst:
        try:
            template = get_template(el)
            html = template.render()
            if escape_html:
                return escape(html)
            else:
                return html
        except TemplateDoesNotExist:
            # Fall through to the next candidate template name.
            pass
    return ''
def run_cmd(cmd):
    """Run *cmd* through the dsz framework and collect matching file paths.

    Command echo is silenced while the command runs and re-enabled
    afterwards.

    Returns:
        The 'DirItem::path' result data on success, or False when the
        result data cannot be retrieved.
    """
    dsz.ui.Echo('Searching for files')
    dsz.control.echo.Off()
    dsz.cmd.Run(cmd, dsz.RUN_FLAG_RECORD)
    dsz.control.echo.On()
    try:
        paths = dsz.cmd.data.Get('DirItem::path', dsz.TYPE_STRING)
        dsz.ui.Echo('Found {0} archive(s)'.format(str(len(paths))))
        return paths
    except RuntimeError:
        return False
def _parse_pvgis_hourly_csv(src, map_variables):
inputs = {}
inputs['latitude'] = float(src.readline().split(':')[1])
inputs['longitude'] = float(src.readline().split(':')[1])
inputs['elevation'] = float(src.readline().split(':')[1])
inputs['radiation_database'] = src.readline().split(':')[1].strip()
while True:
line = src.readline()
if line.startswith('time,'):
names = line.strip().split(',')
break
elif (line.strip() != ''):
inputs[line.split(':')[0]] = line.split(':')[1].strip()
elif (line == ''):
raise ValueError('No data section was detected. File has probably been modified since being downloaded from PVGIS')
data_lines = []
while True:
line = src.readline()
if (line.strip() == ''):
break
else:
data_lines.append(line.strip().split(','))
data = pd.DataFrame(data_lines, columns=names)
data.index = pd.to_datetime(data['time'], format='%Y%m%d:%H%M', utc=True)
data = data.drop('time', axis=1)
if map_variables:
data = data.rename(columns=VARIABLE_MAP)
data = data.astype(float).astype(dtype={'Int': 'int'})
metadata = {}
for line in src.readlines():
if (':' in line):
metadata[line.split(':')[0]] = line.split(':')[1].strip()
return (data, inputs, metadata) |
class RNNAgent(nn.Module):
def __init__(self, input_shape, args):
super(RNNAgent, self).__init__()
self.args = args
self.fc1 = nn.Linear(input_shape, args.hidden_dim)
if self.args.use_rnn:
self.rnn = nn.GRUCell(args.hidden_dim, args.hidden_dim)
else:
self.rnn = nn.Linear(args.hidden_dim, args.hidden_dim)
self.fc2 = nn.Linear(args.hidden_dim, args.n_actions)
def init_hidden(self):
return self.fc1.weight.new(1, self.args.hidden_dim).zero_()
def forward(self, inputs, hidden_state):
x = F.relu(self.fc1(inputs))
h_in = hidden_state.reshape((- 1), self.args.hidden_dim)
if self.args.use_rnn:
h = self.rnn(x, h_in)
else:
h = F.relu(self.rnn(x))
q = self.fc2(h)
return (q, h) |
def test(val_loader, criterion, val_text_features, clip_model, clip_preprocess, clip_device):
    """Evaluate a CLIP model zero-shot on val_loader.

    Encodes each image batch, scores it against the precomputed class text
    features by normalized dot product (temperature 0.01), and tracks both
    plain top-1 accuracy and class-averaged accuracy.

    NOTE(review): relies on module-level globals (use_cuda, cuda_device,
    AverageMeter, Bar, accuracy); `clip_device` is unused in this body.
    `Variable(..., volatile=True)` is the legacy pre-0.4 PyTorch idiom --
    confirm intended torch version (torch.no_grad() is the modern form).

    Returns:
        (average loss, mean per-class accuracy in percent).
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    end = time.time()
    bar = Bar('Processing', max=len(val_loader))
    # Per class: [correct predictions, total samples]; allocated lazily
    # once the number of classes is known from the first batch.
    avg_accuracy_per_class = None
    for (batch_idx, (inputs, targets)) in enumerate(val_loader):
        data_time.update((time.time() - end))
        inputs = clip_preprocess(inputs)
        if use_cuda:
            (inputs, targets) = (inputs.to(cuda_device), targets.to(cuda_device))
        (inputs, targets) = (torch.autograd.Variable(inputs, volatile=True), torch.autograd.Variable(targets))
        outputs = clip_model.encode_image(inputs).float()
        # Cosine similarity between image and text embeddings, sharpened
        # by a fixed temperature of 0.01.
        outputs_norm = nn.functional.normalize(outputs, dim=1)
        val_text_features_norm = nn.functional.normalize(val_text_features, dim=1)
        outputs = torch.einsum('ni,mi->nm', outputs_norm, val_text_features_norm)
        outputs = (outputs / 0.01)
        loss = criterion(outputs, targets)
        if (avg_accuracy_per_class is None):
            avg_accuracy_per_class = [[0.0, 0.0] for _ in range(outputs.shape[1])]
        for i in range(outputs.shape[1]):
            outputs_this_class = outputs[(targets == i)]
            if (outputs_this_class.shape[0] > 0):
                avg_accuracy_per_class[i][0] += (outputs_this_class.argmax(dim=1) == i).sum().item()
                avg_accuracy_per_class[i][1] += outputs_this_class.shape[0]
        prec1 = accuracy(outputs.data, targets.data, topk=(1,))[0]
        losses.update(loss.item(), inputs.size(0))
        top1.update(prec1.item(), inputs.size(0))
        batch_time.update((time.time() - end))
        end = time.time()
        bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1 (non-cls-avg): {top1: .4f} '.format(batch=(batch_idx + 1), size=len(val_loader), data=data_time.avg, bt=batch_time.avg, total=bar.elapsed_td, eta=bar.eta_td, loss=losses.avg, top1=top1.avg)
        bar.next()
    bar.finish()
    print('Num samples per class: ', [x[1] for x in avg_accuracy_per_class])
    # Epsilon avoids division by zero for classes never encountered.
    avg_accuracy_per_class = [((100.0 * x[0]) / (x[1] + 1e-06)) for x in avg_accuracy_per_class]
    print('Average accuracy per class: {}'.format(avg_accuracy_per_class))
    mean_acc = np.mean(avg_accuracy_per_class)
    print('Mean accuracy per class (sum(accuracy) / n_classes): {}'.format(mean_acc))
    return (losses.avg, mean_acc)
class _PtqSession(_EvalSession):
    """Evaluation session that additionally records a single PTQ result.

    NOTE(review): `ptq_result` reads like an accessor that may originally
    have been decorated as a property -- confirm against the original
    source.
    """
    def __init__(self, *args, **kwargs):
        super(_PtqSession, self).__init__(*args, **kwargs)
        # Set exactly once via set_ptq_result().
        self._ptq_result = None
    def ptq_result(self) -> PtqResult:
        """Return the recorded PTQ result; raises if none was set yet."""
        if (self._ptq_result is None):
            raise RuntimeError
        return self._ptq_result
    def set_ptq_result(self, applied_techniques: List[str], model: tf.keras.Model=None, sim: QuantizationSimModel=None, acc: float=None, custom_objects: Dict=None, **kwargs):
        """Record a PTQ result.

        Either pass a raw `model` (a quantsim is built and evaluated here)
        or an already-evaluated (`sim`, `acc`) pair -- exactly one of the
        two combinations, enforced by the asserts below.
        """
        if (sim is None):
            assert (acc is None)
            assert (model is not None)
            sim = self._quantsim_factory(model, **kwargs)
            acc = self._eval_func(sim.model)
        else:
            assert (acc is not None)
            assert (model is None)
        self._set_ptq_result(sim, acc, applied_techniques, custom_objects)
    def _set_ptq_result(self, sim: QuantizationSimModel, acc: float, applied_techniques: List[str], custom_objects: Dict=None) -> PtqResult:
        """Export the sim and store the PtqResult; only callable once."""
        if (self._ptq_result is not None):
            raise RuntimeError('sess.eval() can be called only once per each _EvalSession instance.')
        (model_path, encoding_path) = self._export(sim, custom_objects)
        self._ptq_result = PtqResult(model_path=model_path, encoding_path=encoding_path, accuracy=acc, applied_techniques=applied_techniques)
        _logger.info(self._ptq_result)
        return self._ptq_result
    def _export(self, sim: QuantizationSimModel, custom_objects: Dict=None) -> Tuple[(str, str)]:
        """Export the model and encodings into the session's results dir."""
        sim.export(path=self._results_dir, filename_prefix=self._filename, custom_objects=custom_objects)
        model_path = os.path.join(self._results_dir, f'{self._filename}')
        encoding_path = os.path.join(self._results_dir, f'{self._filename}.encodings')
        _logger.info('The results of %s is saved in %s and %s.', self._title, model_path, encoding_path)
        return (model_path, encoding_path)
class Tformat_locale(TestCase):
    """Locale-aware number/time formatting helpers return strings."""
    def test_format_int_locale(self):
        assert isinstance(util.format_int_locale(1024), str)
    def test_format_float_locale(self):
        assert isinstance(util.format_float_locale(1024.1024), str)
    def test_format_time_seconds(self):
        assert isinstance(util.format_time_seconds(1024), str)
        # Under a known numeric locale the output uses thousands separators
        # and pluralizes correctly.
        with locale_numeric_conv():
            assert (format_time_seconds(1024) == '1,024 seconds')
            assert (format_time_seconds(1) == '1 second')
def compile_rule(filename):
    """Build a regex match predicate from *filename*.

    A name wrapped in braces ('{pattern}') is treated as an inline regular
    expression. Otherwise the file is read as one pattern per line (blank
    lines and '#'-comment lines ignored) and the result matches any of
    those patterns anchored at the end of the string.

    Returns:
        The bound ``match`` method of the compiled pattern.
    """
    if filename.startswith('{') and filename.endswith('}'):
        return re.compile(filename[1:-1]).match
    with open(filename) as f:
        patterns = '|'.join(i.strip() for i in f if i.strip() and not i.startswith('#'))
        # Bug fix: the original used '(:?' -- a typo for the non-capturing
        # group '(?:' -- which also allowed a stray leading ':' to match.
        # (The original's extra ''.join(...) around the '|'-join was a
        # no-op and has been dropped.)
        return re.compile('(?:' + patterns + ')$').match
class GDN(nn.Module):
    """Generalized Divisive Normalization layer.

    Each channel is normalized by a learned combination of the squared
    activations of all channels:

        y[c] = x[c] / sqrt(beta[c] + sum_k gamma[c, k] * x[k]^2)

    With ``inverse=True`` the layer multiplies by the square root instead
    of dividing (IGDN).
    """

    def __init__(self, in_channels, inverse=False, beta_min=1e-06, gamma_init=0.1):
        super().__init__()
        beta_min = float(beta_min)
        gamma_init = float(gamma_init)
        self.inverse = bool(inverse)
        # beta starts at one per channel, kept positive via the reparametrizer.
        self.beta_reparam = NonNegativeParametrizer(minimum=beta_min)
        self.beta = nn.Parameter(self.beta_reparam.init(torch.ones(in_channels)))
        # gamma starts as a scaled identity: initially each channel only
        # sees its own energy.
        self.gamma_reparam = NonNegativeParametrizer()
        self.gamma = nn.Parameter(self.gamma_reparam.init(gamma_init * torch.eye(in_channels)))

    def forward(self, x):
        _, channels, _, _ = x.size()
        beta = self.beta_reparam(self.beta)
        gamma = self.gamma_reparam(self.gamma).reshape(channels, channels, 1, 1)
        # A 1x1 convolution computes the per-channel weighted sum of squares.
        norm = F.conv2d(x ** 2, gamma, beta)
        if self.inverse:
            norm = torch.sqrt(norm)
        else:
            norm = torch.rsqrt(norm)
        return x * norm
class Callback(object):
    """Base class for training callbacks.

    Subclasses override whichever ``on_*`` hooks they care about; every
    default implementation is a no-op. The training loop injects its
    parameters and model via set_params/set_model before training starts.
    """

    def __init__(self):
        # Filled in by the training loop when validation data is available.
        self.validation_data = None

    def set_params(self, params):
        """Store the training parameters supplied by the trainer."""
        self.params = params

    def set_model(self, model):
        """Store a reference to the model being trained."""
        self.model = model

    def on_epoch_begin(self, epoch, logs=None):
        """Called at the start of an epoch."""

    def on_epoch_end(self, epoch, logs=None):
        """Called at the end of an epoch."""

    def on_batch_begin(self, batch, logs=None):
        """Called right before processing a batch."""

    def on_batch_end(self, batch, logs=None):
        """Called right after processing a batch."""

    def on_train_begin(self, logs=None):
        """Called once when training starts."""

    def on_train_end(self, logs=None):
        """Called once when training ends."""
class SyntaxHighlighting(object):
    """Syntax-highlighting configuration backed by the parser Manager."""
    # Style element descriptions for all available parsers (class-wide).
    _styleElements = Manager.getStyleElementDescriptionsForAllParsers()
    def parser(self):
        """Return the currently selected parser, or None if unset."""
        try:
            return self.__parser
        except AttributeError:
            # No parser has been assigned via setParser() yet.
            return None
    # NOTE(review): the bare `_option(None)` below looks like a decorator
    # whose '@' prefix was lost in transcription -- confirm against the
    # original source.
    _option(None)
    def setParser(self, parserName=''):
        """Select a parser by name and refresh the style."""
        self.__parser = Manager.getParserByName(parserName)
        self.setStyle()
class AdditionsExportAll(ContextMenuUnconditional):
    """Context-menu entry that copies every addition of one kind (drones,
    fighters, cargo, implants or boosters) from the current view to the
    clipboard."""

    visibilitySetting = 'additionsCopyPaste'

    def __init__(self):
        self.mainFrame = gui.mainFrame.MainFrame.getInstance()
        # srcContext -> (display name, item getter, export function)
        self.viewSpecMap = {'droneItemMisc': (_t('Drones'), (lambda cw: cw.drones), exportDrones), 'fighterItemMisc': (_t('Fighters'), (lambda cw: cw.fighters), exportFighters), 'cargoItemMisc': (_t('Cargo Items'), (lambda cw: cw.cargo), exportCargo), 'implantItemMisc': (_t('Implants'), (lambda cw: cw.implants), exportImplants), 'implantItemMiscChar': (_t('Implants'), (lambda cw: cw.implants), exportImplants), 'boosterItemMisc': (_t('Boosters'), (lambda cw: cw.boosters), exportBoosters)}

    def display(self, callingWindow, srcContext):
        """Show the entry only for supported contexts when an active fit
        exists and the window actually holds items of that kind."""
        if srcContext not in self.viewSpecMap:
            return False
        fit = Fit.getInstance().getFit(self.mainFrame.getActiveFit())
        if fit is None:
            return False
        getter = self.viewSpecMap[srcContext][1]
        if not getter(callingWindow):
            return False
        self.srcContext = srcContext
        return True

    def getText(self, callingWindow, itmContext):
        """Menu label, e.g. 'Copy All Drones'."""
        label = self.viewSpecMap[self.srcContext][0]
        return _t('Copy All {}').format(label)

    def activate(self, callingWindow, fullContext, i):
        """Export the selected kind of items and copy the result."""
        _, getter, exporter = self.viewSpecMap[self.srcContext]
        text = exporter(getter(callingWindow))
        if text:
            toClipboard(text)
def test_manual_response_limits():
    """Within each output row pair, y-limits must be shared across input
    columns, while distinct outputs may use different limits."""
    result = manual_response.plot()
    axes = ct.get_plot_axes(result)
    for out_idx in range(manual_response.noutputs):
        row = out_idx * 2
        for in_idx in range(1, manual_response.ninputs):
            assert axes[row, 0].get_ylim() == axes[row, in_idx].get_ylim()
            assert axes[row + 1, 0].get_ylim() == axes[row + 1, in_idx].get_ylim()
    assert axes[0, 0].get_ylim() != axes[2, 0].get_ylim()
    assert axes[1, 0].get_ylim() != axes[3, 0].get_ylim()
def LoadObjectInfos(file):
    """Parse an object-info text file into a list of ObjectInfo records.

    File format: each record starts with a 'name : uuid' line at column 0,
    followed by indented 'key: value' attribute lines (4 spaces); a key
    ending in ':' opens a nested struct whose members are indented deeper
    (8 spaces). '#' begins a comment unless it sits inside a quoted string.

    Returns:
        list of ObjectInfo(name, uuid, attrs); nested struct dicts are
        flattened into newline-joined 'key: value' text.
    """
    with open(file) as f:
        contents = f.read().rstrip().splitlines()
    data = []
    attrs = {}
    struct = {}
    (name, uuid) = contents.pop(0).split(' : ')
    for line in contents:
        line = _strip_comment(line).rstrip()
        if line == '':
            continue
        if line.startswith('        '):
            # Struct member (8-space indent).
            (key, value) = line[8:].split(': ')
            struct[key] = value
        elif line.startswith('    '):
            # Attribute (4-space indent); a trailing ':' opens a struct.
            if line.endswith(':'):
                key = line[4:(- 1)]
                struct = {}
                attrs[key] = struct
            else:
                (key, value) = line[4:].split(': ')
                attrs[key] = value
        else:
            # New record header: flush the record collected so far.
            data.append(ObjectInfo(name, uuid, _flatten_structs(attrs)))
            (name, uuid) = line.split(' : ')
            attrs = {}
    data.append(ObjectInfo(name, uuid, _flatten_structs(attrs)))
    return data


def _strip_comment(line):
    """Cut *line* at the first '#' that is outside a quoted string."""
    if '#' not in line:
        return line
    quotes = 0
    for (i, char) in enumerate(line):
        # Bug fix: the original tested line[char - 1] -- a str-minus-int
        # TypeError; the preceding character is line[i - 1]. Guard i == 0
        # so the check never wraps around to the last character.
        if char == '"' and (i == 0 or line[i - 1] != '\\'):
            quotes += 1
        if char == '#' and (quotes % 2) == 0:
            return line[:i]
    return line


def _flatten_structs(attrs):
    """Replace nested struct dicts with newline-joined 'key: value' text."""
    for key in attrs:
        if isinstance(attrs[key], dict):
            attrs[key] = '\n'.join((': '.join(item) for item in attrs[key].items()))
    return attrs
class ManagedCollisionModule(nn.Module):
    """Interface for managed-collision id remapping modules.

    NOTE(review): the method bodies are `pass` placeholders (returning
    None); concrete subclasses are expected to override them. `device`
    reads self._device and may originally have been a property -- confirm
    against the original source.
    """
    def __init__(self, device: torch.device) -> None:
        super().__init__()
        # Device this module's state is expected to live on.
        self._device = device
    def preprocess(self, features: Dict[(str, JaggedTensor)]) -> Dict[(str, JaggedTensor)]:
        """Transform input features before lookup; override in subclasses."""
        pass
    def device(self) -> torch.device:
        """Return the device supplied at construction time."""
        return self._device
    def evict(self) -> Optional[torch.Tensor]:
        """Return ids to evict, or None; override in subclasses."""
        pass
    def forward(self, features: Dict[(str, JaggedTensor)]) -> Dict[(str, JaggedTensor)]:
        """Remap feature ids; override in subclasses."""
        pass
    def output_size(self) -> int:
        """Size of the remapped output id space; override in subclasses."""
        pass
    def input_size(self) -> int:
        """Size of the input id space; override in subclasses."""
        pass
    def rebuild_with_output_id_range(self, output_id_range: Tuple[(int, int)], device: Optional[torch.device]=None) -> 'ManagedCollisionModule':
        """Return a copy covering the given output id range; override in
        subclasses."""
        pass
class ElasticConditionParser(BaseConditionParser):
    """Condition parser emitting Elasticsearch query DSL fragments."""

    def build_condition(self, and_subfilters: Optional[List[Any]], or_subfilters: Optional[List[Any]]) -> Optional[Any]:
        """Combine subfilters into a bool query (AND -> must, OR -> should)."""
        return {'bool': {'must': and_subfilters, 'should': or_subfilters}}

    def build_exact_match_filter(self, field_name: str, value: FieldValue) -> Any:
        """Exact-value comparison as a match query."""
        return {'match': {field_name: value}}

    def build_range_filter(self, field_name: str, lt: Optional[FieldValue], gt: Optional[FieldValue], lte: Optional[FieldValue], gte: Optional[FieldValue]) -> Any:
        """Interval comparison as a range query (unused bounds stay None)."""
        bounds = {'lt': lt, 'gt': gt, 'lte': lte, 'gte': gte}
        return {'range': {field_name: bounds}}

    def build_geo_filter(self, field_name: str, lat: float, lon: float, radius: float) -> Any:
        """Distance-from-point comparison as a geo_distance query; the
        radius is expressed in meters."""
        point = {'lat': lat, 'lon': lon}
        return {'geo_distance': {'distance': f'{radius}m', field_name: point}}
class AbstractBasicLexer(Lexer):
    """Interface for context-independent ('basic') lexers.

    Concrete implementations provide next_token(); lex() drives it until
    the end-of-input signal (EOFError) is raised.
    """
    # Terminal definitions, indexed by terminal name.
    terminals_by_name: Dict[(str, TerminalDef)]
    def __init__(self, conf: 'LexerConf', comparator=None) -> None:
        ...
    def next_token(self, lex_state: LexerState, parser_state: Any=None) -> Token:
        ...
    def lex(self, state: LexerState, parser_state: Any) -> Iterator[Token]:
        """Yield tokens until next_token() signals end of input."""
        with suppress(EOFError):
            while True:
                (yield self.next_token(state, parser_state))
def mod_import(module):
    """Resolve *module* into an imported module object.

    Accepts an already-imported module (returned unchanged), a filesystem
    path to an existing .py file, or a dotted module path. Returns None
    for falsy input or when the dotted import fails.
    """
    if not module:
        return None
    if isinstance(module, types.ModuleType):
        # Already a module object -- nothing to resolve.
        return module
    if module.endswith('.py') and os.path.exists(module):
        # A path to an existing source file is loaded straight from disk.
        return mod_import_from_path(module)
    try:
        return importlib.import_module(module)
    except ImportError:
        return None
class TerminalIniter(IniterBase):
    """Interactive `init` flow: prompts the user on the terminal and writes
    pyproject.toml (and optionally a LICENSE) into the target directory."""
    def prompt_text(self, prompt, default, validator, retry_msg='Try again.'):
        """Ask for a free-text value until `validator` accepts it.

        An empty response selects `default` when one was supplied.
        """
        if (default is not None):
            p = '{} [{}]: '.format(prompt, default)
        else:
            p = (prompt + ': ')
        while True:
            response = input(p)
            if ((response == '') and (default is not None)):
                response = default
            if validator(response):
                return response
            print(retry_msg)
    def prompt_options(self, prompt, options, default=None):
        """Ask the user to pick one of `options` ((key, text) pairs) by
        number and return the chosen key.

        An empty response selects `default` when it matches an option key.
        """
        default_ix = None
        print(prompt)
        for (i, (key, text)) in enumerate(options, start=1):
            print('{}. {}'.format(i, text))
            if (key == default):
                default_ix = i
        while True:
            p = ('Enter 1-' + str(len(options)))
            if (default_ix is not None):
                p += ' [{}]'.format(default_ix)
            response = input((p + ': '))
            if ((default_ix is not None) and (response == '')):
                return default
            if response.isnumeric():
                ir = int(response)
                if (1 <= ir <= len(options)):
                    return options[(ir - 1)][0]
            print('Try again.')
    def initialise(self):
        """Collect project info interactively and write pyproject.toml.

        Refuses to overwrite an existing pyproject.toml unless confirmed.
        """
        if (self.directory / 'pyproject.toml').exists():
            resp = input('pyproject.toml exists - overwrite it? [y/N]: ')
            if ((not resp) or (resp[0].lower() != 'y')):
                return
        module = self.prompt_text('Module name', self.guess_module_name(), str.isidentifier)
        author = self.prompt_text('Author', self.defaults.get('author'), (lambda s: True))
        author_email = self.prompt_text('Author email', self.defaults.get('author_email'), self.validate_email)
        if ('home_page_template' in self.defaults):
            home_page_default = self.defaults['home_page_template'].replace('{modulename}', module)
        else:
            home_page_default = None
        # NOTE(review): this retry message reads 'Should start with or' --
        # the URL schemes it once named appear to have been lost in
        # transcription; confirm against the original source.
        home_page = self.prompt_text('Home page', home_page_default, self.validate_homepage, retry_msg='Should start with or - try again.')
        license = self.prompt_options('Choose a license (see for more info)', license_choices, self.defaults.get('license'))
        readme = self.find_readme()
        # Remember the answers as defaults for the next run.
        self.update_defaults(author=author, author_email=author_email, home_page=home_page, module=module, license=license)
        author_info = []
        if author:
            author_info.append(f'name = {json.dumps(author, ensure_ascii=False)}')
        if author_email:
            author_info.append(f'email = {json.dumps(author_email)}')
        if author_info:
            authors_list = ('[{%s}]' % ', '.join(author_info))
        else:
            authors_list = '[]'
        classifiers = []
        if (license != 'skip'):
            classifiers = [license_names_to_classifiers[license]]
            self.write_license(license, author)
        with (self.directory / 'pyproject.toml').open('w', encoding='utf-8') as f:
            f.write(TEMPLATE.format(name=json.dumps(module), authors=authors_list))
            if readme:
                f.write(tomli_w.dumps({'readme': readme}))
            if (license != 'skip'):
                f.write('license = {file = "LICENSE"}\n')
            if classifiers:
                f.write(f'''classifiers = {json.dumps(classifiers)}
''')
            f.write('dynamic = ["version", "description"]\n')
            if home_page:
                f.write(('\n' + tomli_w.dumps({'project': {'urls': {'Home': home_page}}})))
        print()
        print('Written pyproject.toml; edit that file to add optional extra info.')
def get_filter_args_for_specific_event_from_channel(token_network_address: TokenNetworkAddress, channel_identifier: ChannelID, event_name: str, contract_manager: ContractManager, from_block: BlockIdentifier=GENESIS_BLOCK_NUMBER, to_block: BlockIdentifier=BLOCK_ID_LATEST) -> FilterParams:
    """Build eth log-filter parameters for one TokenNetwork event on one channel.

    Restricts the filter to the given channel via the ``channel_identifier``
    topic and to the ``[from_block, to_block]`` range.
    """
    event_abi = contract_manager.get_event_abi(CONTRACT_TOKEN_NETWORK, event_name)
    channel_argument_filter = {'channel_identifier': channel_identifier}
    checksummed_address = to_checksum_address(token_network_address)
    _, event_filter_params = construct_event_filter_params(
        event_abi=event_abi,
        abi_codec=ABI_CODEC,
        contract_address=checksummed_address,
        argument_filters=channel_argument_filter,
        fromBlock=from_block,
        toBlock=to_block,
    )
    return event_filter_params
def edit_rest(file_, key):
    """Let the user edit one text field of a JSON metadata file in place.

    *key* selects the ``'description'`` field when it starts with ``'d'``,
    otherwise ``'summary'``. A leading URL_REPO prefix on *file_* is stripped
    so the path becomes repository-relative before opening.
    """
    field = 'description' if key.strip().startswith('d') else 'summary'
    path = file_.replace(URL_REPO, '') if file_.startswith(URL_REPO) else file_
    with open(path) as fp:
        data = json.load(fp)
    data[field] = get_edited_text(data[field])
    with open(path, 'w') as fp:
        json.dump(data, fp, **JSON_FORMAT_KWARGS)
        fp.write('\n')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.