def test_ansible_get_hosts():
with tempfile.NamedTemporaryFile() as f:
f.write(b'ungrp\n[g1]\ndebian\n[g2]\nrockylinux\n[g3:children]\ng1\ng2\n[g4:children]\ng3')
f.flush()
def get_hosts(spec):
return AnsibleRunner(f.name).get_hosts(spec)
assert (get_hosts('all') == ['debian', 'rockylinux', 'ungrp'])
assert (get_hosts('*') == ['debian', 'rockylinux', 'ungrp'])
assert (get_hosts('g1') == ['debian'])
assert (get_hosts('*2') == ['rockylinux'])
assert (get_hosts('*ia*') == ['debian'])
assert (get_hosts('*3') == ['debian', 'rockylinux'])
assert (get_hosts('*4') == ['debian', 'rockylinux'])
assert (get_hosts('ungrouped') == ['ungrp'])
assert (get_hosts('un*') == ['ungrp'])
        assert (get_hosts('nope') == [])
def accuracy(output, target, topk=(1,)):
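    """Compute top-k accuracy: for each k in `topk`, the percentage of
    samples whose target label appears among the k highest-scoring
    predictions."""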
maxk = max(topk)
batch_size = target.size(0)
(_, pred) = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, (- 1)).expand_as(pred))
res = []
for k in topk:
        correct_k = correct[:k].reshape((- 1)).float().sum(0, keepdim=True)
res.append(correct_k.mul_((100.0 / batch_size)))
    return res
def main():
logs_dir = Path('logs/')
print('Main table:')
experiments = [Experiment('0011', 1, 'OR', 'OR'), Experiment('0012', 2, 'OR', 'FTR'), Experiment('0010', 3, 'LM', 'OR'), Experiment('0001', 4, 'LM', 'FTR')]
metrics_path = f'{logs_dir}/{{experiment_id}}/metrics-agg-test-{{subsplit}}.csv'
create_table(experiments, metrics_path, ['seen', 'unseen'], full_table_line_tmpl)
print('Num explore steps:')
experiments = [Experiment('0000', 1, '', 'N=0'), Experiment('0001', 2, '', 'N=16'), Experiment('0002', 3, '', 'N=32'), Experiment('0003', 4, '', 'N=64'), Experiment('0004', 5, '', 'N=128'), Experiment('0005', 6, '', 'N=256'), Experiment('0006', 7, '', 'N=512')]
metrics_path = f'{logs_dir}/{{experiment_id}}/metrics-agg-val-{{subsplit}}.csv'
create_table(experiments, metrics_path, ['all'], simple_table_line_tmpl)
print('Rearrangement orders:')
experiments = [Experiment('0001', 1, '', 'DIS'), Experiment('0007', 2, '', 'SCG'), Experiment('0008', 3, '', 'A-O'), Experiment('0009', 4, '', 'O-R')]
metrics_path = f'{logs_dir}/{{experiment_id}}/metrics-agg-val-{{subsplit}}.csv'
create_table(experiments, metrics_path, ['all'], rearrange_table_line_tmpl)
print('Exploration algos:')
experiments = [Experiment('0013', 1, '', 'RND'), Experiment('0014', 2, '', 'FWR'), Experiment('0005', 3, '', 'FRT')]
metrics_path = f'{logs_dir}/{{experiment_id}}/metrics-agg-val-{{subsplit}}.csv'
    create_table(experiments, metrics_path, ['all'], simple_table_line_tmpl)
def introduce_splits(word, boundary_list):
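    """Split `word` at the positions where `boundary_list` holds a 1.
    Assumes `boundary_list` is indexable up to len(word) (one entry per
    character boundary); a leading apostrophe on a segment is dropped."""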
result = []
start = 0
end = 0
while (end < len(word)):
end += 1
if (boundary_list[end] == 1):
if (word[start] == "'"):
result += [word[(start + 1):end]]
else:
result += [word[start:end]]
start = end
    return result
def test_one_page(browser: Chrome):
browser.find_element_by_css_selector('[name=input]').send_keys('1')
time.sleep(0.5)
browser.find_element_by_css_selector('[name=textarea]').send_keys('2')
time.sleep(0.5)
Select(browser.find_element_by_css_selector('[name=select]')).select_by_visible_text('B')
time.sleep(0.5)
Select(browser.find_element_by_css_selector('[name=select_multiple]')).select_by_visible_text('A')
time.sleep(0.5)
browser.find_element_by_css_selector('[name=checkbox]').click()
time.sleep(0.5)
browser.find_element_by_css_selector('[name=checkbox_inline]').click()
time.sleep(0.5)
browser.find_element_by_css_selector('[name=radio]').click()
time.sleep(0.5)
browser.find_element_by_css_selector('[name=radio_inline]').click()
time.sleep(0.5)
browser.find_element_by_css_selector('button[type="submit"]').click()
time.sleep(0.5)
codeMirror = browser.find_element_by_css_selector('.CodeMirror pre')
action_chains = ActionChains(browser)
    action_chains.move_to_element(codeMirror).click(codeMirror).send_keys('3').perform()
def simple_typed_attrs(defaults=None, for_frozen=False, allow_mutable_defaults=True, kw_only=None, newtypes=True) -> SearchStrategy[Tuple[(_CountingAttr, SearchStrategy[PosArgs])]]:
if (not is_39_or_later):
res = ((((((any_typed_attrs(defaults, kw_only) | int_typed_attrs(defaults, kw_only)) | str_typed_attrs(defaults, kw_only)) | float_typed_attrs(defaults, kw_only)) | frozenset_typed_attrs(defaults, legacy_types_only=True, kw_only=kw_only)) | homo_tuple_typed_attrs(defaults, legacy_types_only=True, kw_only=kw_only)) | path_typed_attrs(defaults, kw_only=kw_only))
if newtypes:
res = ((res | newtype_int_typed_attrs(defaults, kw_only)) | newtype_attrs_typed_attrs(defaults, kw_only))
if (not for_frozen):
res = (((((res | dict_typed_attrs(defaults, allow_mutable_defaults, kw_only)) | mutable_seq_typed_attrs(defaults, allow_mutable_defaults, legacy_types_only=True, kw_only=kw_only)) | seq_typed_attrs(defaults, allow_mutable_defaults, legacy_types_only=True, kw_only=kw_only)) | list_typed_attrs(defaults, allow_mutable_defaults, legacy_types_only=True, kw_only=kw_only)) | set_typed_attrs(defaults, allow_mutable_defaults, legacy_types_only=True, kw_only=kw_only))
else:
res = ((((((any_typed_attrs(defaults, kw_only) | int_typed_attrs(defaults, kw_only)) | str_typed_attrs(defaults, kw_only)) | float_typed_attrs(defaults, kw_only)) | frozenset_typed_attrs(defaults, kw_only=kw_only)) | homo_tuple_typed_attrs(defaults, kw_only=kw_only)) | path_typed_attrs(defaults, kw_only=kw_only))
if newtypes:
res = ((res | newtype_int_typed_attrs(defaults, kw_only)) | newtype_attrs_typed_attrs(defaults, kw_only))
if (not for_frozen):
res = ((((((res | dict_typed_attrs(defaults, allow_mutable_defaults, kw_only)) | new_dict_typed_attrs(defaults, allow_mutable_defaults, kw_only)) | set_typed_attrs(defaults, allow_mutable_defaults, kw_only=kw_only)) | list_typed_attrs(defaults, allow_mutable_defaults, kw_only=kw_only)) | mutable_seq_typed_attrs(defaults, allow_mutable_defaults, kw_only=kw_only)) | seq_typed_attrs(defaults, allow_mutable_defaults, kw_only=kw_only))
    return res
def test_worker_finish():
procedure = RandomProcedure()
procedure.iterations = 100
procedure.delay = 0.001
file = tempfile.mktemp()
results = Results(procedure, file)
worker = Worker(results)
worker.start()
worker.join(timeout=20.0)
assert (not worker.is_alive())
new_results = Results.load(file, procedure_class=RandomProcedure)
    assert (new_results.data.shape == (100, 2))
def main():
if (not os.path.exists('simple.xgb')):
        raise ValueError('Model file not found: `simple.xgb`\nFIX THIS by running `python simple.py` first to train the model.')
(data, labels) = datasets.load_breast_cancer(return_X_y=True)
dmat_xgb = xgb.DMatrix(data, labels)
dmat_ray = RayDMatrix(data, labels)
bst = xgb.Booster(model_file='simple.xgb')
pred_xgb = bst.predict(dmat_xgb)
pred_ray = predict(bst, dmat_ray, ray_params=RayParams(num_actors=2))
np.testing.assert_array_equal(pred_xgb, pred_ray)
    print(pred_ray)
@patch('PyQt6.QtWidgets.QFileDialog.getSaveFileName')
@patch('beeref.fileio.sql.SQLiteIO.write_data')
def test_on_action_save_as_when_error(save_mock, dialog_mock, view, qtbot, imgfilename3x3, tmpdir):
item = BeePixmapItem(QtGui.QImage(imgfilename3x3))
view.scene.addItem(item)
view.on_saving_finished = MagicMock()
view.scene.cancel_crop_mode = MagicMock()
filename = os.path.join(tmpdir, 'test.bee')
dialog_mock.return_value = (filename, None)
save_mock.side_effect = sqlite3.Error('foo')
view.on_action_save_as()
qtbot.waitUntil((lambda : (view.on_saving_finished.called is True)))
view.on_saving_finished.assert_called_once_with(filename, ['foo'])
    view.scene.cancel_crop_mode.assert_called_once_with()
def _register(cls):
clsid_path = ('Software\\Classes\\CLSID\\' + cls._reg_clsid_)
progid_path = ('Software\\Classes\\' + cls._reg_progid_)
spec = ((cls.__module__ + '.') + cls.__name__)
win32api.RegSetValue(win32con.HKEY_CURRENT_USER, clsid_path, win32con.REG_SZ, cls._reg_desc_)
win32api.RegSetValue(win32con.HKEY_CURRENT_USER, (clsid_path + '\\ProgID'), win32con.REG_SZ, cls._reg_progid_)
win32api.RegSetValue(win32con.HKEY_CURRENT_USER, (clsid_path + '\\PythonCOM'), win32con.REG_SZ, spec)
hkey = win32api.RegCreateKey(win32con.HKEY_CURRENT_USER, (clsid_path + '\\InprocServer32'))
win32api.RegSetValueEx(hkey, None, None, win32con.REG_SZ, pythoncom.__file__)
win32api.RegSetValueEx(hkey, 'ThreadingModel', None, win32con.REG_SZ, 'Both')
win32api.RegSetValue(win32con.HKEY_CURRENT_USER, progid_path, win32con.REG_SZ, cls._reg_desc_)
    win32api.RegSetValue(win32con.HKEY_CURRENT_USER, (progid_path + '\\CLSID'), win32con.REG_SZ, cls._reg_clsid_)
class BaseTagField(forms.CharField):
widget = TagWidget
def __init__(self, tag_options=None, autocomplete_tags=None, **kwargs):
super(BaseTagField, self).__init__(**kwargs)
self.tag_options = (tag_options or options.TagOptions())
self.autocomplete_tags = autocomplete_tags
def prepare_value(self, value):
if (not value):
tag_string = ''
elif isinstance(value, str):
tag_string = value
elif (isinstance(value, TagModelQuerySet) or (isinstance(value, (list, tuple)) and all((isinstance(part, (str, BaseTagModel)) for part in value)))):
tag_string = render_tags(value)
else:
if (len(value) != 1):
raise ValueError(_('Tag field could not prepare unexpected value'))
tag_string = value[0]
return super(BaseTagField, self).prepare_value(tag_string)
def _get_tag_options(self):
return self._tag_options
def _prepare_tag_options(self, tag_options):
return (tag_options + options.TagOptions())
def _set_tag_options(self, tag_options):
tag_options = self._prepare_tag_options(tag_options)
self._tag_options = tag_options
self.widget.tag_options = tag_options
tag_options = property(_get_tag_options, _set_tag_options)
def _get_autocomplete_tags(self):
return self._autocomplete_tags
def _set_autocomplete_tags(self, autocomplete_tags):
self._autocomplete_tags = autocomplete_tags
self.widget.autocomplete_tags = autocomplete_tags
autocomplete_tags = property(_get_autocomplete_tags, _set_autocomplete_tags)
def widget_attrs(self, widget):
return {}
def to_python(self, value):
if isinstance(value, str):
value = value.strip()
return super(BaseTagField, self).to_python(value)
def clean(self, value, single=False):
value = super(BaseTagField, self).clean(value)
if self.tag_options.force_lowercase:
value = value.lower()
if single:
return value
try:
return parse_tags(value, self.tag_options.max_count, self.tag_options.space_delimiter)
except ValueError as e:
            raise forms.ValidationError(_(('%s' % e)))
def _make_attr_tuple_class(cls_name, attr_names):
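    """Dynamically build a tuple subclass whose items are also readable as
    named properties (one per attribute name), in the style of attrs'
    internal attribute-tuple helper."""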
attr_class_name = f'{cls_name}Attributes'
attr_class_template = [f'class {attr_class_name}(tuple):', ' __slots__ = ()']
if attr_names:
for (i, attr_name) in enumerate(attr_names):
attr_class_template.append(f' {attr_name} = _attrs_property(_attrs_itemgetter({i}))')
else:
attr_class_template.append(' pass')
globs = {'_attrs_itemgetter': itemgetter, '_attrs_property': property}
_compile_and_eval('\n'.join(attr_class_template), globs)
    return globs[attr_class_name]
def forwards(apps, schema_editor):
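    # Data-migration sketch: re-pickle every stored config value with pickle
    # protocol 4 and store it base64-encoded (assumes loads/dumps/to_bytes/
    # b64encode are imported at module level).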
ServerConfig = apps.get_model('server', 'ServerConfig')
for conf in ServerConfig.objects.all():
value = loads(to_bytes(conf.db_value), encoding='bytes')
conf.db_value = b64encode(dumps(deepcopy(value), protocol=4)).decode()
        conf.save(update_fields=['db_value'])
class SMOSL2WINDFileHandler(NetCDF4FileHandler):
    @property
    def start_time(self):
return datetime.strptime(self['/attr/time_coverage_start'], '%Y-%m-%dT%H:%M:%S Z')
    @property
    def end_time(self):
return datetime.strptime(self['/attr/time_coverage_end'], '%Y-%m-%dT%H:%M:%S Z')
    @property
    def platform_shortname(self):
return self.filename_info['platform_shortname']
    @property
    def platform_name(self):
return self['/attr/platform']
def get_metadata(self, data, ds_info):
metadata = {}
metadata.update(data.attrs)
metadata.update(ds_info)
metadata.update({'platform_shortname': self.platform_shortname, 'platform_name': self.platform_name, 'sensor': self['/attr/instrument'], 'start_time': self.start_time, 'end_time': self.end_time, 'level': self['/attr/processing_level']})
return metadata
def available_datasets(self, configured_datasets=None):
handled_variables = set()
for (var_name, val) in self.file_content.items():
if (not isinstance(val, netCDF4.Variable)):
continue
if (var_name in handled_variables):
logger.debug('Already handled, skipping: %s', var_name)
continue
handled_variables.add(var_name)
new_info = {'name': var_name, 'file_type': self.filetype_info['file_type']}
(yield (True, new_info))
def _mask_dataset(self, data):
try:
fill = data.attrs['_FillValue']
data.attrs['_FillValue'] = np.nan
return data.where((data != fill))
except KeyError:
return data
def _adjust_lon_coord(self, data):
data = data.assign_coords(lon=(((data.lon + 180) % 360) - 180))
return data.where((data < 180.0), (data - 360.0))
def _rename_coords(self, data):
rename_dict = {}
if ('lon' in data.dims):
data = self._adjust_lon_coord(data)
rename_dict['lon'] = 'x'
if ('lat' in data.dims):
rename_dict['lat'] = 'y'
return data.rename(rename_dict)
def _remove_time_coordinate(self, data):
data = data.squeeze()
if ('time' in data.coords):
data = data.drop_vars('time')
return data
def _roll_dataset_lon_coord(self, data):
if ('lon' in data.dims):
data = data.roll(lon=720, roll_coords=True)
return data
def get_dataset(self, ds_id, ds_info):
data = self[ds_id['name']]
data.attrs = self.get_metadata(data, ds_info)
data = self._remove_time_coordinate(data)
data = self._roll_dataset_lon_coord(data)
data = self._rename_coords(data)
data = self._mask_dataset(data)
if ((len(data.dims) >= 2) and all([(dim in data.dims) for dim in ['x', 'y']])):
data = data.where(((data.y > (- 90.0)) & (data.y < 90.0)), drop=True)
elif ((len(data.dims) == 1) and ('y' in data.dims)):
data = data.where(((data.y > 0) & (data.y < (len(data.y) - 1))), drop=True)
return data
def _create_area_extent(self, width, height):
_lon = self._adjust_lon_coord(self['lon'])
_lon = self._roll_dataset_lon_coord(_lon)
latlon = np.meshgrid(_lon, self['lat'][1:(self['lat/shape'][0] - 1)])
lower_left_x = (latlon[0][(height - 1)][0] - 0.125)
lower_left_y = (latlon[1][(height - 1)][0] + 0.125)
upper_right_y = (latlon[1][1][(width - 1)] - 0.125)
upper_right_x = (latlon[0][1][(width - 1)] + 0.125)
return (lower_left_x, lower_left_y, upper_right_x, upper_right_y)
def get_area_def(self, dsid):
width = self['lon/shape'][0]
height = (self['lat/shape'][0] - 2)
area_extent = self._create_area_extent(width, height)
description = 'SMOS L2 Wind Equirectangular Projection'
area_id = 'smos_eqc'
proj_id = 'equirectangular'
proj_str = self['/attr/geospatial_bounds_vertical_crs']
area_def = AreaDefinition(area_id, description, proj_id, proj_str, width, height, area_extent)
        return area_def
@pytest.mark.usefixtures('repo_with_git_flow_angular_commits')
def test_default_config_is_used_when_none_in_toml_config_file(cli_runner, toml_file_with_no_configuration_for_psr):
result = cli_runner.invoke(main, ['--noop', '--config', str(toml_file_with_no_configuration_for_psr), 'version'])
    assert (result.exit_code == 0)
def main():
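    # KL-annealed VAE training sketch: kld_weight grows linearly from 0.01
    # toward kld_max once training passes kld_start_inc iterations, a common
    # trick to avoid posterior collapse.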
kld_start_inc = 3000
kld_weight = 0.01
kld_max = 0.15
kld_inc = ((kld_max - kld_weight) / (n_iter - kld_start_inc))
trainer = optim.Adam(model.vae_params, lr=lr)
for it in range(n_iter):
(inputs, labels) = dataset.next_batch(args.gpu)
(recon_loss, kl_loss) = model.forward(inputs)
loss = (recon_loss + (kld_weight * kl_loss))
if ((it > kld_start_inc) and (kld_weight < kld_max)):
kld_weight += kld_inc
loss.backward()
grad_norm = torch.nn.utils.clip_grad_norm(model.vae_params, 5)
trainer.step()
trainer.zero_grad()
if ((it % log_interval) == 0):
z = model.sample_z_prior(1)
c = model.sample_c_prior(1)
sample_idxs = model.sample_sentence(z, c)
sample_sent = dataset.idxs2sentence(sample_idxs)
print('Iter-{}; Loss: {:.4f}; Recon: {:.4f}; KL: {:.4f}; Grad_norm: {:.4f};'.format(it, loss.data[0], recon_loss.data[0], kl_loss.data[0], grad_norm))
print('Sample: "{}"'.format(sample_sent))
print()
new_lr = (lr * (0.5 ** (it // lr_decay_every)))
for param_group in trainer.param_groups:
            param_group['lr'] = new_lr
@with_fixtures(WebFixture, RemoteMethodFixture, ResultScenarios)
def test_different_kinds_of_result(web_fixture, remote_method_fixture, result_scenarios):
fixture = result_scenarios
def callable_object():
return fixture.value_to_return
remote_method = RemoteMethod(web_fixture.view, 'amethod', callable_object, default_result=fixture.method_result, disable_csrf_check=True)
wsgi_app = remote_method_fixture.new_wsgi_app(remote_method=remote_method)
browser = Browser(wsgi_app)
browser.post('/_amethod_method', {})
assert fixture.results_match(fixture.expected_response, browser.raw_html)
assert (browser.last_response.charset == fixture.expected_charset)
    assert (browser.last_response.content_type == fixture.expected_content_type)
@add_start_docstrings('The bare SegFormer encoder (Mix-Transformer) outputting raw hidden-states without any specific head on top.', SEGFORMER_START_DOCSTRING)
class TFSegformerModel(TFSegformerPreTrainedModel):
def __init__(self, config: SegformerConfig, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.config = config
self.segformer = TFSegformerMainLayer(config, name='segformer')
    @unpack_inputs
    @add_start_docstrings_to_model_forward(SEGFORMER_INPUTS_DOCSTRING.format('(batch_size, sequence_length)'))
    @add_code_sample_docstrings(processor_class=_FEAT_EXTRACTOR_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutput, config_class=_CONFIG_FOR_DOC, modality='vision', expected_output=_EXPECTED_OUTPUT_SHAPE)
def call(self, pixel_values: tf.Tensor, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> Union[(Tuple, TFBaseModelOutput)]:
outputs = self.segformer(pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
return outputs
def serving_output(self, output: TFBaseModelOutput) -> TFBaseModelOutput:
        return TFBaseModelOutput(last_hidden_state=output.last_hidden_state, hidden_states=output.hidden_states, attentions=output.attentions)
def lru_cache(maxsize=255, timeout=None):
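    """Decorator factory: an LRU cache keyed per caller, so each instance of
    a class gets its own cache when decorating methods, with an optional
    timeout after which a caller's cache is invalidated."""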
class _LRU_Cache_class(object):
def __init__(self, input_func, max_size, timeout):
self._input_func = input_func
self._max_size = max_size
self._timeout = timeout
self._caches_dict = {}
def cache_clear(self, caller=None):
if (caller in self._caches_dict):
del self._caches_dict[caller]
self._caches_dict[caller] = [OrderedDict(), time.time()]
def __get__(self, obj, obj_type):
return_func = functools.partial(self._cache_wrapper, obj)
return_func.cache_clear = functools.partial(self.cache_clear, obj)
return functools.wraps(self._input_func)(return_func)
def __call__(self, *args, **kwargs):
return self._cache_wrapper(None, *args, **kwargs)
__call__.cache_clear = cache_clear
def _cache_wrapper(self, caller, *args, **kwargs):
kwargs_key = ''.join(map((lambda x: ((str(x) + str(type(kwargs[x]))) + str(kwargs[x]))), sorted(kwargs)))
key = (''.join(map((lambda x: (str(type(x)) + str(x))), args)) + kwargs_key)
if (caller not in self._caches_dict):
self._caches_dict[caller] = [OrderedDict(), time.time()]
elif (self._timeout is not None):
if ((time.time() - self._caches_dict[caller][1]) > self._timeout):
self.cache_clear(caller)
cur_caller_cache_dict = self._caches_dict[caller][0]
if (key in cur_caller_cache_dict):
return cur_caller_cache_dict[key]
if (len(cur_caller_cache_dict) >= self._max_size):
cur_caller_cache_dict.popitem(False)
cur_caller_cache_dict[key] = (self._input_func(caller, *args, **kwargs) if (caller is not None) else self._input_func(*args, **kwargs))
return cur_caller_cache_dict[key]
    return (lambda input_func: functools.wraps(input_func)(_LRU_Cache_class(input_func, maxsize, timeout)))
def talker():
ser = serial.Serial('/dev/serial/by-path/pci-0000:00:14.0-usb-0:3:1.0-port0', 115200, timeout=0)
time_now = datetime.now()
time_string = time_now.strftime('%Y-%m-%d-%H-%M-%S')
gps_filename = (time_string + '_gps.bin')
timestamp_filename = (time_string + '_ros_time.txt')
    gps_file = open(gps_filename, 'wb')  # serial data is bytes; write in binary mode
timestamp_file = open(timestamp_filename, 'w')
rospy.init_node('gps_log_node', anonymous=False)
rate = rospy.Rate(50)
time_stamp_delta_time = 0.1
prev_time = (rospy.Time.now().to_sec() - time_stamp_delta_time)
rospy.loginfo('GPS Logger Running...')
while (not rospy.is_shutdown()):
data = ser.read(1000)
if (len(data) > 0):
ros_time = rospy.Time.now().to_sec()
if ((ros_time - prev_time) >= time_stamp_delta_time):
ros_time_string = (str(ros_time) + '\n')
timestamp_file.write(ros_time_string)
prev_time = ros_time
gps_file.write(data)
rate.sleep()
gps_file.close()
timestamp_file.close()
    ser.close()
def is_list_of_tuples(value: t.Any) -> t.Tuple[(bool, t.Optional[t.Sequence[t.Tuple[(str, ScalarVariableValue)]]])]:
if ((not value) or (not isinstance(value, (list, tuple))) or (not all(((isinstance(t, tuple) and (len(t) == 2)) for t in value)))):
return (False, None)
    return (True, value)
def make_loss(cfg):
sampler = cfg.DATALOADER.SAMPLER
triplet = TripletLoss(cfg.SOLVER.MARGIN)
cross_entropy = CrossEntropyLoss(num_classes=cfg.SOLVER.CLASSNUM, epsilon=cfg.SOLVER.SMOOTH)
if (sampler == 'softmax'):
def loss_func(score, feat, target):
return F.cross_entropy(score, target)
elif (cfg.DATALOADER.SAMPLER == 'triplet'):
def loss_func(score, feat, target):
return triplet(feat, target)[0]
elif (cfg.DATALOADER.SAMPLER == 'softmax_triplet'):
if cfg.MODEL.CAL:
def loss_func(score, score_hat, feat, target):
loss_id = ((cross_entropy(score, target) + triplet(feat, target)[0]) + cross_entropy(score_hat, target))
return loss_id
else:
def loss_func(score, feat, target):
loss_id = (cross_entropy(score, target) + triplet(feat, target)[0])
return loss_id
    else:
        raise ValueError('expected sampler should be softmax, triplet or softmax_triplet, but got {}'.format(cfg.DATALOADER.SAMPLER))
    return loss_func
def run_episode(player, args, total_reward, model_options, training, shared_model=None):
num_steps = args.num_steps
update_test_type = {'meta_learning': args.update_test_meta_learning}
update_test = (True in update_test_type.values())
for _ in range(num_steps):
player.action(model_options, training, update_test)
if update_test_type['meta_learning']:
current_state = str(player.episode.environment.controller.state)
if (current_state in player.episode.states):
target_action_prob = player.episode.meta_predictions[(- 1)]
update_test_model(args, player, target_action_prob, 1)
total_reward = (total_reward + player.reward)
if player.done:
break
    return total_reward
class ActionGroupParameterItem(GroupParameterItem):
def __init__(self, param, depth):
self.itemWidget = QtWidgets.QWidget()
self.button = ParameterControlledButton(parent=self.itemWidget)
self.button.clicked.connect(param.activate)
self.itemWidget.setLayout((layout := QtWidgets.QHBoxLayout()))
layout.setContentsMargins(0, 0, 0, 0)
layout.addWidget(self.button)
super().__init__(param, depth)
def treeWidgetChanged(self):
ParameterItem.treeWidgetChanged(self)
tw = self.treeWidget()
if (tw is None):
return
tw.setItemWidget(self, 1, self.itemWidget)
def optsChanged(self, param, opts):
if ('button' in opts):
buttonOpts = (opts['button'] or dict(visible=False))
self.button.updateOpts(param, buttonOpts)
        super().optsChanged(param, opts)
@pytest.mark.parametrize('size1, supp_size1, size2, supp_size2, axis, concatenate', [(None, 2, None, 2, 0, True), (None, 2, None, 2, (- 1), True), ((5,), 2, (3,), 2, 0, True), ((5,), 2, (3,), 2, (- 2), True), ((2,), 5, (2,), 3, 1, True), pytest.param((2,), 5, (2,), 5, 0, False, marks=pytest.mark.xfail(reason='cannot measure dimshuffled multivariate RVs')), pytest.param((2,), 5, (2,), 5, 1, False, marks=pytest.mark.xfail(reason='cannot measure dimshuffled multivariate RVs'))])
def test_measurable_join_multivariate(size1, supp_size1, size2, supp_size2, axis, concatenate):
base1_rv = pt.random.multivariate_normal(np.zeros(supp_size1), np.eye(supp_size1), size=size1, name='base1')
base2_rv = pt.random.dirichlet(np.ones(supp_size2), size=size2, name='base2')
if concatenate:
y_rv = pt.concatenate((base1_rv, base2_rv), axis=axis)
else:
y_rv = pt.stack((base1_rv, base2_rv), axis=axis)
y_rv.name = 'y'
base1_vv = base1_rv.clone()
base2_vv = base2_rv.clone()
y_vv = y_rv.clone()
base_logps = [pt.atleast_1d(logp) for logp in conditional_logp({base1_rv: base1_vv, base2_rv: base2_vv}).values()]
if concatenate:
axis_norm = np.core.numeric.normalize_axis_index(axis, base1_rv.ndim)
base_logps = pt.concatenate(base_logps, axis=(axis_norm - 1))
else:
axis_norm = np.core.numeric.normalize_axis_index(axis, (base1_rv.ndim + 1))
base_logps = pt.stack(base_logps, axis=(axis_norm - 1))
    y_logp = logp(y_rv, y_vv)
assert_no_rvs(y_logp)
base1_testval = base1_rv.eval()
base2_testval = base2_rv.eval()
if concatenate:
y_testval = np.concatenate((base1_testval, base2_testval), axis=axis)
else:
y_testval = np.stack((base1_testval, base2_testval), axis=axis)
    np.testing.assert_allclose(base_logps.eval({base1_vv: base1_testval, base2_vv: base2_testval}), y_logp.eval({y_vv: y_testval}))
class MiAnno():
def __init__(self, file: Path, logger: Logger) -> None:
with open(file, encoding='utf-8') as f:
data = json.load(f)
if (data.get('_anno_version', '') != '1.0'):
logger.warning('%s: Annotation data version is incompatible', file)
self.file = file
self.anno_version: str = data.get('_anno_version', 'unknown')
self.annotator: str = data.get('_annotator', 'unknown')
self.occr: dict = data['mi_anno']
def dump(self) -> None:
with open(self.file, 'w') as f:
            dump_json({'_anno_version': self.anno_version, '_annotator': self.annotator, 'mi_anno': self.occr}, f)
class CssLink(Link):
_template = Template('{% if kwargs.get("embedded",False) %}<style>{{this.get_code()}}</style>{% else %}<link rel="stylesheet" href="{{this.url}}"/>{% endif %}')
def __init__(self, url, download=False):
super().__init__()
self._name = 'CssLink'
self.url = url
self.code = None
if download:
            self.get_code()
def check_errors(settings):
deprstring = 'settings.%s should be renamed to %s. If defaults are used, their path/classname must be updated (see evennia/settings_default.py).'
if hasattr(settings, 'CMDSET_DEFAULT'):
raise DeprecationWarning((deprstring % ('CMDSET_DEFAULT', 'CMDSET_CHARACTER')))
if hasattr(settings, 'CMDSET_OOC'):
raise DeprecationWarning((deprstring % ('CMDSET_OOC', 'CMDSET_ACCOUNT')))
if (settings.WEBSERVER_ENABLED and (not isinstance(settings.WEBSERVER_PORTS[0], tuple))):
raise DeprecationWarning('settings.WEBSERVER_PORTS must be on the form [(proxyport, serverport), ...]')
if hasattr(settings, 'BASE_COMM_TYPECLASS'):
raise DeprecationWarning((deprstring % ('BASE_COMM_TYPECLASS', 'BASE_CHANNEL_TYPECLASS')))
if hasattr(settings, 'COMM_TYPECLASS_PATHS'):
raise DeprecationWarning((deprstring % ('COMM_TYPECLASS_PATHS', 'CHANNEL_TYPECLASS_PATHS')))
if hasattr(settings, 'CHARACTER_DEFAULT_HOME'):
raise DeprecationWarning('settings.CHARACTER_DEFAULT_HOME should be renamed to DEFAULT_HOME. See also settings.START_LOCATION (see evennia/settings_default.py).')
deprstring = 'settings.%s is now merged into settings.TYPECLASS_PATHS. Update your settings file.'
if hasattr(settings, 'OBJECT_TYPECLASS_PATHS'):
raise DeprecationWarning((deprstring % 'OBJECT_TYPECLASS_PATHS'))
if hasattr(settings, 'SCRIPT_TYPECLASS_PATHS'):
raise DeprecationWarning((deprstring % 'SCRIPT_TYPECLASS_PATHS'))
if hasattr(settings, 'ACCOUNT_TYPECLASS_PATHS'):
raise DeprecationWarning((deprstring % 'ACCOUNT_TYPECLASS_PATHS'))
if hasattr(settings, 'CHANNEL_TYPECLASS_PATHS'):
raise DeprecationWarning((deprstring % 'CHANNEL_TYPECLASS_PATHS'))
if hasattr(settings, 'SEARCH_MULTIMATCH_SEPARATOR'):
raise DeprecationWarning('settings.SEARCH_MULTIMATCH_SEPARATOR was replaced by SEARCH_MULTIMATCH_REGEX and SEARCH_MULTIMATCH_TEMPLATE. Update your settings file (see evennia/settings_default.py for more info).')
gametime_deprecation = 'The settings TIME_SEC_PER_MIN, TIME_MIN_PER_HOUR,TIME_HOUR_PER_DAY, TIME_DAY_PER_WEEK, \nTIME_WEEK_PER_MONTH and TIME_MONTH_PER_YEAR are no longer supported. Remove them from your settings file to continue.\nIf you want to use and manipulate these time units, the tools from utils.gametime are now found in contrib/convert_gametime.py instead.'
if any((hasattr(settings, value) for value in ('TIME_SEC_PER_MIN', 'TIME_MIN_PER_HOUR', 'TIME_HOUR_PER_DAY', 'TIME_DAY_PER_WEEK', 'TIME_WEEK_PER_MONTH', 'TIME_MONTH_PER_YEAR'))):
raise DeprecationWarning(gametime_deprecation)
game_directory_deprecation = 'The setting GAME_DIRECTORY_LISTING was removed. It must be renamed to GAME_INDEX_LISTING instead.'
if hasattr(settings, 'GAME_DIRECTORY_LISTING'):
raise DeprecationWarning(game_directory_deprecation)
chan_connectinfo = settings.CHANNEL_CONNECTINFO
if ((chan_connectinfo is not None) and (not isinstance(chan_connectinfo, dict))):
        raise DeprecationWarning('settings.CHANNEL_CONNECTINFO has changed. It must now be either None or a dict specifying the properties of the channel to create.')
class FakeDownloadManager():
def __init__(self, tmpdir):
self._tmpdir = tmpdir
self.downloads = []
    @contextlib.contextmanager
    def _open_fileobj(self, target):
if isinstance(target, downloads.FileDownloadTarget):
target.fileobj = open(target.filename, 'wb')
try:
(yield target.fileobj)
finally:
target.fileobj.close()
else:
(yield target.fileobj)
def get(self, url, target, **kwargs):
with self._open_fileobj(target):
download_item = FakeDownloadItem(target.fileobj, name=url.path())
with (self._tmpdir / url.path()).open('rb') as fake_url_file:
shutil.copyfileobj(fake_url_file, download_item.fileobj)
self.downloads.append(download_item)
return download_item
def has_downloads_with_nam(self, _nam):
        return False
class SwagExample(object):
def __init__(self, swag_id, context_sentence, start_ending, ending_0, ending_1, ending_2, ending_3, label=None):
self.swag_id = swag_id
self.context_sentence = context_sentence
self.start_ending = start_ending
self.endings = [ending_0, ending_1, ending_2, ending_3]
self.label = label
def __str__(self):
return self.__repr__()
def __repr__(self):
attributes = ['swag_id: {}'.format(self.swag_id), 'context_sentence: {}'.format(self.context_sentence), 'start_ending: {}'.format(self.start_ending), 'ending_0: {}'.format(self.endings[0]), 'ending_1: {}'.format(self.endings[1]), 'ending_2: {}'.format(self.endings[2]), 'ending_3: {}'.format(self.endings[3])]
if (self.label is not None):
attributes.append('label: {}'.format(self.label))
        return ', '.join(attributes)
def handle_simple_project_import_records(data: dict) -> dict:
resp = {'count': 2}
if ('non_existent_key' in data['data'][0]):
resp = {'error': 'invalid field'}
return_content = data['returnContent'][0]
if (return_content == 'ids'):
resp = ['1', '2']
elif (return_content == 'nothing'):
resp = {}
    return resp
class Enviroment(object):
def __init__(self, user, system, verbose=True, config=None):
self.DB = DB
self.sys_action_cardinality = dialog_config.SYS_ACTION_CARDINALITY
self.usr_action_cardinality = dialog_config.USER_ACT_CARDINALITY
self.user_action = None
self.last_agent_action = None
self.user = user
self.system = system
if (config is not None):
self.config = config
else:
raise NotImplementedError
self.done = False
self.success = None
self.verbose = verbose
self.step_i = 0
self.first_step = True
def zero_state(self, total_query=1):
zero_dialog_state = {'informable_slots': {'food': 0, 'area': 0, 'pricerange': 0, 'name': 0}, 'requestable_slots_provided': {'address': 0, 'phone': 0, 'postcode': 0}, 'requestable_slots_asked': {'address': 0, 'phone': 0, 'postcode': 0}, 'reservation_slots_provided': {'num_people': 0, 'time': 0, 'day': 0}, 'match_presented': 0, 'no_match_presented': 0, 'num_else_so_far': 0, 'total_query': total_query}
return zero_dialog_state
def reset(self, mode, mturk_res=None):
self.done = False
self.success = None
self.first_step = True
self.user.reset()
self.system.reset()
self.step_i = 0
(self.last_sys_act, self.last_sys_sent) = (None, None)
print('goal', self.user.goal)
next_state = self.step_user(mode=mode, mturk_res=mturk_res)
return next_state
def queryable(self):
if (self.state_maintained['informable_slots']['food'] and self.state_maintained['informable_slots']['area'] and self.state_maintained['informable_slots']['pricerange']):
return True
else:
return False
def query_status(self):
match_nums_list = self.user.goal['match_nums']
if (match_nums_list[0] <= (self.state_maintained['no_match_presented'] + self.state_maintained['match_presented'])):
return (1, 0)
else:
return (0, 1)
def query_in_DB(self):
query_expr = []
for g in self.user.goal['goal_entity']:
tmp_food_query = ''
tmp_area_query = ''
tmp_price_query = ''
if (g['food'] != 'dontcare'):
tmp_food_query = ((' food == "' + g['food']) + '"')
if (g['area'] != 'dontcare'):
tmp_area_query = ((' area == "' + g['area']) + '"')
if (g['pricerange'] != 'dontcare'):
tmp_price_query = ((' pricerange == "' + g['pricerange']) + '"')
tmp_query = [tmp_food_query, tmp_area_query, tmp_price_query]
tmp_query = [q for q in tmp_query if q]
tmp_query = ' and '.join(tmp_query)
query_expr.append(tmp_query)
match_nums = []
for q in query_expr:
if q:
sample_from_subset = self.DB.query(q)
match_nums.append(sample_from_subset.shape[0])
else:
                match_nums.append(self.DB.shape[0])  # empty query: no constraints, the whole DB matches
return match_nums
def maintain_states(self, who):
self.state_maintained['total_query'] = self.user.goal['total_query']
if (who == 'user'):
if ('value_food' in self.user_action):
self.state_maintained['informable_slots']['food'] = 1
if ('value_pricerange' in self.user_action):
self.state_maintained['informable_slots']['pricerange'] = 1
if ('value_area' in self.user_action):
self.state_maintained['informable_slots']['area'] = 1
if ('slot_postcode' in self.user_action):
self.state_maintained['requestable_slots_asked']['postcode'] = 1
if ('slot_phone' in self.user_action):
self.state_maintained['requestable_slots_asked']['phone'] = 1
if ('slot_address' in self.user_action):
self.state_maintained['requestable_slots_asked']['address'] = 1
elif (who == 'agent'):
if self.last_agent_action:
if ('value_postcode' in self.last_agent_action):
self.state_maintained['requestable_slots_provided']['postcode'] = 1
if ('value_phone' in self.last_agent_action):
self.state_maintained['requestable_slots_provided']['phone'] = 1
if ('value_address' in self.last_agent_action):
self.state_maintained['requestable_slots_provided']['address'] = 1
if ('[value_name] is a good restaurant' in self.last_agent_action):
self.state_maintained['match_presented'] += 1
if ('no restaurants matching your request' in self.last_agent_action):
self.state_maintained['no_match_presented'] += 1
def update_state(self, act, who):
if (who == 'usr'):
self.system.update_state(act=act, who='usr')
elif (who == 'sys'):
self.system.update_state(act=act, who='sys')
else:
raise ValueError('{} is not allowed'.format(who))
def step(self, provided_sys_act=None, mode=dialog_config.RL_TRAINING):
result_step_sys = self.step_system(provided_sys_act=provided_sys_act, mode=mode)
if (result_step_sys is not None):
(next_state, reward, self.done) = result_step_sys
print('reward per turn', reward)
return (next_state, reward, self.done)
(next_state, reward, self.done) = self.step_user(mode=mode)
print('reward per turn', reward)
return (next_state, reward, self.done)
def step_user(self, mode, mturk_res=None):
if self.config.INTERACTIVE:
if (mturk_res is None):
self.last_usr_sent = input('Please respond: ')
else:
self.last_usr_sent = mturk_res
self.last_usr_act_true = None
else:
(self.last_usr_act_true, self.last_usr_sent) = self.user.respond(sys_act=self.last_sys_act, prev_sys=self.last_sys_sent)
if (self.last_usr_act_true is None):
self.last_usr_act_pred = self.system.nlu(usr_sent=self.last_usr_sent, usr_act=self.last_usr_act_true, mode=dialog_config.RL_TRAINING)
else:
self.last_usr_act_pred = self.system.nlu(usr_sent=self.last_usr_sent, usr_act=self.last_usr_act_true, mode=mode)
self.update_state(act=self.last_usr_act_pred, who='usr')
if (self.verbose and (not self.config.INTERACTIVE)):
print('{} Usr: {}'.format(self.step_i, self.last_usr_sent))
print('True user act: ', self.last_usr_act_true)
if (not self.config.use_sequicity_for_rl_model):
next_state = self.system.prepare_state_representation()
else:
next_state = None
if self.first_step:
self.first_step = False
return next_state
if (not self.config.INTERACTIVE):
if self.config.use_new_reward:
reward = self.evaluate_cur_move_new()
else:
reward = self.evaluate_cur_move()
return (next_state, reward, self.done)
else:
reward = 0
if (self.last_usr_act_pred.act == UserAct.GOODBYE):
self.done = True
(self.last_sys_act, self.last_sys_sent) = (Action(SystemAct.GOODBYE, None), 'Thanks for using the system! Have a good day!')
if self.verbose:
print('{} Sys: {}'.format(self.step_i, self.last_sys_sent))
return (next_state, reward, self.done)
def step_system(self, provided_sys_act=None, mode=dialog_config.RL_TRAINING):
if ((mode == dialog_config.RL_TRAINING) or (mode == dialog_config.RANDOM_ACT)):
assert (provided_sys_act is not None)
(self.last_sys_act, self.last_sys_sent) = self.system.respond(provided_sys_act=provided_sys_act, mode=mode, usr_act=self.last_usr_act_pred, usr_sent=None)
else:
(self.last_sys_act, self.last_sys_sent) = self.system.respond(provided_sys_act=provided_sys_act, mode=mode, usr_act=self.last_usr_act_pred, usr_sent=None)
if (self.system.dialog_status == dialog_config.FAILED_DIALOG):
next_state = None
reward = dialog_config.FAILURE_REWARD
self.done = True
self.success = False
return (next_state, reward, self.done)
self.update_state(act=self.last_sys_act, who='sys')
if self.verbose:
print('{} Sys: {}'.format(self.step_i, self.last_sys_sent))
self.step_i += 1
return None
def evaluate_cur_move(self):
if (self.user.dialog_status == dialog_config.SUCCESS_DIALOG):
reward = dialog_config.SUCCESS_REWARD
self.done = True
self.success = True
elif (self.user.dialog_status == dialog_config.FAILED_DIALOG):
reward = dialog_config.FAILURE_REWARD
self.done = True
self.success = False
elif (self.user.dialog_status in [dialog_config.NO_OUTCOME_YET, dialog_config.TURN_FAIL_FOR_SL, dialog_config.TURN_SUCCESS_FOR_SL]):
reward = dialog_config.PER_TURN_REWARD
self.done = False
self.success = None
return reward
def evaluate_cur_move_new(self):
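        # Shaped-reward variant: same terminal success/failure rewards as
        # evaluate_cur_move, plus a per-turn bonus whenever the system act is
        # an appropriate response to the previous user act.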
def calculate_per_turn_reward(self, last_usr_act_str, sys_act_str):
turn_reward = dialog_config.PER_TURN_REWARD
if (last_usr_act_str == UserAct.INFORM_TYPE):
if (sys_act_str in [SystemAct.PRESENT_RESULT, SystemAct.NOMATCH_RESULT]):
turn_reward = dialog_config.TURN_AWARD
elif (last_usr_act_str == UserAct.INFORM_TYPE_CHANGE):
if (sys_act_str in [SystemAct.PRESENT_RESULT, SystemAct.NOMATCH_RESULT]):
turn_reward = dialog_config.TURN_AWARD
elif (last_usr_act_str == UserAct.ASK_INFO):
if (sys_act_str in [SystemAct.PROVIDE_INFO]):
turn_reward = dialog_config.TURN_AWARD
elif (last_usr_act_str == UserAct.MAKE_RESERVATION):
if (sys_act_str in [SystemAct.ASK_RESERVATION_INFO, SystemAct.BOOKING_SUCCESS, SystemAct.BOOKING_FAIL]):
turn_reward = dialog_config.TURN_AWARD
elif (last_usr_act_str == UserAct.MAKE_RESERVATION_CHANGE_TIME):
if (sys_act_str in [SystemAct.BOOKING_SUCCESS, SystemAct.BOOKING_FAIL]):
turn_reward = dialog_config.TURN_AWARD
elif (last_usr_act_str == UserAct.ANYTHING_ELSE):
if (sys_act_str in [SystemAct.NO_OTHER, SystemAct.PRESENT_RESULT, SystemAct.NOMATCH_RESULT]):
turn_reward = dialog_config.TURN_AWARD
elif (last_usr_act_str == UserAct.GOODBYE):
if (sys_act_str in [SystemAct.GOODBYE]):
turn_reward = dialog_config.TURN_AWARD
return turn_reward
if (self.user.dialog_status == dialog_config.SUCCESS_DIALOG):
reward = dialog_config.SUCCESS_REWARD
self.done = True
self.success = True
elif (self.user.dialog_status == dialog_config.FAILED_DIALOG):
reward = dialog_config.FAILURE_REWARD
self.done = True
self.success = False
elif (self.user.dialog_status in [dialog_config.NO_OUTCOME_YET, dialog_config.TURN_FAIL_FOR_SL, dialog_config.TURN_SUCCESS_FOR_SL]):
reward = calculate_per_turn_reward(self, last_usr_act_str=self.user.state['usr_act_sequence'][(- 2)], sys_act_str=self.last_sys_act.act)
self.done = False
self.success = None
        return reward
@pytest.mark.fast
def test_vibrational_levels_labelling(verbose=True, *args, **kwargs):
if verbose:
printm('Some vibrational level formats:')
printm('... CO, HITRAN format (v):\t\t', vib_lvl_name_hitran_class1(10))
printm('... CO2, HITRAN format (v1,v2,l2,v3):\t\t', vib_lvl_name_hitran_class5(2, 1, 1, 3))
printm('... CO2, HITRAN format, short (v1v2l2v3):\t', vib_lvl_name_hitran_class5_short(2, 1, 1, 3))
printm('... CO2, CDSD format (p,c):\t\t', vib_lvl_name_cdsd_pc(14, 1))
printm('... CO2, CDSD format (p,c,N):\t\t', vib_lvl_name_cdsd_pcN(14, 1, 1))
assert (vib_lvl_name_hitran_class1(10) == '(10)')
assert (vib_lvl_name_hitran_class5(2, 1, 1, 3) == '(2,1,1,3)')
assert (vib_lvl_name_hitran_class5_short(2, 1, 1, 3) == '21`1`3')
assert (vib_lvl_name_cdsd_pc(14, 1) == '(14,1)')
assert (vib_lvl_name_cdsd_pcN(14, 1, 1) == '(14,1,1)')
    return
def hand_baseline(net_type, output_dir, test_id=0):
dataset = 'msra'
n = caffe.NetSpec()
(fx_, fy_, ux_, uy_) = util.get_param(dataset)
point_num_ = util.get_joint_num(dataset)
root_folder_ = config.msra_data_dir
if (net_type == 'train'):
image_source_ = (root_folder_ + 'train_image_{}.txt'.format(test_id))
pose_data_param_train = dict(image_source=image_source_, label_source=(root_folder_ + 'train_label_{}.txt'.format(test_id)), root_folder=root_folder_, batch_size=128, shuffle=True, new_height=96, new_width=96, point_num=point_num_, point_dim=3, cube_length=150, fx=fx_, fy=fy_, dataset=P.PoseData.MSRA)
(n.data, n.pose) = L.PoseData(name='data', include=dict(phase=0), transform_param=dict(is_trans=True, trans_dx=10, trans_dy=10, is_rotate=True, rotate_deg=180, is_zoom=True, zoom_scale=0.1), pose_data_param=pose_data_param_train, ntop=2)
first_layer = str(n.to_proto())
pose_data_param_test = dict(image_source=(root_folder_ + 'test_image_{}.txt'.format(test_id)), label_source=(root_folder_ + 'test_label_{}.txt'.format(test_id)), root_folder=root_folder_, batch_size=128, shuffle=False, new_height=96, new_width=96, point_num=point_num_, point_dim=3, output_center=True, cube_length=150, fx=fx_, fy=fy_, dataset=P.PoseData.MSRA)
(n.data, n.label) = L.PoseData(name='data', include=dict(phase=1), transform_param=dict(is_trans=False, is_rotate=False, is_zoom=False), pose_data_param=pose_data_param_test, ntop=2)
(n.pose, n.center) = L.Slice(n.label, slice_param=dict(slice_dim=1, slice_point=(point_num_ * 3)), include=dict(phase=1), ntop=2)
elif (net_type == 'test-train'):
pose_data_param_test = dict(image_source='{}/cache/train_image_{}.txt'.format(output_dir, test_id), label_source='{}/cache/train_label_{}.txt'.format(output_dir, test_id), root_folder=root_folder_, batch_size=128, shuffle=False, new_height=96, new_width=96, point_num=point_num_, point_dim=3, output_center=True, cube_length=150, fx=fx_, fy=fy_, dataset=P.PoseData.MSRA)
(n.data, n.label) = L.PoseData(name='data', include=dict(phase=1), transform_param=dict(is_trans=False, is_rotate=False, is_zoom=False), pose_data_param=pose_data_param_test, ntop=2)
(n.pose, n.center) = L.Slice(n.label, slice_param=dict(slice_dim=1, slice_point=(point_num_ * 3)), include=dict(phase=1), ntop=2)
elif (net_type == 'test-test'):
pose_data_param_test = dict(image_source=(root_folder_ + 'test_image_{}.txt'.format(test_id)), label_source=(root_folder_ + 'test_label_{}.txt'.format(test_id)), root_folder=root_folder_, batch_size=128, shuffle=False, new_height=96, new_width=96, point_num=point_num_, point_dim=3, output_center=True, cube_length=150, fx=fx_, fy=fy_, dataset=P.PoseData.MSRA)
(n.data, n.label) = L.PoseData(name='data', include=dict(phase=1), transform_param=dict(is_trans=False, is_rotate=False, is_zoom=False), pose_data_param=pose_data_param_test, ntop=2)
(n.pose, n.center) = L.Slice(n.label, slice_param=dict(slice_dim=1, slice_point=(point_num_ * 3)), include=dict(phase=1), ntop=2)
(n.conv0, n.relu0) = conv_relu(n.data, 16)
n.conv1 = conv(n.relu0, 16)
n.pool1 = max_pool(n.conv1)
n.relu1 = L.ReLU(n.pool1, in_place=True)
(n.conv2_0, n.relu2_0) = conv_relu(n.pool1, 32, ks=1, pad=0)
(n.conv2, n.relu2) = conv_relu(n.relu2_0, 32)
n.conv3 = conv(n.relu2, 32)
n.res1 = L.Eltwise(n.conv2_0, n.conv3)
n.pool2 = max_pool(n.res1)
n.relu3 = L.ReLU(n.pool2, in_place=True)
(n.conv3_0, n.relu3_0) = conv_relu(n.relu3, 64, ks=1, pad=0)
(n.conv4, n.relu4) = conv_relu(n.relu3_0, 64)
n.conv5 = conv(n.relu4, 64)
n.res2 = L.Eltwise(n.conv3_0, n.conv5)
n.pool3 = max_pool(n.res2)
n.relu5 = L.ReLU(n.pool3, in_place=True)
(n.fc1, n.relu6_0, n.drop1_0) = fc_relu_dropout(n.relu5, 2048, 0.5)
(n.fc2, n.relu7_0, n.drop2_0) = fc_relu_dropout(n.drop1_0, 2048, 0.5)
n.fc3 = fc(n.drop2_0, (point_num_ * 3))
if (net_type == 'train'):
n.loss = L.SmoothL1Loss(n.fc3, n.pose, smooth_l1_loss_param=dict(sigma=10), loss_weight=1)
n.distance = L.PoseDistance(n.fc3, n.pose, n.center, loss_weight=0, pose_distance_param=dict(cube_length=150, fx=fx_, fy=fy_, ux=ux_, uy=uy_), include=dict(phase=1))
return (first_layer + str(n.to_proto()))
else:
(n.error, n.output) = L.PoseDistance(n.fc3, n.pose, n.center, pose_distance_param=dict(cube_length=150, fx=fx_, fy=fy_, ux=ux_, uy=uy_, output_pose=True), include=dict(phase=1), ntop=2)
        return str(n.to_proto())
def split_with_prefix_and_suffix(types: tuple[(Type, ...)], prefix: int, suffix: int) -> tuple[(tuple[(Type, ...)], tuple[(Type, ...)], tuple[(Type, ...)])]:
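    """Split a tuple of types into (prefix, middle, suffix) pieces, padding
    the tuple first when it is too short to cover prefix + suffix."""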
if (len(types) <= (prefix + suffix)):
types = extend_args_for_prefix_and_suffix(types, prefix, suffix)
if suffix:
return (types[:prefix], types[prefix:(- suffix)], types[(- suffix):])
else:
        return (types[:prefix], types[prefix:], ())
def create_dispatch_wrapper(op_name: str):
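    # Dispatch-wrapper sketch: factory ops (those that require an explicit
    # `size`) dispatch on the target device; every other op falls through to
    # the generic `_dispatch` helper assumed to exist in this module.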
def factory_dispatch(*args, size=None, device='cpu'):
if (size is None):
            raise AssertionError(f'Factory method call {op_name} requires explicit size parameter')
if isinstance(size, int):
dispatch_key = _device_to_dispatch_key[device]
else:
raise AssertionError(f'Unsupported size parameter type {type(size)}')
op = get_backend_functional(dispatch_key).__getattr__(op_name)
return op(*args, size=size, device=device)
if (op_name in _factory_methods):
return factory_dispatch
else:
        return partial(_dispatch, op_name)
def default_X_scheduler(num_X):
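    # Multi-step LR schedule in the detectron2 "1x = 90k iterations at batch
    # size 16" convention: decay by 10x at the standard milestones, with a
    # linear warmup over the first 1000 iterations.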
total_steps_16bs = (num_X * 90000)
if (num_X <= 2):
scheduler = L(MultiStepParamScheduler)(values=[1.0, 0.1, 0.01], milestones=[60000, 80000, 90000])
else:
scheduler = L(MultiStepParamScheduler)(values=[1.0, 0.1, 0.01], milestones=[(total_steps_16bs - 60000), (total_steps_16bs - 20000), total_steps_16bs])
    return L(WarmupParamScheduler)(scheduler=scheduler, warmup_length=(1000 / total_steps_16bs), warmup_method='linear', warmup_factor=0.001)
def to_dict_with_set_values(d):
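    """Convert a dict of iterables into a dict of sets, turning list elements
    into tuples first so they become hashable."""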
result = {}
for (k, v) in d.items():
hashable_v = []
for v_elem in v:
if isinstance(v_elem, list):
hashable_v.append(tuple(v_elem))
else:
hashable_v.append(v_elem)
result[k] = set(hashable_v)
    return result
def log_lmk_and_img(image, landmark):
n = len(image)
images = []
images_with_lmk = []
for i in range(n):
img = (((image[i] + 1) / 2) * 255)
img = rearrange(img, 'h w c -> c h w')
img = img.cpu().numpy().transpose(1, 2, 0).astype('uint8').copy()
images.append(((img.transpose(2, 0, 1) / 127.5) - 1.0))
lmk = (landmark[i] * img.shape[0]).cpu().numpy().astype('int32')
for k in range(68):
img = cv2.circle(img, (lmk[(k, 0)], lmk[(k, 1)]), 3, (255, 0, 255), thickness=(- 1))
images_with_lmk.append(((img.transpose(2, 0, 1) / 127.5) - 1.0))
    return (torch.tensor(images), torch.tensor(images_with_lmk))
def parallel_eval(*args):
(genotype, gpuid, data_path, save_path, dataset, cutout, epochs, policy) = args
print(args)
t = DARTSTrainer(data_path, save_path, genotype, dataset, cutout=cutout, epochs=epochs, gpu_id=gpuid, eval_policy=policy)
try:
t.train()
    except Exception:
        logging.error('Error occurred in training one of the archs. The child process is terminated')
        return (- 1)
    return (1.0 - (t.retrieve() / 100.0))
class ModelParallelTestShared(MultiProcessTestBase):
    @seed_and_log
def setUp(self) -> None:
super().setUp()
num_features = 4
num_weighted_features = 2
shared_features = 2
self.tables = [EmbeddingBagConfig(num_embeddings=((i + 1) * 10), embedding_dim=((i + 2) * 8), name=('table_' + str(i)), feature_names=[('feature_' + str(i))]) for i in range(num_features)]
shared_features_tables = [EmbeddingBagConfig(num_embeddings=((i + 1) * 10), embedding_dim=((i + 2) * 8), name=('table_' + str((i + num_features))), feature_names=[('feature_' + str(i))]) for i in range(shared_features)]
self.tables += shared_features_tables
self.weighted_tables = [EmbeddingBagConfig(num_embeddings=((i + 1) * 10), embedding_dim=((i + 2) * 4), name=('weighted_table_' + str(i)), feature_names=[('weighted_feature_' + str(i))]) for i in range(num_weighted_features)]
self.shared_features = [f'feature_{i}' for i in range(shared_features)]
self.embedding_groups = {'group_0': [(f'{feature}{table.name}' if (feature in self.shared_features) else feature) for table in self.tables for feature in table.feature_names]}
if torch.cuda.is_available():
self.device = torch.device('cuda')
self.backend = 'nccl'
else:
self.device = torch.device('cpu')
self.backend = 'gloo'
def _test_sharding(self, sharders: List[ModuleSharder[nn.Module]], backend: str='gloo', world_size: int=2, local_size: Optional[int]=None, constraints: Optional[Dict[(str, ParameterConstraints)]]=None, model_class: Type[TestSparseNNBase]=TestSparseNN, qcomms_config: Optional[QCommsConfig]=None, apply_optimizer_in_backward_config: Optional[Dict[(str, Tuple[(Type[torch.optim.Optimizer], Dict[(str, Any)])])]]=None, variable_batch_size: bool=False, variable_batch_per_feature: bool=False, has_weighted_tables: bool=True, global_constant_batch: bool=False) -> None:
        self._run_multi_process_test(callable=sharding_single_rank_test, world_size=world_size, local_size=local_size, model_class=model_class, tables=self.tables, weighted_tables=(self.weighted_tables if has_weighted_tables else None), embedding_groups=self.embedding_groups, sharders=sharders, backend=backend, optim=EmbOptimType.EXACT_SGD, constraints=constraints, qcomms_config=qcomms_config, variable_batch_size=variable_batch_size, apply_optimizer_in_backward_config=apply_optimizer_in_backward_config, variable_batch_per_feature=variable_batch_per_feature, global_constant_batch=global_constant_batch)
def clean_str(string):
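    """Normalize an English string for tokenization: split common
    contractions into separate tokens, collapse repeated whitespace, and
    lowercase the result."""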
string = string.strip().strip('"')
string = re.sub("\\'s", " 's", string)
string = re.sub("\\'ve", " 've", string)
string = re.sub("n\\'t", " n't", string)
string = re.sub("\\'re", " 're", string)
string = re.sub("\\'d", " 'd", string)
string = re.sub("\\'ll", " 'll", string)
string = re.sub('\\s{2,}', ' ', string)
    return string.strip().lower()
def knnGPU_sharded(x_batches_f, y_batches_f, dim, k, direction='x2y'):
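    # Sharded GPU kNN sketch: for every x shard, search each y shard with a
    # FAISS flat inner-product index, offset the returned indices by the y
    # shard's start position, then merge the per-shard hits and keep the
    # global top-k per query.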
sims = []
inds = []
xfrom = 0
xto = 0
for x_batch_f in x_batches_f:
yfrom = 0
yto = 0
x_batch = load_batch(x_batch_f, dim)
xto = (xfrom + x_batch.shape[0])
(bsims, binds) = ([], [])
for y_batch_f in y_batches_f:
y_batch = load_batch(y_batch_f, dim)
neighbor_size = min(k, y_batch.shape[0])
yto = (yfrom + y_batch.shape[0])
print('{}-{} -> {}-{}'.format(xfrom, xto, yfrom, yto))
idx = faiss.IndexFlatIP(dim)
idx = faiss.index_cpu_to_all_gpus(idx)
idx.add(y_batch)
(bsim, bind) = idx.search(x_batch, neighbor_size)
bsims.append(bsim)
binds.append((bind + yfrom))
yfrom += y_batch.shape[0]
del idx
del y_batch
bsims = np.concatenate(bsims, axis=1)
binds = np.concatenate(binds, axis=1)
aux = np.argsort((- bsims), axis=1)
sim_batch = np.zeros((x_batch.shape[0], k), dtype=np.float32)
ind_batch = np.zeros((x_batch.shape[0], k), dtype=np.int64)
for i in range(x_batch.shape[0]):
for j in range(k):
sim_batch[(i, j)] = bsims[(i, aux[(i, j)])]
ind_batch[(i, j)] = binds[(i, aux[(i, j)])]
sims.append(sim_batch)
inds.append(ind_batch)
xfrom += x_batch.shape[0]
del x_batch
sim = np.concatenate(sims, axis=0)
ind = np.concatenate(inds, axis=0)
    return (sim, ind)
def test_multi_marker_union_multi_is_single_marker() -> None:
m = parse_marker('python_version >= "3" and sys_platform == "win32"')
m2 = parse_marker('sys_platform != "win32" and python_version >= "3"')
assert (str(m.union(m2)) == 'python_version >= "3"')
    assert (str(m2.union(m)) == 'python_version >= "3"')
class HTMLResource(GenericResource):
def parse(self, **kwargs):
(source, encoding) = self.get_source(buffered=True)
return iterparse(source, encoding, include_meta_charset_tag=True, **kwargs)
def extract_children(self, parsing_buffer):
location = self.filepath
for (elem, attr, url, pos) in parsing_buffer:
if (not self.scheduler.validate_url(url)):
continue
sub_context = self.context.create_new_from_url(url)
ans = self.scheduler.get_handler(elem.tag, self.session, self.config, self.scheduler, sub_context)
self.scheduler.handle_resource(ans)
resolved = ans.resolve(location)
elem.replace_url(url, resolved, attr, pos)
return parsing_buffer
def _retrieve(self):
if (not self.viewing_html()):
self.logger.info(('Resource of type [%s] is not HTML.' % self.content_type))
return super(HTMLResource, self)._retrieve()
if (not self.response.ok):
self.logger.debug(('Resource at [%s] is NOT ok and will be NOT processed.' % self.url))
return super(HTMLResource, self)._retrieve()
context = self.extract_children(self.parse())
context.root.insert(0, HtmlComment(self._get_watermark()))
retrieve_resource(BytesIO(tostring(context.root, include_meta_content_type=True)), self.filepath, self.context.url, overwrite=True)
self.logger.debug(('Retrieved content from the url: [%s]' % self.url))
del context
return self.filepath
def _get_watermark(self):
        return (dedent('\n * PyWebCopy Engine [version %s]\n * Copyright 2020; Raja Tomar\n * File mirrored from [%s]\n * At UTC datetime: [%s]\n ') % (__version__, self.response.url, datetime.utcnow()))
class MixedOp(nn.Module):
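    """DARTS-style mixed operation: applies every candidate primitive to the
    input and returns their sum weighted by the architecture weights."""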
def __init__(self, C, stride, PRIMITIVES):
super(MixedOp, self).__init__()
self._ops = nn.ModuleList()
for primitive in PRIMITIVES:
op = OPS[primitive](C, stride, False)
if ('pool' in primitive):
op = nn.Sequential(op, nn.BatchNorm2d(C, affine=False))
self._ops.append(op)
def forward(self, x, weights):
        return sum(((w * op(x)) for (w, op) in zip(weights, self._ops)))
class Dumper(Emitter, Serializer, Representer, Resolver):
def __init__(self, stream, default_style=None, default_flow_style=None, canonical=None, indent=None, width=None, allow_unicode=None, line_break=None, encoding=None, explicit_start=None, explicit_end=None, version=None, tags=None):
Emitter.__init__(self, stream, canonical=canonical, indent=indent, width=width, allow_unicode=allow_unicode, line_break=line_break)
Serializer.__init__(self, encoding=encoding, explicit_start=explicit_start, explicit_end=explicit_end, version=version, tags=tags)
Representer.__init__(self, default_style=default_style, default_flow_style=default_flow_style)
        Resolver.__init__(self)
def stride_crop_folder(src_folder, dst_folder, patch_size, stride, filter_func=None):
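    # Sliding-window cropping: cut each image in src_folder into
    # patch_size x patch_size patches at the given stride, skip patches
    # rejected by filter_func, and save the rest into dst_folder.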
im_paths = _all_images(src_folder)
for im_path in im_paths:
im_name = os.path.basename(im_path).split('.')[0]
im = Image.open(im_path)
(w, h) = im.size
cnt = 0
for x in range((((w - patch_size) // stride) + 1)):
for y in range((((h - patch_size) // stride) + 1)):
startx = (x * stride)
starty = (y * stride)
patch = im.crop([startx, starty, (startx + patch_size), (starty + patch_size)])
if ((filter_func is not None) and (not filter_func(patch))):
cnt += 1
continue
patch_name = ('%s_%d.png' % (im_name, cnt))
cnt += 1
patch.save(os.path.join(dst_folder, patch_name))
                print(('saving: %s' % os.path.join(dst_folder, patch_name)))
def round_filters(filters, global_params):
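    """Scale a channel count by the width multiplier and round to the nearest
    multiple of `divisor`, never dropping more than 10% below the scaled
    value (the EfficientNet width-scaling rule)."""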
multiplier = global_params.width_coefficient
divisor = global_params.depth_divisor
min_depth = global_params.min_depth
if (not multiplier):
return filters
filters *= multiplier
min_depth = (min_depth or divisor)
new_filters = max(min_depth, ((int((filters + (divisor / 2))) // divisor) * divisor))
if (new_filters < (0.9 * filters)):
new_filters += divisor
    return int(new_filters)
def test_get_argspec():
checker = Checker()
visitor = ConfiguredNameCheckVisitor(__file__, '', {}, fail_after_first=False, checker=checker)
cwc_typed = TypedValue(ClassWithCall)
for _ in range(2):
asc = checker.arg_spec_cache
assert (Signature.make([SigParameter('arg')], callable=ClassWithCall.__call__) == visitor.signature_from_value(cwc_typed))
ata = AllTheAttrs([])
assert (asc.get_argspec(ata) is None)
assert (BoundMethodSignature(Signature.make([SigParameter('cls')], callable=ClassWithCall.normal_classmethod.__func__), Composite(KnownValue(ClassWithCall))) == asc.get_argspec(ClassWithCall.normal_classmethod))
assert (Signature.make([SigParameter('arg')], callable=ClassWithCall.normal_staticmethod) == asc.get_argspec(ClassWithCall.normal_staticmethod))
assert (Signature.make([SigParameter('capybara'), SigParameter('hutia', default=KnownValue(3)), SigParameter('tucotucos', ParameterKind.VAR_POSITIONAL), SigParameter('proechimys', ParameterKind.VAR_KEYWORD)], callable=function) == asc.get_argspec(function))
assert (Signature.make([SigParameter('x'), SigParameter('y')], callable=async_function.fn, is_asynq=True) == asc.get_argspec(async_function))
assert (Signature.make([SigParameter('x'), SigParameter('y')], callable=async_function.fn, is_asynq=True) == asc.get_argspec(async_function.asynq))
instance = ClassWithCall(1)
assert (BoundMethodSignature(Signature.make([SigParameter('self'), SigParameter('x')], callable=instance.async_method.decorator.fn, is_asynq=True), Composite(KnownValue(instance))) == asc.get_argspec(instance.async_method))
assert (BoundMethodSignature(Signature.make([SigParameter('self'), SigParameter('x')], callable=instance.async_method.decorator.fn, is_asynq=True), Composite(KnownValue(instance))) == asc.get_argspec(instance.async_method.asynq))
assert (Signature.make([SigParameter('y')], callable=ClassWithCall.async_staticmethod.fn, is_asynq=True) == asc.get_argspec(ClassWithCall.async_staticmethod))
assert (Signature.make([SigParameter('y')], callable=ClassWithCall.async_staticmethod.fn, is_asynq=True) == asc.get_argspec(ClassWithCall.async_staticmethod.asynq))
assert (BoundMethodSignature(Signature.make([SigParameter('cls'), SigParameter('z')], callable=ClassWithCall.async_classmethod.decorator.fn, is_asynq=True), Composite(KnownValue(ClassWithCall))) == asc.get_argspec(ClassWithCall.async_classmethod))
assert (BoundMethodSignature(Signature.make([SigParameter('cls'), SigParameter('z')], callable=ClassWithCall.async_classmethod.decorator.fn, is_asynq=True), Composite(KnownValue(ClassWithCall))) == asc.get_argspec(ClassWithCall.async_classmethod.asynq))
assert (BoundMethodSignature(Signature.make([SigParameter('cls'), SigParameter('ac')], callable=ClassWithCall.pure_async_classmethod.decorator.fn), Composite(KnownValue(ClassWithCall))) == asc.get_argspec(ClassWithCall.pure_async_classmethod))
if hasattr(ClassWithCall.classmethod_before_async, 'decorator'):
callable = ClassWithCall.classmethod_before_async.decorator.fn
else:
callable = ClassWithCall.classmethod_before_async.__func__.fn
assert (BoundMethodSignature(Signature.make([SigParameter('cls'), SigParameter('ac')], callable=callable, is_asynq=True), Composite(KnownValue(ClassWithCall))) == asc.get_argspec(ClassWithCall.classmethod_before_async))
assert (Signature.make([SigParameter('args', annotation=TypedValue(int)), SigParameter('kwargs', annotation=TypedValue(str))], KnownValue(None), callable=wrapped) == asc.get_argspec(wrapped))
decorated = decorator(wrapped)
assert (Signature.make([SigParameter('args', ParameterKind.VAR_POSITIONAL, annotation=AnyValue(AnySource.inference)), SigParameter('kwargs', ParameterKind.VAR_KEYWORD, annotation=AnyValue(AnySource.inference))], callable=decorated) == asc.get_argspec(decorated))
assert (Signature.make([SigParameter('x', ParameterKind.POSITIONAL_ONLY, annotation=TypedValue(int))], NewTypeValue(NT), callable=NT) == asc.get_argspec(NT)) |
def pass_calibration_data(sim_model, use_cuda):
data_loader = ImageNetDataPipeline.get_val_dataloader()
batch_size = data_loader.batch_size
if use_cuda:
device = torch.device('cuda')
else:
device = torch.device('cpu')
sim_model.eval()
samples = 1000
batch_cntr = 0
with torch.no_grad():
for (input_data, target_data) in data_loader:
inputs_batch = input_data.to(device)
sim_model(inputs_batch)
batch_cntr += 1
if ((batch_cntr * batch_size) > samples):
break |
def test_switch_default_all_call_inputs():
with pytest.raises(Call) as err:
switch(context=Context({'list': 'sg1', 'def': 'sg3', 'case1': False, 'case2': False, 'sg': 'sgv', 'fg': 'fgv', 'switch': [{'case': '{case1}', 'call': '{list}'}, {'case': '{case2}', 'call': 'sg2'}, {'default': {'groups': ['{def}', 'sg4'], 'success': '{sg}', 'failure': '{fg}'}}]}), name='blah')
cof = err.value
assert isinstance(cof, Call)
assert (cof.groups == ['sg3', 'sg4'])
assert (cof.success_group == 'sgv')
assert (cof.failure_group == 'fgv')
assert (cof.original_config == ('switch', [{'case': '{case1}', 'call': '{list}'}, {'case': '{case2}', 'call': 'sg2'}, {'default': {'groups': ['{def}', 'sg4'], 'success': '{sg}', 'failure': '{fg}'}}])) |
class BadUnaryOperationMessage(BadOperationMessage):
def __init__(self, operand, op, error):
self.operand = operand
self.op = op
self.error = error
    @cached_property  # property access: the call in _object_type below invokes the returned function
    def _object_type_helper(self):
        from astroid import helpers
        return helpers.object_type
def _object_type(self, obj):
objtype = self._object_type_helper(obj)
if isinstance(objtype, UninferableBase):
return None
return objtype
def __str__(self) -> str:
if hasattr(self.operand, 'name'):
operand_type = self.operand.name
else:
object_type = self._object_type(self.operand)
if hasattr(object_type, 'name'):
operand_type = object_type.name
else:
operand_type = object_type.as_string()
msg = 'bad operand type for unary {}: {}'
return msg.format(self.op, operand_type) |
class CharWordEmbedder(Configurable):
def __init__(self, embedder: CharEmbedder, layer: Encoder, shared_parameters: bool):
        self.embedder = embedder
self.layer = layer
self.shared_parameters = shared_parameters
def embed(self, is_train, *char_ix):
        embeds = self.embedder.embed(is_train, *char_ix)
if self.shared_parameters:
with tf.variable_scope('embedding'):
output = [self.layer.apply(is_train, embeds[0], char_ix[0][1])]
with tf.variable_scope('embedding', reuse=True):
for i in range(1, len(embeds)):
output.append(self.layer.apply(is_train, embeds[i], char_ix[i][1]))
else:
output = []
for (i, emb) in enumerate(embeds):
with tf.variable_scope(('embedding%d_%s' % (i, emb.name))):
output.append(self.layer.apply(is_train, emb, char_ix[i][1]))
return output
def __setstate__(self, state):
if ('state' in state):
state['state']['version'] = state['version']
state = state['state']
if ('share' in state):
state['shared_parameters'] = state['share']
del state['share']
super().__setstate__(state) |
def make_struct_proxy(cls, inner, overrides, handlers, prop_keys, prop_vals):
assert (((not prop_keys) and (not prop_vals)) or (len(prop_keys) == len(prop_vals)))
(map, _handlers) = impersonator_args(inner, overrides, handlers, prop_keys, prop_vals)
return cls.make(_handlers, inner, map) |
def getUrlOpener(use_proxy):
if use_proxy:
        proxy = urllib.request.ProxyHandler({'http': use_proxy, 'https': use_proxy})
opener = urllib.request.build_opener(proxy)
else:
opener = urllib.request.build_opener()
opener.addheaders = [('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'), ('x-matterport-application-name', 'showcase')]
return opener |
class SimpleDensity(Density):
def __init__(self, params_init={}, noise_std=1.0):
super().__init__(params_init=params_init)
self.noise_std = noise_std
def density_func(self, sdf, beta=None):
if (self.training and (self.noise_std > 0.0)):
noise = (torch.randn(sdf.shape).cuda() * self.noise_std)
sdf = (sdf + noise)
return torch.relu(sdf) |
def find_spec(modpath: list[str], path: (Sequence[str] | None)=None) -> ModuleSpec:
_path = (path or sys.path)
modpath = modpath[:]
submodule_path = None
module_parts = modpath[:]
processed: list[str] = []
while modpath:
modname = modpath.pop(0)
(finder, spec) = _find_spec_with_path(_path, modname, module_parts, processed, (submodule_path or path))
processed.append(modname)
if modpath:
if isinstance(finder, Finder):
submodule_path = finder.contribute_to_path(spec, processed)
elif (finder.__name__ in _EditableFinderClasses):
submodule_path = spec.submodule_search_locations
if (spec.type == ModuleType.PKG_DIRECTORY):
spec = spec._replace(submodule_search_locations=submodule_path)
return spec |
@ddt
class TestQAOA(QiskitOptimizationTestCase):
    @idata([[W1, P1, M1, S1, False], [W2, P2, M2, S2, False], [W1, P1, M1, S1, True], [W2, P2, M2, S2, True]])
    @unpack
def test_qaoa(self, w, prob, m, solutions, convert_to_matrix_op):
seed = 0
aqua_globals.random_seed = seed
self.log.debug('Testing %s-step QAOA with MaxCut on graph\n%s', prob, w)
backend = BasicAer.get_backend('statevector_simulator')
optimizer = COBYLA()
(qubit_op, offset) = max_cut.get_operator(w)
qubit_op = qubit_op.to_opflow()
if convert_to_matrix_op:
qubit_op = qubit_op.to_matrix_op()
qaoa = QAOA(qubit_op, optimizer, prob, mixer=m)
quantum_instance = QuantumInstance(backend, seed_simulator=seed, seed_transpiler=seed)
result = qaoa.run(quantum_instance)
x = sample_most_likely(result.eigenstate)
graph_solution = max_cut.get_graph_solution(x)
self.log.debug('energy: %s', result.eigenvalue.real)
self.log.debug('time: %s', result.optimizer_time)
self.log.debug('maxcut objective: %s', (result.eigenvalue.real + offset))
self.log.debug('solution: %s', graph_solution)
self.log.debug('solution objective: %s', max_cut.max_cut_value(x, w))
self.assertIn(''.join([str(int(i)) for i in graph_solution]), solutions)
    @idata([[W1, P1, S1, False], [W2, P2, S2, False], [W1, P1, S1, True], [W2, P2, S2, True]])
    @unpack
def test_qaoa_qc_mixer(self, w, prob, solutions, convert_to_matrix_op):
seed = 0
aqua_globals.random_seed = seed
self.log.debug('Testing %s-step QAOA with MaxCut on graph with a mixer as a parameterized circuit\n%s', prob, w)
backend = BasicAer.get_backend('statevector_simulator')
optimizer = COBYLA()
(qubit_op, _) = max_cut.get_operator(w)
qubit_op = qubit_op.to_opflow()
if convert_to_matrix_op:
qubit_op = qubit_op.to_matrix_op()
num_qubits = qubit_op.num_qubits
mixer = QuantumCircuit(num_qubits)
        theta = Parameter('θ')
mixer.rx(theta, range(num_qubits))
qaoa = QAOA(qubit_op, optimizer, prob, mixer=mixer)
quantum_instance = QuantumInstance(backend, seed_simulator=seed, seed_transpiler=seed)
result = qaoa.run(quantum_instance)
x = sample_most_likely(result.eigenstate)
graph_solution = max_cut.get_graph_solution(x)
self.assertIn(''.join([str(int(i)) for i in graph_solution]), solutions)
def test_qaoa_qc_mixer_many_parameters(self):
seed = 0
aqua_globals.random_seed = seed
optimizer = COBYLA()
(qubit_op, _) = max_cut.get_operator(W1)
qubit_op = qubit_op.to_opflow()
num_qubits = qubit_op.num_qubits
mixer = QuantumCircuit(num_qubits)
for i in range(num_qubits):
            theta = Parameter(('θ' + str(i)))
mixer.rx(theta, range(num_qubits))
qaoa = QAOA(qubit_op, optimizer=optimizer, p=2, mixer=mixer)
backend = BasicAer.get_backend('statevector_simulator')
quantum_instance = QuantumInstance(backend, seed_simulator=seed, seed_transpiler=seed)
result = qaoa.run(quantum_instance)
x = sample_most_likely(result.eigenstate)
print(x)
graph_solution = max_cut.get_graph_solution(x)
self.assertIn(''.join([str(int(i)) for i in graph_solution]), S1)
def test_qaoa_qc_mixer_no_parameters(self):
seed = 0
aqua_globals.random_seed = seed
(qubit_op, _) = max_cut.get_operator(W1)
qubit_op = qubit_op.to_opflow()
num_qubits = qubit_op.num_qubits
mixer = QuantumCircuit(num_qubits)
mixer.rx((np.pi / 2), range(num_qubits))
qaoa = QAOA(qubit_op, optimizer=COBYLA(), p=1, mixer=mixer)
backend = BasicAer.get_backend('statevector_simulator')
quantum_instance = QuantumInstance(backend, seed_simulator=seed, seed_transpiler=seed)
result = qaoa.run(quantum_instance)
self.assertIsNotNone(result.eigenstate)
def test_change_operator_size(self):
aqua_globals.random_seed = 0
(qubit_op, _) = max_cut.get_operator(np.array([[0, 1, 0, 1], [1, 0, 1, 0], [0, 1, 0, 1], [1, 0, 1, 0]]))
qaoa = QAOA(qubit_op.to_opflow(), COBYLA(), 1)
quantum_instance = QuantumInstance(BasicAer.get_backend('statevector_simulator'), seed_simulator=aqua_globals.random_seed, seed_transpiler=aqua_globals.random_seed)
result = qaoa.run(quantum_instance)
x = sample_most_likely(result.eigenstate)
graph_solution = max_cut.get_graph_solution(x)
with self.subTest(msg='QAOA 4x4'):
self.assertIn(''.join([str(int(i)) for i in graph_solution]), {'0101', '1010'})
try:
(qubit_op, _) = max_cut.get_operator(np.array([[0, 1, 0, 1, 0, 1], [1, 0, 1, 0, 1, 0], [0, 1, 0, 1, 0, 1], [1, 0, 1, 0, 1, 0], [0, 1, 0, 1, 0, 1], [1, 0, 1, 0, 1, 0]]))
qaoa.operator = qubit_op.to_opflow()
except Exception as ex:
self.fail("Failed to change operator. Error: '{}'".format(str(ex)))
return
result = qaoa.run()
x = sample_most_likely(result.eigenstate)
graph_solution = max_cut.get_graph_solution(x)
with self.subTest(msg='QAOA 6x6'):
self.assertIn(''.join([str(int(i)) for i in graph_solution]), {'010101', '101010'})
    @idata([[W2, S2, None], [W2, S2, [0.0, 0.0]], [W2, S2, [1.0, 0.8]]])
    @unpack
def test_qaoa_initial_point(self, w, solutions, init_pt):
aqua_globals.random_seed = 10598
optimizer = COBYLA()
(qubit_op, _) = max_cut.get_operator(w)
first_pt = []
def cb_callback(eval_count, parameters, mean, std):
nonlocal first_pt
if (eval_count == 1):
first_pt = list(parameters)
quantum_instance = QuantumInstance(BasicAer.get_backend('statevector_simulator'), seed_simulator=aqua_globals.random_seed, seed_transpiler=aqua_globals.random_seed)
qaoa = QAOA(qubit_op, optimizer, initial_point=init_pt, callback=cb_callback, quantum_instance=quantum_instance)
result = qaoa.compute_minimum_eigenvalue()
x = sample_most_likely(result.eigenstate)
graph_solution = max_cut.get_graph_solution(x)
with self.subTest('Initial Point'):
if (init_pt is None):
np.testing.assert_almost_equal([(- 0.2398), 0.3378], first_pt, decimal=4)
else:
self.assertListEqual(init_pt, first_pt)
with self.subTest('Solution'):
self.assertIn(''.join([str(int(i)) for i in graph_solution]), solutions)
    @idata([[W2, None], [W2, ([1.0] + (15 * [0.0]))], [W2, CUSTOM_SUPERPOSITION]])
    @unpack
def test_qaoa_initial_state(self, w, init_state):
optimizer = COBYLA()
(qubit_op, _) = max_cut.get_operator(w)
init_pt = [0.0, 0.0]
if (init_state is None):
initial_state = None
else:
initial_state = Custom(num_qubits=4, state_vector=init_state)
quantum_instance = QuantumInstance(BasicAer.get_backend('statevector_simulator'))
qaoa_zero_init_state = QAOA(qubit_op, optimizer, initial_point=init_pt, initial_state=Zero(qubit_op.num_qubits), quantum_instance=quantum_instance)
qaoa = QAOA(qubit_op, optimizer, initial_point=init_pt, initial_state=initial_state, quantum_instance=quantum_instance)
zero_circuits = qaoa_zero_init_state.construct_circuit(init_pt)
custom_circuits = qaoa.construct_circuit(init_pt)
self.assertEqual(len(zero_circuits), len(custom_circuits))
backend = BasicAer.get_backend('statevector_simulator')
for (zero_circ, custom_circ) in zip(zero_circuits, custom_circuits):
z_length = len(zero_circ.data)
c_length = len(custom_circ.data)
self.assertGreaterEqual(c_length, z_length)
self.assertTrue((zero_circ.data == custom_circ.data[(- z_length):]))
custom_init_qc = custom_circ.copy()
custom_init_qc.data = custom_init_qc.data[0:(c_length - z_length)]
if (initial_state is None):
original_init_qc = QuantumCircuit(qubit_op.num_qubits)
original_init_qc.h(range(qubit_op.num_qubits))
else:
original_init_qc = initial_state.construct_circuit()
job_init_state = execute(original_init_qc, backend)
job_qaoa_init_state = execute(custom_init_qc, backend)
statevector_original = job_init_state.result().get_statevector(original_init_qc)
statevector_custom = job_qaoa_init_state.result().get_statevector(custom_init_qc)
self.assertEqual(statevector_original.tolist(), statevector_custom.tolist())
def test_qaoa_random_initial_point(self):
aqua_globals.random_seed = 10598
w = nx.adjacency_matrix(nx.fast_gnp_random_graph(5, 0.5, seed=aqua_globals.random_seed)).toarray()
(qubit_op, _) = max_cut.get_operator(w)
qaoa = QAOA(qubit_op, NELDER_MEAD(disp=True), 1)
quantum_instance = QuantumInstance(BasicAer.get_backend('qasm_simulator'), seed_simulator=aqua_globals.random_seed, seed_transpiler=aqua_globals.random_seed, shots=4096)
_ = qaoa.run(quantum_instance)
np.testing.assert_almost_equal([(- 0.8792), 0.3948], qaoa.optimal_params, decimal=4) |
class TransactionRequest(object):
def __init__(self, client):
self.client = client
self.operations = []
self.committed = False
def create(self, path, value=b'', acl=None, ephemeral=False, sequence=False):
if ((acl is None) and self.client.default_acl):
acl = self.client.default_acl
if (not isinstance(path, str)):
raise TypeError("Invalid type for 'path' (string expected)")
if (acl and (not isinstance(acl, (tuple, list)))):
raise TypeError("Invalid type for 'acl' (acl must be a tuple/list of ACL's")
if (not isinstance(value, bytes)):
raise TypeError("Invalid type for 'value' (must be a byte string)")
if (not isinstance(ephemeral, bool)):
raise TypeError("Invalid type for 'ephemeral' (bool expected)")
if (not isinstance(sequence, bool)):
raise TypeError("Invalid type for 'sequence' (bool expected)")
flags = 0
if ephemeral:
flags |= 1
if sequence:
flags |= 2
if (acl is None):
acl = OPEN_ACL_UNSAFE
self._add(Create(_prefix_root(self.client.chroot, path), value, acl, flags), None)
def delete(self, path, version=(- 1)):
if (not isinstance(path, str)):
raise TypeError("Invalid type for 'path' (string expected)")
if (not isinstance(version, int)):
raise TypeError("Invalid type for 'version' (int expected)")
self._add(Delete(_prefix_root(self.client.chroot, path), version))
def set_data(self, path, value, version=(- 1)):
if (not isinstance(path, str)):
raise TypeError("Invalid type for 'path' (string expected)")
if (not isinstance(value, bytes)):
raise TypeError("Invalid type for 'value' (must be a byte string)")
if (not isinstance(version, int)):
raise TypeError("Invalid type for 'version' (int expected)")
self._add(SetData(_prefix_root(self.client.chroot, path), value, version))
def check(self, path, version):
if (not isinstance(path, str)):
raise TypeError("Invalid type for 'path' (string expected)")
if (not isinstance(version, int)):
raise TypeError("Invalid type for 'version' (int expected)")
self._add(CheckVersion(_prefix_root(self.client.chroot, path), version))
def commit_async(self):
self._check_tx_state()
self.committed = True
async_object = self.client.handler.async_result()
self.client._call(Transaction(self.operations), async_object)
return async_object
def commit(self):
return self.commit_async().get()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
if (not exc_type):
self.commit()
def _check_tx_state(self):
if self.committed:
raise ValueError('Transaction already committed')
def _add(self, request, post_processor=None):
self._check_tx_state()
self.client.logger.log(BLATHER, 'Added %r to %r', request, self)
self.operations.append(request) |
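
A hedged usage sketch for the transaction class above, assuming a connected kazoo-style client whose transaction() method returns a TransactionRequest (the client and paths are illustrative). Operations are only queued locally until commit; the context-manager form commits automatically on a clean exit:

# `client` is assumed to be a connected kazoo-style client.
with client.transaction() as tx:
    tx.create('/app/node', b'payload')           # queued, not yet sent
    tx.set_data('/app/other', b'v2', version=3)  # conditional on the znode version
    tx.check('/app/guard', version=1)            # mismatch aborts the whole transaction
# __exit__ calls commit() because the block exited without an exception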
def _dense_predictor_heads(args: SharedArgs, label_maps: Dict[(Task, LabelMap)]) -> List[PredictorHeadInterface]:
optional_heads = [_optional_predictor_head(args, head_name, label_maps) for head_name in DENSE_HEAD_NAMES]
return [optional_head for optional_head in optional_heads if (optional_head is not None)] |
def main():
args = parse_args()
root = Path(args.save_path)
load_root = (Path(args.load_path) if args.load_path else None)
root.mkdir(parents=True, exist_ok=True)
with open((root / 'args.yml'), 'w') as f:
yaml.dump(args, f)
writer = SummaryWriter(str(root))
netG = Generator(args.n_mel_channels, args.ngf, args.n_residual_layers).cuda()
netD = Discriminator(args.num_D, args.ndf, args.n_layers_D, args.downsamp_factor).cuda()
print(netG)
print(netD)
optG = torch.optim.Adam(netG.parameters(), lr=0.0001, betas=(0.5, 0.9))
optD = torch.optim.Adam(netD.parameters(), lr=0.0001, betas=(0.5, 0.9))
if (load_root and load_root.exists()):
netG.load_state_dict(torch.load((load_root / 'netG.pt')))
optG.load_state_dict(torch.load((load_root / 'optG.pt')))
netD.load_state_dict(torch.load((load_root / 'netD.pt')))
optD.load_state_dict(torch.load((load_root / 'optD.pt')))
train_set = AudioDataset(Path(args.data_path), (Path(args.splits_path) / 'vggsound_train.txt'), args.seq_len, sampling_rate=22050)
test_set = AudioDataset(Path(args.data_path), (Path(args.splits_path) / 'vggsound_test.txt'), (22050 * 10), sampling_rate=22050, augment=False)
train_loader = DataLoader(train_set, batch_size=args.batch_size, num_workers=4)
test_loader = DataLoader(test_set, batch_size=1)
test_voc = []
test_audio = []
for (i, x_t) in enumerate(test_loader):
x_t = x_t.cuda()
s_t = wav2mel(x_t)
test_voc.append(s_t.cuda())
test_audio.append(x_t)
audio = x_t.squeeze().cpu()
save_sample((root / f'original_{i}.wav'), 22050, audio)
writer.add_audio(f'original/sample_{i}.wav', audio, 0, sample_rate=22050)
if (i == (args.n_test_samples - 1)):
break
costs = []
start = time.time()
torch.backends.cudnn.benchmark = True
best_mel_reconst = 1000000
steps = 0
for epoch in range(1, (args.epochs + 1)):
for (iterno, x_t) in enumerate(train_loader):
x_t = x_t.cuda()
trim_len = (x_t.shape[(- 1)] // 256)
s_t = wav2mel(x_t.squeeze(1), trim_len)
x_pred_t = netG(s_t.cuda())
with torch.no_grad():
s_pred_t = wav2mel(x_pred_t.squeeze(1).detach().cpu(), trim_len)
s_error = F.l1_loss(s_t, s_pred_t).item()
D_fake_det = netD(x_pred_t.detach())
D_real = netD(x_t)
loss_D = 0
for scale in D_fake_det:
loss_D += F.relu((1 + scale[(- 1)])).mean()
for scale in D_real:
loss_D += F.relu((1 - scale[(- 1)])).mean()
netD.zero_grad()
loss_D.backward()
optD.step()
D_fake = netD(x_pred_t)
loss_G = 0
for scale in D_fake:
loss_G += (- scale[(- 1)].mean())
loss_feat = 0
feat_weights = (4.0 / (args.n_layers_D + 1))
D_weights = (1.0 / args.num_D)
wt = (D_weights * feat_weights)
for i in range(args.num_D):
for j in range((len(D_fake[i]) - 1)):
loss_feat += (wt * F.l1_loss(D_fake[i][j], D_real[i][j].detach()))
netG.zero_grad()
(loss_G + (args.lambda_feat * loss_feat)).backward()
optG.step()
costs.append([loss_D.item(), loss_G.item(), loss_feat.item(), s_error])
writer.add_scalar('loss/discriminator', costs[(- 1)][0], steps)
writer.add_scalar('loss/generator', costs[(- 1)][1], steps)
writer.add_scalar('loss/feature_matching', costs[(- 1)][2], steps)
writer.add_scalar('loss/mel_reconstruction', costs[(- 1)][3], steps)
steps += 1
if ((steps % args.save_interval) == 0):
st = time.time()
with torch.no_grad():
for (i, (voc, _)) in enumerate(zip(test_voc, test_audio)):
pred_audio = netG(voc)
pred_audio = pred_audio.squeeze().cpu()
save_sample((root / ('generated_%d.wav' % i)), 22050, pred_audio)
writer.add_audio(('generated/sample_%d.wav' % i), pred_audio, epoch, sample_rate=22050)
torch.save(netG.state_dict(), (root / 'netG.pt'))
torch.save(optG.state_dict(), (root / 'optG.pt'))
torch.save(netD.state_dict(), (root / 'netD.pt'))
torch.save(optD.state_dict(), (root / 'optD.pt'))
if (np.asarray(costs).mean(0)[(- 1)] < best_mel_reconst):
best_mel_reconst = np.asarray(costs).mean(0)[(- 1)]
torch.save(netD.state_dict(), (root / 'best_netD.pt'))
torch.save(netG.state_dict(), (root / 'best_netG.pt'))
print(('Took %5.4fs to generate samples' % (time.time() - st)))
print(('-' * 100))
if ((steps % args.log_interval) == 0):
print('Epoch {} | Iters {} / {} | ms/batch {:5.2f} | loss {}'.format(epoch, iterno, len(train_loader), ((1000 * (time.time() - start)) / args.log_interval), np.asarray(costs).mean(0)))
costs = []
start = time.time() |
def get_delta_stats_from_partition_stats(partition_stats_str: str):
partition_stats = PartitionStats.build_from_dict(partition_stats_str)
found_columns_stats_map: Dict[(int, List[DeltaStatsCacheResult])] = {}
for (stream_position, delta_stats) in partition_stats.delta_stats.items():
found_columns_stats: List[DeltaColumnStats] = []
missed_columns: List[str] = []
for cs in delta_stats.column_stats:
if cs.manifest_stats:
found_columns_stats.append(cs)
else:
missed_columns.append(cs.column)
delta_locator = delta_stats.column_stats[0].manifest_stats.delta_locator
found_stats: Optional[DeltaStats] = (DeltaStats.of(found_columns_stats) if found_columns_stats else None)
missed_stats: Optional[DeltaStatsCacheMiss] = (DeltaStatsCacheMiss(missed_columns, delta_locator) if missed_columns else None)
delta_stats_cache_res = DeltaStatsCacheResult.of(found_stats, missed_stats)
found_columns_stats_map[int(stream_position)] = delta_stats_cache_res
return found_columns_stats_map |
def test_cosh_rv_transform():
base_rv = pt.random.normal(0.5, 1, size=(2,), name='base_rv')
rv = pt.cosh(base_rv)
vv = rv.clone()
rv_logp = logp(rv, vv)
with pytest.raises(NotImplementedError):
logcdf(rv, vv)
with pytest.raises(NotImplementedError):
icdf(rv, vv)
transform = CoshTransform()
[back_neg, back_pos] = transform.backward(vv)
expected_logp = (pt.logaddexp(logp(base_rv, back_neg), logp(base_rv, back_pos)) + transform.log_jac_det(vv))
vv_test = np.array([0.25, 1.5])
np.testing.assert_allclose(rv_logp.eval({vv: vv_test}), np.nan_to_num(expected_logp.eval({vv: vv_test}), nan=(- np.inf))) |
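
For reference, the expected_logp expression in this test is the standard two-branch change of variables for y = cosh(x), sketched here with symbols chosen for exposition:

\[
y = \cosh(x) \iff x = \pm\operatorname{arccosh}(y), \qquad
p_Y(y) = \frac{p_X(\operatorname{arccosh} y) + p_X(-\operatorname{arccosh} y)}{\sqrt{y^{2} - 1}}, \quad y \ge 1,
\]

so the log-density is the logaddexp of the two branch log-probabilities plus the log-Jacobian \(-\tfrac{1}{2}\log(y^{2}-1)\). The test point 0.25 lies outside the support (y < 1), which is why the resulting NaN is mapped to -inf before the comparison.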
class TestNegativeBinomialMuSigma(BaseTestDistributionRandom):
pymc_dist = pm.NegativeBinomial
pymc_dist_params = {'mu': 5.0, 'alpha': 8.0}
(expected_n, expected_p) = pm.NegativeBinomial.get_n_p(mu=pymc_dist_params['mu'], alpha=pymc_dist_params['alpha'], n=None, p=None)
expected_rv_op_params = {'n': expected_n, 'p': expected_p}
checks_to_run = ['check_pymc_params_match_rv_op'] |
def get_column_grnd_probs(model, pointer_logprobs):
assert (len(model.ids_to_grounding_choices) == len(pointer_logprobs)), (len(model.ids_to_grounding_choices), len(pointer_logprobs))
keep_pointer_logprobs = []
for (idx, grnd_choice) in model.ids_to_grounding_choices.items():
if (grnd_choice.choice_type == 'column'):
assert (pointer_logprobs[idx][0] == idx)
(tbl_name, col_name) = grnd_choice.choice
val_type = model.column_data[tbl_name][col_name]
if ((tbl_name, col_name) in model.value_columns):
keep_pointer_logprobs.append(pointer_logprobs[idx])
assert keep_pointer_logprobs, (model.value_columns, model.ids_to_grounding_choices)
return keep_pointer_logprobs |
class TestBinaryConfusionMatrix(MetricClassTester):
def _test_binary_confusion_matrix_with_input(self, input: torch.Tensor, target: torch.Tensor, normalize: Optional[str]=None) -> None:
input_np = input.flatten().numpy()
target_np = target.flatten().numpy()
compute_result = torch.tensor(skcm(target_np, input_np, labels=[0, 1], normalize=normalize), dtype=torch.float32)
self.run_class_implementation_tests(metric=BinaryConfusionMatrix(normalize=normalize), state_names={'confusion_matrix'}, update_kwargs={'input': input, 'target': target}, compute_result=compute_result)
def test_binary_confusion_matrix_base(self) -> None:
num_classes = 2
input = torch.randint(high=num_classes, size=(NUM_TOTAL_UPDATES, BATCH_SIZE))
target = torch.randint(high=num_classes, size=(NUM_TOTAL_UPDATES, BATCH_SIZE))
self._test_binary_confusion_matrix_with_input(input, target)
def test_binary_confusion_matrix_normalization(self) -> None:
num_classes = 2
input = torch.randint(high=num_classes, size=(NUM_TOTAL_UPDATES, BATCH_SIZE))
target = torch.randint(high=num_classes, size=(NUM_TOTAL_UPDATES, BATCH_SIZE))
self._test_binary_confusion_matrix_with_input(input, target, normalize='all')
self._test_binary_confusion_matrix_with_input(input, target, normalize='true')
self._test_binary_confusion_matrix_with_input(input, target, normalize='pred')
input = torch.randint(high=num_classes, size=(BATCH_SIZE,))
target = torch.randint(high=num_classes, size=(BATCH_SIZE,))
metric = BinaryConfusionMatrix()
metric.update(input, target)
metric.compute()
compute_result_all = torch.tensor(skcm(target, input, labels=[0, 1], normalize='all'), dtype=torch.float32)
torch.testing.assert_close(metric.normalized('all').to(torch.float32), compute_result_all, equal_nan=True, atol=1e-08, rtol=1e-05)
compute_result_pred = torch.tensor(skcm(target, input, labels=[0, 1], normalize='pred'), dtype=torch.float32)
torch.testing.assert_close(metric.normalized('pred').to(torch.float32), compute_result_pred, equal_nan=True, atol=1e-08, rtol=1e-05)
compute_result_true = torch.tensor(skcm(target, input, labels=[0, 1], normalize='true'), dtype=torch.float32)
torch.testing.assert_close(metric.normalized('true').to(torch.float32), compute_result_true, equal_nan=True, atol=1e-08, rtol=1e-05)
def test_binary_confusion_matrix_score_thresholding(self) -> None:
num_classes = 2
threshold = 0.7
input = [torch.tensor([0.7, 0.6, 0.5, 0.3, 0.9, 0.1, 1.0, 0.95, 0.2]), torch.tensor([0.7, 0.8, 0.3, 0.3, 0.3, 1.0, 0.1, 0.65, 0.2])]
input_thresholded = [torch.tensor([1, 0, 0, 0, 1, 0, 1, 1, 0]), torch.tensor([1, 1, 0, 0, 0, 1, 0, 0, 0])]
target = [torch.randint(high=num_classes, size=(9,)), torch.randint(high=num_classes, size=(9,))]
compute_result = torch.tensor(skcm(torch.cat(target, dim=0), torch.cat(input_thresholded, dim=0), labels=[0, 1])).to(torch.float32).squeeze()
self.run_class_implementation_tests(metric=BinaryConfusionMatrix(threshold=threshold), state_names={'confusion_matrix'}, update_kwargs={'input': input, 'target': target}, compute_result=compute_result, num_total_updates=2, num_processes=2)
def test_binary_confusion_matrix_invalid_input(self) -> None:
metric = BinaryConfusionMatrix()
with self.assertRaisesRegex(ValueError, 'input should be a one-dimensional tensor for binary confusion matrix, got shape torch.Size\\(\\[5, 10\\]\\).'):
input = torch.randint(high=2, size=(5, 10))
target = torch.randint(high=2, size=(5, 10))
metric.update(input, target)
with self.assertRaisesRegex(ValueError, 'target should be a one-dimensional tensor for binary confusion matrix, got shape torch.Size\\(\\[5, 10\\]\\).'):
input = torch.randint(high=2, size=(10,))
target = torch.randint(high=2, size=(5, 10))
metric.update(input, target)
with self.assertRaisesRegex(ValueError, 'The `input` and `target` should have the same dimensions, got shapes torch.Size\\(\\[11\\]\\) and torch.Size\\(\\[10\\]\\).'):
input = torch.randint(high=2, size=(11,))
target = torch.randint(high=2, size=(10,))
metric.update(input, target)
with self.assertRaisesRegex(ValueError, "normalize must be one of 'all', 'pred', 'true', or 'none'."):
metric = BinaryConfusionMatrix(normalize='this is not a valid option') |
def get_reruns_count(item):
rerun_marker = _get_marker(item)
if (rerun_marker is not None):
if ('reruns' in rerun_marker.kwargs):
return rerun_marker.kwargs['reruns']
elif (len(rerun_marker.args) > 0):
return rerun_marker.args[0]
else:
return 1
reruns = item.session.config.getvalue('reruns')
if (reruns is not None):
return reruns
with suppress(TypeError, ValueError):
reruns = int(item.session.config.getini('reruns'))
return reruns |
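
An illustrative pytest usage of the precedence implemented above: marker kwargs, then the first positional marker argument, then a default of 1 for a bare marker; unmarked tests fall back to the --reruns command-line value and finally the reruns ini option. The flaky marker name follows pytest-rerunfailures conventions and is an assumption here:

import pytest

@pytest.mark.flaky(reruns=5)  # -> 5: the `reruns` kwarg wins
def test_kwarg():
    assert True

@pytest.mark.flaky(3)         # -> 3: first positional marker argument
def test_positional():
    assert True

@pytest.mark.flaky            # -> 1: a bare marker uses the default
def test_bare():
    assert True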
def test_lsp_that_completely_hides_base_socket_gives_good_error(monkeypatch: pytest.MonkeyPatch) -> None:
from .. import _io_windows
from .._windows_cffi import CData, WSAIoctls, _handle
def patched_get_underlying(sock: (int | CData), *, which: int=WSAIoctls.SIO_BASE_HANDLE) -> CData:
if hasattr(sock, 'fileno'):
sock = sock.fileno()
if (which == WSAIoctls.SIO_BASE_HANDLE):
raise OSError('nope')
else:
return _handle(sock)
monkeypatch.setattr(_io_windows, '_get_underlying_socket', patched_get_underlying)
with pytest.raises(RuntimeError, match="SIO_BASE_HANDLE failed and SIO_BSP_HANDLE_POLL didn't return a diff"):
_core.run(sleep, 0) |
def ip_of_device(device: str) -> str:
output = subprocess.check_output(f'ip -4 a show dev {device}'.split(), universal_newlines=True)
ip = None
for line in output.split('\n'):
line = line.strip()
if (not line.startswith('inet')):
continue
ip = line.split()[1].split('/')[0]
break
if (not ip):
raise ValueError(f'ip not found for {device}')
return ip |
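
A small usage sketch (Linux-only, since the function shells out to the iproute2 ip binary; the device name is an assumption about the local machine):

import subprocess

try:
    print(ip_of_device('eth0'))  # e.g. '192.168.1.23'
except (subprocess.CalledProcessError, ValueError) as exc:
    print(f'no IPv4 address found: {exc}')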
@patch('pyproj.sync.urlretrieve', autospec=True)
def test_download_resource_file__bad_sha256sum(urlretrieve_mock, tmp_path):
def dummy_urlretrieve(url, local_path):
local_path.touch()
urlretrieve_mock.side_effect = dummy_urlretrieve
with pytest.raises(RuntimeError, match='SHA256 mismatch: test_file.txt'):
_download_resource_file(file_url='test_url', short_name='test_file.txt', directory=tmp_path, verbose=False, sha256='test')
urlretrieve_mock.assert_called_with('test_url', (tmp_path / 'test_file.txt.part'))
assert (not tmp_path.joinpath('test_file.txt.part').exists())
assert (not tmp_path.joinpath('test_file.txt').exists()) |
class NTP_Miner():
output_filepath = ''
output_filename = 'NTP-Miner-Output.txt'
(mingap, maxgap) = (0, 3)
minsup = 500
s = 'hilkmftwv'
m = 'rcqgpsyn'
w = 'adeuox'
sDB = []
sub_ptn = []
def __init__(self, file_path='', output_filepath='', strong='hilkmftwv', middle='rcqgpsyn', week='adeuox', min_gap=0, max_gap=3, min_sup=500):
self.sDB = read_file(file_path)
self.output_filepath = output_filepath
(self.mingap, self.maxgap) = (min_gap, max_gap)
self.minsup = min_sup
self.s = strong
self.m = middle
self.w = week
class sub_ptn_struct():
start = ''
end = ''
(min, max) = (0, 0)
def __init__(self, start, end, min, max):
self.start = start
self.end = end
self.min = min
self.max = max
def min_freItem(self):
counter = {}
fre = []
for strs in self.sDB:
for c in strs:
if (self.belong(c, self.s) or self.belong(c, self.m)):
if counter.get(c):
counter[c] = (counter[c] + 1)
else:
counter[c] = 1
for key in counter.keys():
if (counter[key] >= self.minsup):
fre.append(key)
return sorted(fre)
def deal_range(self, pattern):
sub_ptn = []
if (len(pattern) == 1):
sub_ptn.append(self.sub_ptn_struct(pattern[0], '', 0, 0))
for i in range(0, (len(pattern) - 1)):
sub_ptn.append(self.sub_ptn_struct(pattern[i], pattern[(i + 1)], self.mingap, self.maxgap))
return sub_ptn
def create_nettree(self, nettree, seq):
occurnum = 0
for i in range(0, (len(self.sub_ptn) + 1)):
nettree.append([])
for i in range(0, (len(seq) - len(self.sub_ptn))):
if (seq[i] != self.sub_ptn[0].start):
continue
nettree[0].append(i)
occurnum = (occurnum + self.create_subnettree(nettree, seq, i, 2))
return occurnum
def create_subnettree(self, nettree, seq, parent, L):
if (L > (len(self.sub_ptn) + 1)):
return 1
for i in range((parent + 1), ((parent + self.sub_ptn[(L - 2)].min) + 1)):
if self.belong(seq[i], self.s):
return 0
for i in range(((parent + self.sub_ptn[(L - 2)].min) + 1), ((parent + self.sub_ptn[(L - 2)].max) + 2)):
if (i >= len(seq)):
break
if (seq[i] == self.sub_ptn[(L - 2)].end):
k = len(nettree[(L - 1)])
flag = (- 1)
for j in range(k):
if (i == nettree[(L - 1)][j]):
flag = j
break
if (flag == (- 1)):
nettree[(L - 1)].append(i)
if self.create_subnettree(nettree, seq, i, (L + 1)):
return 1
if ((not self.belong(seq[i], self.m)) and (not self.belong(seq[i], self.w))):
break
return 0
def belong(self, ch, str):
for c in str:
if (ch == c):
return True
return False
def output(self, freArr):
        with open(((self.output_filepath + '/') + self.output_filename), 'w') as output_file:
            for fre in freArr:
                strArr = ''
                for strs in fre:
                    strArr += (strs + ' ')
                output_file.write((strArr + '\n'))
def solve(self, text):
        text.insert(END, 'Running NTP_Miner, please wait...\n')
        text.insert(END, 'Parameters:\n')
        text.insert(END, (((((('strong: ' + self.s) + ' middle: ') + self.m) + ' weak: ') + self.w) + '\n'))
text.insert(END, (((((('mingap:' + str(self.mingap)) + ' maxgap:') + str(self.maxgap)) + ' minsup:') + str(self.minsup)) + '\n'))
compnum = 0
f_level = 1
freArr = []
begin_time = time.time()
fre = self.min_freItem()
freArr.append(fre)
candidate = gen_candidate(fre, f_level)
while (len(candidate) != 0):
next_fre = []
freAns = []
for p in candidate:
occnum = 0
compnum = (compnum + 1)
for strs in self.sDB:
if (len(strs) > 0):
self.sub_ptn = self.deal_range(p)
num = 0
if ((len(self.sub_ptn) + 1) > len(strs)):
num = 0
else:
nettree = []
num = self.create_nettree(nettree, strs)
occnum += num
if (occnum >= self.minsup):
next_fre.append(p)
break
f_level += 1
freArr.append(next_fre)
candidate = gen_candidate(next_fre, f_level)
end_time = time.time()
time_consuming = (end_time - begin_time)
freNum = 0
        text.insert(END, 'NTP_Miner mining results:\n')
for fre in freArr:
strArr = ''
for strs in fre:
strArr += (strs + ' ')
freNum += 1
text.insert(END, (strArr + '\n'))
text.insert(END, (('The number of frequent patterns:' + str(freNum)) + '\n'))
text.insert(END, ((('The time-consuming:' + str((time_consuming * 1000))) + 'ms') + '\n'))
text.insert(END, (('The number of calculation:' + str(compnum)) + '\n'))
self.output(freArr)
        text.insert(END, (((('Results saved to: ' + self.output_filepath) + '/') + self.output_filename) + '\n'))
def solve_test(self):
compnum = 0
f_level = 1
freArr = []
begin_time = time.time()
        print('Mining started, please wait...')
fre = self.min_freItem()
freArr.append(fre)
candidate = gen_candidate(fre, f_level)
while (len(candidate) != 0):
next_fre = []
for p in candidate:
occnum = 0
compnum = (compnum + 1)
for strs in self.sDB:
if (len(strs) > 0):
self.sub_ptn = self.deal_range(p)
num = 0
if ((len(self.sub_ptn) + 1) > len(strs)):
num = 0
else:
nettree = []
num = self.create_nettree(nettree, strs)
occnum += num
if (occnum >= self.minsup):
next_fre.append(p)
break
f_level += 1
freArr.append(next_fre)
candidate = gen_candidate(next_fre, f_level)
end_time = time.time()
time_consuming = (end_time - begin_time)
freNum = 0
        print('Mining results:')
for fre in freArr:
strArr = ''
for strs in fre:
strArr += (strs + ' ')
freNum += 1
print(strArr)
print('The number of frequent patterns:', freNum)
print('The time-consuming:', (time_consuming * 1000), 'ms')
print('The number of calculation:', compnum)
return freArr |
def Xception(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000):
if (weights not in {'imagenet', None}):
raise ValueError('The `weights` argument should be either `None` (random initialization) or `imagenet` (pre-training on ImageNet).')
if ((weights == 'imagenet') and include_top and (classes != 1000)):
raise ValueError('If using `weights` as imagenet with `include_top` as true, `classes` should be 1000')
if (K.backend() != 'tensorflow'):
raise RuntimeError('The Xception model is only available with the TensorFlow backend.')
if (K.image_data_format() != 'channels_last'):
warnings.warn('The Xception model is only available for the input data format "channels_last" (width, height, channels). However your settings specify the default data format "channels_first" (channels, width, height). You should set `image_data_format="channels_last"` in your Keras config located at ~/.keras/keras.json. The model being returned right now will expect inputs to follow the "channels_last" data format.')
K.set_image_data_format('channels_last')
old_data_format = 'channels_first'
else:
old_data_format = None
input_shape = _obtain_input_shape(input_shape, default_size=299, min_size=71, data_format=K.image_data_format(), require_flatten=False, weights=weights)
if (input_tensor is None):
img_input = Input(shape=input_shape)
elif (not K.is_keras_tensor(input_tensor)):
img_input = Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
x = Conv2D(32, (3, 3), strides=(2, 2), use_bias=False, name='block1_conv1')(img_input)
x = BatchNormalization(name='block1_conv1_bn')(x)
x = Activation('relu', name='block1_conv1_act')(x)
x = Conv2D(64, (3, 3), use_bias=False, name='block1_conv2')(x)
x = BatchNormalization(name='block1_conv2_bn')(x)
x = Activation('relu', name='block1_conv2_act')(x)
residual = Conv2D(128, (1, 1), strides=(2, 2), padding='same', use_bias=False)(x)
residual = BatchNormalization()(residual)
x = SeparableConv2D(128, (3, 3), padding='same', use_bias=False, name='block2_sepconv1')(x)
x = BatchNormalization(name='block2_sepconv1_bn')(x)
x = Activation('relu', name='block2_sepconv2_act')(x)
x = SeparableConv2D(128, (3, 3), padding='same', use_bias=False, name='block2_sepconv2')(x)
x = BatchNormalization(name='block2_sepconv2_bn')(x)
x = MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='block2_pool')(x)
x = layers.add([x, residual])
residual = Conv2D(256, (1, 1), strides=(2, 2), padding='same', use_bias=False)(x)
residual = BatchNormalization()(residual)
x = Activation('relu', name='block3_sepconv1_act')(x)
x = SeparableConv2D(256, (3, 3), padding='same', use_bias=False, name='block3_sepconv1')(x)
x = BatchNormalization(name='block3_sepconv1_bn')(x)
x = Activation('relu', name='block3_sepconv2_act')(x)
x = SeparableConv2D(256, (3, 3), padding='same', use_bias=False, name='block3_sepconv2')(x)
x = BatchNormalization(name='block3_sepconv2_bn')(x)
x = MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='block3_pool')(x)
x = layers.add([x, residual])
residual = Conv2D(728, (1, 1), strides=(2, 2), padding='same', use_bias=False)(x)
residual = BatchNormalization()(residual)
x = Activation('relu', name='block4_sepconv1_act')(x)
x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name='block4_sepconv1')(x)
x = BatchNormalization(name='block4_sepconv1_bn')(x)
x = Activation('relu', name='block4_sepconv2_act')(x)
x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name='block4_sepconv2')(x)
x = BatchNormalization(name='block4_sepconv2_bn')(x)
x = MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='block4_pool')(x)
x = layers.add([x, residual])
for i in range(8):
residual = x
prefix = ('block' + str((i + 5)))
x = Activation('relu', name=(prefix + '_sepconv1_act'))(x)
x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name=(prefix + '_sepconv1'))(x)
x = BatchNormalization(name=(prefix + '_sepconv1_bn'))(x)
x = Activation('relu', name=(prefix + '_sepconv2_act'))(x)
x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name=(prefix + '_sepconv2'))(x)
x = BatchNormalization(name=(prefix + '_sepconv2_bn'))(x)
x = Activation('relu', name=(prefix + '_sepconv3_act'))(x)
x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name=(prefix + '_sepconv3'))(x)
x = BatchNormalization(name=(prefix + '_sepconv3_bn'))(x)
x = layers.add([x, residual])
residual = Conv2D(1024, (1, 1), strides=(2, 2), padding='same', use_bias=False)(x)
residual = BatchNormalization()(residual)
x = Activation('relu', name='block13_sepconv1_act')(x)
x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name='block13_sepconv1')(x)
x = BatchNormalization(name='block13_sepconv1_bn')(x)
x = Activation('relu', name='block13_sepconv2_act')(x)
x = SeparableConv2D(1024, (3, 3), padding='same', use_bias=False, name='block13_sepconv2')(x)
x = BatchNormalization(name='block13_sepconv2_bn')(x)
x = MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='block13_pool')(x)
x = layers.add([x, residual])
x = SeparableConv2D(1536, (3, 3), padding='same', use_bias=False, name='block14_sepconv1')(x)
x = BatchNormalization(name='block14_sepconv1_bn')(x)
x = Activation('relu', name='block14_sepconv1_act')(x)
x = SeparableConv2D(2048, (3, 3), padding='same', use_bias=False, name='block14_sepconv2')(x)
x = BatchNormalization(name='block14_sepconv2_bn')(x)
x = Activation('relu', name='block14_sepconv2_act')(x)
if include_top:
x = GlobalAveragePooling2D(name='avg_pool')(x)
x = Dense(classes, activation='softmax', name='predictions')(x)
elif (pooling == 'avg'):
x = GlobalAveragePooling2D()(x)
elif (pooling == 'max'):
x = GlobalMaxPooling2D()(x)
if (input_tensor is not None):
inputs = get_source_inputs(input_tensor)
else:
inputs = img_input
model = Model(inputs, x, name='xception')
if (weights == 'imagenet'):
if include_top:
weights_path = get_file('xception_weights_tf_dim_ordering_tf_kernels.h5', TF_WEIGHTS_PATH, cache_subdir='models')
else:
weights_path = get_file('xception_weights_tf_dim_ordering_tf_kernels_notop.h5', TF_WEIGHTS_PATH_NO_TOP, cache_subdir='models')
model.load_weights(weights_path)
if old_data_format:
K.set_image_data_format(old_data_format)
return model |
class BranchRootItem():
def __init__(self):
self.child_count = 0
self.child_items = []
self.level = 0
def getChildItem(self, row):
return self.child_items[row]
def getParentItem(self):
return None
def getRow(self):
return 0
def childCount(self):
return self.child_count
def hasChildren(self):
return (self.child_count > 0)
def getData(self, column):
return '' |
def foam_file_from_file(filepath, name=None, header=False):
if name:
(p, _name) = os.path.split(filepath)
assert (_name.lower() == name.lower()), 'Illegal file input {} for creating {}'.format(_name, name)
_values = CppDictParser.from_file(filepath).values
if ((not header) and ('FoamFile' in _values)):
del _values['FoamFile']
return _values |
def main():
menuOption = 0
if (not dsz.version.checks.IsWindows()):
dsz.ui.Echo('GROK requires a Windows OS', dsz.ERROR)
return 0
if dsz.version.checks.IsOs64Bit():
dsz.ui.Echo(('GROK %s requires x86' % version), dsz.ERROR)
return 0
if dsz.path.windows.GetSystemPath():
global systemPath
systemPath = dsz.path.windows.GetSystemPath()
else:
dsz.ui.Echo('Could not find system path', dsz.ERROR)
return 0
menu_list = list()
menu_list.append({dsz.menu.Name: 'Install', dsz.menu.Function: grokinstall})
menu_list.append({dsz.menu.Name: 'Uninstall', dsz.menu.Function: grokuninstall})
menu_list.append({dsz.menu.Name: 'Verify Install', dsz.menu.Function: grokverify})
menu_list.append({dsz.menu.Name: 'Collect and Parse', dsz.menu.Function: grokcollect})
menu_list.append({dsz.menu.Name: 'Parse Local', dsz.menu.Function: grokparse})
menu_list.append({dsz.menu.Name: 'Change Upload Name', dsz.menu.Function: changename})
while (menuOption != (- 1)):
(retvalue, menuOption) = dsz.menu.ExecuteSimpleMenu(('\n\n\nGrok %s Menu\n\nUpload Name: %s\n' % (version, fileName)), menu_list)
if (menuOption == 0):
if (retvalue == True):
dsz.lp.RecordToolUse(tool, version, 'DEPLOYED', 'Successful')
if (retvalue == False):
dsz.lp.RecordToolUse(tool, version, 'DEPLOYED', 'Unsuccessful')
dsz.control.echo.Off()
cmd = ('stop %s' % putid)
dsz.cmd.Run(cmd)
dsz.control.echo.On()
elif (menuOption == 1):
if (retvalue == True):
dsz.lp.RecordToolUse(tool, version, 'DELETED', 'Successful')
if (retvalue == False):
dsz.lp.RecordToolUse(tool, version, 'DELETED', 'Unsuccessful')
dsz.control.echo.Off()
cmd = ('stop %s' % putid)
dsz.cmd.Run(cmd)
dsz.control.echo.On()
elif (menuOption == 2):
if (retvalue == True):
dsz.lp.RecordToolUse(tool, version, 'EXERCISED', 'Successful')
if (retvalue == False):
dsz.lp.RecordToolUse(tool, version, 'EXERCISED', 'Unsuccessful')
elif (menuOption == 3):
if (retvalue == True):
dsz.lp.RecordToolUse(tool, version, 'EXERCISED', 'Successful')
if (retvalue == False):
dsz.lp.RecordToolUse(tool, version, 'EXERCISED', 'Unsuccessful')
dsz.control.echo.Off()
cmd = ('stop %s' % putid)
dsz.cmd.Run(cmd)
dsz.control.echo.On()
elif (menuOption == 4):
pass
dsz.ui.Echo('')
dsz.ui.Echo('* GROK script completed. *')
dsz.ui.Echo('')
return 0 |
class Interpolate(nn.Module):
def __init__(self, scale_factor: float=1.0, mode: str='nearest') -> None:
super().__init__()
self.scale_factor = scale_factor
self.mode = mode
def forward(self, input: torch.Tensor) -> torch.Tensor:
return cast(torch.Tensor, interpolate(input, scale_factor=self.scale_factor, mode=self.mode))
def extra_repr(self) -> str:
extras = [f'scale_factor={self.scale_factor}']
if (self.mode != 'nearest'):
extras.append(f'mode={self.mode}')
return ', '.join(extras) |
def run_experiment(dataset: Dataset, method: PromptMethod, evaluator: Evaluator) -> Dict:
(predictions, gold_answers) = ([], [])
for (idx, data_item) in enumerate(dataset):
if (data_item.get('id', None) is None):
data_item['id'] = idx
if (use_cache and os.path.exists(os.path.join(tmp_save_dir, f"{idx}_{data_item['id']}.json"))):
with open(os.path.join(tmp_save_dir, f"{idx}_{data_item['id']}.json"), 'r') as f:
result_item = json.load(f)
(prediction, gold_answer) = (result_item['prediction'], result_item['gold_answer'])
else:
while True:
try:
current_key = openai_key_pool.get_key()
os.environ['OPENAI_API_KEY'] = current_key
start_time = time.time()
print('Using OpenAI key: ', current_key)
prediction = method.run(x=data_item, verbose=verbose)
print('One inference time: ', (time.time() - start_time))
break
except openai.error.OpenAIError as e:
print(f'Error when getting response: {e}')
continue
if (prediction is None):
prediction = '<empty>'
prediction = evaluator.normalize_answer(prediction)
gold_answer = evaluator.normalize_answer(data_item['answer'])
os.makedirs(tmp_save_dir, exist_ok=True)
with open(os.path.join(tmp_save_dir, f"{idx}_{data_item['id']}.json"), 'w') as f:
json.dump({'idx': idx, 'id': data_item['id'], 'prediction': prediction, 'gold_answer': gold_answer}, f)
predictions.append(prediction)
gold_answers.append(gold_answer)
print(f'idx: {idx}')
print(f"id: {data_item['id']}")
print(f'pred answer: {prediction}')
print(f'gold answer: {gold_answer}')
print(('-' * 80))
eval_dict = evaluator.evaluate(predictions, gold_answers)
return eval_dict |
class DarkNet(nn.Module):
def __init__(self, channels, odd_pointwise, avg_pool_size, cls_activ, alpha=0.1, in_channels=3, in_size=(224, 224), num_classes=1000):
super(DarkNet, self).__init__()
self.in_size = in_size
self.num_classes = num_classes
self.features = nn.Sequential()
for (i, channels_per_stage) in enumerate(channels):
stage = nn.Sequential()
for (j, out_channels) in enumerate(channels_per_stage):
stage.add_module('unit{}'.format((j + 1)), dark_convYxY(in_channels=in_channels, out_channels=out_channels, alpha=alpha, pointwise=((len(channels_per_stage) > 1) and (not ((((j + 1) % 2) == 1) ^ odd_pointwise)))))
in_channels = out_channels
if (i != (len(channels) - 1)):
stage.add_module('pool{}'.format((i + 1)), nn.MaxPool2d(kernel_size=2, stride=2))
self.features.add_module('stage{}'.format((i + 1)), stage)
self.output = nn.Sequential()
self.output.add_module('final_conv', nn.Conv2d(in_channels=in_channels, out_channels=num_classes, kernel_size=1))
if cls_activ:
self.output.add_module('final_activ', nn.LeakyReLU(negative_slope=alpha, inplace=True))
self.output.add_module('final_pool', nn.AvgPool2d(kernel_size=avg_pool_size, stride=1))
self._init_params()
def _init_params(self):
for (name, module) in self.named_modules():
if isinstance(module, nn.Conv2d):
if ('final_conv' in name):
init.normal_(module.weight, mean=0.0, std=0.01)
else:
init.kaiming_uniform_(module.weight)
if (module.bias is not None):
init.constant_(module.bias, 0)
def forward(self, x):
x = self.features(x)
x = self.output(x)
x = x.view(x.size(0), (- 1))
return x |
def metric_test_helper(target_clazz: Type[RecMetric], target_compute_mode: RecComputeMode, task_names: List[str], test_clazz: Type[TestMetric], metric_name: str, fused_update_limit: int=0, compute_on_all_ranks: bool=False, should_validate_update: bool=False, batch_window_size: int=BATCH_WINDOW_SIZE, n_classes: Optional[int]=None, nsteps: int=1, zero_weights: bool=False, is_time_dependent: bool=False, time_dependent_metric: Optional[Dict[(Type[RecMetric], str)]]=None, **kwargs: Any) -> None:
rank = int(os.environ['RANK'])
world_size = int(os.environ['WORLD_SIZE'])
dist.init_process_group(backend='gloo', world_size=world_size, rank=rank)
(target_metrics, test_metrics) = rec_metric_value_test_helper(target_clazz=target_clazz, target_compute_mode=target_compute_mode, test_clazz=test_clazz, fused_update_limit=fused_update_limit, compute_on_all_ranks=False, should_validate_update=should_validate_update, world_size=world_size, my_rank=rank, task_names=task_names, batch_window_size=batch_window_size, n_classes=n_classes, nsteps=nsteps, is_time_dependent=is_time_dependent, time_dependent_metric=time_dependent_metric, zero_weights=zero_weights, **kwargs)
if (rank == 0):
for name in task_names:
if ((target_clazz != AUCMetric) and (target_clazz != AUPRCMetric)):
assert torch.allclose(target_metrics[f'{str(target_clazz._namespace)}-{name}|lifetime_{metric_name}'], test_metrics[0][name])
assert torch.allclose(target_metrics[f'{str(target_clazz._namespace)}-{name}|local_lifetime_{metric_name}'], test_metrics[2][name])
assert torch.allclose(target_metrics[f'{str(target_clazz._namespace)}-{name}|window_{metric_name}'], test_metrics[1][name])
assert torch.allclose(target_metrics[f'{str(target_clazz._namespace)}-{name}|local_window_{metric_name}'], test_metrics[3][name])
dist.destroy_process_group() |
class Backbone(Module):
def __init__(self, input_size, num_layers, mode='ir', drop_ratio=0.4, affine=True):
super(Backbone, self).__init__()
assert (input_size in [112, 224]), 'input_size should be 112 or 224'
assert (num_layers in [50, 100, 152]), 'num_layers should be 50, 100 or 152'
assert (mode in ['ir', 'ir_se']), 'mode should be ir or ir_se'
blocks = get_blocks(num_layers)
if (mode == 'ir'):
unit_module = bottleneck_IR
elif (mode == 'ir_se'):
unit_module = bottleneck_IR_SE
self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False), BatchNorm2d(64), PReLU(64))
if (input_size == 112):
self.output_layer = Sequential(BatchNorm2d(512), Dropout(drop_ratio), Flatten(), Linear(((512 * 7) * 7), 512), BatchNorm1d(512, affine=affine))
else:
self.output_layer = Sequential(BatchNorm2d(512), Dropout(drop_ratio), Flatten(), Linear(((512 * 14) * 14), 512), BatchNorm1d(512, affine=affine))
modules = []
for block in blocks:
for bottleneck in block:
modules.append(unit_module(bottleneck.in_channel, bottleneck.depth, bottleneck.stride))
self.body = Sequential(*modules)
def forward(self, x):
x = self.input_layer(x)
x = self.body(x)
x = self.output_layer(x)
return l2_norm(x) |
def test_incompatible_ok(hatch, helpers, temp_dir_data):
project_name = 'My.App'
with temp_dir_data.as_cwd():
result = hatch('new', project_name)
assert (result.exit_code == 0), result.output
project_path = (temp_dir_data / 'my-app')
project = Project(project_path)
helpers.update_project_environment(project, 'default', {'skip-install': True, 'platforms': ['foo'], **project.config.envs['default']})
with project_path.as_cwd():
result = hatch('env', 'remove')
assert (result.exit_code == 0), result.output
assert (not result.output) |
class Stage(Block):
stage_number = Int.T(help='stage sequence number')
    @classmethod
    def read(cls, reader):
lstart = reader.current_line_number()
line = reader.readline()
obj = cls.deserialize(line, reader.version_dialect)
while True:
line = reader.readline()
if ((line is None) or (not line.startswith(b' '))):
reader.pushback()
break
obj.append_dataline(line, reader.version_dialect)
obj.comments.extend(reader.get_comments_after(lstart))
return obj
def write(self, writer):
line = self.serialize(writer.version_dialect)
writer.writeline(line)
self.write_datalines(writer)
for c in self.comments:
writer.writeline((' (%s)' % c).encode('ascii'))
def write_datalines(self, writer):
pass |
class TestHadoopCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('HadoopCollector', {'metrics': [(os.path.dirname(__file__) + '/fixtures/*metrics.log')], 'metrics_blacklist': ('.*rpc.*RpcProcessingTime_avg_time', '.*mapred\\.tasktracker\\.reduceTaskSlots')})
self.collector = HadoopCollector(config, {})
def test_import(self):
self.assertTrue(HadoopCollector)
    @patch.object(Collector, 'publish_metric')
def test_should_work_with_real_data(self, publish_mock):
self.collector.collect()
metrics = self.getPickledResults('expected.pkl')
self.setDocExample(collector=self.collector.__class__.__name__, metrics=metrics, defaultpath=self.collector.config['path'])
self.assertPublishedMetricMany(publish_mock, metrics) |
def addUpdateMatrix(qnnArch, currentUnitaries, trainingDataSv, lda, ep, gamma, outputStates, adjMatrix, storedStates, storedStatesSv, l, j):
res = (makeUpdateMatrixSv(qnnArch, currentUnitaries, trainingDataSv, storedStatesSv, lda, ep, l, j) + (gamma * makeUpdateMatrixGraph(qnnArch, currentUnitaries, lda, outputStates, adjMatrix, storedStates, l, j)))
return (((0 + 1j) * ep) * res).expm() |
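
Reading off the code above (symbols chosen here for exposition, not taken from the source): the returned operator is the matrix exponential of the combined update generator, with \(\epsilon\) the step size and \(\gamma\) weighting the graph term; QuTiP's .expm() computes the exponential of a Qobj:

\[
U_{l,j} = \exp\!\big( i\,\epsilon \,[\, K^{\mathrm{sv}}_{l,j} + \gamma\, K^{\mathrm{graph}}_{l,j} \,] \big)
\]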
class AttendeeConferenceRole(models.Model):
order_position_id = models.IntegerField(null=True, blank=True)
user = models.ForeignKey('users.User', on_delete=models.CASCADE, null=True, blank=True, verbose_name='user', related_name='+')
roles = models.JSONField(default=list)
conference = models.ForeignKey('conferences.Conference', on_delete=models.CASCADE) |
def _initialize_mem_buffs():
args = get_args()
if args.distribute_checkpointed_activations:
mpu.init_checkpointed_activations_memory_buffer()
mpu.init_workspace_memory_buffer()
mpu.init_QKV_forward_buffer()
mpu.init_QKV_dense_buffer()
mpu.init_h4h_forward_buffer()
mpu.init_fhh_forward_buffer()
mpu.init_backward_buffer()
mpu.init_parameter_gradient_buffer()
mpu.init_conjunction_gradient_buffer() |
class DictArray(GDict):
def __init__(self, item=None, capacity=None, faster=False):
super(DictArray, self).__init__(item, faster=faster)
if (item is None):
self.capacity = None
return
if (capacity is not None):
self.capacity = capacity
if (not faster):
self.memory = self.to_array(wrapper=False)
self.memory = self.unsqueeze(axis=0, wrapper=False)
if (capacity != 1):
self.memory = self.repeat(capacity, axis=0, wrapper=False)
elif (self.capacity is None):
self.capacity = self._get_one_attr(self.memory, 'shape')[0]
if (not faster):
self.assert_shape(self.memory, self.capacity)
    @classmethod
    def _get_one_attr(cls, memory, attr):
if isinstance(memory, dict):
for key in memory:
if hasattr(memory[key], attr):
return getattr(memory[key], attr)
ans = cls._get_one_attr(memory[key], attr)
if (ans is not None):
return ans
elif isinstance(memory, list):
for x in memory:
if hasattr(x, attr):
return getattr(x, attr)
ans = cls._get_one_attr(x, attr)
if (ans is not None):
return ans
elif hasattr(memory, attr):
return getattr(memory, attr)
return None
    @classmethod
    def check_shape(cls, memory, capacity):
if isinstance(memory, dict):
for key in memory:
if (not cls.check_shape(memory[key], capacity)):
return False
elif isinstance(memory, list):
for x in memory:
if (not cls.check_shape(x, capacity)):
return False
elif hasattr(memory, 'shape'):
return (memory.shape[0] == capacity)
return True
    @classmethod
    def assert_shape(cls, memory, capacity):
assert cls.check_shape(memory, capacity), f'The first dimension is not {capacity}!'
def sample(self, batch_size, valid_capacity=None, wrapper=True):
capacity = (self.capacity if (valid_capacity is None) else valid_capacity)
indices = np.random.randint(low=0, high=capacity, size=batch_size)
return self._recursive_do(self.memory, take, indices=indices, axis=0, wrapper=wrapper, capacity=batch_size)
def shuffle(self, valid_capacity=None, wrapper=True, in_place=True):
capacity = (self.capacity if (valid_capacity is None) else valid_capacity)
indices = shuffle(np.arange(capacity), axis=0)
if in_place:
items = self.take(np.arange(capacity), wrapper=False)
self.assign(indices, items)
else:
if (capacity < self.capacity):
indices = np.concatenate([indices, (np.arange((self.capacity - capacity)) + capacity)], axis=0)
return self._recursive_do(self.memory, take, indices=indices, axis=0, wrapper=wrapper, capacity=self.capacity)
def assign(self, indices, value):
if isinstance(value, GDict):
value = value.memory
self.memory = self._assign(self.memory, indices, value)
def gather(self, axis, index, wrapper=True):
return self._recursive_do(self.memory, gather, axis=axis, index=index, wrapper=wrapper)
def to_dict_array(self):
return DictArray(self.memory, capacity=self.capacity, faster=True)
def __len__(self):
return self.capacity |
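# Dropout variant that re-samples the drop probability theta uniformly from
# [0, p) on every forward pass and does not rescale surviving features.
# Usage sketch (assumes the surrounding project's pt_utils helper):
# layer = RandomDropout(p=0.5)
# out = layer(torch.randn(8, 64, 1024))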
class RandomDropout(nn.Module):
def __init__(self, p=0.5, inplace=False):
super().__init__()
self.p = p
self.inplace = inplace
def forward(self, X):
theta = torch.Tensor(1).uniform_(0, self.p)[0]
return pt_utils.feature_dropout_no_scaling(X, theta, self.training, self.inplace) |
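# Returns True if the domain resolves to at least one A or AAAA record,
# treating NoAnswer/NXDOMAIN/timeouts/no-nameserver errors as zero records.
# Usage sketch (assumes dnspython's resolver and the exception classes above):
# resolver = dns.resolver.Resolver()
# has_ip = domain_has_ip(resolver, 'example.com')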
def domain_has_ip(resolver, domain):
len_dns_a = 0
len_dns_aaaa = 0
try:
dns_response = resolver.resolve(domain, RdataType.A)
len_dns_a = len(dns_response.rrset)
except (NoAnswer, NXDOMAIN, LifetimeTimeout, NoNameservers) as e:
pass
try:
dns_response = resolver.resolve(domain, RdataType.AAAA)
len_dns_aaaa = len(dns_response.rrset)
except (NoAnswer, NXDOMAIN, LifetimeTimeout, NoNameservers) as e:
pass
return ((len_dns_a + len_dns_aaaa) > 0) |
class Tunexpand(TestCase):
d = os.path.expanduser('~')
u = unexpand(d)
def test_base(self):
path = unexpand(self.d)
if is_win:
self.assertEqual(path, '%USERPROFILE%')
else:
self.assertEqual(path, '~')
def test_only_profile_case(self):
assert isinstance(unexpand(os.path.expanduser(fsnative('~'))), fsnative)
def test_base_trailing(self):
path = unexpand((self.d + os.path.sep))
self.assertEqual(path, (self.u + os.path.sep))
def test_noprefix(self):
path = unexpand(((self.d + 'foobar') + os.path.sep))
self.assertEqual(path, ((self.d + 'foobar') + os.path.sep))
def test_subfile(self):
path = unexpand(os.path.join(self.d, 'la', 'la'))
self.assertEqual(path, os.path.join(self.u, 'la', 'la'))
def test_case_insensitive_win(self):
if is_win:
assert (unexpand(self.d.lower()) == '%USERPROFILE%')
assert (unexpand(self.d.upper()) == '%USERPROFILE%') |
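# Qiskit Nature-style property: second_q_ops() exposes the S_z operator keyed
# by the class name, and interpret() copies each evaluated eigenvalue's real
# part into result.magnetization (appending None where it was not evaluated).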
class Magnetization():
def __init__(self, num_spatial_orbitals: int) -> None:
self.num_spatial_orbitals = num_spatial_orbitals
def second_q_ops(self) -> Mapping[(str, FermionicOp)]:
return {self.__class__.__name__: s_z_operator(self.num_spatial_orbitals)}
def interpret(self, result: 'qiskit_nature.second_q.problems.EigenstateResult') -> None:
result.magnetization = []
if (result.aux_operators_evaluated is None):
return
for aux_op_eigenvalues in result.aux_operators_evaluated:
if (not isinstance(aux_op_eigenvalues, dict)):
continue
_key = self.__class__.__name__
if (aux_op_eigenvalues[_key] is not None):
result.magnetization.append(aux_op_eigenvalues[_key].real)
else:
result.magnetization.append(None) |
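# TF1-style input pipeline: loads a fashion-MNIST split, feeds it through a
# slice_input_producer queue and returns shuffled (x, y) batches drawn by 8
# threads (cfg and load_mnist come from the surrounding project).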
def create_inputs_fashion_mnist(is_train):
(tr_x, tr_y) = load_mnist(cfg.dataset_fashion_mnist, is_train)
data_queue = tf.train.slice_input_producer([tr_x, tr_y], capacity=(64 * 8))
(x, y) = tf.train.shuffle_batch(data_queue, num_threads=8, batch_size=cfg.batch_size, capacity=(cfg.batch_size * 64), min_after_dequeue=(cfg.batch_size * 32), allow_smaller_final_batch=False)
return (x, y) |
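# Abstract futures roll logic: get_rolls() walks backwards from `end` through
# trading sessions and prepends a (sid, session) entry each time the active
# contract changes (zipline-style asset_finder and trading_calendar assumed).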
class RollFinder(with_metaclass(ABCMeta, object)):
def _active_contract(self, oc, front, back, dt):
raise NotImplementedError
def _get_active_contract_at_offset(self, root_symbol, dt, offset):
oc = self.asset_finder.get_ordered_contracts(root_symbol)
session = self.trading_calendar.minute_to_session_label(dt)
front = oc.contract_before_auto_close(session.value)
back = oc.contract_at_offset(front, 1, dt.value)
if (back is None):
return front
primary = self._active_contract(oc, front, back, session)
return oc.contract_at_offset(primary, offset, session.value)
def get_contract_center(self, root_symbol, dt, offset):
return self._get_active_contract_at_offset(root_symbol, dt, offset)
def get_rolls(self, root_symbol, start, end, offset):
oc = self.asset_finder.get_ordered_contracts(root_symbol)
front = self._get_active_contract_at_offset(root_symbol, end, 0)
back = oc.contract_at_offset(front, 1, end.value)
if (back is not None):
end_session = self.trading_calendar.minute_to_session_label(end)
first = self._active_contract(oc, front, back, end_session)
else:
first = front
first_contract = oc.sid_to_contract[first]
rolls = [((first_contract >> offset).contract.sid, None)]
tc = self.trading_calendar
sessions = tc.sessions_in_range(tc.minute_to_session_label(start), tc.minute_to_session_label(end))
freq = sessions.freq
if (first == front):
curr = (first_contract << 1)
else:
curr = (first_contract << 2)
session = sessions[(- 1)]
while ((session > start) and (curr is not None)):
front = curr.contract.sid
back = rolls[0][0]
prev_c = curr.prev
while (session > start):
prev = (session - freq)
if (prev_c is not None):
if (prev < prev_c.contract.auto_close_date):
break
if (back != self._active_contract(oc, front, back, prev)):
rolls.insert(0, ((curr >> offset).contract.sid, session))
break
session = prev
curr = curr.prev
if (curr is not None):
session = min(session, (curr.contract.auto_close_date + freq))
return rolls |
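# Scrapes the PyPI simple index for pytask-* plugins and yields one row per
# active package: name, summary, last release date, development status and the
# pinned pytask requirement; 404s and packages marked Inactive are skipped.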
def _iter_plugins() -> Generator[(dict[(str, str)], None, None)]:
regex = '>([\\d\\w-]*)</a>'
response = requests.get('https://pypi.org/simple/', timeout=20)
matches = [match for match in re.finditer(regex, response.text) if (match.groups()[0].startswith('pytask-') and (match.groups()[0] not in _EXCLUDED_PACKAGES))]
for match in tqdm(matches, smoothing=0):
name = match.groups()[0]
response = requests.get(f'https://pypi.org/pypi/{name}/json', timeout=20)
if (response.status_code == 404):
continue
response.raise_for_status()
info = response.json()['info']
if ('Development Status :: 7 - Inactive' in info['classifiers']):
continue
for classifier in _DEVELOPMENT_STATUS_CLASSIFIERS:
if (classifier in info['classifiers']):
status = classifier[22:]
break
else:
status = 'N/A'
requires = 'N/A'
if info['requires_dist']:
for requirement in info['requires_dist']:
if re.match('pytask(?![-.\\w])', requirement):
requires = requirement
break
def _version_sort_key(version_string: str) -> packaging.version.Version:
try:
return packaging.version.parse(version_string)
except packaging.version.InvalidVersion:
return packaging.version.Version('0.0.0alpha')
releases = response.json()['releases']
for release in sorted(releases, key=_version_sort_key, reverse=True):
if releases[release]:
release_date = datetime.date.fromisoformat(releases[release][(- 1)]['upload_time_iso_8601'].split('T')[0])
last_release = release_date.strftime('%b %d, %Y')
break
name = f":pypi:`{info['name']}`"
summary = ''
if info['summary']:
summary = _escape_rst(info['summary'].replace('\n', ''))
(yield {'name': name, 'summary': summary.strip(), 'last release': last_release, 'status': status, 'requires': requires}) |
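# FIFO byte buffer that tracks a separate flow-controlled length per chunk
# (useful for HTTP/2-style flow control where padding counts against the
# window even after the padded bytes are consumed). Usage sketch:
# buf = ByteBuffer()
# buf.append(b'hello', flow_controlled_length=10)
# data, fcl = buf.popleft_flowcontrol(3)  # data == b'hel', fcl == 3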
class ByteBuffer():
def __init__(self):
self._deque = collections.deque()
self._size = 0
self._flow_controlled_length = 0
def append(self, data, flow_controlled_length=None):
if ((not isinstance(data, bytes)) and (not isinstance(data, bytearray))):
raise ValueError('Expected bytes')
if (flow_controlled_length is None):
flow_controlled_length = len(data)
if (flow_controlled_length < len(data)):
raise ValueError('flow_controlled_length should be >= len(data)')
self._deque.append((data, flow_controlled_length))
self._size += len(data)
self._flow_controlled_length += flow_controlled_length
def popleft_flowcontrol(self, amount):
if (amount > self._size):
raise ValueError('Trying to extract {} bytes from ByteBuffer of length {}'.format(amount, self._size))
data = []
flow_controlled_length = 0
while (amount > 0):
next_element = self._deque[0][0]
next_element_flow_controlled_length = self._deque[0][1]
if (amount >= len(next_element)):
self._size -= len(next_element)
self._flow_controlled_length -= next_element_flow_controlled_length
amount -= len(next_element)
flow_controlled_length += next_element_flow_controlled_length
data.append(next_element)
self._deque.popleft()
else:
data.append(next_element[:amount])
self._deque[0] = (next_element[amount:], (next_element_flow_controlled_length - amount))
self._size -= amount
self._flow_controlled_length -= amount
flow_controlled_length += amount
amount = 0
return (b''.join(data), flow_controlled_length)
def popleft(self, amount):
return self.popleft_flowcontrol(amount)[0]
def __len__(self):
return self._size
def flow_controlled_length(self):
return self._flow_controlled_length
def length(self):
return len(self) |
class rotate(Command):
description = 'delete older distributions, keeping N newest files'
user_options = [('match=', 'm', 'patterns to match (required)'), ('dist-dir=', 'd', 'directory where the distributions are'), ('keep=', 'k', 'number of matching distributions to keep')]
boolean_options = []
def initialize_options(self):
self.match = None
self.dist_dir = None
self.keep = None
def finalize_options(self):
if (self.match is None):
raise DistutilsOptionError("Must specify one or more (comma-separated) match patterns (e.g. '.zip' or '.egg')")
if (self.keep is None):
raise DistutilsOptionError('Must specify number of files to keep')
try:
self.keep = int(self.keep)
except ValueError as e:
raise DistutilsOptionError('--keep must be an integer') from e
if isinstance(self.match, str):
self.match = [convert_path(p.strip()) for p in self.match.split(',')]
self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
def run(self):
self.run_command('egg_info')
from glob import glob
for pattern in self.match:
pattern = ((self.distribution.get_name() + '*') + pattern)
files = glob(os.path.join(self.dist_dir, pattern))
files = [(os.path.getmtime(f), f) for f in files]
files.sort()
files.reverse()
log.info('%d file(s) matching %s', len(files), pattern)
files = files[self.keep:]
for (t, f) in files:
log.info('Deleting %s', f)
if (not self.dry_run):
if os.path.isdir(f):
shutil.rmtree(f)
else:
os.unlink(f) |
class JSONLoggerTest(unittest.TestCase):
def test_json_log(self) -> None:
with TemporaryDirectory() as tmpdir:
json_path = Path(tmpdir, 'test.json').as_posix()
logger = JSONLogger(json_path, steps_before_flushing=1)
log_name = 'asdf'
log_value = 123.0
log_step = 10
logger.log(log_name, log_value, log_step)
logger.close()
with open(json_path) as f:
d = json.load(f)
print(d)
self.assertTrue(len(d))
self.assertEqual(d[0][log_name], log_value)
self.assertEqual(d[0]['step'], log_step) |
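# Extends the v4 archive format: after the base write, computes md5 checksums
# for every file under the archive root and records them in checksums.md5
# (md5sum_directory and to_checksum_format are assumed from the same module).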
class ArchiveFormat(v4.ArchiveFormat):
CHECKSUM_FILE = 'checksums.md5'
@classmethod
def write(cls, archive_record, type, format, data_initializer, provenance_capture):
super().write(archive_record, type, format, data_initializer, provenance_capture)
checksums = md5sum_directory(str(archive_record.root))
with (archive_record.root / cls.CHECKSUM_FILE).open('w') as fh:
for item in checksums.items():
fh.write(to_checksum_format(*item))
fh.write('\n') |
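# Faceswap-style alignments store: loads, saves and queries per-frame face
# alignment data through a pluggable serializer. Usage sketch:
# alignments = Alignments('/path/to/frames', filename='alignments', serializer='json')
# if alignments.frame_has_faces('frame0001.png'):
#     faces = alignments.get_faces_in_frame('frame0001.png')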
class Alignments():
def __init__(self, folder, filename='alignments', serializer='json'):
logger.debug("Initializing %s: (folder: '%s', filename: '%s', serializer: '%s')", self.__class__.__name__, folder, filename, serializer)
self.serializer = self.get_serializer(filename, serializer)
self.file = self.get_location(folder, filename)
self.data = self.load()
logger.debug('Initialized %s', self.__class__.__name__)
@property
def frames_count(self):
retval = len(self.data)
logger.trace(retval)
return retval
@property
def faces_count(self):
retval = sum((len(faces) for faces in self.data.values()))
logger.trace(retval)
return retval
@property
def have_alignments_file(self):
retval = os.path.exists(self.file)
logger.trace(retval)
return retval
@property
def hashes_to_frame(self):
hash_faces = dict()
for (frame_name, faces) in self.data.items():
for (idx, face) in enumerate(faces):
hash_faces.setdefault(face['hash'], dict())[frame_name] = idx
return hash_faces
@staticmethod
def get_serializer(filename, serializer):
logger.debug("Getting serializer: (filename: '%s', serializer: '%s')", filename, serializer)
extension = os.path.splitext(filename)[1]
if (extension in ('.json', '.p', '.yaml', '.yml')):
logger.debug("Serializer set from file extension: '%s'", extension)
retval = Serializer.get_serializer_from_ext(extension)
elif (serializer not in ('json', 'pickle', 'yaml')):
raise ValueError("Error: {} is not a valid serializer. Use 'json', 'pickle' or 'yaml'")
else:
logger.debug("Serializer set from argument: '%s'", serializer)
retval = Serializer.get_serializer(serializer)
logger.verbose("Using '%s' serializer for alignments", retval.ext)
return retval
def get_location(self, folder, filename):
logger.debug("Getting location: (folder: '%s', filename: '%s')", folder, filename)
extension = os.path.splitext(filename)[1]
if (extension in ('.json', '.p', '.yaml', '.yml')):
logger.debug("File extension set from filename: '%s'", extension)
location = os.path.join(str(folder), filename)
else:
location = os.path.join(str(folder), '{}.{}'.format(filename, self.serializer.ext))
logger.debug("File extension set from serializer: '%s'", self.serializer.ext)
logger.verbose("Alignments filepath: '%s'", location)
return location
def load(self):
logger.debug('Loading alignments')
if (not self.have_alignments_file):
raise ValueError('Error: Alignments file not found at {}'.format(self.file))
try:
logger.info("Reading alignments from: '%s'", self.file)
with open(self.file, self.serializer.roptions) as align:
data = self.serializer.unmarshal(align.read())
except IOError as err:
logger.error("'%s' not read: %s", self.file, err.strerror)
exit(1)
logger.debug('Loaded alignments')
return data
def reload(self):
logger.debug('Re-loading alignments')
self.data = self.load()
logger.debug('Re-loaded alignments')
def save(self):
logger.debug('Saving alignments')
try:
logger.info("Writing alignments to: '%s'", self.file)
with open(self.file, self.serializer.woptions) as align:
align.write(self.serializer.marshal(self.data))
logger.debug('Saved alignments')
except IOError as err:
logger.error("'%s' not written: %s", self.file, err.strerror)
def backup(self):
logger.debug('Backing up alignments')
if (not os.path.isfile(self.file)):
logger.debug('No alignments to back up')
return
now = datetime.now().strftime('%Y%m%d_%H%M%S')
src = self.file
split = os.path.splitext(src)
dst = (((split[0] + '_') + now) + split[1])
logger.info("Backing up original alignments to '%s'", dst)
os.rename(src, dst)
logger.debug('Backed up alignments')
def frame_exists(self, frame):
retval = (frame in self.data.keys())
logger.trace("'%s': %s", frame, retval)
return retval
def frame_has_faces(self, frame):
retval = bool(self.data.get(frame, list()))
logger.trace("'%s': %s", frame, retval)
return retval
def frame_has_multiple_faces(self, frame):
if (not frame):
retval = False
else:
retval = bool((len(self.data.get(frame, list())) > 1))
logger.trace("'%s': %s", frame, retval)
return retval
def get_faces_in_frame(self, frame):
logger.trace("Getting faces for frame: '%s'", frame)
return self.data.get(frame, list())
def get_full_frame_name(self, frame):
retval = next((key for key in self.data.keys() if key.startswith(frame)))
logger.trace("Requested: '%s', Returning: '%s'", frame, retval)
return retval
def count_faces_in_frame(self, frame):
retval = len(self.data.get(frame, list()))
logger.trace(retval)
return retval
def delete_face_at_index(self, frame, idx):
logger.debug("Deleting face %s for frame '%s'", idx, frame)
idx = int(idx)
if ((idx + 1) > self.count_faces_in_frame(frame)):
logger.debug("No face to delete: (frame: '%s', idx %s)", frame, idx)
return False
del self.data[frame][idx]
logger.debug("Deleted face: (frame: '%s', idx %s)", frame, idx)
return True
def add_face(self, frame, alignment):
logger.debug("Adding face to frame: '%s'", frame)
if (frame not in self.data):
self.data[frame] = []
self.data[frame].append(alignment)
retval = (self.count_faces_in_frame(frame) - 1)
logger.debug('Returning new face index: %s', retval)
return retval
def update_face(self, frame, idx, alignment):
logger.debug("Updating face %s for frame '%s'", idx, frame)
self.data[frame][idx] = alignment
def filter_hashes(self, hashlist, filter_out=False):
hashset = set(hashlist)
for (filename, frame) in self.data.items():
for (idx, face) in reversed(list(enumerate(frame))):
if ((filter_out and (face.get('hash', None) in hashset)) or ((not filter_out) and (face.get('hash', None) not in hashset))):
logger.verbose('Filtering out face: (filename: %s, index: %s)', filename, idx)
del frame[idx]
else:
logger.trace('Not filtering out face: (filename: %s, index: %s)', filename, idx)
def yield_faces(self):
for (frame_fullname, alignments) in self.data.items():
frame_name = os.path.splitext(frame_fullname)[0]
face_count = len(alignments)
logger.trace("Yielding: (frame: '%s', faces: %s, frame_fullname: '%s')", frame_name, face_count, frame_fullname)
(yield (frame_name, alignments, face_count, frame_fullname))
@staticmethod
def yield_original_index_reverse(image_alignments, number_alignments):
for (idx, _) in enumerate(reversed(image_alignments)):
original_idx = ((number_alignments - 1) - idx)
logger.trace('Yielding: face index %s', original_idx)
(yield original_idx)
def get_legacy_rotation(self):
logger.debug('Getting alignments containing legacy rotations')
keys = list()
for (key, val) in self.data.items():
if any((alignment.get('r', None) for alignment in val)):
keys.append(key)
logger.debug('Got alignments containing legacy rotations: %s', len(keys))
return keys
def rotate_existing_landmarks(self, frame_name, frame):
logger.trace("Rotating existing landmarks for frame: '%s'", frame_name)
dims = frame.shape[:2]
for face in self.get_faces_in_frame(frame_name):
angle = face.get('r', 0)
if (not angle):
logger.trace("Landmarks do not require rotation: '%s'", frame_name)
return
logger.trace("Rotating landmarks: (frame: '%s', angle: %s)", frame_name, angle)
r_mat = self.get_original_rotation_matrix(dims, angle)
rotate_landmarks(face, r_mat)
del face['r']
logger.trace("Rotatated existing landmarks for frame: '%s'", frame_name)
@staticmethod
def get_original_rotation_matrix(dimensions, angle):
logger.trace('Getting original rotation matrix: (dimensions: %s, angle: %s)', dimensions, angle)
(height, width) = dimensions
center = ((width / 2), (height / 2))
r_mat = cv2.getRotationMatrix2D(center, ((- 1.0) * angle), 1.0)
abs_cos = abs(r_mat[(0, 0)])
abs_sin = abs(r_mat[(0, 1)])
rotated_width = int(((height * abs_sin) + (width * abs_cos)))
rotated_height = int(((height * abs_cos) + (width * abs_sin)))
r_mat[(0, 2)] += ((rotated_width / 2) - center[0])
r_mat[(1, 2)] += ((rotated_height / 2) - center[1])
logger.trace('Returning rotation matrix: %s', r_mat)
return r_mat
def get_legacy_no_hashes(self):
logger.debug('Getting alignments without face hashes')
keys = list()
for (key, val) in self.data.items():
for alignment in val:
if ('hash' not in alignment.keys()):
keys.append(key)
break
logger.debug('Got alignments without face hashes: %s', len(keys))
return keys
def add_face_hashes(self, frame_name, hashes):
logger.trace("Adding face hash: (frame: '%s', hashes: %s)", frame_name, hashes)
faces = self.get_faces_in_frame(frame_name)
count_match = (len(faces) - len(hashes))
if (count_match != 0):
msg = ('more' if (count_match > 0) else 'fewer')
logger.warning("There are %s %s face(s) in the alignments file than exist in the faces folder. Check your sources for frame '%s'.", abs(count_match), msg, frame_name)
for (idx, i_hash) in hashes.items():
faces[idx]['hash'] = i_hash |