code stringlengths 281 23.7M |
|---|
class DFN(nn.Module):
    """Discriminative Feature Network head on a ResNet-101 backbone.

    forward() returns two `num_class`-channel maps:
      * ``res1`` -- built bottom-up from the ``rrb_db_*`` / ``rrb_trans_*``
        chain over backbone stages f1..f4 (the DFN "border" branch,
        presumably -- confirm against the project's CAB/RRB definitions).
      * ``res2`` -- built top-down from global average pooling plus per-stage
        CAB fusion and RRB refinement (the "smooth" branch, presumably).

    ``CAB`` and ``RRB`` are project-defined modules not visible in this file.
    """
    def __init__(self, num_class=19):
        """Build backbone taps and fusion/refinement blocks.

        Args:
            num_class: number of output channels/classes (default 19 --
                the Cityscapes class count, presumably; confirm).
        """
        super(DFN, self).__init__()
        self.num_class = num_class
        # ResNet-101 split into stages so intermediate features f1..f4 can be
        # tapped in forward().
        self.resnet_features = resnet101(pretrained=False)
        self.layer0 = nn.Sequential(self.resnet_features.conv1, self.resnet_features.bn1, self.resnet_features.relu)
        self.layer1 = nn.Sequential(self.resnet_features.maxpool, self.resnet_features.layer1)
        self.layer2 = self.resnet_features.layer2
        self.layer3 = self.resnet_features.layer3
        self.layer4 = self.resnet_features.layer4
        # 1x1 projection of the deepest (2048-ch) features to class scores;
        # globally pooled in forward() to make a context vector.
        self.out_conv = nn.Conv2d(2048, self.num_class, kernel_size=1, stride=1)
        self.global_pool = nn.AdaptiveAvgPool2d(1)
        # One CAB per stage; each fuses a [coarse, stage] pair, hence the
        # `num_class * 2` input channels.
        self.cab1 = CAB((self.num_class * 2), self.num_class)
        self.cab2 = CAB((self.num_class * 2), self.num_class)
        self.cab3 = CAB((self.num_class * 2), self.num_class)
        self.cab4 = CAB((self.num_class * 2), self.num_class)
        # RRBs reducing each backbone stage to `num_class` channels
        # (smooth-branch inputs).
        self.rrb_d_1 = RRB(256, self.num_class)
        self.rrb_d_2 = RRB(512, self.num_class)
        self.rrb_d_3 = RRB(1024, self.num_class)
        self.rrb_d_4 = RRB(2048, self.num_class)
        # Fixed-factor bilinear upsamplers used to align stage resolutions.
        self.upsample = nn.Upsample(scale_factor=2, mode='bilinear')
        self.upsample_4 = nn.Upsample(scale_factor=4, mode='bilinear')
        self.upsample_8 = nn.Upsample(scale_factor=8, mode='bilinear')
        # Refinement RRBs applied after each CAB fusion step.
        self.rrb_u_1 = RRB(self.num_class, self.num_class)
        self.rrb_u_2 = RRB(self.num_class, self.num_class)
        self.rrb_u_3 = RRB(self.num_class, self.num_class)
        self.rrb_u_4 = RRB(self.num_class, self.num_class)
        # Border-branch RRBs (one per stage) plus the transition RRBs that
        # merge successive stages into res1.
        self.rrb_db_1 = RRB(256, self.num_class)
        self.rrb_db_2 = RRB(512, self.num_class)
        self.rrb_db_3 = RRB(1024, self.num_class)
        self.rrb_db_4 = RRB(2048, self.num_class)
        self.rrb_trans_1 = RRB(self.num_class, self.num_class)
        self.rrb_trans_2 = RRB(self.num_class, self.num_class)
        self.rrb_trans_3 = RRB(self.num_class, self.num_class)

    def forward(self, x):
        """Run the backbone once, then both branches; returns (res1, res2)."""
        # Backbone feature pyramid.
        f0 = self.layer0(x)
        f1 = self.layer1(f0)
        f2 = self.layer2(f1)
        f3 = self.layer3(f2)
        f4 = self.layer4(f3)
        # Border branch: accumulate deeper stages into f1's resolution.
        res1 = self.rrb_db_1(f1)
        res1 = self.rrb_trans_1((res1 + self.upsample(self.rrb_db_2(f2))))
        res1 = self.rrb_trans_2((res1 + self.upsample_4(self.rrb_db_3(f3))))
        res1 = self.rrb_trans_3((res1 + self.upsample_8(self.rrb_db_4(f4))))
        # Smooth branch: global context vector broadcast back to f4's size...
        res2 = self.out_conv(f4)
        res2 = self.global_pool(res2)
        res2 = nn.Upsample(size=f4.size()[2:], mode='nearest')(res2)
        # ...then fused with each stage top-down (f4 -> f1), refining after
        # every fusion. Note f1..f4 are rebound to their RRB-reduced versions.
        f4 = self.rrb_d_4(f4)
        res2 = self.cab4([res2, f4])
        res2 = self.rrb_u_1(res2)
        f3 = self.rrb_d_3(f3)
        res2 = self.cab3([self.upsample(res2), f3])
        res2 = self.rrb_u_2(res2)
        f2 = self.rrb_d_2(f2)
        res2 = self.cab2([self.upsample(res2), f2])
        res2 = self.rrb_u_3(res2)
        f1 = self.rrb_d_1(f1)
        res2 = self.cab1([self.upsample(res2), f1])
        res2 = self.rrb_u_4(res2)
        return (res1, res2)

    def freeze_bn(self):
        """Put every BatchNorm2d into eval mode so running stats stay frozen."""
        for m in self.modules():
            if isinstance(m, nn.BatchNorm2d):
                m.eval()
def draw_text_onimage(text, image, color=(255, 0, 0)):
    """Render *text* at the top-left of *image* and return a float32 copy.

    Accepts a float32 array in [0, 1] or a uint8 array; always returns a
    float32 array scaled back to [0, 1].
    """
    if image.dtype == np.float32:
        image = (image * 255.0).astype(np.uint8)
    assert image.dtype == np.uint8
    canvas = Image.fromarray(image)
    ImageDraw.Draw(canvas).text((4, 0), text, fill=color)
    return np.array(canvas).astype(np.float32) / 255.0
class SunZenithReducer(SunZenithCorrectorBase):
    """Modifier that reduces signal as a function of sun-zenith angle via the
    project's `sunzen_reduction` helper."""

    def __init__(self, correction_limit=80.0, max_sza=90, strength=1.3, **kwargs):
        # Store local parameters first; the base-class __init__ is what sets
        # self.max_sza, which the check below relies on.
        self.correction_limit = correction_limit
        self.strength = strength
        super(SunZenithReducer, self).__init__(max_sza=max_sza, **kwargs)
        if (self.max_sza is None):
            raise ValueError('`max_sza` must be defined when using the SunZenithReducer.')

    def _apply_correction(self, proj, coszen):
        """Return a copy of *proj* with sun-zenith reduction applied.

        *coszen* holds the cosine of the sun-zenith angle; it is converted
        back to degrees before being handed to `sunzen_reduction`.
        """
        logger.debug(f'Applying sun-zenith signal reduction with correction_limit {self.correction_limit} deg, strength {self.strength}, and max_sza {self.max_sza} deg.')
        res = proj.copy()
        sunz = np.rad2deg(np.arccos(coszen.data))
        res.data = sunzen_reduction(proj.data, sunz, limit=self.correction_limit, max_sza=self.max_sza, strength=self.strength)
        return res
def hdf5_read(filepath: (pathlib.Path | str), dataset_name: str) -> cunumeric.ndarray:
    """Read an HDF5 dataset into a cunumeric array via kerchunk chunk offsets.

    Uses kerchunk's SingleHdf5ToZarr to discover the byte offset of every
    chunk inside the HDF5 file, then reads the raw tiles directly with
    `read_tiles_by_offsets` (project helper). Only uncompressed datasets are
    supported, and every chunk must be a full tile (asserted below).
    """
    filepath = pathlib.Path(filepath)
    # kerchunk scan: yields a reference dict whose 'refs' entry maps zarr
    # chunk keys to (path, offset, nbytes) triples within the HDF5 file.
    annotations = SingleHdf5ToZarr(filepath, inline_threshold=0).translate()
    zarr_group = zarr.open(fsspec.get_mapper('reference://', fo=annotations))
    zarr_ary: zarr.Array = zarr_group[dataset_name]
    if (zarr_ary.compressor is not None):
        raise NotImplementedError("compressor isn't supported")
    refs = annotations['refs']
    offsets = []
    tile_nbytes = (math.prod(zarr_ary.chunks) * zarr_ary.itemsize)
    # Walk the chunk grid in C order, collecting each chunk's file offset.
    for chunk_coord in itertools.product(*(range(math.ceil((s / c))) for (s, c) in zip(zarr_ary.shape, zarr_ary.chunks))):
        # NOTE: _chunk_key is private zarr API -- may break across versions.
        key = zarr_ary._chunk_key(chunk_coord)
        (_, offset, nbytes) = refs[key]
        offsets.append(offset)
        # All chunks must be exactly one tile; ragged edge chunks unsupported.
        assert (tile_nbytes == nbytes)
    # get_padded_array (project helper) presumably returns a chunk-aligned
    # target buffer when the shape isn't a multiple of the chunk shape, and
    # None when no padding is needed -- confirm against its definition.
    padded_ary = get_padded_array(zarr_ary)
    if (padded_ary is None):
        ret = cunumeric.empty(shape=zarr_ary.shape, dtype=zarr_ary.dtype)
        read_tiles_by_offsets(ret, filepath=filepath, offsets=tuple(offsets), tile_shape=zarr_ary.chunks)
    else:
        read_tiles_by_offsets(padded_ary, filepath=filepath, offsets=tuple(offsets), tile_shape=zarr_ary.chunks)
        # Trim the padding back off to the dataset's true shape.
        ret = padded_ary[tuple((slice(s) for s in zarr_ary.shape))]
    return ret
def test_load_previous_state_previous_layout_not_layout(tmp_path: Path, default_echoes_configuration):
    """A stored preset that is not a valid layout yields no restored state."""
    preset_file = tmp_path.joinpath('preset.rdvpreset')
    preset_file.write_text(json.dumps({'trick_level': 'foo'}))
    state_file = tmp_path.joinpath('state.json')
    state_file.write_text('[]')
    assert tracker_window._load_previous_state(tmp_path, default_echoes_configuration) is None
# NOTE(review): the bare call below looks like a class decorator mangled during
# extraction (probably `@register_loss('simclr_info_nce_loss')` in the original
# source) -- confirm before relying on loss registration.
_loss('simclr_info_nce_loss')
class SimclrInfoNCELoss(ClassyLoss):
    """SimCLR InfoNCE contrastive loss.

    L2-normalizes the embedding batch and delegates the actual InfoNCE
    computation to `SimclrInfoNCECriterion`.
    """

    def __init__(self, loss_config: AttrDict, device: str = 'gpu'):
        super(SimclrInfoNCELoss, self).__init__()
        self.loss_config = loss_config
        self.temperature = self.loss_config.temperature
        self.buffer_params = self.loss_config.buffer_params
        self.info_criterion = SimclrInfoNCECriterion(self.buffer_params, self.temperature)

    @classmethod
    def from_config(cls, loss_config: AttrDict):
        """Alternate constructor from a config object.

        Fix: `from_config` takes `cls` but was missing @classmethod, so a
        class-level call would have bound the class object to `loss_config`.
        """
        return cls(loss_config)

    def forward(self, output, target):
        """Return the InfoNCE loss for *output*; *target* is unused
        (self-supervised objective)."""
        normalized_output = nn.functional.normalize(output, dim=1, p=2)
        loss = self.info_criterion(normalized_output)
        return loss

    def __repr__(self):
        repr_dict = {'name': self._get_name(), 'info_average': self.info_criterion}
        return pprint.pformat(repr_dict, indent=2)
class GenerationConfig(PushToHubMixin):
    """Holds generation-time hyper-parameters (sampling, beam search, length
    control, special tokens, ...) and their (de)serialization to JSON.

    Fix: `from_pretrained`, `_dict_from_json_file`, `from_dict` and
    `from_model_config` all take `cls` as first parameter but had lost their
    `@classmethod` decorators; restored here so the alternate constructors are
    callable on the class.
    """

    def __init__(self, **kwargs):
        """Pop every known generation parameter out of ``kwargs`` with its
        documented default; leftover keys are set as plain attributes (only
        for user-built configs, see below), then the config is validated."""
        self.max_length = kwargs.pop('max_length', 20)
        self.max_new_tokens = kwargs.pop('max_new_tokens', None)
        self.min_length = kwargs.pop('min_length', 0)
        self.min_new_tokens = kwargs.pop('min_new_tokens', None)
        self.early_stopping = kwargs.pop('early_stopping', False)
        self.max_time = kwargs.pop('max_time', None)
        self.do_sample = kwargs.pop('do_sample', False)
        self.num_beams = kwargs.pop('num_beams', 1)
        self.num_beam_groups = kwargs.pop('num_beam_groups', 1)
        self.penalty_alpha = kwargs.pop('penalty_alpha', None)
        self.use_cache = kwargs.pop('use_cache', True)
        self.temperature = kwargs.pop('temperature', 1.0)
        self.top_k = kwargs.pop('top_k', 50)
        self.top_p = kwargs.pop('top_p', 1.0)
        self.typical_p = kwargs.pop('typical_p', 1.0)
        self.epsilon_cutoff = kwargs.pop('epsilon_cutoff', 0.0)
        self.eta_cutoff = kwargs.pop('eta_cutoff', 0.0)
        self.diversity_penalty = kwargs.pop('diversity_penalty', 0.0)
        self.repetition_penalty = kwargs.pop('repetition_penalty', 1.0)
        self.encoder_repetition_penalty = kwargs.pop('encoder_repetition_penalty', 1.0)
        self.length_penalty = kwargs.pop('length_penalty', 1.0)
        self.no_repeat_ngram_size = kwargs.pop('no_repeat_ngram_size', 0)
        self.bad_words_ids = kwargs.pop('bad_words_ids', None)
        self.force_words_ids = kwargs.pop('force_words_ids', None)
        self.renormalize_logits = kwargs.pop('renormalize_logits', False)
        self.constraints = kwargs.pop('constraints', None)
        self.forced_bos_token_id = kwargs.pop('forced_bos_token_id', None)
        self.forced_eos_token_id = kwargs.pop('forced_eos_token_id', None)
        self.remove_invalid_values = kwargs.pop('remove_invalid_values', False)
        self.exponential_decay_length_penalty = kwargs.pop('exponential_decay_length_penalty', None)
        self.suppress_tokens = kwargs.pop('suppress_tokens', None)
        self.begin_suppress_tokens = kwargs.pop('begin_suppress_tokens', None)
        self.forced_decoder_ids = kwargs.pop('forced_decoder_ids', None)
        self.num_return_sequences = kwargs.pop('num_return_sequences', 1)
        self.output_attentions = kwargs.pop('output_attentions', False)
        self.output_hidden_states = kwargs.pop('output_hidden_states', False)
        self.output_scores = kwargs.pop('output_scores', False)
        self.return_dict_in_generate = kwargs.pop('return_dict_in_generate', False)
        self.pad_token_id = kwargs.pop('pad_token_id', None)
        self.bos_token_id = kwargs.pop('bos_token_id', None)
        self.eos_token_id = kwargs.pop('eos_token_id', None)
        self.encoder_no_repeat_ngram_size = kwargs.pop('encoder_no_repeat_ngram_size', 0)
        self.decoder_start_token_id = kwargs.pop('decoder_start_token_id', None)
        self.generation_kwargs = kwargs.pop('generation_kwargs', {})
        # Serialization metadata -- excluded from equality (see __eq__).
        self._from_model_config = kwargs.pop('_from_model_config', False)
        self._commit_hash = kwargs.pop('_commit_hash', None)
        self.transformers_version = kwargs.pop('transformers_version', __version__)
        # Extra kwargs are only honoured for user-built configs; configs
        # derived from a model config ignore unknown keys silently.
        if not self._from_model_config:
            for key, value in kwargs.items():
                try:
                    setattr(self, key, value)
                except AttributeError as err:
                    logger.error(f"Can't set {key} with value {value} for {self}")
                    raise err
        self.validate()

    def __eq__(self, other):
        """Compare configs, ignoring serialization metadata fields."""
        if not isinstance(other, GenerationConfig):
            return False
        self_dict = self.__dict__.copy()
        other_dict = other.__dict__.copy()
        for metadata_field in ('_from_model_config', '_commit_hash', 'transformers_version'):
            self_dict.pop(metadata_field, None)
            other_dict.pop(metadata_field, None)
        return self_dict == other_dict

    def __repr__(self):
        return f'{self.__class__.__name__} {self.to_json_string()}'

    def validate(self):
        """Sanity-check parameter values (currently only `early_stopping`)."""
        if self.early_stopping not in {True, False, 'never'}:
            raise ValueError(f"`early_stopping` must be a boolean or 'never', but is {self.early_stopping}.")

    def save_pretrained(self, save_directory: Union[str, os.PathLike], config_file_name: Optional[Union[str, os.PathLike]] = None, push_to_hub: bool = False, **kwargs):
        """Write this config as JSON into *save_directory*, optionally pushing
        the result to the Hub."""
        config_file_name = config_file_name if config_file_name is not None else GENERATION_CONFIG_NAME
        if os.path.isfile(save_directory):
            raise AssertionError(f'Provided path ({save_directory}) should be a directory, not a file')
        os.makedirs(save_directory, exist_ok=True)
        if push_to_hub:
            commit_message = kwargs.pop('commit_message', None)
            # Default repo id is the last path component of the directory.
            repo_id = kwargs.pop('repo_id', save_directory.split(os.path.sep)[-1])
            repo_id = self._create_repo(repo_id, **kwargs)
            files_timestamps = self._get_files_timestamps(save_directory)
        output_config_file = os.path.join(save_directory, config_file_name)
        # use_diff=True: only non-default values are written.
        self.to_json_file(output_config_file, use_diff=True)
        logger.info(f'Configuration saved in {output_config_file}')
        if push_to_hub:
            self._upload_modified_files(save_directory, repo_id, files_timestamps, commit_message=commit_message, token=kwargs.get('use_auth_token'))

    @classmethod
    def from_pretrained(cls, pretrained_model_name: Union[str, os.PathLike], config_file_name: Optional[Union[str, os.PathLike]] = None, **kwargs) -> 'GenerationConfig':
        """Load a GenerationConfig from a local directory/file, a URL, or the
        Hub cache; raises EnvironmentError when it cannot be resolved."""
        config_file_name = config_file_name if config_file_name is not None else GENERATION_CONFIG_NAME
        cache_dir = kwargs.pop('cache_dir', None)
        force_download = kwargs.pop('force_download', False)
        resume_download = kwargs.pop('resume_download', False)
        proxies = kwargs.pop('proxies', None)
        use_auth_token = kwargs.pop('use_auth_token', None)
        local_files_only = kwargs.pop('local_files_only', False)
        revision = kwargs.pop('revision', None)
        subfolder = kwargs.pop('subfolder', '')
        from_pipeline = kwargs.pop('_from_pipeline', None)
        from_auto_class = kwargs.pop('_from_auto', False)
        commit_hash = kwargs.pop('_commit_hash', None)
        user_agent = {'file_type': 'config', 'from_auto_class': from_auto_class}
        if from_pipeline is not None:
            user_agent['using_pipeline'] = from_pipeline
        config_path = os.path.join(pretrained_model_name, config_file_name)
        config_path = str(config_path)
        is_local = os.path.exists(config_path)
        if os.path.isfile(os.path.join(subfolder, config_path)):
            # Local file path provided directly.
            resolved_config_file = config_path
            is_local = True
        elif is_remote_url(config_path):
            configuration_file = config_path
            resolved_config_file = download_url(config_path)
        else:
            # Fall back to the Hub cache machinery.
            configuration_file = config_file_name
            try:
                resolved_config_file = cached_file(pretrained_model_name, configuration_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, revision=revision, subfolder=subfolder, _commit_hash=commit_hash)
                commit_hash = extract_commit_hash(resolved_config_file, commit_hash)
            except EnvironmentError:
                # Already user-facing; re-raise untouched.
                raise
            except Exception:
                raise EnvironmentError(f"Can't load the configuration of '{pretrained_model_name}'. If you were trying to load it from ' make sure you don't have a local directory with the same name. Otherwise, make sure '{pretrained_model_name}' is the correct path to a directory containing a {configuration_file} file")
        try:
            config_dict = cls._dict_from_json_file(resolved_config_file)
            config_dict['_commit_hash'] = commit_hash
        except (json.JSONDecodeError, UnicodeDecodeError):
            raise EnvironmentError(f"It looks like the config file at '{resolved_config_file}' is not a valid JSON file.")
        if is_local:
            logger.info(f'loading configuration file {resolved_config_file}')
        else:
            logger.info(f'loading configuration file {configuration_file} from cache at {resolved_config_file}')
        return cls.from_dict(config_dict, **kwargs)

    @classmethod
    def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]):
        """Parse *json_file* into a plain dict."""
        with open(json_file, 'r', encoding='utf-8') as reader:
            text = reader.read()
        return json.loads(text)

    @classmethod
    def from_dict(cls, config_dict: Dict[str, Any], **kwargs) -> 'GenerationConfig':
        """Build a config from *config_dict*, applying overriding *kwargs*;
        returns (config, unused_kwargs) when `return_unused_kwargs=True`."""
        return_unused_kwargs = kwargs.pop('return_unused_kwargs', False)
        # Internal telemetry flags are not config values.
        kwargs.pop('_from_auto', None)
        kwargs.pop('_from_pipeline', None)
        if '_commit_hash' in kwargs and '_commit_hash' in config_dict:
            # The dict's commit hash wins over the caller-supplied one.
            kwargs['_commit_hash'] = config_dict['_commit_hash']
        config = cls(**config_dict, **kwargs)
        unused_kwargs = config.update(**kwargs)
        logger.info(f'Generate config {config}')
        if return_unused_kwargs:
            return (config, unused_kwargs)
        else:
            return config

    def dict_torch_dtype_to_str(self, d: Dict[str, Any]) -> None:
        """In place, convert any `torch_dtype` entry (recursively) to its
        short string form, e.g. torch.float32 -> "float32"."""
        if d.get('torch_dtype', None) is not None and not isinstance(d['torch_dtype'], str):
            d['torch_dtype'] = str(d['torch_dtype']).split('.')[1]
        for value in d.values():
            if isinstance(value, dict):
                self.dict_torch_dtype_to_str(value)

    def to_diff_dict(self) -> Dict[str, Any]:
        """Return only the attributes that differ from a default config
        (plus `transformers_version`, always kept)."""
        config_dict = self.to_dict()
        default_config_dict = GenerationConfig().to_dict()
        serializable_config_dict = {}
        for key, value in config_dict.items():
            if key not in default_config_dict or key == 'transformers_version' or value != default_config_dict[key]:
                serializable_config_dict[key] = value
        self.dict_torch_dtype_to_str(serializable_config_dict)
        return serializable_config_dict

    def to_dict(self) -> Dict[str, Any]:
        """Return a deep-copied dict of all attributes, with `_commit_hash`
        stripped and `transformers_version` pinned to the current version."""
        output = copy.deepcopy(self.__dict__)
        if '_commit_hash' in output:
            del output['_commit_hash']
        output['transformers_version'] = __version__
        self.dict_torch_dtype_to_str(output)
        return output

    def to_json_string(self, use_diff: bool = True) -> str:
        """Serialize to a JSON string; with *use_diff* only non-default
        values are included."""
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict, indent=2, sort_keys=True) + '\n'

    def to_json_file(self, json_file_path: Union[str, os.PathLike], use_diff: bool = True):
        """Write the JSON serialization to *json_file_path*."""
        with open(json_file_path, 'w', encoding='utf-8') as writer:
            writer.write(self.to_json_string(use_diff=use_diff))

    @classmethod
    def from_model_config(cls, model_config: PretrainedConfig) -> 'GenerationConfig':
        """Derive a GenerationConfig from a model's PretrainedConfig, letting
        a nested decoder/generator config override still-default values."""
        config_dict = model_config.to_dict()
        config_dict.pop('_from_model_config', None)
        config = cls.from_dict(config_dict, return_unused_kwargs=False, _from_model_config=True)
        for decoder_name in ('decoder', 'generator'):
            if decoder_name in config_dict:
                default_generation_config = GenerationConfig()
                decoder_config = config_dict[decoder_name]
                for attr in config.to_dict().keys():
                    # Only attributes still at their default may be overridden
                    # by the nested decoder config.
                    if attr in decoder_config and getattr(config, attr) == getattr(default_generation_config, attr):
                        setattr(config, attr, decoder_config[attr])
        return config

    def update(self, **kwargs):
        """Set every kwarg that matches an existing attribute; return the
        dict of kwargs that did not match."""
        to_remove = []
        for key, value in kwargs.items():
            if hasattr(self, key):
                setattr(self, key, value)
                to_remove.append(key)
        unused_kwargs = {key: value for key, value in kwargs.items() if key not in to_remove}
        return unused_kwargs
class positive_int(click.ParamType):
    """Click parameter type accepting only integers strictly greater than zero."""

    name = 'N'

    def convert(self, value, param, ctx):
        """Return *value* as a positive int, aborting via ``self.fail`` on
        non-numeric or non-positive input."""
        error = 'must be a positive integer'
        if isinstance(value, str):
            try:
                value = int(value)
            except ValueError:
                self.fail(error, param, ctx)
        if not value > 0:
            self.fail(error, param, ctx)
        return value
# NOTE(review): the line below is a decorator fragment mangled during
# extraction -- almost certainly a pytest marker such as
# `@pytest.mark.end_to_end()` in the original source; confirm and restore.
.end_to_end()
def test_more_nested_pytree_and_python_node_as_return(runner, snapshot_cli, tmp_path):
    """Collecting a task whose return annotation mixes PythonNodes, tuples and
    plain Python values should succeed; output is snapshot-checked on
    non-Windows platforms only."""
    source = '\n    from pathlib import Path\n    from typing import Any\n    from typing_extensions import Annotated\n    from pytask import PythonNode\n    from typing import Dict\n\n    nodes = [\n        PythonNode(),\n        (PythonNode(), PythonNode()),\n        PythonNode()\n    ]\n\n    def task_example() -> Annotated[Dict[str, str], nodes]:\n        return [{"first": "a", "second": "b"}, (1, 2), 1]\n    '
    tmp_path.joinpath('task_module.py').write_text(textwrap.dedent(source))
    result = runner.invoke(cli, ['collect', '--nodes', tmp_path.as_posix()])
    assert (result.exit_code == ExitCode.OK)
    # Snapshot comparison is skipped on Windows (path separators differ).
    if (sys.platform != 'win32'):
        assert (result.output == snapshot_cli())
def parse_flag_from_env(key, default=False):
    """Read a boolean-ish flag from environment variable *key*.

    Returns *default* unchanged when the variable is unset. Otherwise returns
    1 for truthy spellings (y/yes/t/true/on/1) and 0 for falsy ones
    (n/no/f/false/off/0), case-insensitively, and raises ValueError for
    anything else.

    Fix: the original delegated to an external `strtobool` (the deprecated
    distutils semantics); the equivalent parsing is now inlined so the
    function is self-contained.
    """
    try:
        value = os.environ[key]
    except KeyError:
        # Unset: the caller's default wins, returned as-is.
        return default
    lowered = value.lower()
    if lowered in ('y', 'yes', 't', 'true', 'on', '1'):
        return 1
    if lowered in ('n', 'no', 'f', 'false', 'off', '0'):
        return 0
    raise ValueError(f'If set, {key} must be yes or no.')
def asdict(inst, recurse=True, filter=None, dict_factory=dict, retain_collection_types=False, value_serializer=None):
    """Return the attrs-attribute values of *inst* as a dict.

    Recurses into attrs classes, tuples/lists/sets/frozensets and dicts when
    *recurse* is true, delegating non-mapping/collection leaves to
    `_asdict_anything`. *filter* may drop attributes, *value_serializer* may
    transform values before recursion, and *retain_collection_types* keeps
    tuples as tuples (etc.) instead of converting collections to lists.
    """
    attrs = fields(inst.__class__)
    rv = dict_factory()
    for a in attrs:
        v = getattr(inst, a.name)
        # Caller-supplied filter: skip the attribute entirely when it
        # returns falsy.
        if ((filter is not None) and (not filter(a, v))):
            continue
        if (value_serializer is not None):
            v = value_serializer(inst, a, v)
        if (recurse is True):
            if has(v.__class__):
                # Nested attrs instance: recurse with the same settings.
                rv[a.name] = asdict(v, recurse=True, filter=filter, dict_factory=dict_factory, retain_collection_types=retain_collection_types, value_serializer=value_serializer)
            elif isinstance(v, (tuple, list, set, frozenset)):
                # Convert to list unless the caller wants original types kept.
                cf = (v.__class__ if (retain_collection_types is True) else list)
                items = [_asdict_anything(i, is_key=False, filter=filter, dict_factory=dict_factory, retain_collection_types=retain_collection_types, value_serializer=value_serializer) for i in v]
                try:
                    rv[a.name] = cf(items)
                except TypeError:
                    # Named tuples reject an iterable argument; retry with
                    # positional expansion. Anything else is a real error.
                    if (not issubclass(cf, tuple)):
                        raise
                    rv[a.name] = cf(*items)
            elif isinstance(v, dict):
                # Recurse into both keys and values of mappings.
                df = dict_factory
                rv[a.name] = df(((_asdict_anything(kk, is_key=True, filter=filter, dict_factory=df, retain_collection_types=retain_collection_types, value_serializer=value_serializer), _asdict_anything(vv, is_key=False, filter=filter, dict_factory=df, retain_collection_types=retain_collection_types, value_serializer=value_serializer)) for (kk, vv) in v.items()))
            else:
                rv[a.name] = v
        else:
            rv[a.name] = v
    return rv
def _perform_login(user_obj, service_name):
    """Complete a login for *user_obj*; redirect on success, render an
    OAuth-login error page (mentioning *service_name*) on failure."""
    success, _ = common_login(user_obj.uuid)
    if not success:
        return _render_ologin_error(service_name, 'Could not login. Account may be disabled')
    # Users with pending prompts go to the update-user page first.
    endpoint = 'web.updateuser' if model.user.has_user_prompts(user_obj) else 'web.index'
    return redirect(url_for(endpoint, _scheme=app.config['PREFERRED_URL_SCHEME'], _external=True))
def _generate_primal(graph, gdf_network, fields, multigraph, oneway_column=None):
graph.graph['approach'] = 'primal'
msg = ' This can lead to unexpected behaviour. The intended usage of the conversion function is with networks made of LineStrings only.'
if ('LineString' not in gdf_network.geom_type.unique()):
warnings.warn(message=('The given network does not contain any LineString.' + msg), category=RuntimeWarning, stacklevel=3)
if (len(gdf_network.geom_type.unique()) > 1):
warnings.warn(message=('The given network consists of multiple geometry types.' + msg), category=RuntimeWarning, stacklevel=3)
key = 0
for row in gdf_network.itertuples():
first = row.geometry.coords[0]
last = row.geometry.coords[(- 1)]
data = list(row)[1:]
attributes = dict(zip(fields, data, strict=True))
if multigraph:
graph.add_edge(first, last, key=key, **attributes)
key += 1
if oneway_column:
oneway = bool(getattr(row, oneway_column))
if (not oneway):
graph.add_edge(last, first, key=key, **attributes)
key += 1
else:
graph.add_edge(first, last, **attributes) |
class CoreAudioDecoder(MediaDecoder):
    """MediaDecoder backed by Apple Core Audio via `CoreAudioSource`."""

    def get_file_extensions(self):
        """File extensions this decoder claims to handle."""
        return ('.aac', '.ac3', '.aif', '.aiff', '.aifc', '.caf', '.mp3',
                '.mp4', '.m4a', '.snd', '.au', '.sd2', '.wav')

    def decode(self, filename, file, streaming=True):
        """Return a streaming CoreAudioSource, or wrap it in a StaticSource
        (fully pre-decoded) when *streaming* is false."""
        source = CoreAudioSource(filename, file)
        if streaming:
            return source
        return StaticSource(source)
# NOTE(review): this function yields inside a with-block, i.e. it is a
# generator meant to be used as a fixture/context manager -- the original
# probably carried `@pytest.fixture` or `@contextlib.contextmanager`, which
# extraction stripped. Confirm against the original project.
def patch_ptq_techniques(bn_folded_acc, cle_acc, adaround_acc):
    """Patch aimet's PTQ entry points with lightweight fakes and yield a
    `Mocks` bundle.

    Each fake tags the (deep-copied) TF session with a flag; the provided
    `mock_eval_callback` then maps those flags to the accuracies passed in,
    so tests can script which technique "wins".
    """
    def bn_folding(session, *_, **__):
        # Stand-in for fold_all_batch_norms: tag only, fold nothing.
        session = deepcopy_tf_session(session)
        _tf_session_set_flag(session, 'applied_bn_folding')
        return (session, list())

    def cle(session, *_, **__):
        # CLE implies BN folding, so both flags are set.
        session = deepcopy_tf_session(session)
        _tf_session_set_flag(session, 'applied_bn_folding')
        _tf_session_set_flag(session, 'applied_cle')
        return session

    def adaround(session, *_, **__):
        session = deepcopy_tf_session(session)
        _tf_session_set_flag(session, 'applied_adaround')
        return session

    class _QuantizationSimModel(QuantizationSimModel):
        # Skip real quant-node insertion: too slow / not needed for the test.
        def _add_and_configure_quant_nodes(self, *_, **__):
            pass

        def compute_encodings(self, forward_pass_callback, args):
            # Temporarily no-op sess.run so the callback executes without
            # touching the real graph, then restore it.
            def _forward_pass_callback(sess, args):
                _run = sess.run
                sess.run = (lambda *_, **__: None)
                ret = forward_pass_callback(sess, args)
                sess.run = _run
                return ret
            return super().compute_encodings(_forward_pass_callback, args)

        def set_and_freeze_param_encodings(self, _):
            pass

    def mock_eval_callback(session, _):
        # Priority order mirrors the PTQ pipeline: adaround > cle > bn-fold.
        if _tf_session_get_flag(session, 'applied_adaround'):
            return adaround_acc
        if _tf_session_get_flag(session, 'applied_cle'):
            return cle_acc
        if _tf_session_get_flag(session, 'applied_bn_folding'):
            return bn_folded_acc
        return FP32_ACC

    # NOTE(review): this class has bare annotations but is instantiated below
    # with keyword arguments -- that only works if a `@dataclass` (or similar)
    # decorator was stripped during extraction. Confirm.
    class Mocks():
        eval_callback: Callable
        QuantizationSimModel: MagicMock
        fold_all_batch_norms: MagicMock
        equalize_model: MagicMock
        apply_adaround: MagicMock

    with patch('aimet_tensorflow.auto_quant.QuantizationSimModel', side_effect=_QuantizationSimModel) as mock_qsim, patch('aimet_tensorflow.auto_quant.fold_all_batch_norms', side_effect=bn_folding) as mock_bn_folding, patch('aimet_tensorflow.auto_quant.equalize_model', side_effect=cle) as mock_cle, patch('aimet_tensorflow.auto_quant.Adaround.apply_adaround', side_effect=adaround) as mock_adaround:
        try:
            (yield Mocks(eval_callback=mock_eval_callback, QuantizationSimModel=mock_qsim, fold_all_batch_norms=mock_bn_folding, equalize_model=mock_cle, apply_adaround=mock_adaround))
        finally:
            pass
def test_async_subproc_maximal():
    """Exercise Command construction with all keyword arguments set."""
    # With is_save=True (and no explicit stdout/stderr) both streams default
    # to subprocess.PIPE so output can be captured.
    cmd = Command('arb', is_shell=True, cwd='cwd', is_save=True, is_text=True, encoding='enc', append=True)
    assert (cmd.cmd == 'arb')
    assert (cmd.is_shell is True)
    assert (cmd.cwd == 'cwd')
    assert (cmd.is_save is True)
    assert (cmd.is_text is True)
    assert (cmd.stdout == subprocess.PIPE)
    assert (cmd.stderr == subprocess.PIPE)
    assert (cmd.encoding == 'enc')
    assert (cmd.append is True)
    assert (cmd.results == [])
    # With is_save=False, explicit stdout/stderr are kept as given.
    cmd = Command('arb', is_shell=True, cwd='cwd', is_save=False, is_text=True, stdout='stdout', stderr='stderr', encoding='enc', append=True)
    assert (cmd.cmd == 'arb')
    assert (cmd.is_shell is True)
    assert (cmd.cwd == 'cwd')
    assert (cmd.is_save is False)
    # NOTE(review): is_text=True was passed but False is asserted --
    # presumably Command forces is_text off when is_save is False; confirm
    # against Command's implementation.
    assert (cmd.is_text is False)
    assert (cmd.stdout == 'stdout')
    assert (cmd.stderr == 'stderr')
    assert (cmd.encoding == 'enc')
    assert (cmd.append is True)
    assert (cmd.results == [])
def add_seqformer_config(cfg):
    """Register all SeqFormer-related keys (with defaults) on a detectron2
    config node, mutating *cfg* in place."""
    cfg.MODEL.SeqFormer = CN()
    cfg.MODEL.SeqFormer.NUM_CLASSES = 80
    # Input / sampling options for video clips.
    cfg.INPUT.PRETRAIN_TYPE = 'v1'
    cfg.INPUT.SAMPLING_FRAME_NUM = 1
    cfg.INPUT.SAMPLING_FRAME_RANGE = 10
    cfg.INPUT.SAMPLING_INTERVAL = 1
    cfg.INPUT.SAMPLING_FRAME_SHUFFLE = False
    cfg.INPUT.AUGMENTATIONS = []
    cfg.INPUT.COCO_PRETRAIN = False
    cfg.INPUT.PRETRAIN_SAME_CROP = False
    # Loss weights.
    cfg.MODEL.SeqFormer.MASK_WEIGHT = 2.0
    cfg.MODEL.SeqFormer.DICE_WEIGHT = 5.0
    cfg.MODEL.SeqFormer.GIOU_WEIGHT = 2.0
    cfg.MODEL.SeqFormer.L1_WEIGHT = 5.0
    cfg.MODEL.SeqFormer.CLASS_WEIGHT = 2.0
    cfg.MODEL.SeqFormer.DEEP_SUPERVISION = True
    cfg.MODEL.SeqFormer.MASK_STRIDE = 4
    cfg.MODEL.SeqFormer.MATCH_STRIDE = 4
    cfg.MODEL.SeqFormer.FOCAL_ALPHA = 0.25
    # Hungarian-matcher costs.
    cfg.MODEL.SeqFormer.SET_COST_CLASS = 2
    cfg.MODEL.SeqFormer.SET_COST_BOX = 5
    cfg.MODEL.SeqFormer.SET_COST_GIOU = 2
    # Transformer architecture.
    cfg.MODEL.SeqFormer.NHEADS = 8
    cfg.MODEL.SeqFormer.DROPOUT = 0.1
    cfg.MODEL.SeqFormer.DIM_FEEDFORWARD = 1024
    cfg.MODEL.SeqFormer.ENC_LAYERS = 6
    cfg.MODEL.SeqFormer.DEC_LAYERS = 6
    cfg.MODEL.SeqFormer.HIDDEN_DIM = 256
    cfg.MODEL.SeqFormer.NUM_OBJECT_QUERIES = 300
    cfg.MODEL.SeqFormer.DEC_N_POINTS = 4
    cfg.MODEL.SeqFormer.ENC_N_POINTS = 4
    cfg.MODEL.SeqFormer.NUM_FEATURE_LEVELS = 4
    # Inference-time options.
    cfg.MODEL.SeqFormer.MERGE_ON_CPU = True
    cfg.MODEL.SeqFormer.MULTI_CLS_ON = True
    cfg.MODEL.SeqFormer.APPLY_CLS_THRES = 0.05
    cfg.MODEL.SeqFormer.CLIP_MATCHING = False
    cfg.MODEL.SeqFormer.CLIP_LENGTH = 5
    cfg.MODEL.SeqFormer.CLIP_STRIDE = 1
    # Solver.
    cfg.SOLVER.OPTIMIZER = 'ADAMW'
    cfg.SOLVER.BACKBONE_MULTIPLIER = 0.1
    # Swin-Transformer backbone options.
    cfg.MODEL.SWIN = CN()
    cfg.MODEL.SWIN.PRETRAIN_IMG_SIZE = 224
    cfg.MODEL.SWIN.PATCH_SIZE = 4
    cfg.MODEL.SWIN.EMBED_DIM = 96
    cfg.MODEL.SWIN.DEPTHS = [2, 2, 6, 2]
    cfg.MODEL.SWIN.NUM_HEADS = [3, 6, 12, 24]
    cfg.MODEL.SWIN.WINDOW_SIZE = 7
    cfg.MODEL.SWIN.MLP_RATIO = 4.0
    cfg.MODEL.SWIN.QKV_BIAS = True
    cfg.MODEL.SWIN.QK_SCALE = None
    cfg.MODEL.SWIN.DROP_RATE = 0.0
    cfg.MODEL.SWIN.ATTN_DROP_RATE = 0.0
    cfg.MODEL.SWIN.DROP_PATH_RATE = 0.3
    cfg.MODEL.SWIN.APE = False
    cfg.MODEL.SWIN.PATCH_NORM = True
    cfg.MODEL.SWIN.OUT_FEATURES = ['res2', 'res3', 'res4', 'res5']
    cfg.MODEL.SWIN.USE_CHECKPOINT = False
    cfg.FIND_UNUSED_PARAMETERS = True
class RoIPoolFunction(Function):
    """Autograd wrapper around the CUDA RoI-pooling kernels (`roi_pool_cuda`).

    Fix: `torch.autograd.Function` requires `forward`/`backward` to be static
    methods (their first argument is `ctx`, not `self`); the decorators were
    missing and are restored here. A stray `_differentiable` token sat between
    the two methods -- very likely a mangled `@once_differentiable` decorator
    on `backward`; the dead token is removed (confirm against the original
    project whether `@once_differentiable` should be restored too).
    """

    @staticmethod
    def forward(ctx, features, rois, out_size, spatial_scale):
        """RoI-pool *features* over *rois*; CUDA tensors only."""
        assert features.is_cuda
        out_h, out_w = _pair(out_size)
        assert isinstance(out_h, int) and isinstance(out_w, int)
        ctx.save_for_backward(rois)
        num_channels = features.size(1)
        num_rois = rois.size(0)
        out_size = (num_rois, num_channels, out_h, out_w)
        output = features.new_zeros(out_size)
        # argmax records which input element won the max per output cell --
        # needed to route gradients in backward().
        argmax = features.new_zeros(out_size, dtype=torch.int)
        roi_pool_cuda.forward(features, rois, out_h, out_w, spatial_scale, output, argmax)
        ctx.spatial_scale = spatial_scale
        ctx.feature_size = features.size()
        ctx.argmax = argmax
        return output

    @staticmethod
    def backward(ctx, grad_output):
        """Scatter *grad_output* back to the feature map via the saved argmax."""
        assert grad_output.is_cuda
        spatial_scale = ctx.spatial_scale
        feature_size = ctx.feature_size
        argmax = ctx.argmax
        rois = ctx.saved_tensors[0]
        assert feature_size is not None
        grad_input = grad_rois = None
        if ctx.needs_input_grad[0]:
            grad_input = grad_output.new_zeros(feature_size)
            roi_pool_cuda.backward(grad_output.contiguous(), rois, argmax, spatial_scale, grad_input)
        # rois, out_size and spatial_scale receive no gradient.
        return (grad_input, grad_rois, None, None)
def concat_guess_data(stock_column, data):
    """Build a guess frame seeded from *data* (date/code carried over, every
    other column seeded with latest_price), run `apply_guess` row-wise, and
    left-merge the result back onto *data* by code."""
    print('stock_column:', stock_column)
    seed = {}
    for col in stock_column:
        if col in ('date', 'code'):
            seed[col] = data[col]
        else:
            # Every non-key column starts from the latest price.
            seed[col] = data['latest_price']
    print(' BEGIN ')
    stock_guess = pd.DataFrame(seed, index=data.index.values)
    print(stock_guess.columns.values)
    stock_guess = stock_guess.apply(apply_guess, stock_column=stock_column, axis=1)
    print(stock_guess.head())
    stock_guess.drop('date', axis=1, inplace=True)
    data_new = pd.merge(data, stock_guess, on=['code'], how='left')
    print('')
    return data_new
def test_new_type_value() -> None:
    """NewTypeValue assignability: same NewType and its base type are
    compatible; distinct NewTypes and unrelated types are not."""
    first = NewType('nt1', int)
    second = NewType('nt2', int)
    first_val = value.NewTypeValue(first)
    second_val = value.NewTypeValue(second)
    # Identical NewType: compatible; different NewType over int: not.
    assert_can_assign(first_val, first_val)
    assert_cannot_assign(first_val, second_val)
    # Compatible with the underlying base type in both directions.
    assert_can_assign(first_val, TypedValue(int))
    assert_can_assign(TypedValue(int), first_val)
    # Unrelated types stay incompatible.
    assert_cannot_assign(first_val, TypedValue(Capybara))
    assert_cannot_assign(first_val, KnownValue(Capybara.hydrochaeris))
class Net(nn.Module):
    """Two-layer MLP classifier: feature_size -> 300 -> 3.

    `feature_size` is a module-level constant defined elsewhere in this file.
    """

    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(feature_size, 300)
        self.fc2 = nn.Linear(300, 3)

    def forward(self, x):
        """Flatten to (N, feature_size) and return raw class logits."""
        x = x.view((- 1), feature_size)
        emb = F.relu(self.fc1(x))
        # Fix: the original built `(nn.Dropout(0.5),)` here and discarded it --
        # a no-op that allocated a fresh module on every forward pass. The dead
        # statement is removed without changing behavior.
        # NOTE(review): it was probably *meant* to apply dropout to `emb`;
        # confirm whether a registered nn.Dropout(0.5) should be applied.
        x = self.fc2(emb)
        return x
# NOTE(review): throughout this class, bare lines such as
# `.with_category(...)`, `.with_argparser(...)` and `_decorator` are decorator
# fragments mangled during extraction (originally `@cmd2.with_category(...)`,
# `@cmd2.with_argparser(...)` and a custom `@my_decorator` per the epilog
# strings). They are kept verbatim below; confirm and restore the `@` forms.
class HelpCategories(cmd2.Cmd):
    """cmd2 application demonstrating help categories: commands are grouped
    via `cmd2.categorize`, `with_category`, and category-based enable/disable."""
    # Valid `when` choices for the start/restart argparsers below.
    START_TIMES = ['now', 'later', 'sometime', 'whenever']
    # Category names reused by categorize()/disable_category() calls.
    CMD_CAT_CONNECTING = 'Connecting'
    CMD_CAT_APP_MGMT = 'Application Management'
    CMD_CAT_SERVER_INFO = 'Server Information'

    def __init__(self):
        super().__init__()

    def do_connect(self, _):
        self.poutput('Connect')
    # Categorize a single command immediately after its definition.
    cmd2.categorize(do_connect, CMD_CAT_CONNECTING)

    .with_category(CMD_CAT_CONNECTING)
    def do_which(self, _):
        self.poutput('Which')

    def do_list(self, _):
        self.poutput('List')

    def do_deploy(self, _):
        self.poutput('Deploy')

    # Parser shared by do_start below (mangled decorators follow it).
    start_parser = argparse_custom.DEFAULT_ARGUMENT_PARSER(description='Start', epilog='my_decorator runs even with argparse errors')
    start_parser.add_argument('when', choices=START_TIMES, help='Specify when to start')
    _decorator
    .with_argparser(start_parser)
    def do_start(self, _):
        self.poutput('Start')

    def do_sessions(self, _):
        self.poutput('Sessions')

    def do_redeploy(self, _):
        self.poutput('Redeploy')

    restart_parser = argparse_custom.DEFAULT_ARGUMENT_PARSER(description='Restart', epilog='my_decorator does not run when argparse errors')
    restart_parser.add_argument('when', choices=START_TIMES, help='Specify when to restart')
    .with_argparser(restart_parser)
    .with_category(CMD_CAT_APP_MGMT)
    _decorator
    def do_restart(self, _):
        self.poutput('Restart')

    def do_expire(self, _):
        self.poutput('Expire')

    def do_undeploy(self, _):
        self.poutput('Undeploy')

    def do_stop(self, _):
        self.poutput('Stop')

    def do_findleakers(self, _):
        self.poutput('Find Leakers')
    # Categorize a whole batch of commands at once.
    cmd2.categorize((do_list, do_deploy, do_start, do_sessions, do_redeploy, do_expire, do_undeploy, do_stop, do_findleakers), CMD_CAT_APP_MGMT)

    def do_resources(self, _):
        self.poutput('Resources')

    def do_status(self, _):
        self.poutput('Status')

    def do_serverinfo(self, _):
        self.poutput('Server Info')

    def do_thread_dump(self, _):
        self.poutput('Thread Dump')

    def do_sslconnectorciphers(self, _):
        self.poutput('SSL Connector Ciphers')

    def do_vminfo(self, _):
        self.poutput('VM Info')
    # Server-info commands categorized one at a time.
    cmd2.categorize(do_resources, CMD_CAT_SERVER_INFO)
    cmd2.categorize(do_status, CMD_CAT_SERVER_INFO)
    cmd2.categorize(do_serverinfo, CMD_CAT_SERVER_INFO)
    cmd2.categorize(do_thread_dump, CMD_CAT_SERVER_INFO)
    cmd2.categorize(do_sslconnectorciphers, CMD_CAT_SERVER_INFO)
    cmd2.categorize(do_vminfo, CMD_CAT_SERVER_INFO)

    # Uncategorized commands (fall under cmd2's default category).
    def do_config(self, _):
        self.poutput('Config')

    def do_version(self, _):
        self.poutput(cmd2.__version__)

    .with_category('Command Management')
    def do_disable_commands(self, _):
        # COMMAND_NAME is defined elsewhere in this file.
        message_to_print = '{} is not available while {} commands are disabled'.format(COMMAND_NAME, self.CMD_CAT_APP_MGMT)
        self.disable_category(self.CMD_CAT_APP_MGMT, message_to_print)
        self.poutput('The Application Management commands have been disabled')

    .with_category('Command Management')
    def do_enable_commands(self, _):
        self.enable_category(self.CMD_CAT_APP_MGMT)
        self.poutput('The Application Management commands have been enabled')
# Fix: the property-decorator lines of this class were mangled during
# extraction (e.g. `(QPointF, notify=p1Changed)` for `@pyqtProperty(...)` and
# bare `.setter` lines). Without them every second `def pN` simply overwrote
# the getter, leaving no QML-visible properties at all. The decorators are
# restored below; pyqtProperty comes from the same PyQt5.QtCore module the
# file already uses for pyqtSignal.
from PyQt5.QtCore import pyqtProperty

class BezierCurve(QQuickItem):
    """QQuickItem that renders a cubic Bezier curve through scene-graph
    geometry, with the four control points and segment count exposed as
    notifiable QML properties."""

    p1Changed = pyqtSignal(QPointF)

    @pyqtProperty(QPointF, notify=p1Changed)
    def p1(self):
        return self._p1

    @p1.setter
    def p1(self, p):
        if (self._p1 != p):
            self._p1 = QPointF(p)
            self.p1Changed.emit(p)
            self.update()

    p2Changed = pyqtSignal(QPointF)

    @pyqtProperty(QPointF, notify=p2Changed)
    def p2(self):
        return self._p2

    @p2.setter
    def p2(self, p):
        if (self._p2 != p):
            self._p2 = QPointF(p)
            self.p2Changed.emit(p)
            self.update()

    p3Changed = pyqtSignal(QPointF)

    @pyqtProperty(QPointF, notify=p3Changed)
    def p3(self):
        return self._p3

    @p3.setter
    def p3(self, p):
        if (self._p3 != p):
            self._p3 = QPointF(p)
            self.p3Changed.emit(p)
            self.update()

    p4Changed = pyqtSignal(QPointF)

    @pyqtProperty(QPointF, notify=p4Changed)
    def p4(self):
        return self._p4

    @p4.setter
    def p4(self, p):
        if (self._p4 != p):
            self._p4 = QPointF(p)
            self.p4Changed.emit(p)
            self.update()

    segmentCountChanged = pyqtSignal(int)

    @pyqtProperty(int, notify=segmentCountChanged)
    def segmentCount(self):
        return self._segmentCount

    @segmentCount.setter
    def segmentCount(self, count):
        if (self._segmentCount != count):
            self._segmentCount = count
            self.segmentCountChanged.emit(count)
            self.update()

    def __init__(self, parent=None):
        super(BezierCurve, self).__init__(parent)
        # Default control points span the unit square; scaled to the item's
        # width/height in updatePaintNode().
        self._p1 = QPointF(0, 0)
        self._p2 = QPointF(1, 0)
        self._p3 = QPointF(0, 1)
        self._p4 = QPointF(1, 1)
        self._segmentCount = 32
        self._root_node = None
        # Required so the scene graph calls updatePaintNode().
        self.setFlag(QQuickItem.ItemHasContents, True)

    def updatePaintNode(self, oldNode, nodeData):
        """Build (first call) or update the line-strip geometry node."""
        if (self._root_node is None):
            self._root_node = QSGGeometryNode()
            geometry = QSGGeometry(QSGGeometry.defaultAttributes_Point2D(), self._segmentCount)
            geometry.setLineWidth(2)
            geometry.setDrawingMode(QSGGeometry.GL_LINE_STRIP)
            self._root_node.setGeometry(geometry)
            self._root_node.setFlag(QSGNode.OwnsGeometry)
            material = QSGFlatColorMaterial()
            material.setColor(QColor(255, 0, 0))
            self._root_node.setMaterial(material)
            self._root_node.setFlag(QSGNode.OwnsMaterial)
        else:
            geometry = self._root_node.geometry()
            geometry.allocate(self._segmentCount)
        w = self.width()
        h = self.height()
        vertices = geometry.vertexDataAsPoint2D()
        for i in range(self._segmentCount):
            # Evaluate the cubic Bezier with the Bernstein basis at t in [0, 1].
            t = (i / float((self._segmentCount - 1)))
            invt = (1 - t)
            pos = ((((((invt * invt) * invt) * self._p1) + ((((3 * invt) * invt) * t) * self._p2)) + ((((3 * invt) * t) * t) * self._p3)) + (((t * t) * t) * self._p4))
            vertices[i].set((pos.x() * w), (pos.y() * h))
        self._root_node.markDirty(QSGNode.DirtyGeometry)
        return self._root_node
def modifyModlist(old_entry, new_entry, ignore_attr_types=None, ignore_oldexistent=0, case_ignore_attr_types=None):
    """Build an LDAP modification list that turns old_entry into new_entry.

    Returns a list of (ldap.MOD_ADD | ldap.MOD_DELETE, attrtype, values) tuples.
    Attribute-type matching is case-insensitive. Attributes listed in
    ignore_attr_types are skipped entirely; case_ignore_attr_types lists
    attributes whose *values* are compared case-insensitively. When
    ignore_oldexistent is falsy, attributes present only in old_entry are
    deleted.
    """
    ignore_attr_types = {v.lower() for v in (ignore_attr_types or [])}
    case_ignore_attr_types = {v.lower() for v in (case_ignore_attr_types or [])}
    modlist = []
    # Map lower-cased attribute type -> original-cased key of old_entry.
    # Matched entries are deleted below, so whatever remains afterwards
    # exists only in old_entry.
    attrtype_lower_map = {}
    for a in old_entry:
        attrtype_lower_map[a.lower()] = a
    for (attrtype, value) in new_entry.items():
        attrtype_lower = attrtype.lower()
        if (attrtype_lower in ignore_attr_types):
            continue
        # Drop None placeholders from the new value list.
        new_value = [item for item in value if (item is not None)]
        if (attrtype_lower in attrtype_lower_map):
            old_value = old_entry.get(attrtype_lower_map[attrtype_lower], [])
            old_value = [item for item in old_value if (item is not None)]
            del attrtype_lower_map[attrtype_lower]
        else:
            old_value = []
        if ((not old_value) and new_value):
            # Attribute is new: add all of its values.
            modlist.append((ldap.MOD_ADD, attrtype, new_value))
        elif (old_value and new_value):
            # Attribute exists on both sides: replace only if the value sets differ.
            replace_attr_value = (len(old_value) != len(new_value))
            if (not replace_attr_value):
                if (attrtype_lower in case_ignore_attr_types):
                    # NOTE(review): assumes values support .lower() (str/bytes-like).
                    old_value_set = {v.lower() for v in old_value}
                    new_value_set = {v.lower() for v in new_value}
                else:
                    old_value_set = set(old_value)
                    new_value_set = set(new_value)
                replace_attr_value = (new_value_set != old_value_set)
            if replace_attr_value:
                # Replace = delete all existing values, then add the new ones.
                modlist.append((ldap.MOD_DELETE, attrtype, None))
                modlist.append((ldap.MOD_ADD, attrtype, new_value))
        elif (old_value and (not new_value)):
            # Attribute vanished from new_entry: delete it entirely.
            modlist.append((ldap.MOD_DELETE, attrtype, None))
    if (not ignore_oldexistent):
        # Delete attributes that exist only in old_entry.
        for (a, val) in attrtype_lower_map.items():
            if (a in ignore_attr_types):
                continue
            attrtype = val
            modlist.append((ldap.MOD_DELETE, attrtype, None))
    return modlist
def object_len(node, context: (InferenceContext | None)=None):
    """Infer ``len(node)`` for an astroid node, mirroring builtin len() semantics.

    Returns an int. Raises InferenceError when the value cannot be inferred
    and AstroidTypeError when the inferred object has no usable __len__.
    """
    # Imported locally, presumably to avoid an import cycle at module load —
    # TODO confirm.
    from astroid.objects import FrozenSet
    inferred_node = real_safe_infer(node, context=context)
    # Guard: a __len__ whose body asks for its own instance's len() would
    # recurse forever when we evaluate __len__ below.
    node_frame = node.frame()
    if (isinstance(node_frame, scoped_nodes.FunctionDef) and (node_frame.name == '__len__') and isinstance(inferred_node, bases.Proxy) and (inferred_node._proxied == node_frame.parent)):
        message = 'Self referential __len__ function will cause a RecursionError on line {} of {}'.format(node.lineno, node.root().file)
        raise InferenceError(message)
    if ((inferred_node is None) or isinstance(inferred_node, util.UninferableBase)):
        raise InferenceError(node=node)
    # Fast paths: literal strings/bytes and literal containers.
    if (isinstance(inferred_node, nodes.Const) and isinstance(inferred_node.value, (bytes, str))):
        return len(inferred_node.value)
    if isinstance(inferred_node, (nodes.List, nodes.Set, nodes.Tuple, FrozenSet)):
        return len(inferred_node.elts)
    if isinstance(inferred_node, nodes.Dict):
        return len(inferred_node.items)
    # General case: look up __len__ on the object's type and infer its result.
    node_type = object_type(inferred_node, context=context)
    if (not node_type):
        raise InferenceError(node=node)
    try:
        len_call = next(node_type.igetattr('__len__', context=context))
    except StopIteration as e:
        raise AstroidTypeError(str(e)) from e
    except AttributeInferenceError as e:
        raise AstroidTypeError(f"object of type '{node_type.pytype()}' has no len()") from e
    inferred = len_call.infer_call_result(node, context)
    if isinstance(inferred, util.UninferableBase):
        raise InferenceError(node=node, context=context)
    result_of_len = next(inferred, None)
    if (isinstance(result_of_len, nodes.Const) and (result_of_len.pytype() == 'builtins.int')):
        return result_of_len.value
    if ((result_of_len is None) or (isinstance(result_of_len, bases.Instance) and result_of_len.is_subtype_of('builtins.int'))):
        # __len__ returns an int but not a literal we can evaluate: report 0.
        return 0
    raise AstroidTypeError(f"'{result_of_len}' object cannot be interpreted as an integer")
def _convert_list_type_from_XML(vs):
    """Parse the <ListItem>/<ConfigListItem> children of *vs* into a list.

    Each child is converted via the CONVERT_TYPE_FROM_XML dispatch table when
    its 'type' attribute has a registered converter; otherwise its raw text
    is used.
    """
    children = vs.findall('ListItem') + vs.findall('ConfigListItem')
    result = []
    for child in children:
        type_attr = child.get('type')
        if type_attr in CONVERT_TYPE_FROM_XML:
            result.append(CONVERT_TYPE_FROM_XML[type_attr](child))
        else:
            result.append(child.text)
    return result
def create_virtual_interfaces(kubecli: KrknKubernetes, nummber: int, node: str, pod_template) -> None:
    """Create `nummber` IFB virtual interfaces on `node` using a helper pod.

    NOTE(review): 'nummber' is a typo for 'number', but renaming it would
    break keyword-argument callers, so it is kept as-is.
    """
    # Render the pod manifest for the target node and start the helper pod
    # in the 'default' namespace (300 s creation timeout).
    pod_body = yaml.safe_load(pod_template.render(nodename=node))
    kubecli.create_pod(pod_body, 'default', 300)
    logging.info('Creating {0} virtual interfaces on node {1} using a pod'.format(nummber, node))
    # create_ifb (defined elsewhere) performs the interface creation inside
    # the 'modtools' pod.
    create_ifb(kubecli, nummber, 'modtools')
    logging.info('Deleting pod used to create virtual interfaces')
    kubecli.delete_pod('modtools', 'default')
def main():
    """Cron-style entry point: run update, verify, collect and submit steps.

    Stamp files under var_dir throttle how often collect/submit actually run;
    each helper binary is invoked via subprocess and failures are logged
    rather than raised.
    """
    os.chdir(top_dir)
    collect_interval = get_setting('schedule:collect_interval')
    submit_interval = get_setting('schedule:submit_interval')
    collect_stamp_file = os.path.join(var_dir, 'collect-stamp')
    submit_stamp_file = os.path.join(var_dir, 'submit-stamp')
    # Collect when the collect stamp has expired; submit when we are collecting
    # anyway, or when the submit stamp expired AND collected data is waiting.
    do_collect = check_stamp(collect_stamp_file, collect_interval)
    do_submit = (do_collect or (check_stamp(submit_stamp_file, submit_interval) and glob.glob(os.path.join(collected_dir, '*'))))
    successful_update = False
    try:
        subprocess.check_output((bin_path('update'),), stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        # Exit code 42 is treated as "updated successfully, force a full cycle".
        # NOTE(review): successful_update is only ever set on this exception
        # path — a zero-exit update leaves it False; confirm that is intended.
        if (e.returncode == 42):
            successful_update = do_collect = do_submit = True
        else:
            log.error('update failed:\n{}', e.output.decode('utf8'))
    if (not successful_update):
        try:
            subprocess.check_output((bin_path('verify'),))
        except subprocess.CalledProcessError as e:
            log.error('verification failed:\n{}', e.output.decode('utf8'))
    if do_collect:
        try:
            subprocess.check_output((bin_path('collect'), '--plugins'), stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            log.error('collect --plugins failed:\n{}', e.output.decode('utf8'))
    if do_submit:
        try:
            subprocess.check_output((bin_path('submit'),), stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            log.error('submit failed:\n{}', e.output.decode('utf8'))
        else:
            update_stamp(submit_stamp_file)
    if do_collect:
        try:
            subprocess.check_output((bin_path('collect'), '--commands'), stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            log.error('collect --commands failed:\n{}', e.output.decode('utf8'))
        update_stamp(collect_stamp_file)
        # NOTE(review): this second submit pass duplicates the one above;
        # original indentation was lost, so its nesting under `if do_collect`
        # is reconstructed — confirm against upstream.
        try:
            subprocess.check_output((bin_path('submit'),), stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            log.error('submit failed:\n{}', e.output.decode('utf8'))
        else:
            update_stamp(submit_stamp_file)
class Migration(migrations.Migration):
    """Add attendee-capacity support to the schedule app.

    - ScheduleItem.attendees_total_capacity: optional cap on attendees
      (blank/null means unlimited).
    - ScheduleItemAttendee: one row per (user_id, schedule_item) pair,
      unique together, with created/modified audit timestamps.
    """
    dependencies = [('schedule', '0033_new_schedule_item_type_talk')]
    operations = [migrations.AddField(model_name='scheduleitem', name='attendees_total_capacity', field=models.PositiveIntegerField(blank=True, help_text='Maximum capacity for this event. Leave blank to not limit attendees.', null=True, verbose_name='Attendees total capacity')), migrations.CreateModel(name='ScheduleItemAttendee', fields=[('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')), ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')), ('user_id', models.IntegerField(verbose_name='user')), ('schedule_item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='attendees', to='schedule.scheduleitem', verbose_name='schedule item'))], options={'unique_together': {('user_id', 'schedule_item')}})]
class ResBlock(nn.Module):
    """Residual mixer block: token-mixing linear layer plus channel MLP.

    Both sub-blocks are pre-normalized, scaled by learnable per-channel
    LayerScale parameters (ls1/ls2), passed through stochastic depth, and
    added back to the input.
    """

    def __init__(self, dim, seq_len, mlp_ratio=4, mlp_layer=Mlp, norm_layer=Affine, act_layer=nn.GELU, init_values=0.0001, drop=0.0, drop_path=0.0):
        super().__init__()
        hidden_dim = int(dim * mlp_ratio)
        self.norm1 = norm_layer(dim)
        # Mixes information across the sequence (token) dimension.
        self.linear_tokens = nn.Linear(seq_len, seq_len)
        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.norm2 = norm_layer(dim)
        # Mixes information across the channel dimension.
        self.mlp_channels = mlp_layer(dim, hidden_dim, act_layer=act_layer, drop=drop)
        self.ls1 = nn.Parameter(init_values * torch.ones(dim))
        self.ls2 = nn.Parameter(init_values * torch.ones(dim))

    def forward(self, x):
        # Token mixing operates on the transposed (.., dim, seq) layout.
        mixed_tokens = self.linear_tokens(self.norm1(x).transpose(1, 2)).transpose(1, 2)
        x = x + self.drop_path(self.ls1 * mixed_tokens)
        x = x + self.drop_path(self.ls2 * self.mlp_channels(self.norm2(x)))
        return x
class RepBlock(nn.Module):
    """A stage of n re-parameterizable blocks: one stem conv plus (n-1) more.

    When `block` is BottleRep, each unit counts double (a BottleRep wraps two
    basic blocks), so the effective depth is halved via n // 2.

    FIX: the original unconditionally built `block(...)` modules and then,
    when `block == BottleRep`, discarded and rebuilt them — wasted work that
    also advanced the RNG used for parameter initialization. Each variant is
    now constructed exactly once.
    """

    def __init__(self, in_channels, out_channels, n=1, block=RepVGGBlock, basic_block=RepVGGBlock):
        super().__init__()
        if block == BottleRep:
            self.conv1 = BottleRep(in_channels, out_channels, basic_block=basic_block, weight=True)
            n = n // 2
            self.block = (nn.Sequential(*(BottleRep(out_channels, out_channels, basic_block=basic_block, weight=True) for _ in range(n - 1))) if n > 1 else None)
        else:
            self.conv1 = block(in_channels, out_channels)
            self.block = (nn.Sequential(*(block(out_channels, out_channels) for _ in range(n - 1))) if n > 1 else None)

    def forward(self, x):
        x = self.conv1(x)
        if self.block is not None:
            x = self.block(x)
        return x
def format_item(format_spec, item, defaults=None):
    """Render *item* using either a Tempita template or a %-style format spec.

    The Tempita path is chosen when format_spec declares __engine__ ==
    'tempita', or when no engine is declared and the spec starts with '{{'.
    A falsy *item* switches to "headers" mode, rendering field names instead
    of data.
    """
    template_engine = getattr(format_spec, '__engine__', None)
    if ((template_engine == 'tempita') or ((not template_engine) and format_spec.startswith('{{'))):
        namespace = dict(headers=(not bool(item)))
        if item:
            namespace['d'] = item
        else:
            # Headers mode: build a placeholder record whose field values are
            # the upper-cased field names.
            namespace['d'] = Bunch()
            for name in engine.FieldDefinition.FIELDS:
                namespace['d'][name] = name.upper()
        # Expose every module-level fmt_* helper under its bare name, wrapped
        # so the output is right-justified to the width of the formatted zero
        # value. (m=method binds the loop variable early — the usual
        # late-binding-closure guard.)
        namespace.update(((name[4:], (lambda x, m=method: str(x).rjust(len(str(m(0)))))) for (name, method) in globals().items() if name.startswith('fmt_')))
        return expand_template(format_spec, namespace)
    else:
        format_spec = getattr(format_spec, 'fmt', format_spec)
        if (item is None):
            # Headers mode for %-specs: rewrite numeric conversions such as
            # %(size)10d into %(size)10s so header text can be substituted
            # where numbers would normally go.
            format_spec = re.sub('(\\([_.a-zA-Z0-9]+\\)[-#+0 ]?[0-9]*?)[.0-9]*[diouxXeEfFgG]', (lambda m: (m.group(1) + 's')), format_spec)
        return (format_spec % OutputMapping(item, defaults))
def distros_for_location(location, basename, metadata=None):
    """Return a list of Distribution objects that *basename* at *location*
    could provide, based on its filename/extension."""
    # '.egg.zip' is a zipped egg: drop only the '.zip' part and fall through
    # to the '.egg' handling below.
    if basename.endswith('.egg.zip'):
        basename = basename[:-4]
    if '-' in basename and basename.endswith('.egg'):
        # Fully-specified egg filename: location decides everything else.
        return [Distribution.from_location(location, basename, metadata)]
    if '-' in basename and basename.endswith('.whl'):
        wheel = Wheel(basename)
        if not wheel.is_compatible():
            # Incompatible wheels yield nothing rather than a bogus candidate.
            return []
        return [Distribution(location=location, project_name=wheel.project_name, version=wheel.version, precedence=EGG_DIST + 1)]
    if basename.endswith('.exe'):
        win_base, py_ver, platform = parse_bdist_wininst(basename)
        if win_base is not None:
            return interpret_distro_name(location, win_base, metadata, py_ver, BINARY_DIST, platform)
    # Source distributions: strip a known archive extension and interpret
    # the remaining name.
    for ext in EXTENSIONS:
        if basename.endswith(ext):
            return interpret_distro_name(location, basename[:-len(ext)], metadata)
    return []
def test_cannot_send_a_grant_if_grants_deadline_do_not_exists(graphql_client, user, conference, grant_factory):
    """Sending a grant for a conference with no deadlines must return a
    domain-level GrantErrors payload, not a GraphQL transport error."""
    # Precondition: the conference fixture has no deadlines configured.
    assert (list(conference.deadlines.all()) == [])
    graphql_client.force_login(user)
    response = _send_grant(graphql_client, grant_factory, conference)
    # Failure surfaces in the data payload, not in the top-level errors key.
    assert (not response.get('errors'))
    assert (response['data']['sendGrant']['__typename'] == 'GrantErrors')
    assert (response['data']['sendGrant']['errors']['nonFieldErrors'] == ['The grants form is not open!'])
class MF(BaseEstimator, TransformerMixin):
    """Matrix-factorization recommender trained with a BPR-style pairwise loss
    (TensorFlow 1.x graph mode).

    NOTE(review): this is Python-2-era code (`xrange`, tf.placeholder,
    tf.contrib) and several methods read the module-level globals `data` and
    `model` — confirm those are defined in the surrounding module.
    """

    def __init__(self, num_users, num_items, pretrain_flag, hidden_factor, epoch, batch_size, learning_rate, lamda_bilinear, optimizer_type, verbose, layers, activation_function, keep_prob, save_file, random_seed=2016):
        # Hyper-parameters are stored verbatim; the TF graph and session are
        # built eagerly at the end of __init__.
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        self.hidden_factor = hidden_factor
        self.save_file = save_file
        self.pretrain_flag = pretrain_flag
        self.num_users = num_users
        self.num_items = num_items
        self.lamda_bilinear = lamda_bilinear
        self.epoch = epoch
        self.random_seed = random_seed
        self.optimizer_type = optimizer_type
        self.verbose = verbose
        self.layers = layers
        self.activation_function = activation_function
        self.keep_prob = np.array(keep_prob)
        # All-ones keep probabilities: disables dropout at evaluation time.
        self.no_dropout = np.array([1 for i in xrange(len(keep_prob))])
        self._init_graph()

    def _init_graph(self):
        """Build the TF1 computation graph, optimizer and session."""
        self.graph = tf.Graph()
        with self.graph.as_default():
            tf.set_random_seed(self.random_seed)
            # Placeholders: user ids, positive/negative item ids, dropout and phase.
            self.user = tf.placeholder(tf.int32, shape=[None])
            self.item_pos = tf.placeholder(tf.int32, shape=[None])
            self.item_neg = tf.placeholder(tf.int32, shape=[None])
            self.dropout_keep = tf.placeholder(tf.float32, shape=[None])
            self.train_phase = tf.placeholder(tf.bool)
            self.weights = self._initialize_weights()
            # Scores are dot products of user and item embeddings.
            user_embedding = tf.nn.embedding_lookup(self.weights['user_embeddings'], self.user)
            pos_embedding = tf.nn.embedding_lookup(self.weights['item_embeddings'], self.item_pos)
            self.pos = tf.reduce_sum(tf.multiply(user_embedding, pos_embedding), 1)
            neg_embedding = tf.nn.embedding_lookup(self.weights['item_embeddings'], self.item_neg)
            self.neg = tf.reduce_sum(tf.multiply(user_embedding, neg_embedding), 1)
            # BPR loss: -log(sigmoid(pos - neg)), summed over the batch,
            # plus L2 regularization on both embedding tables.
            self.loss = (- tf.log(tf.sigmoid((self.pos - self.neg))))
            self.loss = tf.reduce_sum(self.loss)
            regularization = (tf.contrib.layers.l2_regularizer(self.lamda_bilinear)(self.weights['user_embeddings']) + tf.contrib.layers.l2_regularizer(self.lamda_bilinear)(self.weights['item_embeddings']))
            self.loss = tf.add(self.loss, regularization)
            # Optimizer is selected by name; unknown names leave self.optimizer unset.
            if (self.optimizer_type == 'AdamOptimizer'):
                self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate, beta1=0.9, beta2=0.999, epsilon=1e-08).minimize(self.loss)
            elif (self.optimizer_type == 'AdagradOptimizer'):
                self.optimizer = tf.train.AdagradOptimizer(learning_rate=self.learning_rate, initial_accumulator_value=1e-08).minimize(self.loss)
            elif (self.optimizer_type == 'GradientDescentOptimizer'):
                self.optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate).minimize(self.loss)
            elif (self.optimizer_type == 'MomentumOptimizer'):
                self.optimizer = tf.train.MomentumOptimizer(learning_rate=self.learning_rate, momentum=0.95).minimize(self.loss)
            self.saver = tf.train.Saver()
            init = tf.global_variables_initializer()
            self.sess = tf.Session()
            self.sess.run(init)
            # Count and optionally report the number of trainable parameters.
            total_parameters = 0
            for variable in self.weights.values():
                shape = variable.get_shape()
                variable_parameters = 1
                for dim in shape:
                    variable_parameters *= dim.value
                total_parameters += variable_parameters
            if (self.verbose > 0):
                print(('#params: %d' % total_parameters))

    def _initialize_weights(self):
        """Create the user/item embedding variables (normal init, std 0.05)."""
        all_weights = dict()
        all_weights['user_embeddings'] = tf.Variable(tf.random_normal([self.num_users, self.hidden_factor], 0.0, 0.05), name='user_embeddings')
        all_weights['item_embeddings'] = tf.Variable(tf.random_normal([self.num_items, self.hidden_factor], 0.0, 0.05), name='item_embeddings')
        # NOTE(review): num_layer is computed but never used.
        num_layer = len(self.layers)
        return all_weights

    def partial_fit(self, data):
        """Run one optimization step on a batch dict and return its loss."""
        feed_dict = {self.user: data['user'], self.item_pos: data['positive'], self.item_neg: data['negative'], self.dropout_keep: self.keep_prob, self.train_phase: True}
        (loss, opt) = self.sess.run((self.loss, self.optimizer), feed_dict=feed_dict)
        return loss

    def get_random_block_from_data(self, train_data, batch_size):
        """Sample a training batch: random (user, positive) pairs plus a
        negative item not in the user's positive list (rejection sampling).

        NOTE(review): reads the module-level global `data` for the item
        catalogue and per-user positives — confirm it is available.
        """
        (user, positive, negative) = ([], [], [])
        all_items = data.items.values()
        while (len(user) < batch_size):
            index = np.random.randint(0, len(train_data['User']))
            user.append(train_data['User'][index])
            positive.append(train_data['Item'][index])
            user_id = train_data['User'][index]
            pos = data.user_positive_list[user_id]
            neg = np.random.randint(len(all_items))
            while (neg in pos):
                neg = np.random.randint(len(all_items))
            negative.append(neg)
        return {'user': user, 'positive': positive, 'negative': negative}

    def train(self, Train_data):
        """Run self.epoch training epochs; save the model when pretrain_flag < 0."""
        for epoch in range(self.epoch):
            total_loss = 0
            # NOTE(review): t1/t2 are measured but never reported.
            t1 = time()
            total_batch = int((len(Train_data['User']) / self.batch_size))
            for i in range(total_batch):
                batch_xs = self.get_random_block_from_data(Train_data, self.batch_size)
                loss = self.partial_fit(batch_xs)
                total_loss = (total_loss + loss)
            t2 = time()
            print(('the total loss in %d th iteration is: %f' % (epoch, total_loss)))
        if (self.pretrain_flag < 0):
            print('Save model to file as pretrain.')
            self.saver.save(self.sess, self.save_file)

    def evaluate(self):
        """Report Hit Rate / MRR / NDCG at top-{5,10,15,20,25} on the test set.

        NOTE(review): uses module-level globals `data` and `model`; also note
        that label[1..4] are appended as one-element *lists*, so the
        `true_item_score >= label[k]` comparisons rely on Python-2 mixed-type
        ordering — this would raise TypeError on Python 3.
        """
        self.graph.finalize()
        count = [0, 0, 0, 0, 0]
        rank = [[], [], [], [], []]
        for index in range(len(data.Test_data['User'])):
            user = data.Test_data['User'][index]
            scores = model.get_scores_per_user(user)
            true_item_id = data.Test_data['Item'][index]
            true_item_score = scores[true_item_id]
            # Remove items the user already interacted with before ranking.
            visited = data.user_positive_list[user]
            scores = np.delete(scores, visited)
            sorted_scores = sorted(scores, reverse=True)
            # Thresholds at ranks 5, 10, 15, 20, 25.
            label = [sorted_scores[4]]
            label.append([sorted_scores[9]])
            label.append([sorted_scores[14]])
            label.append([sorted_scores[19]])
            label.append([sorted_scores[24]])
            if (true_item_score >= label[0]):
                count[0] = (count[0] + 1)
                rank[0].append((sorted_scores.index(true_item_score) + 1))
            if (true_item_score >= label[1]):
                count[1] = (count[1] + 1)
                rank[1].append((sorted_scores.index(true_item_score) + 1))
            if (true_item_score >= label[2]):
                count[2] = (count[2] + 1)
                rank[2].append((sorted_scores.index(true_item_score) + 1))
            if (true_item_score >= label[3]):
                count[3] = (count[3] + 1)
                rank[3].append((sorted_scores.index(true_item_score) + 1))
            if (true_item_score >= label[4]):
                count[4] = (count[4] + 1)
                rank[4].append((sorted_scores.index(true_item_score) + 1))
        for i in range(5):
            mrr = 0
            ndcg = 0
            hit_rate = (float(count[i]) / len(data.Test_data['User']))
            for item in rank[i]:
                mrr = (mrr + (float(1.0) / item))
                ndcg = (ndcg + (float(1.0) / np.log2((item + 1))))
            mrr = (mrr / len(data.Test_data['User']))
            ndcg = (ndcg / len(data.Test_data['User']))
            k = ((i + 1) * 5)
            print(('top:%d' % k))
            print(('the Hit Rate is: %f' % hit_rate))
            print(('the MRR is: %f' % mrr))
            print(('the NDCG is: %f' % ndcg))

    def get_scores_per_user(self, user_feature):
        """Score every item for one user, batching the forward passes."""
        scorelist = []
        all_items = data.items.values()
        # flag=1 marks a final, smaller batch when the catalogue size is not
        # an exact multiple of batch_size.
        if ((len(all_items) % self.batch_size) == 0):
            batch_count = (len(all_items) / self.batch_size)
            flag = 0
        else:
            batch_count = math.ceil((len(all_items) / self.batch_size))
            flag = 1
        j = 0
        for i in range(int(batch_count)):
            (X_user, X_item) = ([], [])
            if ((flag == 1) and (i == (batch_count - 1))):
                k = len(all_items)
            else:
                k = (j + self.batch_size)
            for itemID in range(j, k):
                X_user.append(user_feature)
                X_item.append(itemID)
            feed_dict = {self.user: X_user, self.item_pos: X_item, self.dropout_keep: self.no_dropout, self.train_phase: False}
            scores = self.sess.run(self.pos, feed_dict=feed_dict)
            scores = scores.reshape(len(X_user))
            scorelist = np.append(scorelist, scores)
            j = (j + self.batch_size)
        return scorelist
class ServiceHandler(AbstractServiceHandler):
    """Service handler for the Nyaa torrent tracker (RSS-based, no stream links).

    NOTE(review): the _search_base and _recent_list URL templates were
    truncated in this copy (unterminated string literals) — they must be
    restored from upstream; _recent_list.format(...) below shows they take
    domain/filter/excludes (and _search_base additionally q) placeholders.
    """
    _search_base = '
    _recent_list = '

    def __init__(self):
        super().__init__('nyaa', 'Nyaa', True)

    def get_all_episodes(self, stream, **kwargs):
        """Fetch and digest all valid episodes for one stream's search feed."""
        info('Getting live episodes for Nyaa/{}'.format(stream.show_key))
        episode_datas = self._get_feed_episodes(stream.show_key, **kwargs)
        episodes = []
        for episode_data in episode_datas:
            if _is_valid_episode(episode_data):
                try:
                    episode = _digest_episode(episode_data)
                    if (episode is not None):
                        episodes.append(episode)
                except:
                    # NOTE(review): message says "Crunchyroll" — copy-paste
                    # from another handler; runtime string left unchanged here.
                    exception('Problem digesting episode for Crunchyroll/{}'.format(stream.show_key))
        if (len(episode_datas) > 0):
            debug('  {} episodes found, {} valid'.format(len(episode_datas), len(episodes)))
        else:
            debug('  No episodes found')
        return episodes

    def get_recent_episodes(self, streams, **kwargs):
        """Map recent tracker torrents onto the given streams.

        Returns a dict {stream: [episodes]} built from the recent-uploads feed.
        """
        torrents = self._get_recent_torrents(**kwargs)
        episodes = dict()
        for torrent in torrents:
            found_streams = self._find_matching_stream(torrent, streams)
            if (not _is_valid_episode(torrent)):
                debug('Torrent excluded (not a valid episode format)')
                continue
            for stream in found_streams:
                try:
                    episode = _digest_episode(torrent)
                    if (episode is not None):
                        show_episodes = episodes.get(stream, list())
                        show_episodes.append(episode)
                        debug(f'Adding episode {episode.number} for show {stream.show.id}')
                        episodes[stream] = show_episodes
                except:
                    exception(f'Problem digesting torrent {torrent.id}')
        return episodes

    def _find_matching_stream(self, torrent, streams):
        """Return all streams whose show name/aliases are a word-subset of the
        torrent title (after normalization)."""
        debug(f'Searching matching stream for torrent {torrent.title}')
        found_streams = list()
        for stream in streams:
            show = stream.show
            names = (([show.name] + show.aliases) + [stream.show_key])
            for name in names:
                words_show = set(_normalize_show_name(name).split())
                words_torrent = set(_normalize_show_name(torrent.title).split())
                if words_show.issubset(words_torrent):
                    debug(f'  -> MATCH')
                    info(f'Matching found for torrent {torrent.title}')
                    info(f'  -> {show.name}')
                    found_streams.append(stream)
                    break
        if (not found_streams):
            debug(f'No matching show found for torrent {torrent.title}')
        return found_streams

    def _get_recent_torrents(self, **kwargs):
        """Return all entries from the Nyaa recent-uploads RSS feed
        (empty list on request failure)."""
        info('Getting all recent episodes on Nyaa')
        domain = self.config.get('domain', 'nyaa.si')
        filter_ = self.config.get('filter', '2')
        excludes = self.config.get('excluded_users', '').replace(' ', '')
        url = self._recent_list.format(domain=domain, filter=filter_, excludes=excludes)
        response = self.request(url, rss=True, **kwargs)
        if (response is None):
            error('Cannot get latest show for Nyaa')
            return list()
        if (not _verify_feed(response)):
            warning('Parsed feed could not be verified, may have unexpected results')
        return response.get('entries', list())

    def _get_feed_episodes(self, show_key, **kwargs):
        """Search the tracker for a show key and return the raw feed entries."""
        info('Getting episodes for Nyaa/{}'.format(show_key))
        if (('domain' not in self.config) or (not self.config['domain'])):
            error('  Domain not specified in config')
            return list()
        # Sanitize the show key into a tracker-friendly search query.
        query = re.sub('[`~!#$%^&*()+=:;,.<>?/|"]+', ' ', show_key)
        query = re.sub('season', ' ', query, flags=re.I)
        query = re.sub(' +', ' ', query)
        query = re.sub('(?:[^ ])-', ' ', query)
        debug('  query={}'.format(query))
        query = url_quote(query, safe='', errors='ignore')
        domain = self.config.get('domain', 'nyaa.si')
        filter_ = self.config.get('filter', '2')
        excludes = self.config.get('excluded_users', '').replace(' ', '')
        url = self._search_base.format(domain=domain, filter=filter_, excludes=excludes, q=query)
        response = self.request(url, rss=True, **kwargs)
        if (response is None):
            error('Cannot get latest show for Nyaa/{}'.format(show_key))
            return list()
        if (not _verify_feed(response)):
            warning('Parsed feed could not be verified, may have unexpected results')
        return response.get('entries', list())

    # Nyaa is a torrent tracker: there are no per-stream pages or metadata,
    # so the remaining AbstractServiceHandler hooks are no-ops.
    def get_stream_link(self, stream):
        return None

    def get_stream_info(self, stream, **kwargs):
        return None

    def extract_show_key(self, url):
        return url

    def get_seasonal_streams(self, **kwargs):
        return list()
class EG3DDataset(BaseDataset):
    """Image dataset that additionally carries per-sample camera poses
    (EG3D-style), loaded from a JSON metadata file.

    NOTE(review): get_raw_data()/transform() reference `pose`
    unconditionally, so constructing this dataset with use_pose=False would
    raise NameError at fetch time — the class effectively assumes
    use_pose=True.
    """

    def __init__(self, root_dir, file_format='zip', annotation_path=None, annotation_meta=None, annotation_format='json', max_samples=(- 1), mirror=False, transform_kwargs=None, use_label=True, num_classes=None, use_pose=True, pose_meta='dataset.json'):
        super().__init__(root_dir=root_dir, file_format=file_format, annotation_path=annotation_path, annotation_meta=annotation_meta, annotation_format=annotation_format, max_samples=max_samples, mirror=mirror, transform_kwargs=transform_kwargs)
        # Labels are only enabled when the annotation items actually carry a
        # second (label) field.
        self.dataset_classes = 0
        self.num_classes = 0
        self.use_label = False
        item_sample = self.items[0]
        if (isinstance(item_sample, (list, tuple)) and (len(item_sample) > 1)):
            labels = [int(item[1]) for item in self.items]
            self.dataset_classes = (max(labels) + 1)
            self.use_label = use_label
        if self.use_label:
            # num_classes may override what the annotations imply.
            if (num_classes is None):
                self.num_classes = self.dataset_classes
            else:
                self.num_classes = int(num_classes)
            assert (self.num_classes > 0)
        else:
            self.num_classes = 0
        self.use_pose = use_pose
        if use_pose:
            fp = self.reader.open_anno_file(root_dir, pose_meta)
            self.poses = self._load_raw_poses(fp)

    def _load_raw_poses(self, fp):
        """Load the pose array from a JSON file of [filename, pose] pairs,
        ordered to match self.items."""
        poses = json.load(fp)['labels']
        poses = dict(poses)
        # NOTE(review): indexes the dict with the raw item — assumes items are
        # plain path strings here (not [path, label] pairs); confirm.
        poses = [poses[fname.replace('\\', '/')] for fname in self.items]
        poses = np.array(poses)
        # 1-D pose data is treated as integer labels, 2-D as float vectors.
        poses = poses.astype({1: np.int64, 2: np.float32}[poses.ndim])
        return poses

    def get_pose(self, idx):
        """Return a copy of the pose for sample `idx`."""
        pose = self.poses[idx]
        return pose.copy()

    def get_raw_data(self, idx):
        """Fetch the undecoded sample: index, mirror flag, raw image bytes,
        pose, and (optionally) raw/one-hot labels."""
        # With mirroring, the second half of the index range aliases the first
        # half flipped.
        do_mirror = (self.mirror and (idx >= (self.num_samples // 2)))
        if do_mirror:
            idx = (idx - (self.num_samples // 2))
        if self.use_label:
            (image_path, raw_label) = self.items[idx][:2]
            raw_label = int(raw_label)
            label = raw_label_to_one_hot(raw_label, self.num_classes)
        else:
            image_path = self.items[idx]
        if self.use_pose:
            pose = self.poses[idx].copy()
        buffer = np.frombuffer(self.fetch_file(image_path), dtype=np.uint8)
        idx = np.array(idx)
        do_mirror = np.array(do_mirror)
        if self.use_label:
            raw_label = np.array(raw_label)
            return [idx, do_mirror, buffer, pose, raw_label, label]
        return [idx, do_mirror, buffer, pose]

    def num_raw_outputs(self):
        # Must match the tuple lengths returned by get_raw_data().
        if self.use_label:
            return 6
        return 4

    def parse_transform_config(self):
        """Build the decode -> resize -> normalize transform pipeline config."""
        image_size = self.transform_kwargs.get('image_size')
        image_channels = self.transform_kwargs.setdefault('image_channels', 3)
        min_val = self.transform_kwargs.setdefault('min_val', (- 1.0))
        max_val = self.transform_kwargs.setdefault('max_val', 1.0)
        use_square = self.transform_kwargs.setdefault('use_square', True)
        center_crop = self.transform_kwargs.setdefault('center_crop', True)
        self.transform_config = dict(decode=dict(transform_type='Decode', image_channels=image_channels, return_square=use_square, center_crop=center_crop), resize=dict(transform_type='Resize', image_size=image_size), normalize=dict(transform_type='Normalize', min_val=min_val, max_val=max_val))

    def transform(self, raw_data, use_dali=False):
        """Decode, resize, mirror and normalize one raw sample."""
        if self.use_label:
            (idx, do_mirror, buffer, pose, raw_label, label) = raw_data
        else:
            (idx, do_mirror, buffer, pose) = raw_data
        raw_image = self.transforms['decode'](buffer, use_dali=use_dali)
        raw_image = self.transforms['resize'](raw_image, use_dali=use_dali)
        raw_image = self.mirror_aug(raw_image, do_mirror, use_dali=use_dali)
        image = self.transforms['normalize'](raw_image, use_dali=use_dali)
        if self.use_label:
            return [idx, raw_image, image, raw_label, label, pose]
        return [idx, raw_image, image, pose]

    def output_keys(self):
        # Must stay aligned with the list ordering returned by transform().
        if self.use_label:
            return ['index', 'raw_image', 'image', 'raw_label', 'label', 'pose']
        return ['index', 'raw_image', 'image', 'pose']

    def info(self):
        """Extend the base dataset summary with label bookkeeping."""
        dataset_info = super().info()
        dataset_info['Dataset classes'] = self.dataset_classes
        dataset_info['Use label'] = self.use_label
        if self.use_label:
            dataset_info['Num classes for training'] = self.num_classes
        return dataset_info
def uniform_points_on_sphere(angle_sampling, radius=1):
    """Sample points on a sphere from a regular elevation/azimuth grid,
    keeping exactly one point at each pole.

    angle_sampling is the number of steps along each angular axis; all points
    are at distance `radius`.
    """
    elevation = np.linspace((- 90), 90, angle_sampling)
    azimuth = np.linspace((- 180), 180, angle_sampling, endpoint=False)
    (elevation, azimuth) = np.meshgrid(elevation, azimuth)
    # At elevation == -90 every azimuth maps to the same point (the pole):
    # drop them all, then re-enable exactly one. np.argmin on a boolean mask
    # returns the index of its first False entry.
    keep = (elevation != (- 90))
    keep[np.argmin(keep)] = True
    azimuth = azimuth[keep]
    elevation = elevation[keep]
    # Same deduplication for the opposite pole (elevation == 90).
    keep = (elevation != 90)
    keep[np.argmin(keep)] = True
    azimuth = azimuth[keep]
    elevation = elevation[keep]
    elevation = elevation.flatten()
    azimuth = azimuth.flatten()
    n_points = len(elevation)
    distance = np.full((n_points,), radius, dtype=float)
    # points_from_angles is defined elsewhere — presumably converts spherical
    # (distance, elevation, azimuth) coordinates to Cartesian; TODO confirm.
    points = points_from_angles(distance, elevation, azimuth)
    return points
class CortexMScb(QlPeripheral):
    """Emulated Cortex-M System Control Block (SCB) peripheral.

    System-fault enables live in SHCSR and core exception pending bits in
    ICSR/SHCSR; `self.instance` is a ctypes struct mirroring the register
    file. Bit positions below are as used by this code — they appear to
    follow the ARMv7-M SHCSR/ICSR layout, but confirm against the TRM.
    """

    def enable(self, IRQn):
        # Set the SHCSR enable bit for the given system fault.
        if (IRQn == IRQ.USAGE_FAULT):
            self.instance.SHCSR |= (1 << 18)
        if (IRQn == IRQ.BUS_FAULT):
            self.instance.SHCSR |= (1 << 17)
        if (IRQn == IRQ.MEMORY_MANAGEMENT_FAULT):
            self.instance.SHCSR |= (1 << 16)

    def disable(self, IRQn):
        # Clear the SHCSR enable bit for the given system fault.
        if (IRQn == IRQ.USAGE_FAULT):
            self.instance.SHCSR &= (~ (1 << 18))
        if (IRQn == IRQ.BUS_FAULT):
            self.instance.SHCSR &= (~ (1 << 17))
        if (IRQn == IRQ.MEMORY_MANAGEMENT_FAULT):
            self.instance.SHCSR &= (~ (1 << 16))

    def get_enable(self, IRQn):
        # Read back the SHCSR enable bit; exceptions without an enable bit
        # (NMI, SysTick, ...) are reported as always enabled.
        if (IRQn == IRQ.USAGE_FAULT):
            return ((self.instance.SHCSR >> 18) & 1)
        if (IRQn == IRQ.BUS_FAULT):
            return ((self.instance.SHCSR >> 17) & 1)
        if (IRQn == IRQ.MEMORY_MANAGEMENT_FAULT):
            return ((self.instance.SHCSR >> 16) & 1)
        return 1

    def set_pending(self, IRQn):
        # Mark an exception pending. Core exceptions (NMI/PendSV/SysTick) go
        # through ICSR; system faults and SVCall go through SHCSR.
        if (IRQn == IRQ.NMI):
            self.instance.ICSR |= (1 << 31)
        if (IRQn == IRQ.PENDSV):
            # NOTE(review): (3 << 27) sets bits 27 AND 28 — in the ARMv7-M
            # ICSR those are the clear-pending and set-pending bits; setting
            # both at once looks intentional here but confirm.
            self.instance.ICSR |= (3 << 27)
        if (IRQn == IRQ.SYSTICK):
            # NOTE(review): same pattern for SysTick at bits 25/26.
            self.instance.ICSR |= (3 << 25)
        if (IRQn == IRQ.MEMORY_MANAGEMENT_FAULT):
            self.instance.SHCSR |= (1 << 13)
        if (IRQn == IRQ.BUS_FAULT):
            self.instance.SHCSR |= (1 << 14)
        if (IRQn == IRQ.USAGE_FAULT):
            self.instance.SHCSR |= (1 << 12)
        if (IRQn == IRQ.SVCALL):
            self.instance.SHCSR |= (1 << 15)

    def clear_pending(self, IRQn):
        # Inverse of set_pending: clear the same bits.
        if (IRQn == IRQ.NMI):
            self.instance.ICSR &= (~ (1 << 31))
        if (IRQn == IRQ.PENDSV):
            self.instance.ICSR &= (~ (3 << 27))
        if (IRQn == IRQ.SYSTICK):
            self.instance.ICSR &= (~ (3 << 25))
        if (IRQn == IRQ.MEMORY_MANAGEMENT_FAULT):
            self.instance.SHCSR &= (~ (1 << 13))
        if (IRQn == IRQ.BUS_FAULT):
            self.instance.SHCSR &= (~ (1 << 14))
        if (IRQn == IRQ.USAGE_FAULT):
            self.instance.SHCSR &= (~ (1 << 12))
        if (IRQn == IRQ.SVCALL):
            self.instance.SHCSR &= (~ (1 << 15))

    def get_pending(self, IRQn):
        # Pending state is read from the *set* bit only (bit 28 for PendSV,
        # bit 26 for SysTick).
        if (IRQn == IRQ.NMI):
            return ((self.instance.ICSR >> 31) & 1)
        if (IRQn == IRQ.PENDSV):
            return ((self.instance.ICSR >> 28) & 1)
        if (IRQn == IRQ.SYSTICK):
            return ((self.instance.ICSR >> 26) & 1)
        if (IRQn == IRQ.MEMORY_MANAGEMENT_FAULT):
            return ((self.instance.SHCSR >> 13) & 1)
        if (IRQn == IRQ.BUS_FAULT):
            return ((self.instance.SHCSR >> 14) & 1)
        if (IRQn == IRQ.USAGE_FAULT):
            return ((self.instance.SHCSR >> 12) & 1)
        if (IRQn == IRQ.SVCALL):
            return ((self.instance.SHCSR >> 15) & 1)
        return 0

    def get_priority(self, IRQn):
        # System-handler priorities live in the SHP byte array, indexed from
        # exception number 4 upward.
        return self.instance.SHP[((IRQn & 15) - 4)]

    # NOTE(review): the bare '()' below looks like a stripped decorator
    # (e.g. a QlPeripheral monitor/trace decorator) — restore from upstream.
    ()
    def read(self, offset: int, size: int) -> int:
        # Raw little-endian register read straight out of the ctypes struct.
        buf = ctypes.create_string_buffer(size)
        ctypes.memmove(buf, (ctypes.addressof(self.instance) + offset), size)
        return int.from_bytes(buf.raw, byteorder='little')

    # NOTE(review): bare '()' — stripped decorator, see read() above.
    ()
    def write(self, offset: int, size: int, value: int):
        # Writing the PendSV set-pending bit forwards the request to the NVIC.
        if (offset == self.struct.ICSR.offset):
            if ((value >> 28) & 1):
                self.ql.hw.nvic.set_pending(IRQ.PENDSV)
        data = value.to_bytes(size, 'little')
        ctypes.memmove((ctypes.addressof(self.instance) + offset), data, size)
class PerFutureTrade(PerContract):
    """Flat commission per futures trade, modeled as an exchange fee so the
    per-contract cost component stays zero."""

    def __init__(self, cost=DEFAULT_MINIMUM_COST_PER_FUTURE_TRADE):
        # Route the whole cost through exchange_fee; keep a handle for repr().
        super(PerFutureTrade, self).__init__(cost=0, exchange_fee=cost, min_trade_cost=0)
        self._cost_per_trade = self._exchange_fee

    def __repr__(self):
        # A DummyMapping means a single constant cost we can display; any
        # other mapping varies per root symbol.
        if isinstance(self._cost_per_trade, DummyMapping):
            shown_cost = self._cost_per_trade['dummy key']
        else:
            shown_cost = '<varies>'
        return '{class_name}(cost_per_trade={cost_per_trade})'.format(class_name=self.__class__.__name__, cost_per_trade=shown_cost)
class ChangeObjectStates(StateChanger):
    """Randomizes the state of every node property that has togglable states.

    For each node property listed in _PROPERTY_STATES, all of that property's
    possible states are removed from the node and one is re-added at random.
    """

    def __init__(self, properties_data):
        self.properties_data = properties_data

    def apply_changes(self, state: EnvironmentState, **kwargs):
        for node in state.get_nodes():
            # Only properties with a registered state set are randomized.
            togglable_props = node.properties & _PROPERTY_STATES.keys()
            for prop in togglable_props:
                candidates = _PROPERTY_STATES[prop]
                node.states -= set(candidates)
                node.states.add(random.choice(candidates))
def get_norm(norm, out_channels):
    """Instantiate a normalization layer for `out_channels` channels.

    `norm` may be None or '' (returns None), a string key from the table
    below, or a callable taking the channel count directly. Unknown string
    keys raise KeyError.
    """
    if norm is None:
        return None
    if isinstance(norm, str):
        if not norm:
            return None
        factories = {
            'BN': BatchNorm2d,
            # Older torch lacks a usable nn.SyncBatchNorm; fall back to the
            # naive implementation there.
            'SyncBN': NaiveSyncBatchNorm if env.TORCH_VERSION <= (1, 5) else nn.SyncBatchNorm,
            'FrozenBN': FrozenBatchNorm2d,
            'GN': lambda channels: nn.GroupNorm(32, channels),
            'nnSyncBN': nn.SyncBatchNorm,
            'naiveSyncBN': NaiveSyncBatchNorm,
            'naiveSyncBN_N': lambda channels: NaiveSyncBatchNorm(channels, stats_mode='N'),
        }
        norm = factories[norm]
    return norm(out_channels)
_datapipe('load_from_bz2')
class Bz2FileLoaderIterDataPipe(IterDataPipe[Tuple[(str, BufferedIOBase)]]):
    """Iterable DataPipe that decompresses bz2 binary streams.

    Consumes ``(pathname, byte_stream)`` tuples and yields
    ``(pathname_without_suffix, decompressed_stream)`` tuples, where the
    stream is wrapped in a bz2 reader.
    """

    def __init__(self, datapipe: Iterable[Tuple[(str, BufferedIOBase)]], length: int=(- 1)) -> None:
        super().__init__()
        self.datapipe: Iterable[Tuple[(str, BufferedIOBase)]] = datapipe
        # Number of items, or -1 when unknown (len() then raises TypeError).
        self.length: int = length

    def __iter__(self) -> Iterator[Tuple[(str, BufferedIOBase)]]:
        for data in self.datapipe:
            validate_pathname_binary_tuple(data)
            (pathname, data_stream) = data
            try:
                extracted_fobj = bz2.open(data_stream, mode='rb')
                # BUGFIX: the original used pathname.rstrip('.bz2'), which
                # strips the *characters* '.', 'b', 'z', '2' from the end and
                # mangles names such as 'data2.bz2' -> 'data'. Remove the
                # '.bz2' suffix exactly once instead.
                if pathname.endswith('.bz2'):
                    new_pathname = pathname[:-len('.bz2')]
                else:
                    new_pathname = pathname
                (yield (new_pathname, StreamWrapper(extracted_fobj, data_stream, name=new_pathname)))
            except Exception as e:
                warnings.warn(f'Unable to extract files from corrupted bzip2 stream {pathname} due to: {e}, abort!')
                raise e
            finally:
                # Release the source stream once its wrapper is done with it.
                if isinstance(data_stream, StreamWrapper):
                    data_stream.autoclose()

    def __len__(self) -> int:
        if (self.length == (- 1)):
            raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
        return self.length
def pyrocko_station_from_channels(nsl, channels, inconsistencies='warn'):
    """Build a pyrocko Station from a list of channel objects.

    Positions are taken from the channels; disagreements either raise or warn
    (per `inconsistencies`), falling back to NaN-aware mean values. Channels
    sharing a code have their orientations consistency-merged.
    """
    ref_pos = channels[0].position_values
    if not all(ch.position_values == ref_pos for ch in channels):
        info = '\n'.join(' %s: %s' % (ch.code, ch.position_values) for ch in channels)
        mess = ('encountered inconsistencies in channel lat/lon/elevation/depth for %s.%s.%s: \n%s' % (nsl + (info,)))
        if inconsistencies == 'raise':
            raise InconsistentChannelLocations(mess)
        if inconsistencies == 'warn':
            logger.warning(mess)
            logger.warning(' -> using mean values')

    # Mean over finite entries only, so missing values don't poison the result.
    apos = num.array([ch.position_values for ch in channels], dtype=float)
    mlat, mlon, mele, mdep = num.nansum(apos, axis=0) / num.sum(num.isfinite(apos), axis=0)

    grouped = {}
    for ch in channels:
        grouped.setdefault(ch.code, []).append(ch)

    pchannels = []
    for code in sorted(grouped):
        orientation_data = [
            (ch.code, value_or_none(ch.azimuth), value_or_none(ch.dip))
            for ch in grouped[code]
        ]
        azimuth, dip = util.consistency_merge(
            orientation_data,
            message='channel orientation values differ:',
            error=inconsistencies)
        pchannels.append(pyrocko.model.Channel(code, azimuth=azimuth, dip=dip))

    return pyrocko.model.Station(
        *nsl, lat=mlat, lon=mlon, elevation=mele, depth=mdep, channels=pchannels)
def get_parser(**parser_kwargs):
    """Construct the training CLI argument parser.

    Any keyword arguments are forwarded to argparse.ArgumentParser. Boolean
    flags accept textual forms ('yes'/'no', 'true'/'false', ...) via str2bool.
    """
    def str2bool(v):
        # Accept real booleans as-is; otherwise parse common textual forms.
        if isinstance(v, bool):
            return v
        lowered = v.lower()
        if lowered in ('yes', 'true', 't', 'y', '1'):
            return True
        if lowered in ('no', 'false', 'f', 'n', '0'):
            return False
        raise argparse.ArgumentTypeError('Boolean value expected.')

    # (flags, options) pairs, registered in order so --help output is stable.
    argument_specs = [
        (('-n', '--name'), dict(type=str, const=True, default='', nargs='?', help='postfix for logdir')),
        (('-r', '--resume'), dict(type=str, const=True, default='', nargs='?', help='resume from logdir or checkpoint in logdir')),
        (('-b', '--base'), dict(nargs='*', metavar='base_config.yaml', help='paths to base configs. Loaded from left-to-right. Parameters can be overwritten or added with command-line options of the form `--key value`.', default=list())),
        (('-t', '--train'), dict(type=str2bool, const=True, default=False, nargs='?', help='train')),
        (('--no-test',), dict(type=str2bool, const=True, default=False, nargs='?', help='disable test')),
        (('-p', '--project'), dict(help='name of new or path to existing project')),
        (('-d', '--debug'), dict(type=str2bool, nargs='?', const=True, default=False, help='enable post-mortem debugging')),
        (('-s', '--seed'), dict(type=int, default=23, help='seed for seed_everything')),
        (('-f', '--postfix'), dict(type=str, default='', help='post-postfix for default name')),
        (('-l', '--logdir'), dict(type=str, default='logs', help='directory for logging dat shit')),
        (('--scale_lr',), dict(type=str2bool, nargs='?', const=False, default=False, help='scale base-lr by ngpu * batch_size * n_accumulate')),
        (('--n_accumulate',), dict(type=int, default=1, help='Number of accumulate iters')),
        (('--datadir_in_name',), dict(type=str2bool, nargs='?', const=True, default=True, help='Prepend the final directory in the data_root to the output directory name')),
        (('--max_training_steps',), dict(type=int, required=True, help='Number of training steps to run')),
        (('--token',), dict(type=str, required=True, help='Unique token you want to represent your trained model. Ex: firstNameLastName.')),
        (('--token_only',), dict(type=str2bool, const=True, default=False, nargs='?', help='Train only using the token and no class.')),
        (('--actual_resume',), dict(type=str, required=True, help='Path to model to actually resume from')),
        (('--data_root',), dict(type=str, required=True, help='Path to directory with training images')),
        (('--reg_data_root',), dict(type=str, required=False, help='Path to directory with regularization images')),
        (('--embedding_manager_ckpt',), dict(type=str, default='', help='Initialize embedding manager from a checkpoint')),
        (('--class_word',), dict(type=str, required=False, help="Match class_word to the category of images you want to train. Example: 'man', 'woman', or 'dog'.")),
        (('--init_words',), dict(type=str, help='Comma separated list of words used to initialize the embeddigs for training.')),
    ]
    parser = argparse.ArgumentParser(**parser_kwargs)
    for flags, options in argument_specs:
        parser.add_argument(*flags, **options)
    return parser
def delete_vm(call, vm_name):
    # Delete the Azure resource group backing a VM in response to a Telegram
    # callback, editing the originating message to show progress.
    # NOTE(review): the HTML message bodies below look like they once held
    # localized text that was stripped during extraction — confirm wording.
    refresh_token = user_dict[call.from_user.id].refresh_token
    subscription_id = user_dict[call.from_user.id].subscription_id
    bot.edit_message_text(text=f''' <b> VM</b>
<code>{vm_name}</code> ...''', chat_id=call.from_user.id, message_id=call.message.message_id, parse_mode='HTML')
    az_rg = ResourceGroup(refresh_token, subscription_id)
    # Naming convention: the VM's resource group shares its name, with
    # '-rg' in place of '-vm'.
    az_rg.delete(vm_name.replace('-vm', '-rg'))
    bot.edit_message_text(text=f''' <b> VM</b>
<code>{vm_name}</code> ''', chat_id=call.from_user.id, message_id=call.message.message_id, parse_mode='HTML')
def strain_calculation_parameters(substrate_material, layer_material, should_print=False, SO=False):
    """Compute strain fractions and band-edge shifts for a layer grown
    pseudomorphically on a substrate.

    Args:
        substrate_material: provides the relaxed in-plane lattice constant.
        layer_material: the strained layer; must expose a_v, a_c, b, c11, c12
            and lattice_constant (plus spin_orbit_splitting when SO=True).
        should_print: if True, print a human-readable summary of everything.
        SO: include spin-orbit coupling in the light-hole/split-off shifts.

    Returns:
        A State object populated with the strain tensor components and the
        band shifts delta_Ec, delta_Ehh, delta_Elh, delta_Eso.
    """
    sub = substrate_material
    mat = layer_material
    k = State()
    # Deformation potentials and elastic constants of the layer material.
    k.av = abs(mat.a_v)
    k.ac = mat.a_c
    k.b = mat.b
    k.C11 = mat.c11
    k.C12 = mat.c12
    if should_print:
        print(sub, mat)
    k.a0 = sub.lattice_constant
    k.a = mat.lattice_constant
    # In-plane strain: the layer is forced onto the substrate lattice.
    k.epsilon = ((k.a0 - k.a) / k.a)
    # Out-of-plane (tetragonal) response given by the elastic constants.
    k.epsilon_perp = ((((- 2) * k.C12) / k.C11) * k.epsilon)
    k.e_xx = k.epsilon
    k.e_yy = k.epsilon
    k.e_zz = k.epsilon_perp
    # Hydrostatic (trace) and shear strain combinations.
    k.Tre = ((k.e_xx + k.e_yy) + k.e_zz)
    k.Pe = ((- k.av) * k.Tre)
    k.Qe = (((- k.b) / 2) * ((k.e_xx + k.e_yy) - (2 * k.e_zz)))
    k.cb_hydrostatic_shift = (k.ac * k.Tre)
    k.vb_hydrostatic_shift = (k.av * k.Tre)
    k.vb_shear_splitting = (((2 * k.b) * (1 + ((2 * k.C12) / k.C11))) * k.epsilon)
    # Band-edge shifts: conduction, heavy hole, light hole (no SO coupling yet).
    k.delta_Ec = (k.ac * k.Tre)
    if should_print:
        print(k.ac, k.Tre)
    k.delta_Ehh = ((- k.Pe) - k.Qe)
    k.delta_Elh = ((- k.Pe) + k.Qe)
    k.delta_Eso = 0.0
    if SO:
        # With spin-orbit coupling the light-hole and split-off shifts come
        # from the +/- roots of the same quadratic in Qe and delta.
        k.delta = mat.spin_orbit_splitting
        shift = (((k.delta ** 2) + ((2 * k.delta) * k.Qe)) + (9 * (k.Qe ** 2)))
        k.delta_Elh = ((- k.Pe) + (0.5 * ((k.Qe - k.delta) + np.sqrt(shift))))
        k.delta_Eso = ((- k.Pe) + (0.5 * ((k.Qe - k.delta) - np.sqrt(shift))))
    # Sanity checks on the computed quantities (raises/prints on violation).
    strain_calculation_asserts(k, should_print=should_print)
    if should_print:
        print()
        print('Lattice:')
        print('a0', k.a0)
        print('a', k.a)
        print()
        print('Deformation potentials:')
        print('ac = ', solcore.asUnit(k.ac, 'eV'))
        print('av = ', solcore.asUnit(k.av, 'eV'))
        print('ac - av = ', solcore.asUnit((k.ac - k.av), 'eV'))
        print('b = ', solcore.asUnit(k.b, 'eV'))
        print()
        print('Matrix elements from elastic stiffness tensor:')
        print('C_11 = ', solcore.asUnit(k.C11, 'GPa'))
        print('C_12 = ', solcore.asUnit(k.C12, 'GPa'))
        print()
        print('Strain fractions:')
        print('e_xx = e_yy = epsilon = ', k.epsilon)
        print('e_zz = epsilon_perp = ', k.epsilon_perp)
        print('e_xx + e_yy + e_zz = Tre = ', k.Tre)
        print()
        print('Shifts and splittings:')
        print('Pe = -av * Tre = ', solcore.asUnit(k.Pe, 'eV'))
        print('Qe = -b/2*(e_xx + e_yy - 2*e_zz) = ', solcore.asUnit(k.Qe, 'eV'))
        print('dEc = ac * Tre = ', solcore.asUnit(k.delta_Ec, 'eV'))
        print('dEhh = av * Tre + b[1 + 2*C_11/C_12]*epsilon = -Pe - Qe = ', solcore.asUnit(k.delta_Ehh, 'eV'))
        print('dElh = av * Tre - b[1 + 2*C_11/C_12]*epsilon = -Pe + Qe = ', solcore.asUnit(k.delta_Elh, 'eV'))
        print()
    return k
def test_starting_location_world_select(skip_qtbot, preset_manager):
    # Clicking a region checkbox must cycle "some locations" -> "all" ->
    # "none", and the selection must survive a preset reload.
    base = preset_manager.default_preset_for_game(RandovaniaGame.METROID_PRIME_ECHOES).get_preset()
    # Pin the UUID so the test is deterministic.
    preset = dataclasses.replace(base, uuid=uuid.UUID('b41fde84-1f57-4b79-8cd6-3e5a78077fa6'))
    options = MagicMock()
    editor = PresetEditor(preset, options)
    window = PresetMetroidStartingArea(editor, default_database.game_description_for(preset.game), MagicMock())
    skip_qtbot.addWidget(window)
    checkbox_list = window._starting_location_for_region
    window.on_preset_changed(editor.create_custom_preset_with())
    assert (len(checkbox_list) == 10)
    temple_grounds_checkbox = checkbox_list['Temple Grounds']
    # The default preset enables only part of Temple Grounds.
    assert (temple_grounds_checkbox.checkState() == QtCore.Qt.CheckState.PartiallyChecked)
    # First click: select every location in the region (39 per the current
    # database — NOTE(review): count is DB-dependent).
    skip_qtbot.mouseClick(temple_grounds_checkbox, QtCore.Qt.MouseButton.LeftButton)
    assert (temple_grounds_checkbox.checkState() == QtCore.Qt.CheckState.Checked)
    assert (len(editor.configuration.starting_location.locations) == 39)
    # Second click: clear the region entirely.
    skip_qtbot.mouseClick(temple_grounds_checkbox, QtCore.Qt.MouseButton.LeftButton)
    assert (temple_grounds_checkbox.checkState() == QtCore.Qt.CheckState.Unchecked)
    assert (len(editor.configuration.starting_location.locations) == 0)
    # Third click plus reload: selection must persist through on_preset_changed.
    skip_qtbot.mouseClick(temple_grounds_checkbox, QtCore.Qt.MouseButton.LeftButton)
    window.on_preset_changed(editor.create_custom_preset_with())
    assert (temple_grounds_checkbox.checkState() == QtCore.Qt.CheckState.Checked)
    assert (len(editor.configuration.starting_location.locations) == 39)
class ExperimentPlanner2D(ExperimentPlanner):
    """Experiment planner producing 2D (in-plane) network/training plans.

    The axis with the largest target spacing is transposed to the front and
    excluded from the patch, so patches cover only the remaining two axes.
    """

    def __init__(self, folder_with_cropped_data, preprocessed_output_folder):
        super(ExperimentPlanner2D, self).__init__(folder_with_cropped_data, preprocessed_output_folder)
        self.data_identifier = (default_data_identifier + '_2D')
        self.plans_fname = join(self.preprocessed_output_folder, ('nnFormerPlans' + '_plans_2D.pkl'))
        # 2D-specific capacity limits for the U-Net configuration.
        self.unet_base_num_features = 30
        self.unet_max_num_filters = 512
        self.unet_max_numpool = 999  # effectively unlimited; bounded by patch size
        self.preprocessor_name = 'PreprocessorFor2D'

    def get_properties_for_stage(self, current_spacing, original_spacing, original_shape, num_cases, num_modalities, num_classes):
        """Derive batch size, patch size and pool/conv kernel layout for one
        resolution stage, constrained by an estimated VRAM budget."""
        new_median_shape = np.round(((original_spacing / current_spacing) * original_shape)).astype(int)
        dataset_num_voxels = (np.prod(new_median_shape, dtype=np.int64) * num_cases)
        # Drop the leading (transposed slicing) axis: patches are in-plane only.
        input_patch_size = new_median_shape[1:]
        (network_numpool, net_pool_kernel_sizes, net_conv_kernel_sizes, input_patch_size, shape_must_be_divisible_by) = get_pool_and_conv_props(current_spacing[1:], input_patch_size, self.unet_featuremap_min_edge_length, self.unet_max_numpool)
        estimated_gpu_ram_consumption = Generic_UNet.compute_approx_vram_consumption(input_patch_size, network_numpool, self.unet_base_num_features, self.unet_max_num_filters, num_modalities, num_classes, net_pool_kernel_sizes, conv_per_stage=self.conv_per_stage)
        # Scale the reference 2D batch size by the available/required VRAM ratio.
        batch_size = int(np.floor(((Generic_UNet.use_this_for_batch_size_computation_2D / estimated_gpu_ram_consumption) * Generic_UNet.DEFAULT_BATCH_SIZE_2D)))
        if (batch_size < self.unet_min_batch_size):
            raise RuntimeError('This framework is not made to process patches this large. We will add patch-based 2D networks later. Sorry for the inconvenience')
        # Cap the batch so it never covers more than a fixed fraction of the dataset.
        max_batch_size = np.round(((self.batch_size_covers_max_percent_of_dataset * dataset_num_voxels) / np.prod(input_patch_size, dtype=np.int64))).astype(int)
        batch_size = max(1, min(batch_size, max_batch_size))
        plan = {'batch_size': batch_size, 'num_pool_per_axis': network_numpool, 'patch_size': input_patch_size, 'median_patient_size_in_voxels': new_median_shape, 'current_spacing': current_spacing, 'original_spacing': original_spacing, 'pool_op_kernel_sizes': net_pool_kernel_sizes, 'conv_kernel_sizes': net_conv_kernel_sizes, 'do_dummy_2D_data_aug': False}
        return plan

    def plan_experiment(self):
        """Assemble the complete 2D plans dict from the dataset properties
        and persist it via save_my_plans()."""
        use_nonzero_mask_for_normalization = self.determine_whether_to_use_mask_for_norm()
        print('Are we using the nonzero maks for normalizaion?', use_nonzero_mask_for_normalization)
        spacings = self.dataset_properties['all_spacings']
        sizes = self.dataset_properties['all_sizes']
        all_classes = self.dataset_properties['all_classes']
        modalities = self.dataset_properties['modalities']
        num_modalities = len(list(modalities.keys()))
        target_spacing = self.get_target_spacing()
        new_shapes = np.array([((np.array(i) / target_spacing) * np.array(j)) for (i, j) in zip(spacings, sizes)])
        # Put the coarsest axis first; the remaining two form the 2D plane.
        max_spacing_axis = np.argmax(target_spacing)
        remaining_axes = [i for i in list(range(3)) if (i != max_spacing_axis)]
        self.transpose_forward = ([max_spacing_axis] + remaining_axes)
        self.transpose_backward = [np.argwhere((np.array(self.transpose_forward) == i))[0][0] for i in range(3)]
        median_shape = np.median(np.vstack(new_shapes), 0)
        print('the median shape of the dataset is ', median_shape)
        max_shape = np.max(np.vstack(new_shapes), 0)
        print('the max shape in the dataset is ', max_shape)
        min_shape = np.min(np.vstack(new_shapes), 0)
        print('the min shape in the dataset is ', min_shape)
        print("we don't want feature maps smaller than ", self.unet_featuremap_min_edge_length, ' in the bottleneck')
        self.plans_per_stage = []
        target_spacing_transposed = np.array(target_spacing)[self.transpose_forward]
        median_shape_transposed = np.array(median_shape)[self.transpose_forward]
        print('the transposed median shape of the dataset is ', median_shape_transposed)
        self.plans_per_stage.append(self.get_properties_for_stage(target_spacing_transposed, target_spacing_transposed, median_shape_transposed, num_cases=len(self.list_of_cropped_npz_files), num_modalities=num_modalities, num_classes=(len(all_classes) + 1)))
        print(self.plans_per_stage)
        # Highest resolution goes last -> reverse, then index stages by number.
        self.plans_per_stage = self.plans_per_stage[::(- 1)]
        self.plans_per_stage = {i: self.plans_per_stage[i] for i in range(len(self.plans_per_stage))}
        normalization_schemes = self.determine_normalization_scheme()
        # Postprocessing-related fields are intentionally left undetermined here.
        (only_keep_largest_connected_component, min_size_per_class, min_region_size_per_class) = (None, None, None)
        plans = {'num_stages': len(list(self.plans_per_stage.keys())), 'num_modalities': num_modalities, 'modalities': modalities, 'normalization_schemes': normalization_schemes, 'dataset_properties': self.dataset_properties, 'list_of_npz_files': self.list_of_cropped_npz_files, 'original_spacings': spacings, 'original_sizes': sizes, 'preprocessed_data_folder': self.preprocessed_output_folder, 'num_classes': len(all_classes), 'all_classes': all_classes, 'base_num_features': self.unet_base_num_features, 'use_mask_for_norm': use_nonzero_mask_for_normalization, 'keep_only_largest_region': only_keep_largest_connected_component, 'min_region_size_per_class': min_region_size_per_class, 'min_size_per_class': min_size_per_class, 'transpose_forward': self.transpose_forward, 'transpose_backward': self.transpose_backward, 'data_identifier': self.data_identifier, 'plans_per_stage': self.plans_per_stage, 'preprocessor_name': self.preprocessor_name}
        self.plans = plans
        self.save_my_plans()
class TestSpatialSvdLayerSplitandSVDPrunner():
    """Tests for SpatialSvdModuleSplitter and SpatialSvdPruner.

    NOTE(review): the bare `.parametrize(...)` lines below appear to be
    `@pytest.mark.parametrize(...)` decorators whose `@pytest.mark` prefix was
    lost during extraction — confirm against the original source; the class
    does not parse as-is.
    """
    .parametrize('model_type', ['Sequential', 'Functional'])
    .parametrize('rank', [1024, 512])
    def test_split_layer(self, model_type, rank):
        # Splitting a conv must preserve the overall output shape and the
        # weights structure; the bias lives on the second half of the split.
        model = get_model(model_type)
        orig_conv_op = _get_layers(model, model_type)[2]
        org_conv_op_shape = orig_conv_op.output_shape
        layer1 = Layer(orig_conv_op, orig_conv_op.name, output_shape=org_conv_op_shape)
        (split_conv_op1, split_conv_op2) = SpatialSvdModuleSplitter.split_module(model, layer=layer1, rank=rank)
        split_conv_output = split_conv_op2.output_shape
        assert (org_conv_op_shape == split_conv_output)
        assert (len(split_conv_op2.get_weights()) == len(orig_conv_op.get_weights()))
        if (len(orig_conv_op.get_weights()) > 1):
            # The original bias must transfer numerically to the second conv.
            orig_bias_out = orig_conv_op.get_weights()[1]
            split_bias_out = split_conv_op2.get_weights()[1]
            assert np.allclose(orig_bias_out, split_bias_out, atol=0.0001)
        # The first split conv carries only a kernel tensor (no bias).
        assert (len(split_conv_op1.get_weights()) == 1)
    .parametrize('model_type', ['Sequential', 'Functional'])
    def test_split_layer_with_stride(self, model_type):
        # Same invariants as test_split_layer, but on the strided conv
        # (layer index 1) and a small fixed rank.
        model = get_model(model_type)
        orig_conv_op = _get_layers(model, model_type)[1]
        org_conv_op_shape = orig_conv_op.output_shape
        layer1 = Layer(orig_conv_op, orig_conv_op.name, output_shape=org_conv_op_shape)
        (split_conv_op1, split_conv_op2) = SpatialSvdModuleSplitter.split_module(model, layer=layer1, rank=5)
        split_conv_output = split_conv_op2.output_shape
        assert (org_conv_op_shape == split_conv_output)
        assert (len(split_conv_op2.get_weights()) == len(orig_conv_op.get_weights()))
        if (len(orig_conv_op.get_weights()) > 1):
            orig_bias_out = orig_conv_op.get_weights()[1]
            split_bias_out = split_conv_op2.get_weights()[1]
            assert np.allclose(orig_bias_out, split_bias_out, atol=0.0001)
        assert (len(split_conv_op1.get_weights()) == 1)
    .parametrize('model_type', ['Sequential', 'Functional'])
    def test_perform_svd_and_split_layer(self, model_type):
        # Pruning replaces one layer with two: the original must vanish from
        # the database and the compressible-layer count grows by exactly one.
        model = get_model(model_type)
        layer_db = LayerDatabase(model)
        layer = layer_db.find_layer_by_name(_get_layers(model, model_type)[2].name)
        org_count = len(list(layer_db._compressible_layers.values()))
        splitter = SpatialSvdPruner()
        splitter._perform_svd_and_split_layer(layer, 1024, layer_db)
        assert (layer not in list(layer_db._compressible_layers.values()))
        after_split_count = len(list(layer_db._compressible_layers.values()))
        assert ((org_count + 1) == after_split_count)
_torch
_vision
class ViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for ViTImageProcessor: configuration attributes, from_dict
    behaviour, and the __call__ path for PIL, numpy and torch inputs."""

    image_processing_class = (ViTImageProcessor if is_vision_available() else None)

    def setUp(self):
        self.image_processor_tester = ViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        # BUGFIX: this is consumed everywhere as an attribute
        # (`**self.image_processor_dict`, `from_dict(self.image_processor_dict)`),
        # so it must be a property; without the decorator those call sites
        # would receive the bound method instead of a dict.
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        # The processor must expose the standard configuration attributes.
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))

    def test_image_processor_from_dict_with_kwargs(self):
        # An integer `size` kwarg must be expanded into a square size dict.
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'height': 18, 'width': 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {'height': 42, 'width': 42})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # PIL input: a single image gains a leading batch dim of 1; a list
        # batches along dim 0.
        image_processing = self.image_processing_class(**self.image_processor_dict)
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape, (1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width']))
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape, (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width']))

    def test_call_numpy(self):
        # numpy input: same shape contract as the PIL path.
        image_processing = self.image_processing_class(**self.image_processor_dict)
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape, (1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width']))
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape, (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width']))

    def test_call_pytorch(self):
        # torch input: same shape contract as the PIL path.
        image_processing = self.image_processing_class(**self.image_processor_dict)
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape, (1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width']))
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape, (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width']))
def _validate_child_key_integrity(value: Any) -> None:
    """Warn about children in `value` that lack a `key`.

    Generators cannot be inspected without consuming them, so they only
    trigger a warning that verification was skipped.
    """
    is_unsized_iterable = hasattr(value, '__iter__') and not hasattr(value, '__len__')
    if is_unsized_iterable:
        warn(f'Did not verify key-path integrity of children in generator {value} - pass a sequence (i.e. list of finite length) in order to verify')
        return
    for child in value:
        if isinstance(child, ComponentType) and child.key is None:
            warn(f'Key not specified for child in list {child}', UserWarning)
        elif isinstance(child, Mapping) and 'key' not in child:
            # Collapse nested children so the warning stays readable.
            redacted = {**child, 'children': _EllipsisRepr()}
            warn(f'Key not specified for child in list {redacted}', UserWarning)
class TestBuildDependenciesInstalled():
    """CLI tests for `hatch project metadata`: the full JSON dump plus
    individual field lookups (readme text, a plain string, a table).

    NOTE(review): several expected strings below contain dangling quotes
    where URL values appear to have been stripped during extraction —
    confirm against the original test file before relying on them.
    """

    def test_default_all(self, hatch, temp_dir, helpers):
        # Full `hatch project metadata` must emit the resolved metadata as
        # pretty-printed JSON, including the plain-text readme contents.
        project_name = 'My.App'
        with temp_dir.as_cwd():
            result = hatch('new', project_name)
        assert (result.exit_code == 0), result.output
        path = (temp_dir / 'my-app')
        # Switch the readme to a .txt file to exercise text/plain handling.
        (path / 'README.md').replace((path / 'README.txt'))
        project = Project(path)
        config = dict(project.raw_config)
        config['project']['readme'] = 'README.txt'
        project.save_config(config)
        with path.as_cwd():
            result = hatch('project', 'metadata')
        assert (result.exit_code == 0), result.output
        assert (result.output == helpers.dedent(f'''
        {{
            "name": "my-app",
            "version": "0.0.1",
            "readme": {{
                "content-type": "text/plain",
                "text": "{read_readme(path)}"
            }},
            "requires-python": ">=3.8",
            "license": "MIT",
            "authors": [
                {{
                    "name": "Foo Bar",
                    "email": ""
                }}
            ],
            "classifiers": [
                "Development Status :: 4 - Beta",
                "Programming Language :: Python",
                "Programming Language :: Python :: 3.8",
                "Programming Language :: Python :: 3.9",
                "Programming Language :: Python :: 3.10",
                "Programming Language :: Python :: 3.11",
                "Programming Language :: Python :: 3.12",
                "Programming Language :: Python :: Implementation :: CPython",
                "Programming Language :: Python :: Implementation :: PyPy"
            ],
            "urls": {{
                "Documentation": "
                "Issues": "
                "Source": "
            }}
        }}
        '''))

    def test_field_readme(self, hatch, temp_dir):
        # `metadata readme` must print the raw readme file contents.
        project_name = 'My.App'
        with temp_dir.as_cwd():
            result = hatch('new', project_name)
        assert (result.exit_code == 0), result.output
        path = (temp_dir / 'my-app')
        (path / 'README.md').replace((path / 'README.txt'))
        project = Project(path)
        config = dict(project.raw_config)
        config['project']['readme'] = 'README.txt'
        project.save_config(config)
        with path.as_cwd():
            result = hatch('project', 'metadata', 'readme')
        assert (result.exit_code == 0), result.output
        assert (result.output == f'''{(path / 'README.txt').read_text()}
''')

    def test_field_string(self, hatch, temp_dir, helpers):
        # A scalar field ('license') is printed as plain text, not JSON.
        project_name = 'My.App'
        with temp_dir.as_cwd():
            result = hatch('new', project_name)
        assert (result.exit_code == 0), result.output
        path = (temp_dir / 'my-app')
        with path.as_cwd():
            result = hatch('project', 'metadata', 'license')
        assert (result.exit_code == 0), result.output
        assert (result.output == helpers.dedent('\n MIT\n '))

    def test_field_complex(self, hatch, temp_dir, helpers):
        # A table field ('urls') is printed as JSON.
        project_name = 'My.App'
        with temp_dir.as_cwd():
            result = hatch('new', project_name)
        assert (result.exit_code == 0), result.output
        path = (temp_dir / 'my-app')
        with path.as_cwd():
            result = hatch('project', 'metadata', 'urls')
        assert (result.exit_code == 0), result.output
        assert (result.output == helpers.dedent('\n {\n "Documentation": " "Issues": " "Source": " }\n '))
class BaseModel():
    """Abstract base for training models: owns optimizers/schedulers, learning
    rate handling, and (de)serialization of networks and training state."""

    def __init__(self, opt):
        self.opt = opt
        # NOTE(review): device choice keys off the presence of 'gpu_ids', not
        # torch.cuda.is_available() — confirm this is intended.
        self.device = torch.device(('cuda' if (opt.get('gpu_ids', None) is not None) else 'cpu'))
        self.is_train = opt['is_train']
        self.schedulers = []
        self.optimizers = []
        self.scaler = None  # AMP grad scaler; expected to be set by subclasses

    # --- hooks to be implemented by concrete models ---
    def feed_data(self, data):
        pass

    def optimize_parameters(self):
        pass

    def get_current_visuals(self):
        pass

    def get_current_losses(self):
        pass

    def print_network(self):
        pass

    def save(self, label):
        pass

    def load(self):
        pass

    def _set_lr(self, lr_groups_l):
        """Set the LR of every param group; `lr_groups_l` is a list (one per
        optimizer) of lists (one LR per param group)."""
        for (optimizer, lr_groups) in zip(self.optimizers, lr_groups_l):
            for (param_group, lr) in zip(optimizer.param_groups, lr_groups):
                param_group['lr'] = lr

    def _get_init_lr(self):
        # Initial LRs as recorded in the param groups ('initial_lr' key).
        init_lr_groups_l = []
        for optimizer in self.optimizers:
            init_lr_groups_l.append([v['initial_lr'] for v in optimizer.param_groups])
        return init_lr_groups_l

    def update_learning_rate(self, cur_iter, warmup_iter=(- 1)):
        """Step all schedulers; within the first `warmup_iter` iterations the
        LR is overridden by a linear ramp from 0 to the initial LR."""
        for scheduler in self.schedulers:
            scheduler.step()
        if (cur_iter < warmup_iter):
            init_lr_g_l = self._get_init_lr()
            warm_up_lr_l = []
            for init_lr_g in init_lr_g_l:
                warm_up_lr_l.append([((v / warmup_iter) * cur_iter) for v in init_lr_g])
            self._set_lr(warm_up_lr_l)

    def get_current_learning_rate(self):
        # LR of the first param group of the first optimizer.
        return self.optimizers[0].param_groups[0]['lr']

    def get_network_description(self, network):
        """Return (repr string, parameter count) of the unwrapped network."""
        if (isinstance(network, nn.DataParallel) or isinstance(network, DistributedDataParallel)):
            network = network.module
        s = str(network)
        n = sum(map((lambda x: x.numel()), network.parameters()))
        return (s, n)

    def save_network(self, network, network_label, iter_label):
        """Save the network state dict as <iter_label>_<network_label>.pth,
        pruning old checkpoints down to the two most recent (while always
        keeping 'latest_' files and iteration multiples of 10000)."""
        paths = natsort.natsorted(glob.glob(os.path.join(self.opt['path']['models'], '*_{}.pth'.format(network_label))), reverse=True)
        paths = [p for p in paths if (('latest_' not in p) and (not any([(str((i * 10000)) in p.split('/')[(- 1)].split('_')) for i in range(101)])))]
        if (len(paths) > 2):
            for path in paths[2:]:
                os.remove(path)
        save_filename = '{}_{}.pth'.format(iter_label, network_label)
        save_path = os.path.join(self.opt['path']['models'], save_filename)
        if (isinstance(network, nn.DataParallel) or isinstance(network, DistributedDataParallel)):
            network = network.module
        state_dict = network.state_dict()
        # Move tensors to CPU so the checkpoint can load on any device.
        for (key, param) in state_dict.items():
            state_dict[key] = param.cpu()
        torch.save(state_dict, save_path)

    def load_network(self, load_path, network, strict=True, submodule=None):
        """Load a state dict into `network` (or its named submodule),
        unwrapping DataParallel/DDP and stripping 'module.' key prefixes."""
        if (isinstance(network, nn.DataParallel) or isinstance(network, DistributedDataParallel)):
            network = network.module
        if (not ((submodule is None) or (submodule.lower() == 'none'.lower()))):
            network = network.__getattr__(submodule)
        load_net = torch.load(load_path)
        load_net_clean = OrderedDict()
        for (k, v) in load_net.items():
            if k.startswith('module.'):
                load_net_clean[k[7:]] = v
            else:
                load_net_clean[k] = v
        network.load_state_dict(load_net_clean, strict=strict)

    def save_training_state(self, epoch, iter_step):
        """Persist epoch/iter plus optimizer, scheduler and scaler state;
        prunes old .state files down to the two most recent."""
        state = {'epoch': epoch, 'iter': iter_step, 'schedulers': [], 'optimizers': [], 'scaler': None}
        for s in self.schedulers:
            state['schedulers'].append(s.state_dict())
        for o in self.optimizers:
            state['optimizers'].append(o.state_dict())
        # NOTE(review): assumes self.scaler is not None here — raises
        # AttributeError otherwise; confirm all callers initialize it.
        state['scaler'] = self.scaler.state_dict()
        save_filename = '{}.state'.format(iter_step)
        save_path = os.path.join(self.opt['path']['training_state'], save_filename)
        paths = natsort.natsorted(glob.glob(os.path.join(self.opt['path']['training_state'], '*.state')), reverse=True)
        paths = [p for p in paths if ('latest_' not in p)]
        if (len(paths) > 2):
            for path in paths[2:]:
                os.remove(path)
        torch.save(state, save_path)

    def resume_training(self, resume_state):
        """Restore optimizers, schedulers and the AMP scaler from a saved
        training state produced by save_training_state()."""
        resume_optimizers = resume_state['optimizers']
        resume_schedulers = resume_state['schedulers']
        resume_scaler = resume_state['scaler']
        assert (len(resume_optimizers) == len(self.optimizers)), 'Wrong lengths of optimizers'
        assert (len(resume_schedulers) == len(self.schedulers)), 'Wrong lengths of schedulers'
        for (i, o) in enumerate(resume_optimizers):
            self.optimizers[i].load_state_dict(o)
        for (i, s) in enumerate(resume_schedulers):
            self.schedulers[i].load_state_dict(s)
        self.scaler.load_state_dict(resume_scaler)
def _builtin_filter_predicate(node, builtin_name) -> bool:
    """Return True when `node` is a call of the given builtin worth checking.

    `Pattern = type(...)` / `Match = type(...)` assignments inside the `re`
    module are deliberate aliases and are excluded. `dict.fromkeys(...)` is
    also accepted as a builtin-style call.
    """
    if builtin_name == 'type' and node.root().name == 're':
        # Deliberate aliasing inside `re` — not a flaggable builtin call.
        is_alias_assignment = (
            isinstance(node.func, nodes.Name)
            and node.func.name == 'type'
            and isinstance(node.parent, nodes.Assign)
            and len(node.parent.targets) == 1
            and isinstance(node.parent.targets[0], nodes.AssignName)
            and node.parent.targets[0].name in {'Pattern', 'Match'}
        )
        if is_alias_assignment:
            return False
    func = node.func
    if isinstance(func, nodes.Name):
        return func.name == builtin_name
    if isinstance(func, nodes.Attribute):
        return (
            func.attrname == 'fromkeys'
            and isinstance(func.expr, nodes.Name)
            and func.expr.name == 'dict'
        )
    return False
def get_ytplayer_config(html: str) -> Any:
    """Extract the ytplayer configuration object from a watch-page HTML blob.

    Tries the modern config patterns first, then the legacy setConfig form;
    raises RegexMatchError when nothing matches.
    """
    logger.debug('finding initial function name')
    primary_patterns = ('ytplayer\\.config\\s*=\\s*', 'ytInitialPlayerResponse\\s*=\\s*')
    for candidate in primary_patterns:
        try:
            return parse_for_object(html, candidate)
        except HTMLParseError as e:
            # Log and move on — a later pattern may still match.
            logger.debug(f'Pattern failed: {candidate}')
            logger.debug(e)
    legacy_patterns = ('yt\\.setConfig\\(.*[\'\\"]PLAYER_CONFIG[\'\\"]:\\s*',)
    for candidate in legacy_patterns:
        try:
            return parse_for_object(html, candidate)
        except HTMLParseError:
            pass
    raise RegexMatchError(caller='get_ytplayer_config', pattern='config_patterns, setconfig_patterns')
def convert_sentence_and_mention_to_features(sentence, mention, max_seq_length, tokenizer):
    """Tokenize a (sentence, mention) pair into BERT-style input features.

    Returns (input_ids, input_mask, segment_ids,
             (sentence_start_idx, sentence_end_idx),
             (mention_start_idx, mention_end_idx)).
    The index pairs are inclusive token positions inside the padded sequence;
    the sentence occupies segment 0 and the mention segment 1.
    """
    sent_tokens = tokenizer.tokenize(sentence)
    ment_tokens = tokenizer.tokenize(mention)
    # Reserve room for [CLS] and the two [SEP] markers.
    _truncate_seq_pair(sent_tokens, ment_tokens, max_seq_length - 3)

    # Layout: [CLS] sentence [SEP] mention [SEP]
    tokens = ['[CLS]'] + sent_tokens + ['[SEP]'] + ment_tokens + ['[SEP]']
    segment_ids = [0] * (len(sent_tokens) + 2) + [1] * (len(ment_tokens) + 1)

    # Inclusive token-index bookkeeping, mirroring the layout above.
    sentence_start_idx = 1
    sentence_end_idx = len(sent_tokens)
    mention_start_idx = len(sent_tokens) + 2
    mention_end_idx = len(sent_tokens) + 1 + len(ment_tokens)

    input_ids = tokenizer.convert_tokens_to_ids(tokens)
    input_mask = [1] * len(input_ids)

    # Zero-pad everything out to the fixed sequence length.
    pad = max_seq_length - len(input_ids)
    input_ids.extend([0] * pad)
    input_mask.extend([0] * pad)
    segment_ids.extend([0] * pad)

    assert len(input_ids) == max_seq_length, print(input_ids, len(input_ids), max_seq_length)
    assert len(input_mask) == max_seq_length, print(input_mask, len(input_mask), max_seq_length)
    assert len(segment_ids) == max_seq_length, print(segment_ids, len(segment_ids), max_seq_length)
    return (input_ids, input_mask, segment_ids, (sentence_start_idx, sentence_end_idx), (mention_start_idx, mention_end_idx))
def save_checkpoint(args, epoch, model, optimizer):
    """Save a training checkpoint to `args.path2saved_checkpoints`.

    Writes `checkpoint_<epoch>.pt` containing the epoch number plus the model
    and optimizer state dicts. Transient save errors are retried up to 10
    times; a total failure is reported instead of being silently swallowed.

    Note: the original contained a dead `if False:` branch saving
    `model.module.state_dict()` (DDP); unwrap DDP at the call site if needed.
    """
    checkpoint_path = os.path.join(args.path2saved_checkpoints, f'checkpoint_{epoch}.pt')
    state = {
        'epoch': epoch,
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
    }
    for _attempt in range(10):
        try:
            torch.save(state, checkpoint_path)
            return
        except Exception:
            # Retry on transient I/O failures. A bare `except:` (as before)
            # would also have swallowed KeyboardInterrupt — it must not.
            continue
    print(f'WARNING: failed to save checkpoint to {checkpoint_path} after 10 attempts')
class PartialFCAdamW(torch.nn.Module):
    """Model-parallel softmax classification head with partial class sampling.

    The class weight matrix is sharded across distributed ranks. When
    ``sample_rate < 1`` only a sampled subset of this rank's classes is
    activated per step, and the activated rows (plus their Adam moment
    buffers) are swapped in and out of the wrapped Adam/AdamW optimizer's
    state between steps.

    NOTE(review): the bare ``_grad()`` lines in the class body look like
    mangled ``@torch.no_grad()`` decorators for the methods that follow
    them — confirm against the upstream source before running.
    """
    def __init__(self, margin_loss: Callable, embedding_size: int, num_classes: int, sample_rate: float=1.0, fp16: bool=False):
        super(PartialFCAdamW, self).__init__()
        assert distributed.is_initialized(), 'must initialize distributed before create this'
        self.rank = distributed.get_rank()
        self.world_size = distributed.get_world_size()
        self.dist_cross_entropy = DistCrossEntropy()
        self.embedding_size = embedding_size
        self.sample_rate: float = sample_rate
        self.fp16 = fp16
        # Classes owned by this rank; the first (num_classes % world_size)
        # ranks each take one extra class.
        self.num_local: int = ((num_classes // self.world_size) + int((self.rank < (num_classes % self.world_size))))
        # Global index of this rank's first class.
        self.class_start: int = (((num_classes // self.world_size) * self.rank) + min(self.rank, (num_classes % self.world_size)))
        # Number of classes activated per step when sampling is enabled.
        self.num_sample: int = int((self.sample_rate * self.num_local))
        self.last_batch_size: int = 0
        self.weight: torch.Tensor
        self.weight_exp_avg: torch.Tensor
        self.weight_exp_avg_sq: torch.Tensor
        self.weight_activated: torch.nn.Parameter
        self.weight_activated_exp_avg: torch.Tensor
        self.weight_activated_exp_avg_sq: torch.Tensor
        self.is_updated: bool = True
        self.init_weight_update: bool = True
        if (self.sample_rate < 1):
            # Full shard and its Adam moments live in buffers; only the
            # sampled rows become the trainable `weight_activated` Parameter.
            self.register_buffer('weight', tensor=torch.normal(0, 0.01, (self.num_local, embedding_size)))
            self.register_buffer('weight_exp_avg', tensor=torch.zeros_like(self.weight))
            self.register_buffer('weight_exp_avg_sq', tensor=torch.zeros_like(self.weight))
            self.register_parameter('weight_activated', param=torch.nn.Parameter(torch.empty(0, 0)))
            self.register_buffer('weight_activated_exp_avg', tensor=torch.empty(0, 0))
            self.register_buffer('weight_activated_exp_avg_sq', tensor=torch.empty(0, 0))
        else:
            # No sampling: the whole local shard is trained directly.
            self.weight_activated = torch.nn.Parameter(torch.normal(0, 0.01, (self.num_local, embedding_size)))
        self.step = 0
        if isinstance(margin_loss, Callable):
            self.margin_softmax = margin_loss
        else:
            raise
    _grad()
    def sample(self, labels, index_positive, optimizer):
        """Sample ``num_sample`` local classes, always keeping positives.

        Remaps ``labels`` in place to indices within the sampled subset and
        swaps the sampled weight rows and their Adam moments into
        ``optimizer``'s state, so the next step only updates those rows.
        """
        self.step += 1
        positive = torch.unique(labels[index_positive], sorted=True).cuda()
        if ((self.num_sample - positive.size(0)) >= 0):
            # Random scores with positives forced to the top (2.0 > any rand).
            perm = torch.rand(size=[self.num_local]).cuda()
            perm[positive] = 2.0
            index = torch.topk(perm, k=self.num_sample)[1].cuda()
            index = index.sort()[0].cuda()
        else:
            # More positives than the sampling budget: keep all positives.
            index = positive
        self.weight_index = index
        # Remap labels to positions within the sorted sampled index.
        labels[index_positive] = torch.searchsorted(index, labels[index_positive])
        self.weight_activated = torch.nn.Parameter(self.weight[self.weight_index])
        self.weight_activated_exp_avg = self.weight_exp_avg[self.weight_index]
        self.weight_activated_exp_avg_sq = self.weight_exp_avg_sq[self.weight_index]
        if isinstance(optimizer, (torch.optim.Adam, torch.optim.AdamW)):
            # Replace the last param group's tensor and its optimizer state
            # with the freshly sampled rows/moments.
            optimizer.state.pop(optimizer.param_groups[(- 1)]['params'][0], None)
            optimizer.param_groups[(- 1)]['params'][0] = self.weight_activated
            optimizer.state[self.weight_activated]['exp_avg'] = self.weight_activated_exp_avg
            optimizer.state[self.weight_activated]['exp_avg_sq'] = self.weight_activated_exp_avg_sq
            optimizer.state[self.weight_activated]['step'] = self.step
        else:
            raise
    _grad()
    def update(self):
        """Write the trained sampled rows and moments back to the full shard."""
        if self.init_weight_update:
            # Skip the very first call: nothing has been sampled yet.
            self.init_weight_update = False
            return
        if (self.sample_rate < 1):
            self.weight[self.weight_index] = self.weight_activated
            self.weight_exp_avg[self.weight_index] = self.weight_activated_exp_avg
            self.weight_exp_avg_sq[self.weight_index] = self.weight_activated_exp_avg_sq
    def forward(self, local_embeddings: torch.Tensor, local_labels: torch.Tensor, optimizer: torch.optim.Optimizer):
        """Compute the distributed margin-softmax cross-entropy loss.

        Gathers embeddings/labels from all ranks, marks labels owned by
        other ranks with -1 and shifts the rest to local indices, samples
        classes when enabled, then evaluates margin softmax + the
        distributed cross entropy. Returns the loss tensor.
        """
        local_labels.squeeze_()
        local_labels = local_labels.long()
        self.update()
        batch_size = local_embeddings.size(0)
        if (self.last_batch_size == 0):
            self.last_batch_size = batch_size
        assert (self.last_batch_size == batch_size), 'last batch size do not equal current batch size: {} vs {}'.format(self.last_batch_size, batch_size)
        _gather_embeddings = [torch.zeros((batch_size, self.embedding_size)).cuda() for _ in range(self.world_size)]
        _gather_labels = [torch.zeros(batch_size).long().cuda() for _ in range(self.world_size)]
        _list_embeddings = AllGather(local_embeddings, *_gather_embeddings)
        distributed.all_gather(_gather_labels, local_labels)
        embeddings = torch.cat(_list_embeddings)
        labels = torch.cat(_gather_labels)
        labels = labels.view((- 1), 1)
        # True where the label falls inside this rank's class range.
        index_positive = ((self.class_start <= labels) & (labels < (self.class_start + self.num_local)))
        labels[(~ index_positive)] = (- 1)
        labels[index_positive] -= self.class_start
        if (self.sample_rate < 1):
            self.sample(labels, index_positive, optimizer)
        with torch.cuda.amp.autocast(self.fp16):
            norm_embeddings = normalize(embeddings)
            norm_weight_activated = normalize(self.weight_activated)
            logits = linear(norm_embeddings, norm_weight_activated)
        if self.fp16:
            logits = logits.float()
        logits = logits.clamp((- 1), 1)
        logits = self.margin_softmax(logits, labels)
        loss = self.dist_cross_entropy(logits, labels)
        return loss
    def state_dict(self, destination=None, prefix='', keep_vars=False):
        """Standard module state_dict plus the full (unsampled) 'weight' entry."""
        if (destination is None):
            destination = collections.OrderedDict()
            destination._metadata = collections.OrderedDict()
        for (name, module) in self._modules.items():
            if (module is not None):
                module.state_dict(destination, ((prefix + name) + '.'), keep_vars=keep_vars)
        if (self.sample_rate < 1):
            destination['weight'] = self.weight.detach()
        else:
            destination['weight'] = self.weight_activated.data.detach()
        return destination
    def load_state_dict(self, state_dict, strict: bool=True):
        """Load the full weight shard; Adam moment buffers are reset to zero."""
        if (self.sample_rate < 1):
            self.weight = state_dict['weight'].to(self.weight.device)
            self.weight_exp_avg.zero_()
            self.weight_exp_avg_sq.zero_()
            self.weight_activated.data.zero_()
            self.weight_activated_exp_avg.zero_()
            self.weight_activated_exp_avg_sq.zero_()
        else:
            self.weight_activated.data = state_dict['weight'].to(self.weight_activated.data.device)
class vgg16(torch.nn.Module):
    """VGG-16 feature extractor split into five sequential slices.

    Each slice covers the torchvision VGG-16 feature layers up to (and
    including) one ReLU of interest; ``forward`` returns the activations
    relu1_2 .. relu5_3 as a named tuple. With ``requires_grad=False``
    (the default) all weights are frozen for use as a fixed extractor.
    """

    def __init__(self, requires_grad=False, pretrained=True):
        super(vgg16, self).__init__()
        features = tv.vgg16(pretrained=pretrained).features
        self.N_slices = 5
        # Layer-index boundaries of the five slices within torchvision's
        # sequential feature stack (half-open ranges).
        bounds = [(0, 4), (4, 9), (9, 16), (16, 23), (23, 30)]
        for slice_num, (lo, hi) in enumerate(bounds, start=1):
            stage = torch.nn.Sequential()
            for layer_idx in range(lo, hi):
                # Keep torchvision's layer index as the submodule name.
                stage.add_module(str(layer_idx), features[layer_idx])
            setattr(self, 'slice%d' % slice_num, stage)
        if not requires_grad:
            for param in self.parameters():
                param.requires_grad = False

    def forward(self, X):
        """Run X through the five slices, collecting each intermediate output."""
        vgg_outputs = namedtuple('VggOutputs', ['relu1_2', 'relu2_2', 'relu3_3', 'relu4_3', 'relu5_3'])
        activations = []
        h = X
        for slice_num in range(1, self.N_slices + 1):
            h = getattr(self, 'slice%d' % slice_num)(h)
            activations.append(h)
        return vgg_outputs(*activations)
def color_jitter_rand(image, brightness=0, contrast=0, saturation=0, hue=0):
    """Apply brightness/contrast/saturation/hue jitter in a random order.

    Each distortion is a no-op when its strength argument is 0. The four
    transforms are applied in a randomly shuffled order, dispatched through
    graph-mode ``tf.cond`` so the shuffled indices are resolved at run time,
    and the result is clipped back to [0, 1].

    Args:
        image: Image tensor; assumed float-valued in [0, 1] (the final clip
            suggests so) — TODO confirm against callers.
        brightness: Max brightness delta.
        contrast: Contrast factor sampled from [1 - contrast, 1 + contrast].
        saturation: Saturation factor sampled from [1 - saturation, 1 + saturation].
        hue: Max hue delta.

    Returns:
        The jittered image tensor, clipped to [0.0, 1.0].
    """
    with tf.name_scope('distort_color'):
        def apply_transform(i, x):
            # Dispatch on the runtime index i: 0=brightness, 1=contrast,
            # 2=saturation, 3=hue.
            def brightness_foo():
                if (brightness == 0):
                    return x
                else:
                    return tf.image.random_brightness(x, max_delta=brightness)
            def contrast_foo():
                if (contrast == 0):
                    return x
                else:
                    return tf.image.random_contrast(x, lower=(1 - contrast), upper=(1 + contrast))
            def saturation_foo():
                if (saturation == 0):
                    return x
                else:
                    return tf.image.random_saturation(x, lower=(1 - saturation), upper=(1 + saturation))
            def hue_foo():
                if (hue == 0):
                    return x
                else:
                    return tf.image.random_hue(x, max_delta=hue)
            # Binary-tree of tf.cond branches selecting one of the four ops.
            x = tf.cond(tf.less(i, 2), (lambda : tf.cond(tf.less(i, 1), brightness_foo, contrast_foo)), (lambda : tf.cond(tf.less(i, 3), saturation_foo, hue_foo)))
            return x
        # Random permutation of the four transforms, applied sequentially.
        perm = tf.random_shuffle(tf.range(4))
        for i in range(4):
            image = apply_transform(perm[i], image)
            image = tf.clip_by_value(image, 0.0, 1.0)
        return image
def min_weight_simple_path_greedy(graph: nx.Graph, n: int, weight_fun: Callable[([nx.Graph, List], float)]=path_weight):
    """Greedy search for a low-weight simple path with exactly ``n`` nodes.

    Seeds a candidate path from each edge in the lighter half of the graph's
    edges (sorted by weight) and greedily extends the path from either end
    with the cheapest adjacent edge until it has ``n`` nodes or can no longer
    grow. Returns the best such path by ``weight_fun``, or None when no seed
    reaches length ``n``. Heuristic only — no optimality guarantee.
    """
    def _grow_path_lowest_weight(path, partial_graph):
        # Cheapest edge incident to either endpoint of the current path.
        adjacent_edges = sorted(list(partial_graph.edges([path[0], path[(- 1)]], data='weight')), key=(lambda e: e[2]))
        if (len(adjacent_edges) == 0):
            return path
        (u, v, _) = adjacent_edges[0]
        # Remove the endpoint already on the path from the working graph so
        # it cannot be revisited, keeping the path simple.
        if ((path[0] == u) or (path[(- 1)] == u)):
            partial_graph.remove_node(u)
        else:
            partial_graph.remove_node(v)
        # join_path is defined elsewhere; presumably splices [u, v] onto the
        # matching end of `path` — TODO confirm its contract.
        return join_path(path, [u, v])
    # Seed only from the lighter half of all edges, sorted by weight.
    edges_sorted = sorted(list(graph.edges.data('weight')), key=(lambda e: e[2]))
    edges_sorted = edges_sorted[:(len(edges_sorted) // 2)]
    best_weight = float('inf')
    best_path = None
    for e in edges_sorted:
        # Work on a copy so each seed explores an unmutated graph.
        subgraph = graph.copy()
        path = [e[0], e[1]]
        subgraph.remove_edge(e[0], e[1])
        while (len(path) < n):
            path2 = _grow_path_lowest_weight(path, subgraph)
            if (path2 == path):
                # No adjacent edge left: the path cannot grow further.
                break
            else:
                path = path2
            # Drop any edge closing the path into a cycle.
            if subgraph.has_edge(path[0], path[(- 1)]):
                subgraph.remove_edge(path[0], path[(- 1)])
        my_weight = weight_fun(graph, path)
        if ((len(path) == n) and (my_weight < best_weight)):
            best_path = path
            best_weight = my_weight
    return best_path
def command_dnone(command, args):
    """Run the 'dnone' standard comparison check with filtering disabled.

    Parses command-line options, validates the four positional arguments
    (two directories and an s-min/s-max pair), forces both relative filter
    corners off, and runs the comparison. Returns ``(gfts, out_filename)``
    when an output file was requested, otherwise None.
    """
    def setup(parser):
        # Register the option groups this command understands.
        add_source_options(parser)
        add_double_options(parser)

    parser, opts, args = cl_parse(command, args, setup=setup)
    dir1, dir2, smin, smax = verify_arguements('dnone', 4, args)
    # 'dnone' means no band-pass: disable both relative filter corners.
    opts.update(rel_lowpass_frequency=None, rel_highpass_frequency=None)
    out_filename = opts.pop('output')
    gfts = gftest.runComparissonStandardCheck(dir1, dir2, smin, smax, **opts)
    if out_filename is not None:
        return (gfts, out_filename)
def rally_count(rawfile, predict_file, savefile, clipinfo_file):
    """Summarize badminton rally outcomes into a per-set JSON report.

    Joins the raw shot records (``rawfile``), the ball-type predictions
    (``predict_file``) and clip metadata (``clipinfo_file``) into one row per
    rally — running score, stroke count, winner, ball type, losing area and
    reason, and error flag — grouped by set, then writes the result via
    ``export_json``.

    Args:
        rawfile: CSV with set/rally/hit_area/getpoint_player/lose_reason/type columns.
        predict_file: CSV with a 'prediction' column; assumed one row per
            finished rally, in order — TODO confirm against the producer.
        savefile: Output JSON path.
        clipinfo_file: Excel file with rally/hit_height columns.
    """
    data = pd.read_csv(rawfile)
    predict_result = pd.read_csv(predict_file)
    clipinfo_data = pd.read_excel(clipinfo_file)
    needed_data = data[['set', 'rally', 'hit_area', 'getpoint_player', 'lose_reason', 'type']]
    clipinfo_data = clipinfo_data[['rally', 'hit_height']]
    a_score = 0
    b_score = 0
    hit_count = 0
    pre_set = 1
    sets = []
    rally = []
    score = []
    stroke = []
    winner = []
    error = []
    # Walk every shot; a non-NaN getpoint_player marks the final shot of a
    # rally (pandas reads missing values as float NaN, hence the type check).
    for i in range(len(needed_data['hit_area'])):
        if (type(needed_data['getpoint_player'][i]) != float):
            if (needed_data['getpoint_player'][i] == 'A'):
                a_score += 1
            elif (needed_data['getpoint_player'][i] == 'B'):
                b_score += 1
            sets.append(needed_data['set'][i])
            rally.append(needed_data['rally'][i])
            # New set: reset the score, crediting this rally's winner with 1.
            if (needed_data['set'][i] != pre_set):
                pre_set = needed_data['set'][i]
                if (a_score > b_score):
                    a_score = 1
                    b_score = 0
                else:
                    a_score = 0
                    b_score = 1
            score.append(((str(a_score) + ':') + str(b_score)))
            stroke.append(hit_count)
            winner.append(needed_data['getpoint_player'][i])
            ee = find_in_rally(clipinfo_data, needed_data['rally'][i], hit_count)
            error.append(ee)
            hit_count = 0
        hit_count += 1
    # Rows with both hit_area and lose_reason describe how each rally ended.
    lose_detail = needed_data[['hit_area', 'lose_reason']].dropna().reset_index(drop=True)
    cnt = 0
    balltype = []
    # Consume one prediction per finished rally, in order; stop silently if
    # predictions run out.
    for i in range(len(needed_data['getpoint_player'])):
        if (((needed_data['getpoint_player'][i] == 'A') or (needed_data['getpoint_player'][i] == 'B')) and (cnt in range(len(predict_result['prediction'])))):
            balltype.append(predict_result['prediction'][cnt])
            cnt += 1
    # Assemble the per-rally table; all lists are assumed to be equal length
    # — TODO confirm the inputs always line up.
    result_data = pd.DataFrame(columns=['set', 'rally', 'score', 'stroke', 'winner', 'on_off_court', 'balltype', 'lose_area', 'error'])
    result_data['set'] = sets
    result_data['rally'] = rally
    result_data['score'] = score
    result_data['stroke'] = stroke
    result_data['winner'] = winner
    result_data['on_off_court'] = list(lose_detail['lose_reason'].values)
    result_data['balltype'] = balltype
    result_data['lose_area'] = list(lose_detail['hit_area'].values)
    result_data['error'] = error
    # Nest the per-rally records under each set, then export as JSON.
    result_data = result_data.groupby(['set'], as_index=True).apply((lambda x: x[['rally', 'score', 'stroke', 'winner', 'on_off_court', 'balltype', 'lose_area', 'error']].to_dict('records'))).reset_index().rename(columns={0: 'result'})
    export_json(savefile, result_data)
# Windows securable-object categories, mirroring the Win32 SE_OBJECT_TYPE
# enumeration; member values match the native constants.
SE_OBJECT_TYPE = enum.Enum(
    'SE_OBJECT_TYPE',
    [
        ('SE_UNKNOWN_OBJECT_TYPE', 0),
        ('SE_FILE_OBJECT', 1),
        ('SE_SERVICE', 2),
        ('SE_PRINTER', 3),
        ('SE_REGISTRY_KEY', 4),
        ('SE_LMSHARE', 5),
        ('SE_KERNEL_OBJECT', 6),
        ('SE_WINDOW_OBJECT', 7),
        ('SE_DS_OBJECT', 8),
        ('SE_DS_OBJECT_ALL', 9),
        ('SE_PROVIDER_DEFINED_OBJECT', 10),
        ('SE_WMIGUID_OBJECT', 11),
        ('SE_REGISTRY_WOW64_32KEY', 12),
        ('SE_REGISTRY_WOW64_64KEY', 13),
    ],
)
def test_includes_with_inline_table() -> None:
    """Sdist honors includes declared as inline tables with format restrictions."""
    poetry = Factory().create_poetry(project('with_include_inline_table'))
    SdistBuilder(poetry).build()
    sdist = fixtures_dir / 'with_include_inline_table' / 'dist' / 'with_include-1.2.3.tar.gz'
    assert sdist.exists()
    with tarfile.open(str(sdist), 'r') as tar:
        names = tar.getnames()
        # both.txt is included for every build format; wheel_only.txt must be
        # absent because it is restricted to wheels.
        assert 'with_include-1.2.3/both.txt' in names
        assert 'with_include-1.2.3/wheel_only.txt' not in names
        assert 'with_include-1.2.3/tests/__init__.py' in names
        assert 'with_include-1.2.3/tests/test_foo/test.py' in names
def main():
    """Aggregate per-scene evaluation JSONs under ``logs/`` and print means.

    Scans every ``logs/<run>/eval-noise_<x>-miss_<y>`` directory, loads each
    scene's JSON result, keeps only scenes evaluated under all N settings,
    and prints metric means grouped by (log_dir, noise, miss).
    """
    logs_dir = path.Path('logs')
    data = []
    for log_dir in sorted(logs_dir.listdir()):
        if not log_dir.isdir():
            continue
        for eval_dir in log_dir.glob('eval*'):
            m = re.match('^eval-noise_(.*)-miss_(.*)$', eval_dir.basename())
            if m is None:
                # Directory name doesn't encode noise/miss settings; skip it
                # (previously this crashed with AttributeError on m.groups()).
                continue
            (noise, miss) = [float(x) for x in m.groups()]
            for json_file in eval_dir.walk('*.json'):
                with open(json_file) as f:
                    try:
                        json_data = json.load(f)
                    except json.decoder.JSONDecodeError:
                        # Skip partially written / corrupt result files.
                        continue
                # Sanity-check that the file matches its own metadata.
                assert (json_file.stem == json_data['scene_id'])
                assert (json_data['seed'] == 0)
                data.append({'log_dir': str(log_dir.stem), 'scene_id': json_data['scene_id'], 'noise': noise, 'miss': miss, 'target_object_visibility': json_data['target_object_visibility'], 'sum_of_translations': json_data['sum_of_translations'], 'sum_of_max_velocities': json_data['sum_of_max_velocities']})
    pandas.set_option('display.max_colwidth', 400)
    pandas.set_option('display.max_columns', 500)
    pandas.set_option('display.width', 1000)
    pandas.set_option('display.float_format', '{:.3f}'.format)
    df = pandas.DataFrame(data)
    # Each scene must appear once per setting: 3 runs x 10 noisy (noise, miss)
    # combinations plus 3 clean baselines — TODO confirm against the eval grid.
    N = ((3 * 10) + 3)
    count = df.groupby(['scene_id']).count()
    valid_scene_ids = count[(count == N)].dropna().index.get_level_values('scene_id')
    valid_scene_ids = valid_scene_ids[:600]
    print('# of valid scene_ids:', len(valid_scene_ids))
    df_valid = df[df['scene_id'].isin(valid_scene_ids)]
    # (Dead `if 0:` debug blocks from the original — a per-scene consistency
    # check and an IPython.embed() — were removed.)
    print(df_valid.groupby(['log_dir', 'noise', 'miss']).mean())
    a = df_valid.groupby(['log_dir', 'noise', 'miss']).mean().reset_index()
    print(a[((a['noise'] == 0) & (a['miss'] == 0))].set_index(['log_dir', 'noise', 'miss']))
    b = a[((a['noise'] == 0.3) & (a['miss'] != 0))]
    print(b.groupby('log_dir').mean())
class ConcatOptimizer(FairseqOptimizer):
    """Drives a list of fairseq optimizers as one combined optimizer.

    All wrapped optimizers must share the same precision mode: either all
    FP16 (fairseq FP16 mixins), in which case they are forced to share a
    single loss scaler, or all FP32. Mixing the two raises ValueError.
    """

    def __init__(self, args, optimizer_list):
        self.optimizer_list = optimizer_list
        self.scaler = None
        self.is_fpl6 = None  # NOTE(review): name looks like a typo for 'is_fp16'; kept for compatibility
        self.check_optimizer()
        if self.is_fpl6:
            # Share one loss scaler across all sub-optimizers and widen its
            # window, since overflow checks now happen once per sub-optimizer.
            for optimizer in optimizer_list:
                if self.scaler is None:
                    self.scaler = optimizer.scaler
                    self.scaler.scale_window *= len(optimizer_list)
                else:
                    optimizer.scaler = self.scaler
                assert self.scaler is optimizer.scaler

    def check_optimizer(self):
        """Set ``self.is_fpl6`` and reject mixed fp16/fp32 optimizer lists.

        Bug fix: the previous version reassigned ``self.is_fpl6`` for each
        optimizer *before* comparing against it, so a mixture was never
        actually detected (the last optimizer always won). Every optimizer
        is now compared against the first one's mode.
        """
        for optimizer in self.optimizer_list:
            is_fp16 = isinstance(optimizer, (_FP16OptimizerMixin, _MemoryEfficientFP16OptimizerMixin))
            if self.is_fpl6 is None:
                self.is_fpl6 = is_fp16
            elif self.is_fpl6 != is_fp16:
                raise ValueError('mixture of fp16optimizer and pf32optimizer is not supported')

    def params(self):
        # Flattened parameter access is intentionally unsupported; address
        # the sub-optimizers individually instead.
        raise NotImplementedError

    def __getstate__(self):
        # Bug fix: previously returned the bound `__getstate__` methods
        # themselves instead of calling them.
        return [optimizer.__getstate__() for optimizer in self.optimizer_list]

    def get_lr(self):
        """Return the learning rate of each sub-optimizer (one per optimizer)."""
        return [optimizer.get_lr() for optimizer in self.optimizer_list]

    def set_lr(self, lr):
        """Set learning rates from a sequence, one entry per sub-optimizer."""
        assert (len(lr) == len(self.optimizer_list))
        for (rate, optimizer) in zip(lr, self.optimizer_list):
            optimizer.set_lr(rate)

    def state_dict(self):
        """Return the sub-optimizers' state dicts as a list."""
        return [optimizer.state_dict() for optimizer in self.optimizer_list]

    def load_state_dict(self, state_dict, optimizer_overrides=None):
        """Load a list of state dicts, one per sub-optimizer, in order."""
        for (state, optimizer) in zip(state_dict, self.optimizer_list):
            optimizer.load_state_dict(state, optimizer_overrides)
        # NOTE(review): `state_dict` is a list here, so this membership test
        # checks list *elements*, not dict keys — confirm the intended format.
        if ('loss_scale' in state_dict):
            assert (self.scaler.loss_scale == state_dict['loss_scale'])

    def backward(self, loss):
        if self.is_fpl6:
            # Scale the loss once and mark every sub-optimizer's gradients as
            # needing sync/unscaling before the next step.
            loss = loss * self.scaler.loss_scale
            for optimizer in self.optimizer_list:
                if isinstance(optimizer, _FP16OptimizerMixin):
                    optimizer._needs_sync = True
                elif isinstance(optimizer, _MemoryEfficientFP16OptimizerMixin):
                    # Bug fix: was `optmizer` (NameError whenever reached).
                    optimizer._grads_are_scaled = True
        loss.backward()

    def multiply_grads(self, c):
        """Multiply all gradients by the constant ``c``."""
        for optimizer in self.optimizer_list:
            optimizer.multiply_grads(c)

    def step(self, closure=None):
        """Run an optimization step on every sub-optimizer."""
        for optimizer in self.optimizer_list:
            optimizer.step(closure)

    def clip_grad_norm(self, max_norm):
        """Clip each sub-optimizer's gradients; return the largest norm seen."""
        return max([optimizer.clip_grad_norm(max_norm) for optimizer in self.optimizer_list])

    def zero_grad(self):
        """Clear gradients on every sub-optimizer."""
        for optimizer in self.optimizer_list:
            optimizer.zero_grad()

    def supports_memory_efficient_fp16(self):
        """True only when every sub-optimizer supports memory-efficient FP16."""
        return all([optimizer.supports_memory_efficient_fp16 for optimizer in self.optimizer_list])
class DescribeStyles():
    """Behavior specs for the python-docx ``Styles`` collection.

    NOTE(review): this chunk is mangled — the bare ``.filterwarnings(...)``
    line and the ``(params=[...])`` expressions preceding fixtures look like
    stripped ``@pytest.mark.filterwarnings`` / ``@pytest.fixture`` decorators,
    and the fixture methods below are missing their ``@pytest.fixture``
    markers entirely. As written this block is not valid Python; restore the
    decorators from the upstream python-docx test suite before running.
    """
    # ---- behavior specifications -------------------------------------
    def it_supports_the_in_operator_on_style_name(self, in_fixture):
        (styles, name, expected_value) = in_fixture
        assert ((name in styles) is expected_value)
    def it_knows_its_length(self, len_fixture):
        (styles, expected_value) = len_fixture
        assert (len(styles) == expected_value)
    def it_can_iterate_over_its_styles(self, iter_fixture):
        (styles, expected_count, style_, StyleFactory_, expected_calls) = iter_fixture
        count = 0
        for style in styles:
            assert (style is style_)
            count += 1
        assert (count == expected_count)
        assert (StyleFactory_.call_args_list == expected_calls)
    # NOTE(review): mangled '@pytest.mark.filterwarnings' decorator follows.
    .filterwarnings('ignore::UserWarning')
    def it_can_get_a_style_by_id(self, getitem_id_fixture):
        (styles, key, expected_element) = getitem_id_fixture
        style = styles[key]
        assert (style._element is expected_element)
    def it_can_get_a_style_by_name(self, getitem_name_fixture):
        (styles, key, expected_element) = getitem_name_fixture
        style = styles[key]
        assert (style._element is expected_element)
    def it_raises_on_style_not_found(self, get_raises_fixture):
        (styles, key) = get_raises_fixture
        with pytest.raises(KeyError):
            styles[key]
    def it_can_add_a_new_style(self, add_fixture):
        (styles, name, style_type, builtin) = add_fixture[:4]
        (name_, StyleFactory_, style_elm_, style_) = add_fixture[4:]
        style = styles.add_style(name, style_type, builtin)
        styles._element.add_style_of_type.assert_called_once_with(name_, style_type, builtin)
        StyleFactory_.assert_called_once_with(style_elm_)
        assert (style is style_)
    def it_raises_when_style_name_already_used(self, add_raises_fixture):
        (styles, name) = add_raises_fixture
        with pytest.raises(ValueError, match="document already contains style 'Hea"):
            styles.add_style(name, None)
    def it_can_get_the_default_style_for_a_type(self, default_fixture):
        (styles, style_type, StyleFactory_) = default_fixture[:3]
        (StyleFactory_calls, style_) = default_fixture[3:]
        style = styles.default(style_type)
        assert (StyleFactory_.call_args_list == StyleFactory_calls)
        assert (style is style_)
    def it_can_get_a_style_of_type_by_id(self, _get_by_id_, style_):
        (style_id, style_type) = (42, 7)
        _get_by_id_.return_value = style_
        styles = Styles(None)
        style = styles.get_by_id(style_id, style_type)
        _get_by_id_.assert_called_once_with(styles, style_id, style_type)
        assert (style is style_)
    def but_it_returns_the_default_style_for_style_id_None(self, default_, style_):
        style_type = 17
        default_.return_value = style_
        styles = Styles(None)
        style = styles.get_by_id(None, style_type)
        default_.assert_called_once_with(styles, style_type)
        assert (style is style_)
    def it_can_get_a_style_id_from_a_style(self, _get_style_id_from_style_):
        style = BaseStyle(None)
        style_type = 22
        _get_style_id_from_style_.return_value = 'StyleId'
        styles = Styles(None)
        style_id = styles.get_style_id(style, style_type)
        _get_style_id_from_style_.assert_called_once_with(styles, style, style_type)
        assert (style_id == 'StyleId')
    def and_it_can_get_a_style_id_from_a_style_name(self, _get_style_id_from_name_):
        style_type = 22
        _get_style_id_from_name_.return_value = 'StyleId'
        styles = Styles(None)
        style_id = styles.get_style_id('Style Name', style_type)
        _get_style_id_from_name_.assert_called_once_with(styles, 'Style Name', style_type)
        assert (style_id == 'StyleId')
    def but_it_returns_None_for_a_style_or_name_of_None(self):
        styles = Styles(None)
        style_id = styles.get_style_id(None, style_type=22)
        assert (style_id is None)
    def it_gets_a_style_by_id_to_help(self, _get_by_id_fixture):
        (styles, style_id, style_type, default_calls) = _get_by_id_fixture[:4]
        (StyleFactory_, StyleFactory_calls, style_) = _get_by_id_fixture[4:]
        style = styles._get_by_id(style_id, style_type)
        assert (styles.default.call_args_list == default_calls)
        assert (StyleFactory_.call_args_list == StyleFactory_calls)
        assert (style is style_)
    def it_gets_a_style_id_from_a_name_to_help(self, _getitem_, _get_style_id_from_style_, style_):
        (style_name, style_type, style_id_) = ('Foo Bar', 1, 'FooBar')
        _getitem_.return_value = style_
        _get_style_id_from_style_.return_value = style_id_
        styles = Styles(None)
        style_id = styles._get_style_id_from_name(style_name, style_type)
        styles.__getitem__.assert_called_once_with(styles, style_name)
        _get_style_id_from_style_.assert_called_once_with(styles, style_, style_type)
        assert (style_id is style_id_)
    def it_gets_a_style_id_from_a_style_to_help(self, id_style_fixture):
        (styles, style_, style_type, style_id_) = id_style_fixture
        style_id = styles._get_style_id_from_style(style_, style_type)
        styles.default.assert_called_once_with(styles, style_type)
        assert (style_id is style_id_)
    def it_raises_on_style_type_mismatch(self, id_style_raises_fixture):
        (styles, style_, style_type) = id_style_raises_fixture
        with pytest.raises(ValueError, match='assigned style is type 1, need type 2'):
            styles._get_style_id_from_style(style_, style_type)
    def it_provides_access_to_the_latent_styles(self, latent_styles_fixture):
        (styles, LatentStyles_, latent_styles_) = latent_styles_fixture
        latent_styles = styles.latent_styles
        LatentStyles_.assert_called_once_with(styles._element.latentStyles)
        assert (latent_styles is latent_styles_)
    # ---- fixtures -----------------------------------------------------
    # NOTE(review): the bare "(params=[...])" lines below are mangled
    # '@pytest.fixture(params=...)' decorators.
    (params=[('Foo Bar', 'Foo Bar', WD_STYLE_TYPE.CHARACTER, False), ('Heading 1', 'heading 1', WD_STYLE_TYPE.PARAGRAPH, True)])
    def add_fixture(self, request, styles_elm_, _getitem_, style_elm_, StyleFactory_, style_):
        (name, name_, style_type, builtin) = request.param
        styles = Styles(styles_elm_)
        _getitem_.return_value = None
        styles_elm_.add_style_of_type.return_value = style_elm_
        StyleFactory_.return_value = style_
        return (styles, name, style_type, builtin, name_, StyleFactory_, style_elm_, style_)
    def add_raises_fixture(self, _getitem_):
        styles = Styles(element('w:styles/w:style/w:name{w:val=heading 1}'))
        name = 'Heading 1'
        return (styles, name)
    (params=[('w:styles', False, WD_STYLE_TYPE.CHARACTER), ('w:styles/w:style{w:type=paragraph,w:default=1}', True, WD_STYLE_TYPE.PARAGRAPH), ('w:styles/(w:style{w:type=table,w:default=1},w:style{w:type=table,w:default=1})', True, WD_STYLE_TYPE.TABLE)])
    def default_fixture(self, request, StyleFactory_, style_):
        (styles_cxml, is_defined, style_type) = request.param
        styles_elm = element(styles_cxml)
        styles = Styles(styles_elm)
        StyleFactory_calls = ([call(styles_elm[(- 1)])] if is_defined else [])
        StyleFactory_.return_value = style_
        expected_value = (style_ if is_defined else None)
        return (styles, style_type, StyleFactory_, StyleFactory_calls, expected_value)
    (params=[('w:styles/w:style{w:type=paragraph,w:styleId=Foo}', 'Foo', WD_STYLE_TYPE.PARAGRAPH), ('w:styles/w:style{w:type=paragraph,w:styleId=Foo}', 'Bar', WD_STYLE_TYPE.PARAGRAPH), ('w:styles/w:style{w:type=table,w:styleId=Bar}', 'Bar', WD_STYLE_TYPE.PARAGRAPH)])
    def _get_by_id_fixture(self, request, default_, StyleFactory_, style_):
        (styles_cxml, style_id, style_type) = request.param
        styles_elm = element(styles_cxml)
        style_elm = styles_elm[0]
        styles = Styles(styles_elm)
        default_calls = ([] if (style_id == 'Foo') else [call(styles, style_type)])
        StyleFactory_calls = ([call(style_elm)] if (style_id == 'Foo') else [])
        default_.return_value = StyleFactory_.return_value = style_
        return (styles, style_id, style_type, default_calls, StyleFactory_, StyleFactory_calls, style_)
    (params=[('w:styles/(w:style{%s,w:styleId=Foobar},w:style,w:style)', 0), ('w:styles/(w:style,w:style{%s,w:styleId=Foobar},w:style)', 1), ('w:styles/(w:style,w:style,w:style{%s,w:styleId=Foobar})', 2)])
    def getitem_id_fixture(self, request):
        (styles_cxml_tmpl, style_idx) = request.param
        styles_cxml = (styles_cxml_tmpl % 'w:type=paragraph')
        styles = Styles(element(styles_cxml))
        expected_element = styles._element[style_idx]
        return (styles, 'Foobar', expected_element)
    (params=[('w:styles/(w:style%s/w:name{w:val=foo},w:style)', 'foo', 0), ('w:styles/(w:style,w:style%s/w:name{w:val=foo})', 'foo', 1), ('w:styles/w:style%s/w:name{w:val=heading 1}', 'Heading 1', 0)])
    def getitem_name_fixture(self, request):
        (styles_cxml_tmpl, key, style_idx) = request.param
        styles_cxml = (styles_cxml_tmpl % '{w:type=character}')
        styles = Styles(element(styles_cxml))
        expected_element = styles._element[style_idx]
        return (styles, key, expected_element)
    (params=['w:styles/(w:style,w:style/w:name{w:val=foo},w:style)', 'w:styles/(w:style{w:styleId=foo},w:style,w:style)'])
    def get_raises_fixture(self, request):
        styles_cxml = request.param
        styles = Styles(element(styles_cxml))
        return (styles, 'bar')
    (params=[True, False])
    def id_style_fixture(self, request, default_, style_):
        style_is_default = request.param
        styles = Styles(None)
        (style_id, style_type) = ('FooBar', 1)
        default_.return_value = (style_ if style_is_default else None)
        (style_.style_id, style_.type) = (style_id, style_type)
        expected_value = (None if style_is_default else style_id)
        return (styles, style_, style_type, expected_value)
    def id_style_raises_fixture(self, style_):
        styles = Styles(None)
        style_.type = 1
        style_type = 2
        return (styles, style_, style_type)
    (params=[('w:styles/w:style/w:name{w:val=heading 1}', 'Heading 1', True), ('w:styles/w:style/w:name{w:val=Foo Bar}', 'Foo Bar', True), ('w:styles/w:style/w:name{w:val=heading 1}', 'Foobar', False), ('w:styles', 'Foobar', False)])
    def in_fixture(self, request):
        (styles_cxml, name, expected_value) = request.param
        styles = Styles(element(styles_cxml))
        return (styles, name, expected_value)
    (params=[('w:styles', 0), ('w:styles/w:style', 1), ('w:styles/(w:style,w:style)', 2), ('w:styles/(w:style,w:style,w:style)', 3)])
    def iter_fixture(self, request, StyleFactory_, style_):
        (styles_cxml, expected_count) = request.param
        styles_elm = element(styles_cxml)
        styles = Styles(styles_elm)
        expected_calls = [call(style_elm) for style_elm in styles_elm]
        StyleFactory_.return_value = style_
        return (styles, expected_count, style_, StyleFactory_, expected_calls)
    def latent_styles_fixture(self, LatentStyles_, latent_styles_):
        styles = Styles(element('w:styles/w:latentStyles'))
        return (styles, LatentStyles_, latent_styles_)
    (params=[('w:styles', 0), ('w:styles/w:style', 1), ('w:styles/(w:style,w:style)', 2), ('w:styles/(w:style,w:style,w:style)', 3)])
    def len_fixture(self, request):
        (styles_cxml, expected_value) = request.param
        styles = Styles(element(styles_cxml))
        return (styles, expected_value)
    # ---- fixture components (mock factories) --------------------------
    def default_(self, request):
        return method_mock(request, Styles, 'default')
    def _get_by_id_(self, request):
        return method_mock(request, Styles, '_get_by_id')
    def _getitem_(self, request):
        return method_mock(request, Styles, '__getitem__')
    def _get_style_id_from_name_(self, request):
        return method_mock(request, Styles, '_get_style_id_from_name')
    def _get_style_id_from_style_(self, request):
        return method_mock(request, Styles, '_get_style_id_from_style')
    def LatentStyles_(self, request, latent_styles_):
        return class_mock(request, 'docx.styles.styles.LatentStyles', return_value=latent_styles_)
    def latent_styles_(self, request):
        return instance_mock(request, LatentStyles)
    def style_(self, request):
        return instance_mock(request, BaseStyle)
    def StyleFactory_(self, request):
        return function_mock(request, 'docx.styles.styles.StyleFactory')
    def style_elm_(self, request):
        return instance_mock(request, CT_Style)
    def styles_elm_(self, request):
        return instance_mock(request, CT_Styles)
def mobilenet_v1_arg_scope(is_training=True, stddev=0.09):
    """Build the TF-slim arg_scope used for MobileNet-v1 backbones.

    Batch norm is kept frozen in inference mode regardless of `is_training`;
    only the convolution weights are trainable. Depthwise convolutions are
    L2-regularized only when ``cfg.MOBILENET.REGU_DEPTH`` is set.

    Args:
        is_training: Whether conv/separable-conv weights are trainable.
        stddev: Stddev of the truncated-normal weight initializer.

    Returns:
        The innermost ``slim.arg_scope`` configured with all the defaults.
    """
    # Frozen batch-norm parameters shared by all normalized layers.
    batch_norm_params = {
        'is_training': False,
        'center': True,
        'scale': True,
        'decay': 0.9997,
        'epsilon': 0.001,
        'trainable': False,
    }
    weights_init = tf.truncated_normal_initializer(stddev=stddev)
    regularizer = tf.contrib.layers.l2_regularizer(cfg.MOBILENET.WEIGHT_DECAY)
    depthwise_regularizer = regularizer if cfg.MOBILENET.REGU_DEPTH else None
    with slim.arg_scope([slim.conv2d, slim.separable_conv2d], trainable=is_training, weights_initializer=weights_init, activation_fn=tf.nn.relu6, normalizer_fn=slim.batch_norm, padding='SAME'):
        with slim.arg_scope([slim.batch_norm], **batch_norm_params):
            with slim.arg_scope([slim.conv2d], weights_regularizer=regularizer):
                with slim.arg_scope([slim.separable_conv2d], weights_regularizer=depthwise_regularizer) as sc:
                    return sc
class TestAcoustics():
    """Benchmarks of cusignal acoustics routines vs. their CPU references.

    Each nested class benchmarks one routine (CPU via ``benchmark``, GPU via
    ``gpubenchmark``) and checks the GPU output against the CPU reference
    with ``array_equal``.

    NOTE(review): the bare ``.benchmark(...)``, ``.parametrize(...)`` and
    ``.cpu`` lines are mangled ``@pytest.mark.*`` decorators — this block is
    not valid Python until they are restored from the original cusignal
    benchmark suite.
    """
    .benchmark(group='ComplexCepstrum')
    .parametrize('num_samps', [(2 ** 8), (2 ** 14)])
    .parametrize('n', [123, 256])
    class TestComplexCepstrum():
        """complex_cepstrum: CPU reference vs. cusignal GPU implementation."""
        def cpu_version(self, sig, n):
            return complex_cepstrum(sig, n)
        def gpu_version(self, sig, n):
            # Run on the null stream and synchronize so timing is accurate.
            with cp.cuda.Stream.null:
                out = cusignal.complex_cepstrum(sig, n)
            cp.cuda.Stream.null.synchronize()
            return out
        .cpu
        def test_complex_cepstrum_cpu(self, rand_data_gen, benchmark, num_samps, n):
            (cpu_sig, _) = rand_data_gen(num_samps)
            benchmark(self.cpu_version, cpu_sig, n)
        def test_complex_cepstrum_gpu(self, rand_data_gen, gpubenchmark, num_samps, n):
            (cpu_sig, gpu_sig) = rand_data_gen(num_samps)
            output = gpubenchmark(self.gpu_version, gpu_sig, n)
            key = self.cpu_version(cpu_sig, n)
            array_equal(output, key)
    .benchmark(group='RealCepstrum')
    .parametrize('num_samps', [(2 ** 8), (2 ** 14)])
    .parametrize('n', [123, 256])
    class TestRealCepstrum():
        """real_cepstrum: CPU reference vs. cusignal GPU implementation."""
        def cpu_version(self, sig, n):
            return real_cepstrum(sig, n)
        def gpu_version(self, sig, n):
            with cp.cuda.Stream.null:
                out = cusignal.real_cepstrum(sig, n)
            cp.cuda.Stream.null.synchronize()
            return out
        .cpu
        def test_real_cepstrum_cpu(self, rand_data_gen, benchmark, num_samps, n):
            (cpu_sig, _) = rand_data_gen(num_samps)
            benchmark(self.cpu_version, cpu_sig, n)
        def test_real_cepstrum_gpu(self, rand_data_gen, gpubenchmark, num_samps, n):
            (cpu_sig, gpu_sig) = rand_data_gen(num_samps)
            output = gpubenchmark(self.gpu_version, gpu_sig, n)
            key = self.cpu_version(cpu_sig, n)
            array_equal(output, key)
    .benchmark(group='InverseComplexCepstrum')
    .parametrize('num_samps', [(2 ** 10)])
    .parametrize('n', [123, 256])
    class TestInverseComplexCepstrum():
        """inverse_complex_cepstrum: CPU reference vs. cusignal GPU implementation."""
        def cpu_version(self, sig, n):
            return inverse_complex_cepstrum(sig, n)
        def gpu_version(self, sig, n):
            with cp.cuda.Stream.null:
                out = cusignal.inverse_complex_cepstrum(sig, n)
            cp.cuda.Stream.null.synchronize()
            return out
        .cpu
        def test_inverse_complex_cepstrum_cpu(self, rand_data_gen, benchmark, num_samps, n):
            (cpu_sig, _) = rand_data_gen(num_samps)
            benchmark(self.cpu_version, cpu_sig, n)
        def test_inverse_complex_cepstrum_gpu(self, rand_data_gen, gpubenchmark, num_samps, n):
            (cpu_sig, gpu_sig) = rand_data_gen(num_samps)
            output = gpubenchmark(self.gpu_version, gpu_sig, n)
            key = self.cpu_version(cpu_sig, n)
            array_equal(output, key)
    .benchmark(group='MinimumPhase')
    .parametrize('num_samps', [(2 ** 8), (2 ** 14)])
    .parametrize('n', [123, 256])
    class TestMinimumPhase():
        """minimum_phase: CPU reference vs. cusignal GPU implementation."""
        def cpu_version(self, sig, n):
            return minimum_phase(sig, n)
        def gpu_version(self, sig, n):
            with cp.cuda.Stream.null:
                out = cusignal.minimum_phase(sig, n)
            cp.cuda.Stream.null.synchronize()
            return out
        .cpu
        def test_minimum_phase_cpu(self, rand_data_gen, benchmark, num_samps, n):
            (cpu_sig, _) = rand_data_gen(num_samps)
            benchmark(self.cpu_version, cpu_sig, n)
        def test_minimum_phase_gpu(self, rand_data_gen, gpubenchmark, num_samps, n):
            (cpu_sig, gpu_sig) = rand_data_gen(num_samps)
            output = gpubenchmark(self.gpu_version, gpu_sig, n)
            key = self.cpu_version(cpu_sig, n)
            array_equal(output, key)
def map_errors_and_warnings(objs, error_container=code_to_error, warning_container=code_to_warning):
    """Register exception/warning classes in the code->class lookup tables.

    Scans *objs* for classes deriving from ``Error`` or ``Warning`` that
    carry a ``code`` attribute and records them in the matching container,
    keyed by ``code`` (and additionally by ``pg_code`` when present).
    """
    for obj in objs:
        # Only consider actual classes (instances of the Error/Warning
        # metaclasses); skip plain objects.
        if not issubclass(type(obj), (type(Warning), type(Error))):
            continue
        code = getattr(obj, 'code', None)
        if code is None:
            continue
        if issubclass(obj, Error):
            container = error_container
        elif issubclass(obj, Warning):
            container = warning_container
        else:
            continue
        cur_obj = container.get(code)
        # Overwrite only when the slot is empty or currently holds obj
        # itself / a subclass of obj — i.e. the most general class for a
        # given code ends up registered.
        if (cur_obj is None) or issubclass(cur_obj, obj):
            container[code] = obj
        if hasattr(obj, 'pg_code'):
            # Alias under the Postgres-specific code as well.
            container[obj.pg_code] = obj
def validate(val_loader, model, criterion, epoch, args):
    """Run one evaluation pass over `val_loader`.

    Updates loss/accuracy meters, optionally logs to TensorBoard and appends
    to the history lists on `args`. Returns the average top-1 accuracy.
    """
    batch_time = AverageMeter('Time', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('', ':6.2f')
    top5 = AverageMeter('', ':6.2f')
    progress = ProgressMeter(len(val_loader), batch_time, losses, top1, top5, prefix='Test: ')

    model.eval()  # inference mode: no dropout, running BN stats
    with torch.no_grad():
        tic = time.time()
        for step, (images, labels) in enumerate(val_loader):
            # Short-circuit for quick smoke-test runs.
            if args.test_run and step > args.test_iter:
                break
            if args.gpu is not None:
                images = images.cuda(args.gpu, non_blocking=True)
                labels = labels.cuda(args.gpu, non_blocking=True)

            logits = model(images)
            loss = criterion(logits, labels)

            acc1, acc5 = accuracy(logits, labels, topk=(1, 5))
            batch = images.size(0)
            losses.update(loss.item(), batch)
            top1.update(acc1[0], batch)
            top5.update(acc5[0], batch)

            # Wall-clock time per batch.
            batch_time.update(time.time() - tic)
            tic = time.time()

            if step % args.print_freq == 0:
                progress.print(step)

        print(' * {top1.avg:.3f} {top5.avg:.3f}'.format(top1=top1, top5=top5))

    if args.tensorboard:
        args.writer.add_scalar('Loss/Val', losses.avg, epoch + 1)
        args.writer.add_scalar('Prec/Val1', top1.avg, epoch + 1)
        args.writer.add_scalar('Prec/Val5', top5.avg, epoch + 1)
    args.eval_losses.append(losses.avg)
    args.eval_top1.append(top1.avg)
    args.eval_top5.append(top5.avg)
    return top1.avg
class CLIP(nn.Module):
    """Dual-encoder CLIP model: a vision tower plus a text transformer.

    `forward` returns L2-normalized image and text embeddings together with
    the exponentiated learnable logit scale used for contrastive training.
    """

    def __init__(self, embed_dim: int, vision_cfg: CLIPVisionCfg, text_cfg: CLIPTextCfg, quick_gelu: bool=False, cast_dtype: Optional[torch.dtype]=None):
        super().__init__()
        self.visual = _build_vision_tower(embed_dim, vision_cfg, quick_gelu, cast_dtype)
        # Flatten the text tower's submodules onto `self` so state_dict keys
        # keep the historical flat layout.
        text = _build_text_tower(embed_dim, text_cfg, quick_gelu, cast_dtype)
        self.transformer = text.transformer
        self.embed_dim = embed_dim
        self.vocab_size = text.vocab_size
        self.token_embedding = text.token_embedding
        self.positional_embedding = text.positional_embedding
        self.ln_final = text.ln_final
        self.text_projection = text.text_projection
        # Attention mask is deterministic, so it is not persisted.
        self.register_buffer('attn_mask', text.attn_mask, persistent=False)
        # Temperature initialized to log(1/0.07) as in the CLIP paper.
        self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))

    def lock_image_tower(self, unlocked_groups=0, freeze_bn_stats=False):
        """Freeze the vision tower, leaving the last `unlocked_groups` groups trainable."""
        self.visual.lock(unlocked_groups=unlocked_groups, freeze_bn_stats=freeze_bn_stats)

    @torch.jit.ignore
    def set_grad_checkpointing(self, enable=True):
        """Toggle gradient checkpointing on both towers."""
        self.visual.set_grad_checkpointing(enable)
        self.transformer.grad_checkpointing = enable

    @torch.jit.ignore
    def no_weight_decay(self):
        # The logit scale is a temperature, not a weight matrix.
        return {'logit_scale'}

    def encode_image(self, image, normalize: bool=False):
        features = self.visual(image)
        return F.normalize(features, dim=-1) if normalize else features

    def encode_text(self, text, normalize: bool=False):
        cast_dtype = self.transformer.get_cast_dtype()
        x = self.token_embedding(text).to(cast_dtype)
        x = x + self.positional_embedding.to(cast_dtype)
        x = x.permute(1, 0, 2)  # NLD -> LND
        x = self.transformer(x, attn_mask=self.attn_mask)
        x = x.permute(1, 0, 2)  # LND -> NLD
        x = self.ln_final(x)
        # Pool at the position of the largest token id in each sequence and
        # project into the joint embedding space (matmul with text_projection).
        x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection
        return F.normalize(x, dim=-1) if normalize else x

    def forward(self, image, text):
        image_features = self.encode_image(image, normalize=True)
        text_features = self.encode_text(text, normalize=True)
        return (image_features, text_features, self.logit_scale.exp())
def random_neuron_single_bit_inj_batched(pfi: core.FaultInjection, layer_ranges, batch_random=True):
    """Declare a single-bit-flip neuron fault for every element of the batch.

    With `batch_random` each batch element gets an independently sampled
    (layer, c, h, w) location; otherwise one sampled location is shared by
    the entire batch.
    """
    pfi.set_conv_max(layer_ranges)
    if batch_random:
        locations = [random_neuron_location(pfi) for _ in range(pfi.batch_size)]
    else:
        locations = [random_neuron_location(pfi)] * pfi.batch_size
    layers, chans, heights, widths = map(list, zip(*locations))
    return pfi.declare_neuron_fault_injection(
        batch=range(pfi.batch_size),
        layer_num=layers,
        dim1=chans,
        dim2=heights,
        dim3=widths,
        function=pfi.single_bit_flip_signed_across_batch,
    )
class PlaybackTimer():
    """Stopwatch-style timer supporting start, pause, reset and seek."""

    def __init__(self) -> None:
        # Seconds accumulated from previously completed run segments.
        self._elapsed = 0.0
        # perf_counter() stamp of the running segment, or None while paused.
        self._started_at = None

    def start(self) -> None:
        """Begin (or resume) timing; no-op if already running."""
        if self._started_at is not None:
            return
        self._started_at = time.perf_counter()

    def pause(self) -> None:
        """Stop the clock, folding the running segment into the total."""
        self._elapsed = self.get_time()
        self._started_at = None

    def reset(self) -> None:
        """Zero the elapsed time; keeps ticking if currently running."""
        self._elapsed = 0.0
        if self._started_at is not None:
            self._started_at = time.perf_counter()

    def get_time(self) -> float:
        """Return the current position in seconds."""
        if self._started_at is None:
            return self._elapsed
        return (time.perf_counter() - self._started_at) + self._elapsed

    def set_time(self, value: float) -> None:
        """Seek to `value` seconds (restarting the running segment if active)."""
        self.reset()
        self._elapsed = value
def init_detector(config, checkpoint=None, device='cuda:0', cfg_options=None):
    """Build a detector from a config file/object and optionally load weights.

    Returns the model in eval mode on `device`, with `model.cfg` and
    `model.CLASSES` populated (COCO classes as fallback).
    """
    if isinstance(config, str):
        config = mmcv.Config.fromfile(config)
    elif not isinstance(config, mmcv.Config):
        raise TypeError(f'config must be a filename or Config object, but got {type(config)}')
    if cfg_options is not None:
        config.merge_from_dict(cfg_options)
    if config.model.get('pretrained'):
        # Weights come from `checkpoint` instead of the pretrained URL.
        config.model.pretrained = None
    config.model.train_cfg = None
    model = build_detector(config.model, test_cfg=config.get('test_cfg'))
    if checkpoint is not None:
        ckpt = load_checkpoint(model, checkpoint, map_location='cpu')
        meta = ckpt.get('meta', {})
        if 'CLASSES' in meta:
            model.CLASSES = meta['CLASSES']
        else:
            warnings.simplefilter('once')
            warnings.warn("Class names are not saved in the checkpoint's meta data, use COCO classes by default.")
            model.CLASSES = get_classes('coco')
    # Keep the config around for inference helpers.
    model.cfg = config
    model.to(device)
    model.eval()
    return model
class TestSequenceGetItem(TestNameCheckVisitorBase):
    """Inference tests for list/tuple subscripting (indexing and slicing)."""

    @assert_passes()
    def test_list(self):
        from typing import List

        def capybara(lst: List[int], i: int, s: slice, unannotated) -> None:
            # Annotated homogeneous list: element type for indexes,
            # list-of-element for slices.
            assert_is_value(lst[0], TypedValue(int))
            assert_is_value(lst[(- 1)], TypedValue(int))
            assert_is_value(lst[:1], GenericValue(list, [TypedValue(int)]))
            assert_is_value(lst[i], TypedValue(int))
            assert_is_value(lst[s], GenericValue(list, [TypedValue(int)]))
            assert_is_value(lst[unannotated], AnyValue(AnySource.from_another))
            # Empty literal: indexing is unreachable, slicing stays empty.
            empty = []
            assert_is_value(empty[0], AnyValue(AnySource.unreachable))
            assert_is_value(empty[1:], KnownValue([]))
            assert_is_value(empty[i], AnyValue(AnySource.unreachable))
            assert_is_value(empty[s], SequenceValue(list, []))
            assert_is_value(empty[unannotated], AnyValue(AnySource.from_another))
            # Known literal elements: exact values where the index is known,
            # unions where it is not.
            known = [1, 2]
            assert_is_value(known[0], KnownValue(1))
            assert_is_value(known[(- 1)], KnownValue(2))
            assert_is_value(known[(- 5)], (KnownValue(1) | KnownValue(2)))
            assert_is_value(known[1:], KnownValue([2]))
            assert_is_value(known[::(- 1)], KnownValue([2, 1]))
            assert_is_value(known[i], (KnownValue(1) | KnownValue(2)))
            assert_is_value(known[s], make_simple_sequence(list, [KnownValue(1), KnownValue(2)]))
            assert_is_value(known[unannotated], AnyValue(AnySource.from_another))

    @assert_passes()
    def test_tuple(self):
        from typing import Tuple

        def capybara(tpl: Tuple[(int, ...)], i: int, s: slice, unannotated) -> None:
            assert_is_value(tpl[0], TypedValue(int))
            assert_is_value(tpl[(- 1)], TypedValue(int))
            assert_is_value(tpl[:1], GenericValue(tuple, [TypedValue(int)]))
            assert_is_value(tpl[:], GenericValue(tuple, [TypedValue(int)]))
            assert_is_value(tpl[i], TypedValue(int))
            assert_is_value(tpl[s], GenericValue(tuple, [TypedValue(int)]))
            assert_is_value(tpl[unannotated], AnyValue(AnySource.from_another))
            # Out-of-range indexing on an empty tuple is an error (unlike the
            # list case, which is merely unreachable).
            empty = ()
            assert_is_value(empty[0], AnyValue(AnySource.error))
            assert_is_value(empty[1:], KnownValue(()))
            assert_is_value(empty[i], AnyValue(AnySource.unreachable))
            assert_is_value(empty[s], SequenceValue(tuple, []))
            assert_is_value(empty[unannotated], AnyValue(AnySource.from_another))
            known = (1, 2)
            assert_is_value(known[0], KnownValue(1))
            assert_is_value(known[(- 1)], KnownValue(2))
            assert_is_value(known[(- 5)], AnyValue(AnySource.error))
            assert_is_value(known[1:], KnownValue((2,)))
            assert_is_value(known[::(- 1)], KnownValue((2, 1)))
            assert_is_value(known[i], (KnownValue(1) | KnownValue(2)))
            assert_is_value(known[s], make_simple_sequence(tuple, [KnownValue(1), KnownValue(2)]))
            assert_is_value(known[unannotated], AnyValue(AnySource.from_another))

    @assert_passes()
    def test_list_index(self):
        def capybara(x):
            lst = ['a', 'b', int(x)]
            assert_is_value(lst[0], KnownValue('a'))
            assert_is_value(lst[2], TypedValue(int))
            assert_is_value(lst[(- 2)], KnownValue('b'))
            # Out-of-range index on a list widens to a union of all elements.
            assert_is_value(lst[5], ((KnownValue('a') | KnownValue('b')) | TypedValue(int)))

    @assert_passes()
    def test_tuple_index(self):
        def capybara(x):
            tpl = ('a', 'b', int(x))
            assert_is_value(tpl[0], KnownValue('a'))
            assert_is_value(tpl[2], TypedValue(int))
            assert_is_value(tpl[(- 2)], KnownValue('b'))
            # Out-of-range index on a tuple is an error.
            assert_is_value(tpl[5], AnyValue(AnySource.error))

    @assert_passes()
    def test_tuple_annotation(self):
        from typing import Tuple

        def capybara(tpl: Tuple[(int, str, float)]) -> None:
            assert_is_value(tpl[0], TypedValue(int))
            assert_is_value(tpl[(- 2)], TypedValue(str))
            assert_is_value(tpl[2], TypedValue(float))

    @assert_passes()
    def test_list_in_lambda(self):
        from typing import List

        def capybara(words: List[str]):
            # Subscripting inside a lambda body must not crash inference.
            sorted_indexes = sorted(range(len(words)), key=(lambda i: words[i]))
            return sorted_indexes

    @assert_passes()
    def test_subclasses(self):
        import time

        class MyList(list):
            pass

        class MyTuple(tuple):
            pass

        def capybara(t: time.struct_time, ml: MyList, mt: MyTuple):
            assert_is_value(t[0], TypedValue(int))
            assert_is_value(t[:], TypedValue(tuple))
            assert_is_value(t[:6], TypedValue(tuple))
            assert_is_value(ml[0], AnyValue(AnySource.generic_argument))
            assert_is_value(ml[:], TypedValue(list))
            assert_is_value(mt[0], AnyValue(AnySource.generic_argument))
            assert_is_value(mt[:], TypedValue(tuple))
def mpi_mean(x, axis=0, comm=None, keepdims=False):
    """Mean of `x` along `axis` aggregated across all MPI ranks.

    Returns a tuple `(mean, count)` where `count` is the total number of
    elements along `axis` summed over every rank.
    """
    x = np.asarray(x)
    assert x.ndim > 0
    if comm is None:
        comm = MPI.COMM_WORLD
    local_sum = x.sum(axis=axis, keepdims=keepdims)
    size = local_sum.size
    # Pack the per-rank sum and the element count into one buffer so a
    # single Allreduce covers both.
    send = np.zeros(size + 1, x.dtype)
    send[:size] = local_sum.ravel()
    send[size] = x.shape[axis]
    recv = np.zeros_like(send)
    comm.Allreduce(send, recv, op=MPI.SUM)
    return (recv[:size].reshape(local_sum.shape) / recv[size], recv[size])
def compress_session(sess, compressible_ops):
    """Apply manual spatial-SVD compression (ratio 0.5) to the first op in
    `compressible_ops` and return the compressed session."""
    target_op = sess.graph.get_operation_by_name(compressible_ops[0])
    ratio_pairs = [ModuleCompRatioPair(target_op, 0.5)]
    manual_params = SpatialSvdParameters.ManualModeParams(list_of_module_comp_ratio_pairs=ratio_pairs)
    params = SpatialSvdParameters(input_op_names=['input_1'], output_op_names=['act_softmax/Softmax'], mode=SpatialSvdParameters.Mode.manual, params=manual_params)

    def evaluate(sess, iterations, use_cuda):
        # Dummy evaluation callback: compression is ratio-driven here, so
        # the returned score is irrelevant.
        return 1

    compressed_sess, _ = ModelCompressor.compress_model(
        sess=sess,
        working_dir='./',
        eval_callback=evaluate,
        eval_iterations=None,
        input_shape=(1, 3, 224, 224),
        compress_scheme=CompressionScheme.spatial_svd,
        cost_metric=CostMetric.mac,
        parameters=params,
    )
    return compressed_sess
def bbc_prepDefaultLex(outFile):
    """Copy the default-lexicon block out of the BBC SPEECH_DISK image into `outFile`.

    No-op unless the MAKE_SPEECH_ROM environment variable is set; reads the
    disk image path from the SPEECH_DISK environment variable.
    """
    if not os.environ.get('MAKE_SPEECH_ROM', 0):
        return
    # Use a context manager so the disk-image handle is always closed
    # (previously it was leaked).
    with open(os.environ['SPEECH_DISK']) as sd:
        d = getBuf(sd).read()
    # Locate the lexicon's start signature, then scan forward to the
    # '>OUS_' marker that terminates it.
    i = d.index((((((as_utf8('LO') + chr(128)) + as_utf8('LP')) + chr(128)) + chr(130)) + chr(17)))
    j = d.index(as_utf8('>OUS_'), i)
    # The default lexicon has a fixed size on an unmodified disk image.
    assert (j - i) == 5763, 'Is this really an original disk image?'
    getBuf(outFile).write(d[i:j])
def default_hp_space_wandb(trial) -> Dict[(str, float)]:
    """Default wandb sweep configuration for hyperparameter search.

    Raises ImportError when the `wandb` package is not installed.
    """
    from .integrations import is_wandb_available

    if not is_wandb_available():
        raise ImportError('This function needs wandb installed: `pip install wandb`')
    return {
        'method': 'random',
        'metric': {'name': 'objective', 'goal': 'minimize'},
        'parameters': {
            'learning_rate': {'distribution': 'uniform', 'min': 1e-06, 'max': 0.0001},
            'num_train_epochs': {'distribution': 'int_uniform', 'min': 1, 'max': 6},
            'seed': {'distribution': 'int_uniform', 'min': 1, 'max': 40},
            'per_device_train_batch_size': {'values': [4, 8, 16, 32, 64]},
        },
    }
class TARGET_LSTM(object):
    """Fixed "oracle" LSTM generator (SeqGAN-style target network).

    All weights are loaded from the pre-generated `params` list, so the
    network defines a fixed target distribution: `gen_x` samples sequences
    from it, and `pretrain_loss` / `out_loss` score how well input
    sequences `x` match that distribution.
    """

    def __init__(self, config, params):
        self.num_emb = config.num_emb
        self.batch_size = config.gen_batch_size
        self.emb_dim = config.emb_dim
        self.hidden_dim = config.hidden_dim
        self.sequence_length = config.sequence_length
        # Every generated sequence starts from the same fixed start token.
        self.start_token = tf.constant(([config.start_token] * self.batch_size), dtype=tf.int32)
        self.g_params = []
        self.temperature = 1.0
        self.params = params
        # Fixed seed keeps the oracle's sampling reproducible.
        tf.set_random_seed(66)
        with tf.variable_scope('generator'):
            self.g_embeddings = tf.Variable(self.params[0])
            self.g_params.append(self.g_embeddings)
            self.g_recurrent_unit = self.create_recurrent_unit(self.g_params)
            self.g_output_unit = self.create_output_unit(self.g_params)
        # Sequences to score: [batch_size, sequence_length] token ids.
        self.x = tf.placeholder(tf.int32, shape=[self.batch_size, self.sequence_length])
        # Embed and transpose to time-major [seq_len, batch, emb_dim].
        self.processed_x = tf.transpose(tf.nn.embedding_lookup(self.g_embeddings, self.x), perm=[1, 0, 2])
        self.h0 = tf.zeros([self.batch_size, self.hidden_dim])
        # LSTM state is carried as a stacked (hidden, cell) pair.
        self.h0 = tf.stack([self.h0, self.h0])
        gen_o = tensor_array_ops.TensorArray(dtype=tf.float32, size=self.sequence_length, dynamic_size=False, infer_shape=True)
        gen_x = tensor_array_ops.TensorArray(dtype=tf.int32, size=self.sequence_length, dynamic_size=False, infer_shape=True)

        def _g_recurrence(i, x_t, h_tm1, gen_o, gen_x):
            # One free-running step: advance the LSTM and sample the next
            # token from the softmax over output logits.
            h_t = self.g_recurrent_unit(x_t, h_tm1)
            o_t = self.g_output_unit(h_t)
            log_prob = tf.log(tf.nn.softmax(o_t))
            next_token = tf.cast(tf.reshape(tf.multinomial(log_prob, 1), [self.batch_size]), tf.int32)
            x_tp1 = tf.nn.embedding_lookup(self.g_embeddings, next_token)
            # Record the probability the model assigned to the sampled token.
            gen_o = gen_o.write(i, tf.reduce_sum(tf.multiply(tf.one_hot(next_token, self.num_emb, 1.0, 0.0), tf.nn.softmax(o_t)), 1))
            gen_x = gen_x.write(i, next_token)
            return ((i + 1), x_tp1, h_t, gen_o, gen_x)
        (_, _, _, self.gen_o, self.gen_x) = control_flow_ops.while_loop(cond=(lambda i, _1, _2, _3, _4: (i < self.sequence_length)), body=_g_recurrence, loop_vars=(tf.constant(0, dtype=tf.int32), tf.nn.embedding_lookup(self.g_embeddings, self.start_token), self.h0, gen_o, gen_x))
        # Back to batch-major [batch_size, sequence_length].
        self.gen_x = self.gen_x.stack()
        self.gen_x = tf.transpose(self.gen_x, perm=[1, 0])
        g_predictions = tensor_array_ops.TensorArray(dtype=tf.float32, size=self.sequence_length, dynamic_size=False, infer_shape=True)
        ta_emb_x = tensor_array_ops.TensorArray(dtype=tf.float32, size=self.sequence_length)
        ta_emb_x = ta_emb_x.unstack(self.processed_x)

        def _pretrain_recurrence(i, x_t, h_tm1, g_predictions):
            # Teacher-forced step: feed the ground-truth embedding at each
            # position and record the softmax prediction.
            h_t = self.g_recurrent_unit(x_t, h_tm1)
            o_t = self.g_output_unit(h_t)
            g_predictions = g_predictions.write(i, tf.nn.softmax(o_t))
            x_tp1 = ta_emb_x.read(i)
            return ((i + 1), x_tp1, h_t, g_predictions)
        (_, _, _, self.g_predictions) = control_flow_ops.while_loop(cond=(lambda i, _1, _2, _3: (i < self.sequence_length)), body=_pretrain_recurrence, loop_vars=(tf.constant(0, dtype=tf.int32), tf.nn.embedding_lookup(self.g_embeddings, self.start_token), self.h0, g_predictions))
        self.g_predictions = tf.transpose(self.g_predictions.stack(), perm=[1, 0, 2])
        # Mean per-token cross-entropy of self.x under the oracle.
        self.pretrain_loss = ((- tf.reduce_sum((tf.one_hot(tf.to_int32(tf.reshape(self.x, [(- 1)])), self.num_emb, 1.0, 0.0) * tf.log(tf.reshape(self.g_predictions, [(- 1), self.num_emb]))))) / (self.sequence_length * self.batch_size))
        # Per-sequence negative log-likelihood (summed over time steps).
        self.out_loss = tf.reduce_sum(tf.reshape((- tf.reduce_sum((tf.one_hot(tf.to_int32(tf.reshape(self.x, [(- 1)])), self.num_emb, 1.0, 0.0) * tf.log(tf.reshape(self.g_predictions, [(- 1), self.num_emb]))), 1)), [(- 1), self.sequence_length]), 1)

    def generate(self, session):
        """Sample one batch of token sequences from the oracle."""
        outputs = session.run(self.gen_x)
        return outputs

    def init_matrix(self, shape):
        # Not referenced within this class; presumably kept for interface
        # parity with the trainable generator — confirm before removing.
        return tf.random_normal(shape, stddev=1.0)

    def create_recurrent_unit(self, params):
        """Build the LSTM cell from pre-loaded weights; appends them to `params`."""
        # Input gate weights.
        self.Wi = tf.Variable(self.params[1])
        self.Ui = tf.Variable(self.params[2])
        self.bi = tf.Variable(self.params[3])
        # Forget gate weights.
        self.Wf = tf.Variable(self.params[4])
        self.Uf = tf.Variable(self.params[5])
        self.bf = tf.Variable(self.params[6])
        # Output gate weights.
        self.Wog = tf.Variable(self.params[7])
        self.Uog = tf.Variable(self.params[8])
        self.bog = tf.Variable(self.params[9])
        # Candidate (cell update) weights.
        self.Wc = tf.Variable(self.params[10])
        self.Uc = tf.Variable(self.params[11])
        self.bc = tf.Variable(self.params[12])
        params.extend([self.Wi, self.Ui, self.bi, self.Wf, self.Uf, self.bf, self.Wog, self.Uog, self.bog, self.Wc, self.Uc, self.bc])

        def unit(x, hidden_memory_tm1):
            # Standard LSTM cell on a stacked (hidden, cell) state.
            (previous_hidden_state, c_prev) = tf.unstack(hidden_memory_tm1)
            i = tf.sigmoid(((tf.matmul(x, self.Wi) + tf.matmul(previous_hidden_state, self.Ui)) + self.bi))
            f = tf.sigmoid(((tf.matmul(x, self.Wf) + tf.matmul(previous_hidden_state, self.Uf)) + self.bf))
            o = tf.sigmoid(((tf.matmul(x, self.Wog) + tf.matmul(previous_hidden_state, self.Uog)) + self.bog))
            c_ = tf.nn.tanh(((tf.matmul(x, self.Wc) + tf.matmul(previous_hidden_state, self.Uc)) + self.bc))
            c = ((f * c_prev) + (i * c_))
            current_hidden_state = (o * tf.nn.tanh(c))
            return tf.stack([current_hidden_state, c])
        return unit

    def create_output_unit(self, params):
        """Build the hidden-to-vocabulary projection; appends weights to `params`."""
        self.Wo = tf.Variable(self.params[13])
        self.bo = tf.Variable(self.params[14])
        params.extend([self.Wo, self.bo])

        def unit(hidden_memory_tuple):
            (hidden_state, c_prev) = tf.unstack(hidden_memory_tuple)
            # Raw logits over the vocabulary (softmax applied by callers).
            logits = (tf.matmul(hidden_state, self.Wo) + self.bo)
            return logits
        return unit
class SecondPage(Gtk.Box):
    """Second screen of the setup wizard.

    Shows instructions asking the user to press a specific key so the
    keyboard can be identified, plus a "Go Back" button returning to the
    first page.
    """

    def __init__(self, parent_window):
        super().__init__(spacing=10)
        # Parent window exposes the overlay/container/page widgets used by goback().
        self.__parent_window = parent_window
        self.grid = Gtk.Grid()
        vbox = Gtk.VBox()
        vbox_container = Gtk.VBox()
        scroller = Gtk.ScrolledWindow()
        # Vertical scrollbar always visible; no horizontal scrolling.
        scroller.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.ALWAYS)
        label_start = Gtk.Label()
        label_start.set_markup('<b>Identifying your Keyboard...</b>\n\nPress the <b>2nd</b> key <b>Left</b> of the spacebar.\n\n<sub>If stuck here then unset Overlay (Super) key on your DE.</sub>')
        label_start.set_valign(Gtk.Align.START)
        label_start.set_halign(Gtk.Align.START)
        label_start.set_line_wrap(True)
        vbox.add(label_start)
        scroller.add(vbox)
        hbox = Gtk.HBox()
        # Set the label via markup on the button's child so it renders bold.
        previous = Gtk.Button(label='')
        for child in previous.get_children():
            child.set_label('<b>Go Back</b>')
            child.set_use_markup(True)
        previous.connect('clicked', self.goback)
        previous.set_margin_end(315)
        hbox.add(previous)
        hbox.set_hexpand(False)
        hbox.set_vexpand(False)
        hbox.set_margin_bottom(6)
        hbox.set_margin_end(25)
        scroller.set_hexpand(True)
        scroller.set_vexpand(True)
        vbox_container.add(scroller)
        # NOTE(review): the margins below are hand-tuned pixel values for
        # this specific window layout — confirm against the window size
        # before changing any of them.
        vbox_container.set_margin_top(55)
        vbox_container.set_margin_end(25)
        self.grid.set_margin_start(157)
        vbox_container.set_margin_bottom(18)
        vbox.set_margin_end(10)
        vbox.set_margin_bottom(18)
        self.grid.add(vbox_container)
        self.grid.attach_next_to(hbox, vbox_container, Gtk.PositionType.BOTTOM, 2, 1)
        self.add(self.grid)

    def goback(self, *args):
        """Return to the first wizard page, rebuilding the overlay stack."""
        # Clear the overlay, then re-add background + container so the
        # first page is shown again.
        for grandkid in self.__parent_window.overlay.get_children():
            self.__parent_window.overlay.remove(grandkid)
        self.__parent_window.overlay.add(self.__parent_window.background)
        self.__parent_window.overlay.add_overlay(self.__parent_window.container)
        self.__parent_window.container.add(self.__parent_window.first_page)
        self.__parent_window.container.remove(self.__parent_window.second_page)
        # Stop listening for the key-identification signal while on page one.
        self.__parent_window.setupwin.disconnect(self.__parent_window.setupwin.signal_id)
        self.__parent_window.setupwin.show_all()
        self.__parent_window.first_onward.grab_focus()
        self.hide()
class TextDataset(Dataset):
    """Tokenize a list of strings up front and serve (input_ids, attention_mask) tensor pairs."""

    def __init__(self, txt_list, tokenizer, max_length):
        self.labels = []  # kept for API compatibility; never populated here
        self.input_ids = []
        self.attn_masks = []
        for text in txt_list:
            # Truncate to max_length; no padding (batching handles that).
            encoded = tokenizer(text, truncation=True, max_length=max_length, pad_to_max_length=False)
            self.input_ids.append(torch.tensor(encoded['input_ids']))
            self.attn_masks.append(torch.tensor(encoded['attention_mask']))

    def __len__(self):
        return len(self.input_ids)

    def __getitem__(self, idx):
        return (self.input_ids[idx], self.attn_masks[idx])
class GUDevMonitorObserver(GObject.Object, _ObserverMixin):
    """Deprecated GObject adapter that re-emits udev monitor events as
    GObject signals (use pyudev.glib.MonitorObserver instead)."""

    # Maps a udev action string to the action-specific signal fired in
    # addition to the generic 'device-event'.
    _action_signal_map = {'add': 'device-added', 'remove': 'device-removed', 'change': 'device-changed', 'move': 'device-moved'}
    # str() wrappers keep signal names as native str on both Python 2 and 3.
    __gsignals__ = {str('device-event'): (GObject.SIGNAL_RUN_LAST, GObject.TYPE_NONE, (GObject.TYPE_STRING, GObject.TYPE_PYOBJECT)), str('device-added'): (GObject.SIGNAL_RUN_LAST, GObject.TYPE_NONE, (GObject.TYPE_PYOBJECT,)), str('device-removed'): (GObject.SIGNAL_RUN_LAST, GObject.TYPE_NONE, (GObject.TYPE_PYOBJECT,)), str('device-changed'): (GObject.SIGNAL_RUN_LAST, GObject.TYPE_NONE, (GObject.TYPE_PYOBJECT,)), str('device-moved'): (GObject.SIGNAL_RUN_LAST, GObject.TYPE_NONE, (GObject.TYPE_PYOBJECT,))}

    def __init__(self, monitor):
        GObject.Object.__init__(self)
        # Observation plumbing lives in _ObserverMixin.
        self._setup_observer(monitor)
        import warnings
        warnings.warn('Will be removed in 1.0. Use pyudev.glib.MonitorObserver instead.', DeprecationWarning)

    def _emit_event(self, device):
        # Always fire the generic signal, then the action-specific one when
        # the action is recognized.
        self.emit('device-event', device.action, device)
        signal = self._action_signal_map.get(device.action)
        if (signal is not None):
            self.emit(signal, device)
def test_draw_trajectory() -> None:
    """draw_trajectory paints (x, y) positions as (row=y, col=x) pixels."""
    white = (255, 255, 255)
    black = (0, 0, 0)
    positions = np.asarray([(0, 0), (0, 10), (0, 20)])

    canvas = np.zeros((224, 244, 3), dtype=np.uint8)
    draw_trajectory(canvas, positions, white)
    # Points land on rows 0/10/20 of column 0 ...
    assert np.all(canvas[(0, 0)] == white)
    assert np.all(canvas[(10, 0)] == white)
    assert np.all(canvas[(20, 0)] == white)
    # ... and the transposed coordinates stay untouched.
    assert np.all(canvas[(0, 20)] == black)
    assert np.all(canvas[(0, 10)] == black)

    # Supplying per-point yaws must not change which pixels get painted.
    canvas = np.zeros((224, 244, 3), dtype=np.uint8)
    yaws = np.asarray([[0.1], [(- 0.1)], [0.0]])
    draw_trajectory(canvas, positions, white, yaws=yaws)
    assert np.all(canvas[(0, 0)] == white)
    assert np.all(canvas[(10, 0)] == white)
    assert np.all(canvas[(20, 0)] == white)
def get_vehiclerouting_solution(instance: np.ndarray, n: int, K: int, result: MinimumEigensolverResult) -> List[int]:
    """Decode the binary VRP assignment vector from an eigensolver result.

    Takes the basis state with the largest amplitude in
    ``result.eigenstate``, expands its index into an ``n*(n-1)``-bit
    binary string and returns the bits reversed (least-significant bit
    first) as a numpy array of 0/1.

    ``instance`` and ``K`` are accepted for signature compatibility but
    unused.
    """
    del instance, K
    v = result.eigenstate
    num_bits = (n - 1) * n
    # First index attaining the maximal amplitude. max() is hoisted out of
    # the scan so this is O(len(v)) instead of O(len(v)**2).
    peak = max(v)
    index_value = next(i for i, amp in enumerate(v) if amp == peak)
    # Zero-padded binary expansion of the winning basis-state index.
    string_value = '{0:b}'.format(index_value).zfill(num_bits)
    x_sol = [0 if bit == '0' else 1 for bit in string_value]
    # Reverse so bit 0 of the state index comes first.
    return np.flip(x_sol, axis=0)
def test_rtf_footer():
    """Formatting an empty document as RTF must still end with the footer."""
    t = ''
    result = format_rtf(t)
    expected = ''
    # NOTE(review): `expected` is empty and `foot` comes from module scope,
    # so the real check is that the output ends with `foot`; with
    # len(expected) == 0 the message embeds the entire result.
    # Fixed typo in the failure message ("intead" -> "instead").
    msg = "RTF documents are expected to end with '{expected}'\n\t\tEnds instead with '{result}'\n\t(WARNING: Partial Output of Result!)".format(expected=_escape(expected), result=_escape(result[(- len(expected)):]))
    assert result.endswith((expected + foot)), msg
class DenseSimpleUnit(nn.Module):
    """DenseNet-style unit: a pre-activated 3x3 conv whose output is
    concatenated with the input along the channel axis.

    Parameters
    ----------
    in_channels : int
        Channels of the incoming tensor.
    out_channels : int
        Channels after concatenation; the conv produces
        ``out_channels - in_channels`` new channels.
    dropout_rate : float
        Dropout probability applied after the conv; 0.0 disables dropout.
    """

    def __init__(self, in_channels, out_channels, dropout_rate):
        super(DenseSimpleUnit, self).__init__()
        self.use_dropout = dropout_rate != 0.0
        growth = out_channels - in_channels
        self.conv = pre_conv3x3_block(in_channels=in_channels, out_channels=growth)
        if self.use_dropout:
            self.dropout = nn.Dropout(p=dropout_rate)

    def forward(self, x):
        new_features = self.conv(x)
        if self.use_dropout:
            new_features = self.dropout(new_features)
        # Dense connectivity: stack the input and new features channel-wise.
        return torch.cat((x, new_features), dim=1)
def test_aggregated_node_min_flow(model):
    """An AggregatedNode min_flow is satisfied entirely by the cheap input."""
    cheap = Input(model, 'A', max_flow=20.0, cost=1)
    dear = Input(model, 'B', max_flow=20.0, cost=100)
    sink = Output(model, 'Z', max_flow=100, cost=0)
    cheap.connect(sink)
    dear.connect(sink)
    agg = AggregatedNode(model, 'agg', [cheap, dear])
    agg.min_flow = 15.0
    model.run()
    # The forced 15 units all route through the low-cost node.
    assert_allclose(agg.flow, 15.0)
    assert_allclose(cheap.flow, 15.0)
    assert_allclose(dear.flow, 0.0)
class fastPredictBertMrc(fastPredict):
    """Inference wrapper for a BERT-MRC style NER model.

    Each source sample is expanded upstream into one window per
    (query-type, text chunk); this class runs the saved model per window
    and stitches the per-window start/end predictions back into one
    {entity_type: [entity, ...]} dict per source sample.
    """

    def __init__(self, model_path, config):
        # Path to the original (untokenized) test file.
        self.orig_test_file = os.path.join(config.get('data_dir'), config.get('orig_test'))
        super(fastPredictBertMrc, self).__init__(model_path, config)

    def init_data_loader(self, config):
        """Build the MRC data loader from vocab/slot/bert-config paths in `config`."""
        vocab_file_path = os.path.join(config.get('bert_pretrained_model_path'), config.get('vocab_file'))
        slot_file = os.path.join(config.get('slot_list_root_path'), config.get('bert_slot_complete_file_name'))
        bert_config_file = os.path.join(config.get('bert_pretrained_model_path'), config.get('bert_config_path'))
        # 512 is the max sequence length; inference mode regenerates data.
        data_loader = bertMRCPrepareData(vocab_file_path, slot_file, config, bert_config_file, 512, gen_new_data=True, is_inference=True)
        return data_loader

    def predict_mrc(self, text, query_len, token_type_ids):
        """Run one window through the model; returns its (start_ids, end_ids)."""
        text_length = len(text)
        predictions = self.predict_fn({'words': [text], 'text_length': [text_length], 'query_length': [query_len], 'token_type_ids': [token_type_ids]})
        (start_ids, end_ids) = (predictions.get('start_ids'), predictions.get('end_ids'))
        # Batch size is 1, so unwrap the single row.
        return (start_ids[0], end_ids[0])

    def extract_entity_from_start_end_ids(self, orig_text, start_ids, end_ids):
        """Pair start/end markers into entity strings.

        For each start position, scan forward for the first end marker that
        appears before the next start; fall back to a single-character
        entity when no end marker is found.
        """
        entity_list = []
        for (i, start_id) in enumerate(start_ids):
            if (start_id == 0):
                continue
            j = (i + 1)
            find_end_tag = False
            while (j < len(end_ids)):
                if (start_ids[j] == 1):
                    # Ran into the next entity's start: give up on this one.
                    break
                if (end_ids[j] == 1):
                    entity_list.append(''.join(orig_text[i:(j + 1)]))
                    find_end_tag = True
                    break
                else:
                    j += 1
            if (not find_end_tag):
                # No matching end marker: emit just the start character.
                entity_list.append(''.join(orig_text[i:(i + 1)]))
        return entity_list

    def predict_entitys_for_all_sample(self, text_data_Xs, query_lens, token_type_ids_list, query_class_list, src_sample_ids_list, orig_text_list):
        """Predict entities for all windows and regroup them per source sample.

        Windows arriving with the same (sample id, query class) have their
        start/end predictions concatenated in buffers; when either changes,
        the buffered predictions are flushed into `result_list` (one dict of
        {entity_type: entities} per source sample, in sample order).
        """
        result_list = []
        # Buffers accumulate predictions for the current (sample, query class).
        cur_sample_id_buffer = 0
        start_ids_buffer = []
        end_ids_buffer = []
        query_class_buffer = ''
        for i in range(len(text_data_Xs)):
            cur_text = text_data_Xs[i]
            cur_query_len = query_lens[i]
            cur_token_type_ids = token_type_ids_list[i]
            cur_query_class = query_class_list[i]
            cur_src_sample_id = src_sample_ids_list[i]
            (start_ids, end_ids) = self.predict_mrc(cur_text, cur_query_len, cur_token_type_ids)
            # Drop the query-token prefix; keep predictions for the text part only.
            true_start_ids = start_ids[cur_query_len:].tolist()
            true_end_ids = end_ids[cur_query_len:].tolist()
            cur_query_class_str = ner_query_map.get('tags')[cur_query_class]
            if ((query_class_buffer == '') or (len(start_ids_buffer) == 0)):
                # First window overall: just seed the buffers.
                query_class_buffer = cur_query_class_str
                start_ids_buffer.extend(true_start_ids)
                end_ids_buffer.extend(true_end_ids)
                cur_sample_id_buffer = cur_src_sample_id
            elif ((cur_src_sample_id == cur_sample_id_buffer) and (cur_query_class_str == query_class_buffer)):
                # Same sample and query class: keep accumulating chunks.
                start_ids_buffer.extend(true_start_ids)
                end_ids_buffer.extend(true_end_ids)
            elif (cur_src_sample_id == cur_sample_id_buffer):
                # Same sample, new query class: flush the buffered class
                # into this sample's dict, then restart buffers.
                cur_orig_text = orig_text_list[cur_sample_id_buffer]
                extracted_entity_list = self.extract_entity_from_start_end_ids(cur_orig_text, start_ids_buffer, end_ids_buffer)
                if (len(result_list) == 0):
                    result_list.append({query_class_buffer: extracted_entity_list})
                elif (cur_sample_id_buffer >= len(result_list)):
                    result_list.append({query_class_buffer: extracted_entity_list})
                else:
                    result_list[cur_sample_id_buffer].update({query_class_buffer: extracted_entity_list})
                query_class_buffer = cur_query_class_str
                start_ids_buffer = true_start_ids
                end_ids_buffer = true_end_ids
            else:
                # New sample: flush the previous sample's buffered class and
                # move the buffers to the new sample.
                cur_orig_text = orig_text_list[cur_sample_id_buffer]
                extracted_entity_list = self.extract_entity_from_start_end_ids(cur_orig_text, start_ids_buffer, end_ids_buffer)
                if (cur_sample_id_buffer >= len(result_list)):
                    result_list.append({query_class_buffer: extracted_entity_list})
                else:
                    result_list[cur_sample_id_buffer].update({query_class_buffer: extracted_entity_list})
                query_class_buffer = cur_query_class_str
                start_ids_buffer = true_start_ids
                end_ids_buffer = true_end_ids
                cur_sample_id_buffer = cur_src_sample_id
        # Flush whatever remains in the buffers after the last window.
        cur_orig_text = orig_text_list[cur_sample_id_buffer]
        extracted_entity_list = self.extract_entity_from_start_end_ids(cur_orig_text, start_ids_buffer, end_ids_buffer)
        if (cur_sample_id_buffer >= len(result_list)):
            result_list.append({query_class_buffer: extracted_entity_list})
        else:
            result_list[cur_sample_id_buffer].update({query_class_buffer: extracted_entity_list})
        return result_list

    def gen_micro_level_entity_span(self, type2entity_list):
        """Collapse each sample's {type: entities} dict into a deduplicated
        flat entity list (type information discarded)."""
        entity_list_final = []
        for type2entity in type2entity_list:
            cur_tmp_list = []
            for (slot_type, entity_list) in type2entity.items():
                cur_tmp_list.extend(entity_list)
            entity_list_final.append(list(set(cur_tmp_list)))
        return entity_list_final
def run_dummyrunner(number_of_dummies):
    """Launch the Evennia dummyrunner as a subprocess.

    Args:
        number_of_dummies (int or str): Number of dummy players to spawn.
            Falsy values (None, 0, '') fall back to 1.
    """
    # Normalize to a string: subprocess argv entries must be str, so the
    # fallback is '1', not the int 1 (which would make `call` fail).
    number_of_dummies = str(int(number_of_dummies)) if number_of_dummies else '1'
    cmdstr = [sys.executable, EVENNIA_DUMMYRUNNER, '-N', number_of_dummies]
    # Use a game-dir dummyrunner config if one exists.
    config_file = os.path.join(SETTINGS_PATH, 'dummyrunner_settings.py')
    if os.path.exists(config_file):
        cmdstr.extend(['--config', config_file])
    try:
        call(cmdstr, env=getenv())
    except KeyboardInterrupt:
        # Ctrl-C just stops the runner quietly.
        pass
class MAEMetricTest(unittest.TestCase):
    """Multi-process launch tests for MAEMetric (mean absolute error).

    Both tests spawn `world_size` ranks via `metric_test_helper` and compare
    the rec-metric implementation against the reference `TestMAEMetric`.
    """

    # Metric class under test and its task name — presumably consumed by
    # shared test helpers; confirm against the base test utilities.
    clazz: Type[RecMetric] = MAEMetric
    task_name: str = 'mae'

    def test_unfused_mae(self) -> None:
        # UNFUSED mode: each task keeps its own metric state.
        rec_metric_value_test_launcher(target_clazz=MAEMetric, target_compute_mode=RecComputeMode.UNFUSED_TASKS_COMPUTATION, test_clazz=TestMAEMetric, metric_name='mae', task_names=['t1', 't2', 't3'], fused_update_limit=0, compute_on_all_ranks=False, should_validate_update=False, world_size=WORLD_SIZE, entry_point=metric_test_helper)

    def test_fused_mae(self) -> None:
        # FUSED mode: task states are computed together.
        rec_metric_value_test_launcher(target_clazz=MAEMetric, target_compute_mode=RecComputeMode.FUSED_TASKS_COMPUTATION, test_clazz=TestMAEMetric, metric_name='mae', task_names=['t1', 't2', 't3'], fused_update_limit=0, compute_on_all_ranks=False, should_validate_update=False, world_size=WORLD_SIZE, entry_point=metric_test_helper)
def target_df_without_window(spark_context, spark_session):
    """Expected aggregated dataframe (no rolling window) used by the tests."""
    rows = [{'id': 1, 'timestamp': '2016-04-11 12:03:21', 'feature1__avg': 350, 'feature2__count': 4}]
    frame = spark_session.read.json(spark_context.parallelize(rows, 1))
    # Cast the string timestamp into the canonical Spark TIMESTAMP column.
    return frame.withColumn(TIMESTAMP_COLUMN, frame.timestamp.cast(DataType.TIMESTAMP.spark))
class Migration(migrations.Migration):
    """Update field options (help_text / verbose_name) on the Membership and
    Project relations — appears metadata-only; confirm no schema change is
    generated for your backend."""

    dependencies = [('projects', '0047_continuation')]

    operations = [migrations.AlterField(model_name='membership', name='project', field=models.ForeignKey(help_text='The project for this membership.', on_delete=django.db.models.deletion.CASCADE, related_name='memberships', to='projects.Project', verbose_name='Project')), migrations.AlterField(model_name='membership', name='user', field=models.ForeignKey(help_text='The user for this membership.', on_delete=django.db.models.deletion.CASCADE, related_name='memberships', to=settings.AUTH_USER_MODEL, verbose_name='User')), migrations.AlterField(model_name='project', name='user', field=models.ManyToManyField(help_text='The list of users for this project.', related_name='projects', through='projects.Membership', to=settings.AUTH_USER_MODEL, verbose_name='User'))]
class ImportOrganizer():
    """High-level entry points for import refactorings on a module.

    Each public method runs one `ImportTools` transformation on `resource`
    and wraps the new source in a `ChangeSet`, or returns None when the
    transformation produced no change.
    """

    def __init__(self, project):
        self.project = project
        self.import_tools = ImportTools(self.project)

    def organize_imports(self, resource, offset=None):
        """Sort and clean up the module's imports."""
        return self._perform_command_on_import_tools(self.import_tools.organize_imports, resource, offset)

    def expand_star_imports(self, resource, offset=None):
        """Replace `from x import *` with explicit names."""
        return self._perform_command_on_import_tools(self.import_tools.expand_stars, resource, offset)

    def froms_to_imports(self, resource, offset=None):
        """Convert `from` imports into plain `import` statements."""
        return self._perform_command_on_import_tools(self.import_tools.froms_to_imports, resource, offset)

    def relatives_to_absolutes(self, resource, offset=None):
        """Convert relative imports into absolute ones."""
        return self._perform_command_on_import_tools(self.import_tools.relatives_to_absolutes, resource, offset)

    def handle_long_imports(self, resource, offset=None):
        """Shorten overly long import statements."""
        return self._perform_command_on_import_tools(self.import_tools.handle_long_imports, resource, offset)

    def _perform_command_on_import_tools(self, method, resource, offset):
        pymodule = self.project.get_pymodule(resource)
        source_before = pymodule.source_code
        # When an offset is given, restrict the transformation to the import
        # statement covering that line.
        import_filter = None
        if offset is not None:
            import_filter = self._line_filter(pymodule.lines.get_line_number(offset))
        result = method(pymodule, import_filter=import_filter)
        if result is None or result == source_before:
            return None
        description = method.__name__.replace('_', ' ') + (' in <%s>' % resource.path)
        changes = ChangeSet(description)
        changes.add_change(ChangeContents(resource, result))
        return changes

    def _line_filter(self, lineno):
        def import_filter(import_stmt):
            # True when the statement's line span contains the target line.
            return import_stmt.start_line <= lineno < import_stmt.end_line
        return import_filter
class _ProtocolEncoder(json.JSONEncoder):
    """JSON encoder for protocol messages: serializes each message type as a
    tagged object the other side can dispatch on."""

    def default(self, o: Any):
        # Checks are ordered; keep this precedence if any of the protocol
        # types are related by inheritance.
        if isinstance(o, Performed):
            return {'tag': 'Performed', 'contents': o.state}
        if isinstance(o, Stale):
            return {'tag': 'Stale'}
        if isinstance(o, Timeout):
            return {'tag': 'Timeout', 'contents': o.state}
        if isinstance(o, Events):
            # Encode the nested events eagerly so they get tagged too.
            encoded: Any = [self.default(event) for event in o.events]
            return {'tag': 'Events', 'contents': [encoded, o.state]}
        if isinstance(o, Error):
            return {'tag': 'Error', 'errorMessage': o.error_message}
        if isinstance(o, Action):
            return {'id': o.id, 'args': o.args, 'isEvent': o.isEvent, 'timeout': o.timeout}
        # Unknown type: defer to the base encoder (raises TypeError).
        return json.JSONEncoder.default(self, o)
def _topk_helper(g, input, k, dim, largest=True, sorted=False, out=None):
    """Emit an ONNX TopK node for `input` along axis `dim`.

    `k` may be a Python int (wrapped in a Constant) or an existing graph
    value (reshaped to rank 1).  Returns the two TopK outputs
    (values, indices).  The `out` parameter is not supported.
    """
    if out is not None:
        _unimplemented('TopK', 'Out parameter is not supported')
    if _is_value(k):
        # k is already a graph value; ONNX TopK wants it as a 1-element tensor.
        shape_one = g.op('Constant', value_t=torch.tensor([1]))
        k = g.op('Reshape', k, shape_one)
    else:
        k = g.op('Constant', value_t=torch.tensor([k], dtype=torch.int64))
    return g.op('TopK', input, k, axis_i=dim, largest_i=largest, sorted_i=sorted, outputs=2)
class RCNN(nn.Module):
    """Wrapper around a torchvision region-based detector (Mask R-CNN or
    Faster R-CNN), usable either as a frozen feature extractor (backbone
    body only, the default) or as a full detector with prediction heads.
    """

    def __init__(self, archi, device='cuda', checkpoint_path=None, share_memory=False, load_heads=False):
        """Build the model.

        Args:
            archi: 'maskrcnn' or 'fasterrcnn'; anything else raises ValueError.
            device: torch device string the model is moved to.
            checkpoint_path: optional state-dict path; when given, torchvision
                pretrained weights are disabled and the checkpoint is loaded.
            share_memory: if True, move parameters into shared memory
                (for multiprocessing workers).
            load_heads: if True keep the detection heads (full detector);
                if False strip the model down to its backbone body.
        """
        super().__init__()
        self.device = device
        # Key of the feature map returned by extract() — presumably one of the
        # string-keyed FPN levels returned by the backbone body; verify shape
        # against downstream consumers.
        self.feat_layer = '3'
        if (archi == 'maskrcnn'):
            # Use torchvision pretrained weights only when no checkpoint is supplied.
            self.model = models.detection.maskrcnn_resnet50_fpn(pretrained=(checkpoint_path is None), pretrained_backbone=(checkpoint_path is None), min_size=800)
        elif (archi == 'fasterrcnn'):
            self.model = models.detection.fasterrcnn_resnet50_fpn(pretrained=(checkpoint_path is None), pretrained_backbone=(checkpoint_path is None), min_size=224)
        else:
            raise ValueError('Unknown model type = {}'.format(archi))
        if (archi == 'maskrcnn'):
            # Reuse the detector's own GeneralizedRCNNTransform for normalization.
            self._transform = self.model.transform
        else:
            self._transform = Transforms.get_transform('default')
        if (not load_heads):
            # Strip the detector down to its backbone body: model -> backbone -> body.
            for attr in ('backbone', 'body'):
                self.model = getattr(self.model, attr)
        if (checkpoint_path is not None):
            # Must happen after the optional head-stripping above so the
            # checkpoint keys match the (possibly reduced) model.
            self.load_from_checkpoint(checkpoint_path, load_heads, device, archi, 'backbone.body')
        self.model = self.model.to(torch.device(device))
        self.model = self.model.eval()
        if share_memory:
            self.model.share_memory()
        if load_heads:
            # Index -> class-name lookup for predict_objects(); assumes the
            # loaded heads were trained on constants.OBJECTS_ACTIONS — TODO confirm.
            self.vocab_pred = {i: class_name for (i, class_name) in enumerate(constants.OBJECTS_ACTIONS)}

    def extract(self, images):
        """Run a batch of images through the backbone and return the feature
        map selected by self.feat_layer."""
        if isinstance(self._transform, models.detection.transform.GeneralizedRCNNTransform):
            # The detection transform returns (ImageList, targets); take the
            # batched, padded tensors from the ImageList.
            images_normalized = self._transform(torch.stack([F.to_tensor(img) for img in images]))[0].tensors
        else:
            images_normalized = torch.stack([self._transform(img) for img in images])
        images_normalized = images_normalized.to(torch.device(self.device))
        model_body = self.model
        if hasattr(self.model, 'backbone'):
            # Full detector kept its heads; descend to the bare backbone body.
            model_body = self.model.backbone.body
        features = model_body(images_normalized)
        return features[self.feat_layer]

    def load_from_checkpoint(self, checkpoint_path, load_heads, device, archi, prefix):
        """Load weights from `checkpoint_path`.

        When load_heads is False, only keys containing '<prefix>.' are kept
        and the prefix is stripped so they match the stripped-down backbone
        model.  Otherwise the box (and, for Mask R-CNN, mask) predictors are
        rebuilt to match the class count stored in the checkpoint before
        loading the full state dict.
        """
        print('Loading RCNN checkpoint from {}'.format(checkpoint_path))
        state_dict = torch.load(checkpoint_path, map_location=device)
        if (not load_heads):
            # Keep only backbone-body entries and drop the prefix from their keys.
            state_dict = {k.replace((prefix + '.'), ''): v for (k, v) in state_dict.items() if ((prefix + '.') in k)}
        else:
            # Infer the class count from the checkpoint's classifier weights.
            (num_classes, in_features) = state_dict['roi_heads.box_predictor.cls_score.weight'].shape
            box_predictor = models.detection.faster_rcnn.FastRCNNPredictor(in_features, num_classes)
            self.model.roi_heads.box_predictor = box_predictor
            if (archi == 'maskrcnn'):
                in_features_mask = self.model.roi_heads.mask_predictor.conv5_mask.in_channels
                hidden_layer = 256
                mask_predictor = models.detection.mask_rcnn.MaskRCNNPredictor(in_features_mask, hidden_layer, num_classes)
                self.model.roi_heads.mask_predictor = mask_predictor
        self.model.load_state_dict(state_dict)

    def predict_objects(self, image, confidence_threshold=0.0, verbose=False):
        """Run full detection on a single image.

        Returns a list of SimpleNamespace(label, box, score[, mask]) for every
        detection scoring at least `confidence_threshold`.  Requires the model
        to have been built with load_heads=True (uses self.vocab_pred).
        """
        image = F.to_tensor(image).to(torch.device(self.device))
        # Add a batch dimension; the detector returns one dict per image.
        output = self.model(image[None])[0]
        preds = []
        for pred_idx in range(len(output['scores'])):
            score = output['scores'][pred_idx].cpu().item()
            if (score < confidence_threshold):
                continue
            box = output['boxes'][pred_idx].cpu().numpy()
            label = self.vocab_pred[output['labels'][pred_idx].cpu().item()]
            if verbose:
                print('{} at {}'.format(label, box))
            pred = types.SimpleNamespace(label=label, box=box, score=score)
            if ('masks' in output):
                # Mask R-CNN additionally returns per-instance masks.
                pred.mask = output['masks'][pred_idx].cpu().numpy()
            preds.append(pred)
        return preds
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.