hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f731b7d1d6104774ffc3c228a51322e1885b01cf | 18,640 | py | Python | meta_dataset/analysis/select_best_model.py | jishnujayakumar/meta-dataset | fac43975e7e8931bd9c9a9171268758e26469646 | [
"Apache-2.0"
] | 643 | 2019-03-05T18:42:45.000Z | 2022-03-27T21:20:46.000Z | meta_dataset/analysis/select_best_model.py | jishnujayakumar/meta-dataset | fac43975e7e8931bd9c9a9171268758e26469646 | [
"Apache-2.0"
] | 80 | 2019-03-16T15:42:49.000Z | 2022-03-30T22:22:31.000Z | meta_dataset/analysis/select_best_model.py | jishnujayakumar/meta-dataset | fac43975e7e8931bd9c9a9171268758e26469646 | [
"Apache-2.0"
] | 126 | 2019-03-14T01:36:32.000Z | 2022-03-26T17:33:11.000Z | # coding=utf-8
# Copyright 2021 The Meta-Dataset Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
r"""A script for choosing the best variant of a model automatically.
It takes as input the root directory of all experiments, and a list of names of
directories in that root, each storing the data of an experiment with multiple
variants across which we want to select the best. Each experiment directory
should contain a directory named 'summaries' that hosts subdirectories for the
different runs with each one containing event files. These event files are read
to figure out which is best in terms of mean validation accuracy, and at which
step of that run this best value occurs in.
For each of the experiment directories provided, the output information is saved
in a 'best.pklz' file in that directory. This file contains a dict with keys
'best_variant', 'best_valid_acc', and 'best_update_num' where the name of the
variant is simply the name of the sub-directory corresponding to that variant.
Example directory structure (after the script was run):
Root contains: 'Exp1', 'Exp2'.
Exp1 contains: 'checkpoints', 'summaries', and best.pklz
summaries contains: '1', '2', '3', ..., '20'
'1' contains event files
'2' contains event files
...
'20' contains event files
Sample command:
# pylint: disable=line-too-long
python -m meta_dataset.analysis.select_best_model \
--alsologtostderr \
--all_experiments_root=<experiments_root> \
--experiment_dir_basenames=baseline_imagenet_icml2019_1/3602170,baselinefinetune_imagenet_icml2019_1/3581340
# pylint: enable=line-too-long
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
from absl import logging
import numpy as np
from six.moves import range
from six.moves import zip
import six.moves.cPickle as pkl
import tensorflow.compat.v1 as tf
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_string(
'all_experiments_root',
'',
'The overall experiments directory root.')
tf.flags.DEFINE_string(
'experiment_dir_basenames', ''
'baseline_imagenet_icml2019_1/3602170,'
'baselinefinetune_imagenet_icml2019_1/3581340',
'A comma-separated list of directory basenames. Adding each basename as a '
'suffix to FLAGS.all_experiments_root forms a path that stores the data of '
'an experiment with multiple variants accross which we want to select the '
'best. Each such path is expected to host a directory named "summaries" '
'that contains subdirectories for the different runs with each such '
'subdirectory containing event files.')
# TODO(etriantafillou): This assumes the variants to omit are the same for all
# experiments that model selection will be ran for which doesn't make much
# sense. Maybe just remove this altogether?
tf.flags.DEFINE_string(
'restrict_to_variants', '', 'A comma-separated list of '
'variants to restrict to for model selection. This is '
'useful for example for finding the best out of all '
'variants that use a specific embedding or image size.')
tf.flags.DEFINE_string(
'restrict_to_variants_by_range', '', 'A comma-separated list of '
'two integers that represent the start and end range (both inclusive) '
'of variant ids to restrict to.')
tf.flags.DEFINE_string(
'description', 'best', 'The description for the output. The output will '
'then be named as description.pklz and description.txt. For example, this '
'can be used to reflect that some variants were omitted.')
# The following two flags assume that the parameters of the experiments have
# been logged (they attempt to read from them). If this is not the case, the
# restrict_to_variants flag should be used instead.
tf.flags.DEFINE_string(
'restrict_to_architectures', '', 'The comma-separated names of the '
'embedding networks to restrict to for model selection.')
tf.flags.DEFINE_enum(
'restrict_to_pretrained_source', '', ['', 'scratch', 'imagenet'],
'The name of a pretrained_source to '
'restrict to for model selection.')
tf.flags.DEFINE_integer(
'smooth_window', 1, 'rolling average window to be '
'applied before the best model selection. '
'Set 1 for no smoothing.')
VALIDATION_ACCURACY_TAGS = (
'valid_acc/mean',
'mean valid acc',
'mean acc', # TODO(doersch): rather unclear tag written by trainer.py
)
def get_value_from_params_dir(params_dir, param_names):
  """Gets the first found value from `param_names` in `params_dir`.

  Looks for `params.json` first and falls back to `params.pkl` for each
  requested parameter name, returning the first value found.

  Args:
    params_dir: A str. The directory that holds the recorded parameters
      (`params.json` and/or `params.pkl`).
    param_names: A str or an iterable of strs. The parameter keys to look up,
      in priority order. A bare string is treated as a single key (previously
      a bare string was iterated per-character and could never match).

  Returns:
    The value associated with the first of `param_names` found in either
    params file.

  Raises:
    ValueError: None of `param_names` was found in any params file.
  """
  # Accept a single key passed as a plain string; without this, the loop
  # below would iterate over its characters.
  if isinstance(param_names, str):
    param_names = (param_names,)

  def _load_params(param_name, params_file, loader, mode):
    """Loads `params_file` with `loader` and returns its `param_name` entry."""
    with tf.io.gfile.GFile(params_file, mode) as f:
      params = loader(f)
    logging.info('Found params file %s', params_file)
    return params[param_name]

  for param_name in param_names:
    try:
      # Prefer the JSON params file; fall back to the pickle only when the
      # JSON file does not exist. Note that a KeyError from either file skips
      # directly to the next param_name (outer except below).
      try:
        return _load_params(param_name, os.path.join(params_dir, 'params.json'),
                            json.load, 'r')
      except tf.errors.NotFoundError:
        logging.info('%s does not exist in %s', 'params.json', params_dir)

      try:
        return _load_params(param_name, os.path.join(params_dir, 'params.pkl'),
                            pkl.load, 'rb')
      except tf.errors.NotFoundError:
        logging.info('%s does not exist in %s', 'params.pkl', params_dir)

    except KeyError:
      # This key is absent from the file that was found; try the next one.
      pass

  raise ValueError('Did not find any of the following keys: %s' % param_names)
def get_paths_to_events(root_dir,
                        restrict_to_architectures,
                        restrict_to_pretrained_source,
                        restrict_to_variants=None):
  """Returns a dict that maps each variant name to its event file.

  The name of the variant is the basename of the directory where it's stored.
  Assumes the following directory organization root_dir contains a sub-directory
  for every variant where event files can be found.

  There may be more than one event file for each variant, e.g. a new one will be
  created upon restarting an experiment that was pre-empted. So later event
  files contain the summaries for larger values of 'step'. We need all of them
  for determining the global 'best'.

  Args:
    root_dir: A str. The root directory of experiments of all models variants.
    restrict_to_architectures: A list of names of architectures to restrict to
      when choosing the best variant.
    restrict_to_pretrained_source: A string. The pretrained_source to restrict
      to when choosing the best variant.
    restrict_to_variants: Optionally, a set of variant names to restrict to.

  Returns:
    A dict mapping each kept variant name to the list of paths of its
    'events.out.tfevents*' files.

  Raises:
    ValueError: No variant subdirectory survived the restrictions, or a
      restriction was requested but the variant's params were not recorded.
  """
  # Expected layout: <root_dir>/params/<variant> and <root_dir>/summaries/<variant>.
  params_dir = os.path.join(root_dir, 'params')
  summary_dir = os.path.join(root_dir, 'summaries')
  logging.info('Looking for parameters in params_dir: %s', params_dir)
  logging.info('Looking for summaries in summary_dir: %s', summary_dir)

  def get_variant_architecture(name):
    """Return the architecture of the given variant if recorded; o/w None."""
    variant_params_dir = os.path.join(params_dir, name)
    architecture = get_value_from_params_dir(
        variant_params_dir,
        (
            '_gin.Learner.embedding_fn',
            # The following are for backwards compatibility.
            '_gin.Trainer.embedding_network',
            '_gin.LearnerConfig.embedding_network',
        ))
    return architecture

  def get_variant_pretrained_source(name):
    """Return the pretrained src of the given variant if recorded; o/w None."""
    variant_params_dir = os.path.join(params_dir, name)
    # NOTE(review): a bare string is passed here while
    # get_value_from_params_dir iterates over `param_names`; a plain str is
    # therefore iterated per-character — confirm this lookup ever succeeds.
    pretrained_source = get_value_from_params_dir(
        variant_params_dir, '_gin.Trainer.pretrained_source')
    if not pretrained_source:
      # Backwards compatibility.
      pretrained_source = get_value_from_params_dir(
          variant_params_dir, '_gin.LearnerConfig.pretrained_source')
    return pretrained_source

  def keep_variant(name):
    """Determine if the variant in directory name should be considered."""
    value_error_msg = (
        'Requested to restrict to an architecture or '
        'pretrained_source but the given experiment does not '
        'have its params recorded. Looked in: {}'.format(params_dir))
    # Each restriction defaults to "pass" when not requested (the `not ...`
    # short-circuit below), so an empty restriction keeps every variant.
    if restrict_to_architectures:
      architecture = get_variant_architecture(name)
      if architecture is None:
        raise ValueError(value_error_msg)
    valid_architecture = (not restrict_to_architectures or
                          architecture in restrict_to_architectures)
    if restrict_to_pretrained_source:
      pretrained_source = get_variant_pretrained_source(name)
      if pretrained_source is None:
        raise ValueError(value_error_msg)
    valid_pretrained_source = (
        not restrict_to_pretrained_source or
        pretrained_source == restrict_to_pretrained_source)
    valid_variant_name = True
    if restrict_to_variants is not None:
      valid_variant_name = name in restrict_to_variants
    return (valid_architecture and valid_pretrained_source and
            valid_variant_name)

  variant_names = [
      fname for fname in tf.io.gfile.listdir(summary_dir)
      if tf.io.gfile.isdir(os.path.join(summary_dir, fname))
  ]
  if not variant_names:
    # Maybe there are no variants, and we are already in the directory that
    # contains the summaries. In this case, we consider that the current
    # directory (.) is the only variant.
    variant_names = ['.']

  # Further filter variant names based on the given restrictions.
  variant_names = [name for name in variant_names if keep_variant(name)]

  if not variant_names:
    raise ValueError('Found no subdirectories in {}. Was expecting a '
                     'subdirectory per variant.'.format(summary_dir))
  variant_paths = [
      os.path.join(summary_dir, variant_dir) for variant_dir in variant_names
  ]

  event_paths = {}
  for variant_path, variant_name in zip(variant_paths, variant_names):
    # Keep all event files of a variant: restarts of a pre-empted run create
    # additional files whose summaries cover later steps.
    event_filenames = [
        f_name for f_name in tf.io.gfile.listdir(variant_path)
        if f_name.startswith('events.out.tfevents')
    ]

    if len(event_filenames) < 1:
      # A variant with no event files is skipped rather than failing the run.
      logging.warn('Skipping empty variant %s.', variant_path)
      logging.info(
          'Was expecting at least one event file '
          'in directory %s. Instead, found %d.', variant_path,
          len(event_filenames))
      continue
    event_paths[variant_name] = [
        os.path.join(variant_path, event_filename)
        for event_filename in event_filenames
    ]

  logging.info('Found event files for variants: %s', list(event_paths.keys()))
  return event_paths
# TODO(crisnv): add smooth_type='uniform' that defines the smooth policy
def moving_average(x, smooth_window):
  """Smooths `x` with a rolling (uniform) average of width `smooth_window`.

  Args:
    x: The array to smooth.
    smooth_window: An integer that defines the neighborhood to be used in
      smoothing.

  Returns:
    A numpy array of the same length as `x` containing the smoothed values.
  """
  # Cache the uniform filter on the function object; rebuild it only when the
  # requested window size differs from the cached one.
  cached_filter = getattr(moving_average, 'conv_filter', None)
  if cached_filter is None or moving_average.conv_filter_size != smooth_window:
    moving_average.conv_filter = np.full((smooth_window,), 1.0 / smooth_window)
    moving_average.conv_filter_size = smooth_window
  # Reflect-pad so the 'valid' convolution preserves the stream length; the
  # pad is asymmetric when smooth_window is even.
  left_pad = smooth_window // 2
  right_pad = smooth_window - 1 - left_pad
  padded = np.pad(x, (left_pad, right_pad), 'reflect')
  return np.convolve(padded, moving_average.conv_filter, mode='valid')
def extract_best_from_event_file(event_path, smooth_window, log_details=False):
  """Returns the best accuracy and the step it occurs in in the given events.

  This searches the summaries written in a given event file, which may be only a
  subset of the total summaries of a run, since the summaries of a run are
  sometimes split into multiple event files.

  Args:
    event_path: A string. The path to an event file.
    smooth_window: An integer that defines the neighborhood to be used in
      smoothing before the argmax (use <=1 for no smoothing)
    log_details: A boolean. Whether to log details regarding skipped event paths
      in which locating the validation accuracy tag failed.

  Returns:
    A (best_acc, best_step) tuple, or (0, 0) when the file is unreadable or
    contains no validation accuracy summaries.
  """
  steps, valid_accs = [], []
  try:
    for event in tf.train.summary_iterator(event_path):
      step = event.step
      for value in event.summary.value:
        # A summary value counts as a validation accuracy if its tag contains
        # any of the known tag substrings (tag names varied across trainers).
        if any(
            valid_tag in value.tag for valid_tag in VALIDATION_ACCURACY_TAGS):
          steps.append(step)
          valid_accs.append(value.simple_value)
  except tf.errors.DataLossError:
    # A truncated/corrupted event file (e.g. from a pre-empted job) is skipped
    # entirely; any summaries read before the error are discarded.
    if log_details:
      tf.logging.info(
          'Omitting events from event_path {} because '
          'tf.train.summary_iterator(event_path) failed.'.format(event_path))
    return 0, 0
  if not valid_accs:
    # Could happen if there is no DataLossError above but for some reason
    # there is no validation accuracy tag found in the summary values.
    tf.logging.info(
        'Did not find any validation accuracy tags ({}) in event_path {}'
        .format(' or '.join(VALIDATION_ACCURACY_TAGS), event_path))
    return 0, 0
  if smooth_window > 1:
    # Smooth before the argmax so an isolated noisy spike is not selected.
    valid_accs = moving_average(valid_accs, smooth_window)
  argmax_ind = np.argmax(valid_accs)  # Index of the (smoothed) best accuracy.
  best_acc = valid_accs[argmax_ind]
  best_step = steps[argmax_ind]
  if log_details:
    tf.logging.info('Successfully read event_path {} with best_acc {}'.format(
        event_path, best_acc))
  return best_acc, best_step
def extract_best_from_variant(event_paths, smooth_window):
  """Returns the best accuracy and the step it occurs in for the given run.

  Args:
    event_paths: A list of strings. The event files of the given run.
    smooth_window: An integer that defines the neighborhood to be used in
      smoothing before the argmax (use <=1 for no smoothing)

  Raises:
    RuntimeError: No 'valid' event file for the given variant ('valid' here
      refers to an event file that has a validation accuracy tag).
  """
  best_acc, best_step = -1, -1
  # Take the per-file maximum across all event files of this run (a run's
  # summaries may be split across files); ties keep the earliest file's step.
  for path in event_paths:
    acc, step = extract_best_from_event_file(path, smooth_window)
    if acc > best_acc:
      best_acc, best_step = acc, step
  if best_acc <= 0:
    raise RuntimeError('Something went wrong with the summary event reading.')
  return best_acc, best_step
def main(argv):
  """Selects the best variant for each experiment root and writes the result.

  For every directory formed by joining FLAGS.all_experiments_root with each
  basename in FLAGS.experiment_dir_basenames, finds the variant with the
  highest mean validation accuracy (subject to the restriction flags) and
  writes a <description>.pklz and <description>.txt file into that directory.
  """
  del argv  # Unused; required by tf.app.run's main signature.
  experiment_paths = [
      os.path.join(FLAGS.all_experiments_root, basename)
      for basename in FLAGS.experiment_dir_basenames.split(',')
  ]
  # Perform model selection for each provided experiment root.
  for root_experiment_dir in experiment_paths:
    stars_string = '\n**************************************\n'
    architecture_string = ''
    if FLAGS.restrict_to_architectures:
      architecture_string = ' out of the {} variants'.format(
          FLAGS.restrict_to_architectures)
    logging.info('%sSelecting the best variant for: %s%s.%s', stars_string,
                 root_experiment_dir, architecture_string, stars_string)

    # The two variant-restriction flags are mutually exclusive.
    if FLAGS.restrict_to_variants_by_range and FLAGS.restrict_to_variants:
      raise ValueError('Please provide only one of '
                       'FLAGS.restrict_to_variants_by_range and '
                       'FLAGS.restrict_to_variants, not both.')

    # Build the (optional) set of variant names to restrict to, either from an
    # inclusive integer range or from an explicit comma-separated list.
    restrict_to_variants = None
    if FLAGS.restrict_to_variants_by_range:
      start, end = FLAGS.restrict_to_variants_by_range.split(',')
      start, end = int(start), int(end)
      restrict_to_variants = set(
          [str(variant_id) for variant_id in range(start, end + 1)])
    if FLAGS.restrict_to_variants:
      restrict_to_variants = set(FLAGS.restrict_to_variants.split(','))

    restrict_to_architectures = []
    if FLAGS.restrict_to_architectures:
      restrict_to_architectures = FLAGS.restrict_to_architectures.split(',')

    smooth_window = FLAGS.smooth_window
    event_paths = get_paths_to_events(
        root_experiment_dir,
        restrict_to_architectures,
        FLAGS.restrict_to_pretrained_source,
        restrict_to_variants=restrict_to_variants)
    # Read the event file of each variant to find the highest mean validation
    # accuracy reached with it.
    best_variant = ''
    best_valid_acc = -1
    best_step = -1
    for variant_name, event_path in event_paths.items():
      best_valid_acc_, best_step_ = extract_best_from_variant(
          event_path, smooth_window)
      if best_valid_acc_ > best_valid_acc:
        best_variant = variant_name
        best_valid_acc = best_valid_acc_
        best_step = best_step_

    output_dict = {
        'best_variant': best_variant,
        'best_valid_acc': best_valid_acc,
        'best_update_num': best_step
    }

    # Create a more informative description if necessary.
    description = FLAGS.description
    if FLAGS.restrict_to_architectures and FLAGS.description == 'best':
      description += '_{}'.format(FLAGS.restrict_to_architectures)
    if (FLAGS.restrict_to_pretrained_source and FLAGS.description == 'best'):
      if FLAGS.restrict_to_pretrained_source == 'scratch':
        description += '_trained_from_scratch'
      else:
        description += '_pretrained_on_{}'.format(
            FLAGS.restrict_to_pretrained_source)
    if FLAGS.smooth_window > 1:
      description += '_smoothed_by_window_{}'.format(smooth_window)

    # Persist the selection result as a pickle for programmatic consumption.
    output_path_pklz = os.path.join(root_experiment_dir,
                                    '{}.pklz'.format(description))
    with tf.io.gfile.GFile(output_path_pklz, 'wb') as f:
      pkl.dump(output_dict, f, protocol=pkl.HIGHEST_PROTOCOL)

    # Also write this info as a .txt file for easier reading.
    output_path_txt = os.path.join(root_experiment_dir,
                                   '{}.txt'.format(description))
    with tf.io.gfile.GFile(output_path_txt, 'w') as f:
      f.write(
          'best_variant: {}\nbest_valid_acc: {}\nbest_update_num: {}\n'.format(
              best_variant, best_valid_acc, best_step))
    logging.info(
        'Best variant: %s. Best valid acc: %s. Best update num: %d. '
        'Just wrote this info to %s and %s', best_variant, best_valid_acc,
        best_step, output_path_pklz, output_path_txt)
if __name__ == '__main__':
  # Show info-level progress logs when run as a script.
  logging.set_verbosity(logging.INFO)
  tf.app.run(main)
| 39.82906 | 110 | 0.712554 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
from absl import logging
import numpy as np
from six.moves import range
from six.moves import zip
import six.moves.cPickle as pkl
import tensorflow.compat.v1 as tf
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_string(
'all_experiments_root',
'',
'The overall experiments directory root.')
tf.flags.DEFINE_string(
'experiment_dir_basenames', ''
'baseline_imagenet_icml2019_1/3602170,'
'baselinefinetune_imagenet_icml2019_1/3581340',
'A comma-separated list of directory basenames. Adding each basename as a '
'suffix to FLAGS.all_experiments_root forms a path that stores the data of '
'an experiment with multiple variants accross which we want to select the '
'best. Each such path is expected to host a directory named "summaries" '
'that contains subdirectories for the different runs with each such '
'subdirectory containing event files.')
# sense. Maybe just remove this altogether?
tf.flags.DEFINE_string(
'restrict_to_variants', '', 'A comma-separated list of '
'variants to restrict to for model selection. This is '
'useful for example for finding the best out of all '
'variants that use a specific embedding or image size.')
tf.flags.DEFINE_string(
'restrict_to_variants_by_range', '', 'A comma-separated list of '
'two integers that represent the start and end range (both inclusive) '
'of variant ids to restrict to.')
tf.flags.DEFINE_string(
'description', 'best', 'The description for the output. The output will '
'then be named as description.pklz and description.txt. For example, this '
'can be used to reflect that some variants were omitted.')
# The following two flags assume that the parameters of the experiments have
# been logged (they attempt to read from them). If this is not the case, the
# restrict_to_variants flag should be used instead.
tf.flags.DEFINE_string(
'restrict_to_architectures', '', 'The comma-separated names of the '
'embedding networks to restrict to for model selection.')
tf.flags.DEFINE_enum(
'restrict_to_pretrained_source', '', ['', 'scratch', 'imagenet'],
'The name of a pretrained_source to '
'restrict to for model selection.')
tf.flags.DEFINE_integer(
'smooth_window', 1, 'rolling average window to be '
'applied before the best model selection. '
'Set 1 for no smoothing.')
VALIDATION_ACCURACY_TAGS = (
'valid_acc/mean',
'mean valid acc',
'mean acc', # TODO(doersch): rather unclear tag written by trainer.py
)
def get_value_from_params_dir(params_dir, param_names):
def _load_params(param_name, params_file, loader, mode):
with tf.io.gfile.GFile(params_file, mode) as f:
params = loader(f)
logging.info('Found params file %s', params_file)
return params[param_name]
for param_name in param_names:
try:
try:
return _load_params(param_name, os.path.join(params_dir, 'params.json'),
json.load, 'r')
except tf.errors.NotFoundError:
logging.info('%s does not exist in %s', 'params.json', params_dir)
try:
return _load_params(param_name, os.path.join(params_dir, 'params.pkl'),
pkl.load, 'rb')
except tf.errors.NotFoundError:
logging.info('%s does not exist in %s', 'params.pkl', params_dir)
except KeyError:
pass
raise ValueError('Did not find any of the following keys: %s' % param_names)
def get_paths_to_events(root_dir,
restrict_to_architectures,
restrict_to_pretrained_source,
restrict_to_variants=None):
params_dir = os.path.join(root_dir, 'params')
summary_dir = os.path.join(root_dir, 'summaries')
logging.info('Looking for parameters in params_dir: %s', params_dir)
logging.info('Looking for summaries in summary_dir: %s', summary_dir)
def get_variant_architecture(name):
variant_params_dir = os.path.join(params_dir, name)
architecture = get_value_from_params_dir(
variant_params_dir,
(
'_gin.Learner.embedding_fn',
# The following are for backwards compatibility.
'_gin.Trainer.embedding_network',
'_gin.LearnerConfig.embedding_network',
))
return architecture
def get_variant_pretrained_source(name):
variant_params_dir = os.path.join(params_dir, name)
pretrained_source = get_value_from_params_dir(
variant_params_dir, '_gin.Trainer.pretrained_source')
if not pretrained_source:
# Backwards compatibility.
pretrained_source = get_value_from_params_dir(
variant_params_dir, '_gin.LearnerConfig.pretrained_source')
return pretrained_source
def keep_variant(name):
value_error_msg = (
'Requested to restrict to an architecture or '
'pretrained_source but the given experiment does not '
'have its params recorded. Looked in: {}'.format(params_dir))
if restrict_to_architectures:
architecture = get_variant_architecture(name)
if architecture is None:
raise ValueError(value_error_msg)
valid_architecture = (not restrict_to_architectures or
architecture in restrict_to_architectures)
if restrict_to_pretrained_source:
pretrained_source = get_variant_pretrained_source(name)
if pretrained_source is None:
raise ValueError(value_error_msg)
valid_pretrained_source = (
not restrict_to_pretrained_source or
pretrained_source == restrict_to_pretrained_source)
valid_variant_name = True
if restrict_to_variants is not None:
valid_variant_name = name in restrict_to_variants
return (valid_architecture and valid_pretrained_source and
valid_variant_name)
variant_names = [
fname for fname in tf.io.gfile.listdir(summary_dir)
if tf.io.gfile.isdir(os.path.join(summary_dir, fname))
]
if not variant_names:
# Maybe there are no variants, and we are already in the directory that
# contains the summaries. In this case, we consider that the current
# directory (.) is the only variant.
variant_names = ['.']
# Further filter variant names based on the given restrictions.
variant_names = [name for name in variant_names if keep_variant(name)]
if not variant_names:
raise ValueError('Found no subdirectories in {}. Was expecting a '
'subdirectory per variant.'.format(summary_dir))
variant_paths = [
os.path.join(summary_dir, variant_dir) for variant_dir in variant_names
]
event_paths = {}
for variant_path, variant_name in zip(variant_paths, variant_names):
event_filenames = [
f_name for f_name in tf.io.gfile.listdir(variant_path)
if f_name.startswith('events.out.tfevents')
]
if len(event_filenames) < 1:
logging.warn('Skipping empty variant %s.', variant_path)
logging.info(
'Was expecting at least one event file '
'in directory %s. Instead, found %d.', variant_path,
len(event_filenames))
continue
event_paths[variant_name] = [
os.path.join(variant_path, event_filename)
for event_filename in event_filenames
]
logging.info('Found event files for variants: %s', list(event_paths.keys()))
return event_paths
# TODO(crisnv): add smooth_type='uniform' that defines the smooth policy
def moving_average(x, smooth_window):
conv_filter = getattr(moving_average, 'conv_filter', None)
if conv_filter is None or (moving_average.conv_filter_size != smooth_window):
moving_average.conv_filter = np.ones((smooth_window,)) / smooth_window
moving_average.conv_filter_size = smooth_window
# if smooth_window is even, pad accordingly to keep stream size
x = np.pad(x, (smooth_window // 2, smooth_window - 1 - (smooth_window // 2)),
'reflect')
return np.convolve(x, moving_average.conv_filter, mode='valid')
def extract_best_from_event_file(event_path, smooth_window, log_details=False):
steps, valid_accs = [], []
try:
for event in tf.train.summary_iterator(event_path):
step = event.step
for value in event.summary.value:
if any(
valid_tag in value.tag for valid_tag in VALIDATION_ACCURACY_TAGS):
steps.append(step)
valid_accs.append(value.simple_value)
except tf.errors.DataLossError:
if log_details:
tf.logging.info(
'Omitting events from event_path {} because '
'tf.train.summary_iterator(event_path) failed.'.format(event_path))
return 0, 0
if not valid_accs:
# Could happen if there is no DataLossError above but for some reason
# there is no validation accuracy tag found in the summary values.
tf.logging.info(
'Did not find any validation accuracy tags ({}) in event_path {}'
.format(' or '.join(VALIDATION_ACCURACY_TAGS), event_path))
return 0, 0
if smooth_window > 1:
valid_accs = moving_average(valid_accs, smooth_window)
argmax_ind = np.argmax(valid_accs)
best_acc = valid_accs[argmax_ind]
best_step = steps[argmax_ind]
if log_details:
tf.logging.info('Successfully read event_path {} with best_acc {}'.format(
event_path, best_acc))
return best_acc, best_step
def extract_best_from_variant(event_paths, smooth_window):
best_step = best_acc = -1
for event_path in event_paths:
best_acc_, best_step_ = extract_best_from_event_file(
event_path, smooth_window)
if best_acc_ > best_acc:
best_acc = best_acc_
best_step = best_step_
if best_acc <= 0:
raise RuntimeError('Something went wrong with the summary event reading.')
return best_acc, best_step
def main(argv):
del argv
experiment_paths = [
os.path.join(FLAGS.all_experiments_root, basename)
for basename in FLAGS.experiment_dir_basenames.split(',')
]
# Perform model selection for each provided experiment root.
for root_experiment_dir in experiment_paths:
stars_string = '\n**************************************\n'
architecture_string = ''
if FLAGS.restrict_to_architectures:
architecture_string = ' out of the {} variants'.format(
FLAGS.restrict_to_architectures)
logging.info('%sSelecting the best variant for: %s%s.%s', stars_string,
root_experiment_dir, architecture_string, stars_string)
if FLAGS.restrict_to_variants_by_range and FLAGS.restrict_to_variants:
raise ValueError('Please provide only one of '
'FLAGS.restrict_to_variants_by_range and '
'FLAGS.restrict_to_variants, not both.')
restrict_to_variants = None
if FLAGS.restrict_to_variants_by_range:
start, end = FLAGS.restrict_to_variants_by_range.split(',')
start, end = int(start), int(end)
restrict_to_variants = set(
[str(variant_id) for variant_id in range(start, end + 1)])
if FLAGS.restrict_to_variants:
restrict_to_variants = set(FLAGS.restrict_to_variants.split(','))
restrict_to_architectures = []
if FLAGS.restrict_to_architectures:
restrict_to_architectures = FLAGS.restrict_to_architectures.split(',')
smooth_window = FLAGS.smooth_window
event_paths = get_paths_to_events(
root_experiment_dir,
restrict_to_architectures,
FLAGS.restrict_to_pretrained_source,
restrict_to_variants=restrict_to_variants)
# Read the event file of each variant to find the highest mean validation
# accuracy reached with it.
best_variant = ''
best_valid_acc = -1
best_step = -1
for variant_name, event_path in event_paths.items():
best_valid_acc_, best_step_ = extract_best_from_variant(
event_path, smooth_window)
if best_valid_acc_ > best_valid_acc:
best_variant = variant_name
best_valid_acc = best_valid_acc_
best_step = best_step_
output_dict = {
'best_variant': best_variant,
'best_valid_acc': best_valid_acc,
'best_update_num': best_step
}
# Create a more informative description if necessary.
description = FLAGS.description
if FLAGS.restrict_to_architectures and FLAGS.description == 'best':
description += '_{}'.format(FLAGS.restrict_to_architectures)
if (FLAGS.restrict_to_pretrained_source and FLAGS.description == 'best'):
if FLAGS.restrict_to_pretrained_source == 'scratch':
description += '_trained_from_scratch'
else:
description += '_pretrained_on_{}'.format(
FLAGS.restrict_to_pretrained_source)
if FLAGS.smooth_window > 1:
description += '_smoothed_by_window_{}'.format(smooth_window)
output_path_pklz = os.path.join(root_experiment_dir,
'{}.pklz'.format(description))
with tf.io.gfile.GFile(output_path_pklz, 'wb') as f:
pkl.dump(output_dict, f, protocol=pkl.HIGHEST_PROTOCOL)
# Also write this info as a .txt file for easier reading.
output_path_txt = os.path.join(root_experiment_dir,
'{}.txt'.format(description))
with tf.io.gfile.GFile(output_path_txt, 'w') as f:
f.write(
'best_variant: {}\nbest_valid_acc: {}\nbest_update_num: {}\n'.format(
best_variant, best_valid_acc, best_step))
logging.info(
'Best variant: %s. Best valid acc: %s. Best update num: %d. '
'Just wrote this info to %s and %s', best_variant, best_valid_acc,
best_step, output_path_pklz, output_path_txt)
if __name__ == '__main__':
logging.set_verbosity(logging.INFO)
tf.app.run(main)
| true | true |
f731b83aa10a8eac18891fcac6e5c14b63d9aa57 | 355 | py | Python | nikebot/__version__.py | olegaobini/NikeBot | fec0943c9dd215d05403bedfc69ef8a8ac0b228c | [
"MIT"
] | null | null | null | nikebot/__version__.py | olegaobini/NikeBot | fec0943c9dd215d05403bedfc69ef8a8ac0b228c | [
"MIT"
] | null | null | null | nikebot/__version__.py | olegaobini/NikeBot | fec0943c9dd215d05403bedfc69ef8a8ac0b228c | [
"MIT"
] | null | null | null | __logo__ = """
NikeBot
"""
__title__ = 'nikebotandroid'
__description__ = 'A retail automation bot for the Nike mobile app'
__url__ = 'https: // github.com/olegaobini/NikeBot'
__version__ = '0.0.1'
__debug_mode__ = False
__author__ = 'Olega Obini'
__author_email__ = 'obiniolega@gmail.com'
__license__ = 'MIT'
__copyright__ = 'Copyright 2022 Olega Obini'
| 27.307692 | 67 | 0.752113 | __logo__ = """
NikeBot
"""
__title__ = 'nikebotandroid'
__description__ = 'A retail automation bot for the Nike mobile app'
__url__ = 'https: // github.com/olegaobini/NikeBot'
__version__ = '0.0.1'
__debug_mode__ = False
__author__ = 'Olega Obini'
__author_email__ = 'obiniolega@gmail.com'
__license__ = 'MIT'
__copyright__ = 'Copyright 2022 Olega Obini'
| true | true |
f731b843168340ad5c3d5f4674b0c14240c39751 | 5,587 | py | Python | gremlin-python/src/main/python/tests/process/test_traversal.py | helpspace-co/tinkerpop | a31e8f402237de3491397d4f8bbd6a9761cd9068 | [
"Apache-2.0"
] | 1,425 | 2016-06-13T06:08:39.000Z | 2022-03-28T09:02:43.000Z | gremlin-python/src/main/python/tests/process/test_traversal.py | helpspace-co/tinkerpop | a31e8f402237de3491397d4f8bbd6a9761cd9068 | [
"Apache-2.0"
] | 1,049 | 2016-06-10T10:25:59.000Z | 2022-03-30T11:25:44.000Z | gremlin-python/src/main/python/tests/process/test_traversal.py | helpspace-co/tinkerpop | a31e8f402237de3491397d4f8bbd6a9761cd9068 | [
"Apache-2.0"
] | 732 | 2016-06-13T20:53:51.000Z | 2022-03-30T06:49:29.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
__author__ = 'Marko A. Rodriguez (http://markorodriguez.com)'
from pytest import fail
from gremlin_python.structure.graph import Graph
from gremlin_python.process.anonymous_traversal import traversal
from gremlin_python.process.traversal import P
from gremlin_python.process.traversal import Binding, Bindings
from gremlin_python.process.graph_traversal import __
class TestTraversal(object):
    """Unit tests for traversal bytecode construction on an unconnected graph."""

    def test_bytecode(self):
        """Bytecode should record bindings, source and step instructions in order."""
        g = traversal().withGraph(Graph())
        bytecode = g.V().out("created").bytecode
        assert 0 == len(bytecode.bindings.keys())
        assert 0 == len(bytecode.source_instructions)
        assert 2 == len(bytecode.step_instructions)
        assert "V" == bytecode.step_instructions[0][0]
        assert "out" == bytecode.step_instructions[1][0]
        assert "created" == bytecode.step_instructions[1][1]
        assert 1 == len(bytecode.step_instructions[0])
        assert 2 == len(bytecode.step_instructions[1])
        # configuration steps like withSack() land in source_instructions
        bytecode = g.withSack(1).E().groupCount().by("weight").bytecode
        assert 0 == len(bytecode.bindings.keys())
        assert 1 == len(bytecode.source_instructions)
        assert "withSack" == bytecode.source_instructions[0][0]
        assert 1 == bytecode.source_instructions[0][1]
        assert 3 == len(bytecode.step_instructions)
        assert "E" == bytecode.step_instructions[0][0]
        assert "groupCount" == bytecode.step_instructions[1][0]
        assert "by" == bytecode.step_instructions[2][0]
        assert "weight" == bytecode.step_instructions[2][1]
        assert 1 == len(bytecode.step_instructions[0])
        assert 1 == len(bytecode.step_instructions[1])
        assert 2 == len(bytecode.step_instructions[2])
        # Bindings.of() arguments are collected into bytecode.bindings and the
        # bound value is carried in the instruction as a Binding
        bytecode = g.V(Bindings.of('a', [1, 2, 3])) \
            .out(Bindings.of('b', 'created')) \
            .where(__.in_(Bindings.of('c', 'created'), Bindings.of('d', 'knows')) \
            .count().is_(Bindings.of('e', P.gt(2)))).bytecode
        assert 5 == len(bytecode.bindings.keys())
        assert [1,2,3] == bytecode.bindings['a']
        assert 'created' == bytecode.bindings['b']
        assert 'created' == bytecode.bindings['c']
        assert 'knows' == bytecode.bindings['d']
        assert P.gt(2) == bytecode.bindings['e']
        assert Binding('b', 'created') == bytecode.step_instructions[1][1]
        assert 'binding[b=created]' == str(bytecode.step_instructions[1][1])
        assert isinstance(hash(bytecode.step_instructions[1][1]), int)

    def test_P(self):
        """Predicate composition should respect the order of operations."""
        # verify that the order of operations is respected
        assert "and(eq(a),lt(b))" == str(P.eq("a").and_(P.lt("b")))
        assert "and(or(lt(b),gt(c)),neq(d))" == str(P.lt("b").or_(P.gt("c")).and_(P.neq("d")))
        assert "and(or(lt(b),gt(c)),or(neq(d),gte(e)))" == str(
            P.lt("b").or_(P.gt("c")).and_(P.neq("d").or_(P.gte("e"))))

    def test_anonymous_traversal(self):
        """Anonymous (__) traversals should carry steps but no source instructions."""
        bytecode = __.__(1).bytecode
        assert 0 == len(bytecode.bindings.keys())
        assert 0 == len(bytecode.source_instructions)
        assert 1 == len(bytecode.step_instructions)
        assert "inject" == bytecode.step_instructions[0][0]
        assert 1 == bytecode.step_instructions[0][1]
        # __.start() yields a completely empty anonymous traversal
        bytecode = __.start().bytecode
        assert 0 == len(bytecode.bindings.keys())
        assert 0 == len(bytecode.source_instructions)
        assert 0 == len(bytecode.step_instructions)

    def test_clone_traversal(self):
        """clone() should deep-copy bytecode so later mutations don't leak across copies."""
        g = traversal().withGraph(Graph())
        original = g.V().out("created")
        clone = original.clone().out("knows")
        cloneClone = clone.clone().out("created")
        assert 2 == len(original.bytecode.step_instructions)
        assert 3 == len(clone.bytecode.step_instructions)
        assert 4 == len(cloneClone.bytecode.step_instructions)
        # mutating the originals must not affect the untouched cloneClone
        original.has("person", "name", "marko")
        clone.V().out()
        assert 3 == len(original.bytecode.step_instructions)
        assert 5 == len(clone.bytecode.step_instructions)
        assert 4 == len(cloneClone.bytecode.step_instructions)

    def test_no_sugar_for_magic_methods(self):
        """Attribute sugar must not intercept Python magic-method lookups."""
        g = traversal().withGraph(Graph())
        t = g.V().age
        assert 2 == len(t.bytecode.step_instructions)
        try:
            t = g.V().__len__
            fail("can't do sugar with magic")
        except AttributeError as err:
            assert str(err) == 'Python magic methods or keys starting with double underscore cannot be used for Gremlin sugar - prefer values(__len__)'

    def test_enforce_anonymous_child_traversal(self):
        """Child traversals must be anonymous (__), not spawned from a bound source."""
        g = traversal().withGraph(Graph())
        g.V(0).addE("self").to(__.V(1))
        try:
            g.V(0).addE("self").to(g.V(1))
            # BUGFIX: was `assert false` (lowercase -> NameError if reached).
            # Use pytest's fail(), matching test_no_sugar_for_magic_methods,
            # so a missing TypeError produces a clear failure message.
            fail("should not allow a child traversal spawned from a bound source")
        except TypeError:
            pass
| 42.325758 | 151 | 0.642742 |
__author__ = 'Marko A. Rodriguez (http://markorodriguez.com)'
from pytest import fail
from gremlin_python.structure.graph import Graph
from gremlin_python.process.anonymous_traversal import traversal
from gremlin_python.process.traversal import P
from gremlin_python.process.traversal import Binding, Bindings
from gremlin_python.process.graph_traversal import __
class TestTraversal(object):
def test_bytecode(self):
g = traversal().withGraph(Graph())
bytecode = g.V().out("created").bytecode
assert 0 == len(bytecode.bindings.keys())
assert 0 == len(bytecode.source_instructions)
assert 2 == len(bytecode.step_instructions)
assert "V" == bytecode.step_instructions[0][0]
assert "out" == bytecode.step_instructions[1][0]
assert "created" == bytecode.step_instructions[1][1]
assert 1 == len(bytecode.step_instructions[0])
assert 2 == len(bytecode.step_instructions[1])
bytecode = g.withSack(1).E().groupCount().by("weight").bytecode
assert 0 == len(bytecode.bindings.keys())
assert 1 == len(bytecode.source_instructions)
assert "withSack" == bytecode.source_instructions[0][0]
assert 1 == bytecode.source_instructions[0][1]
assert 3 == len(bytecode.step_instructions)
assert "E" == bytecode.step_instructions[0][0]
assert "groupCount" == bytecode.step_instructions[1][0]
assert "by" == bytecode.step_instructions[2][0]
assert "weight" == bytecode.step_instructions[2][1]
assert 1 == len(bytecode.step_instructions[0])
assert 1 == len(bytecode.step_instructions[1])
assert 2 == len(bytecode.step_instructions[2])
bytecode = g.V(Bindings.of('a', [1, 2, 3])) \
.out(Bindings.of('b', 'created')) \
.where(__.in_(Bindings.of('c', 'created'), Bindings.of('d', 'knows')) \
.count().is_(Bindings.of('e', P.gt(2)))).bytecode
assert 5 == len(bytecode.bindings.keys())
assert [1,2,3] == bytecode.bindings['a']
assert 'created' == bytecode.bindings['b']
assert 'created' == bytecode.bindings['c']
assert 'knows' == bytecode.bindings['d']
assert P.gt(2) == bytecode.bindings['e']
assert Binding('b', 'created') == bytecode.step_instructions[1][1]
assert 'binding[b=created]' == str(bytecode.step_instructions[1][1])
assert isinstance(hash(bytecode.step_instructions[1][1]), int)
def test_P(self):
assert "and(eq(a),lt(b))" == str(P.eq("a").and_(P.lt("b")))
assert "and(or(lt(b),gt(c)),neq(d))" == str(P.lt("b").or_(P.gt("c")).and_(P.neq("d")))
assert "and(or(lt(b),gt(c)),or(neq(d),gte(e)))" == str(
P.lt("b").or_(P.gt("c")).and_(P.neq("d").or_(P.gte("e"))))
def test_anonymous_traversal(self):
bytecode = __.__(1).bytecode
assert 0 == len(bytecode.bindings.keys())
assert 0 == len(bytecode.source_instructions)
assert 1 == len(bytecode.step_instructions)
assert "inject" == bytecode.step_instructions[0][0]
assert 1 == bytecode.step_instructions[0][1]
bytecode = __.start().bytecode
assert 0 == len(bytecode.bindings.keys())
assert 0 == len(bytecode.source_instructions)
assert 0 == len(bytecode.step_instructions)
def test_clone_traversal(self):
g = traversal().withGraph(Graph())
original = g.V().out("created")
clone = original.clone().out("knows")
cloneClone = clone.clone().out("created")
assert 2 == len(original.bytecode.step_instructions)
assert 3 == len(clone.bytecode.step_instructions)
assert 4 == len(cloneClone.bytecode.step_instructions)
original.has("person", "name", "marko")
clone.V().out()
assert 3 == len(original.bytecode.step_instructions)
assert 5 == len(clone.bytecode.step_instructions)
assert 4 == len(cloneClone.bytecode.step_instructions)
def test_no_sugar_for_magic_methods(self):
g = traversal().withGraph(Graph())
t = g.V().age
assert 2 == len(t.bytecode.step_instructions)
try:
t = g.V().__len__
fail("can't do sugar with magic")
except AttributeError as err:
assert str(err) == 'Python magic methods or keys starting with double underscore cannot be used for Gremlin sugar - prefer values(__len__)'
def test_enforce_anonymous_child_traversal(self):
g = traversal().withGraph(Graph())
g.V(0).addE("self").to(__.V(1))
try:
g.V(0).addE("self").to(g.V(1))
assert false
except TypeError:
pass
| true | true |
f731b86cd45adfb32aa7de95f369a6218bdbc653 | 42,404 | py | Python | tests/test_data.py | sdrobert/pydrobert-pytorch | 7abad0dbb2e80b4267aebcee492aa9fd7d83ea3f | [
"Apache-2.0"
] | 14 | 2019-01-04T21:19:55.000Z | 2021-01-06T16:01:03.000Z | tests/test_data.py | sdrobert/pydrobert-pytorch | 7abad0dbb2e80b4267aebcee492aa9fd7d83ea3f | [
"Apache-2.0"
] | 6 | 2021-04-17T23:34:57.000Z | 2022-02-11T00:49:41.000Z | tests/test_data.py | sdrobert/pydrobert-pytorch | 7abad0dbb2e80b4267aebcee492aa9fd7d83ea3f | [
"Apache-2.0"
] | 1 | 2020-05-19T08:03:43.000Z | 2020-05-19T08:03:43.000Z | # Copyright 2021 Sean Robertson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from itertools import repeat
from io import StringIO
import pytest
import torch
import torch.utils.data
import pydrobert.torch.data as data
from pydrobert.torch import INDEX_PAD_VALUE
@pytest.mark.cpu
@pytest.mark.parametrize("left", [0, 1, 100])
@pytest.mark.parametrize("right", [0, 1, 100])
@pytest.mark.parametrize("T", [1, 5, 10])
def test_extract_window(left, right, T):
    """A window of (left + 1 + right) frames is cut around each frame, edge-padded."""
    # frame t of the signal is the constant row [t] * 10, so padded regions are
    # easy to recognize: left padding repeats frame 0, right padding frame T - 1
    signal = torch.arange(T).view(-1, 1).expand(-1, 10)
    for frame_idx in range(T):
        window = data.extract_window(signal, frame_idx, left, right)
        # how many window frames fall outside [0, T) on each side
        left_pad = max(left - frame_idx, 0)
        right_pad = max(frame_idx + right + 1 - T, 0)
        assert tuple(window.shape) == (1 + left + right, 10)
        if left_pad:
            assert torch.all(window[:left_pad] == torch.tensor([0]))
        if right_pad:
            assert torch.all(window[-right_pad:] == torch.tensor([T - 1]))
        # the unpadded middle must match the original signal frames exactly
        assert torch.all(
            window[left_pad : 1 + left + right - right_pad]
            == torch.arange(
                frame_idx - left + left_pad, frame_idx + right - right_pad + 1
            )
            .view(-1, 1)
            .expand(-1, 10)
        )
@pytest.mark.cpu
@pytest.mark.parametrize("num_utts", [1, 2, 10])
@pytest.mark.parametrize("file_prefix", ["prefix_", ""])
@pytest.mark.parametrize("eos", [1000, None])
@pytest.mark.parametrize("sos", [2000, None])
@pytest.mark.parametrize("feat_dtype", [torch.float, torch.int])
def test_valid_spect_data_set(
    temp_dir, num_utts, file_prefix, populate_torch_dir, sos, eos, feat_dtype
):
    """SpectDataSet should index on-disk feats/ali/ref, honoring prefix, sos/eos, subsets."""
    # phase 1: features only, no alignments or references
    feats, _, _, _, _, utt_ids = populate_torch_dir(
        temp_dir,
        num_utts,
        file_prefix=file_prefix,
        include_ali=False,
        include_ref=False,
        feat_dtype=feat_dtype,
    )
    # note that this'll just resave the same features if there's no file
    # prefix. If there is, these ought to be ignored by the data set
    populate_torch_dir(
        temp_dir, num_utts, include_ali=False, include_ref=False, feat_dtype=feat_dtype
    )
    # files in subdirectories of feat/ should also be ignored
    if not os.path.isdir(os.path.join(temp_dir, "feat", "fake")):
        os.makedirs(os.path.join(temp_dir, "feat", "fake"))
    torch.save(
        torch.randint(100, (10, 5), dtype=feat_dtype),
        os.path.join(temp_dir, "feat", "fake", file_prefix + "fake.pt"),
    )
    data_set = data.SpectDataSet(temp_dir, file_prefix=file_prefix, eos=eos)
    assert not data_set.has_ali and not data_set.has_ref
    assert len(utt_ids) == len(data_set.utt_ids)
    assert all(utt_a == utt_b for (utt_a, utt_b) in zip(utt_ids, data_set.utt_ids))
    assert all(
        ali_b is None and ref_b is None and torch.allclose(feat_a, feat_b)
        for (feat_a, (feat_b, ali_b, ref_b)) in zip(feats, data_set)
    )
    # phase 2: add alignments and references; expected refs are manually
    # framed with sos/eos rows (token id in column 0, -1 segment bounds)
    feats, alis, refs, _, _, utt_ids = populate_torch_dir(
        temp_dir, num_utts, file_prefix=file_prefix, feat_dtype=feat_dtype
    )
    if sos is not None:
        sos_sym = torch.full((3,), -1, dtype=torch.long)
        sos_sym[0] = sos
        sos_sym = sos_sym.unsqueeze(0)
        refs = [torch.cat([sos_sym, x]) for x in refs]
    if eos is not None:
        eos_sym = torch.full((3,), -1, dtype=torch.long)
        eos_sym[0] = eos
        eos_sym = eos_sym.unsqueeze(0)
        refs = [torch.cat([x, eos_sym]) for x in refs]
    data_set = data.SpectDataSet(temp_dir, file_prefix=file_prefix, sos=sos, eos=eos)
    assert data_set.has_ali and data_set.has_ref
    assert len(utt_ids) == len(data_set.utt_ids)
    assert all(utt_a == utt_b for (utt_a, utt_b) in zip(utt_ids, data_set.utt_ids))
    assert all(
        torch.all(ali_a == ali_b)
        and torch.all(ref_a == ref_b)
        and feat_a.dtype == feat_b.dtype
        and torch.allclose(feat_a, feat_b)
        for ((feat_a, ali_a, ref_a), (feat_b, ali_b, ref_b)) in zip(
            zip(feats, alis, refs), data_set
        )
    )
    # phase 3: subset_ids restricts the data set to the listed utterances
    subset_ids = data_set.utt_ids[: num_utts // 2]
    data_set = data.SpectDataSet(
        temp_dir, file_prefix=file_prefix, subset_ids=set(subset_ids), sos=sos, eos=eos
    )
    assert all(utt_a == utt_b for (utt_a, utt_b) in zip(subset_ids, data_set.utt_ids))
    assert all(
        torch.all(ali_a == ali_b)
        and torch.all(ref_a == ref_b)
        and torch.allclose(feat_a, feat_b)
        for ((feat_a, ali_a, ref_a), (feat_b, ali_b, ref_b)) in zip(
            zip(feats[: num_utts // 2], alis[: num_utts // 2], refs[: num_utts // 2]),
            data_set,
        )
    )
@pytest.mark.cpu
def test_spect_data_set_warnings(temp_dir):
    """Utterances missing either feats or ali are dropped; warn_on_missing reports them."""
    torch.manual_seed(1)
    feat_dir = os.path.join(temp_dir, "feat")
    ali_dir = os.path.join(temp_dir, "ali")
    os.makedirs(feat_dir)
    os.makedirs(ali_dir)
    # 'a' has feats only, 'c' has ali only, 'b' has both (the only survivor)
    torch.save(torch.rand(3, 3), os.path.join(feat_dir, "a.pt"))
    torch.save(torch.rand(4, 3), os.path.join(feat_dir, "b.pt"))
    torch.save(torch.randint(10, (4,), dtype=torch.long), os.path.join(ali_dir, "b.pt"))
    torch.save(torch.randint(10, (5,), dtype=torch.long), os.path.join(ali_dir, "c.pt"))
    data_set = data.SpectDataSet(temp_dir, warn_on_missing=False)
    assert data_set.has_ali
    assert data_set.utt_ids == ("b",)
    # default warn_on_missing=True should emit one warning per missing file
    with pytest.warns(UserWarning) as warnings:
        data_set = data.SpectDataSet(temp_dir)
    assert len(warnings) == 2
    assert any(str(x.message) == "Missing ali for uttid: 'a'" for x in warnings)
    assert any(str(x.message) == "Missing feat for uttid: 'c'" for x in warnings)
def test_spect_data_write_pdf(temp_dir, device):
    """write_pdf should save a CPU float tensor under pdfs/, by id or index."""
    torch.manual_seed(1)
    feat_dir = os.path.join(temp_dir, "feat")
    os.makedirs(feat_dir)
    torch.save(torch.rand(3, 3), os.path.join(feat_dir, "a.pt"))
    data_set = data.SpectDataSet(temp_dir)
    z = torch.randint(10, (4, 5), dtype=torch.long)
    if device == "cuda":
        data_set.write_pdf("b", z.cuda())
    else:
        data_set.write_pdf("b", z)
    zp = torch.load(os.path.join(temp_dir, "pdfs", "b.pt"))
    # what's written must be a float tensor on the CPU regardless of input
    assert isinstance(zp, torch.FloatTensor)
    assert torch.allclose(zp, z.float())
    # an integer first argument selects the utterance by index ('a' is 0)
    data_set.write_pdf(0, torch.rand(10, 4))
    assert os.path.exists(os.path.join(temp_dir, "pdfs", "a.pt"))
    # pdfs_dir overrides the default output directory
    data_set.write_pdf("c", z, pdfs_dir=os.path.join(temp_dir, "foop"))
    assert os.path.exists(os.path.join(temp_dir, "foop", "c.pt"))
@pytest.mark.parametrize("eos", [None, -1])
@pytest.mark.parametrize("sos", [None, -2])
def test_spect_data_write_hyp(temp_dir, device, sos, eos):
    """write_hyp should strip sos/eos framing and save a CPU long tensor under hyp/."""
    torch.manual_seed(1)
    feat_dir = os.path.join(temp_dir, "feat")
    os.makedirs(feat_dir)
    torch.save(torch.rand(3, 3), os.path.join(feat_dir, "a.pt"))
    data_set = data.SpectDataSet(temp_dir, sos=sos, eos=eos)
    z = torch.randint(10, (4, 3), dtype=torch.float)
    # zz is z wrapped with whatever sos/eos rows the data set is configured
    # with; write_hyp is expected to peel those back off
    zz = z
    if sos:
        zz = torch.cat([torch.full_like(zz, sos), zz])
    if eos:
        zz = torch.cat([zz, torch.full_like(z, eos)])
    if device == "cuda":
        data_set.write_hyp("b", zz.cuda())
    else:
        data_set.write_hyp("b", zz)
    zp = torch.load(os.path.join(temp_dir, "hyp", "b.pt"))
    # saved hypothesis is a long tensor equal to the unframed original
    assert isinstance(zp, torch.LongTensor)
    assert torch.all(zp == z.long())
    # integer first argument selects the utterance by index ('a' is 0)
    data_set.write_hyp(0, torch.randint(10, (11, 3)))
    assert os.path.exists(os.path.join(temp_dir, "hyp", "a.pt"))
    # hyp_dir overrides the default output directory
    data_set.write_hyp("c", z, hyp_dir=os.path.join(temp_dir, "foop"))
    assert os.path.exists(os.path.join(temp_dir, "foop", "c.pt"))
@pytest.mark.cpu
@pytest.mark.parametrize("eos", [None, 10000])
def test_spect_data_set_validity(temp_dir, eos):
    """validate_spect_data_set should flag (and with fix=True repair) malformed tensors."""
    torch.manual_seed(1)
    feat_dir = os.path.join(temp_dir, "feat")
    ali_dir = os.path.join(temp_dir, "ali")
    ref_dir = os.path.join(temp_dir, "ref")
    feats_a_pt = os.path.join(feat_dir, "a.pt")
    feats_b_pt = os.path.join(feat_dir, "b.pt")
    ali_a_pt = os.path.join(ali_dir, "a.pt")
    ali_b_pt = os.path.join(ali_dir, "b.pt")
    ref_a_pt = os.path.join(ref_dir, "a.pt")
    ref_b_pt = os.path.join(ref_dir, "b.pt")
    os.makedirs(feat_dir)
    os.makedirs(ali_dir)
    os.makedirs(ref_dir)
    # start from a fully valid pair of utterances
    torch.save(torch.rand(10, 4), feats_a_pt)
    torch.save(torch.rand(4, 4), feats_b_pt)
    torch.save(torch.randint(10, (10,), dtype=torch.long), ali_a_pt)
    torch.save(torch.randint(10, (4,), dtype=torch.long), ali_b_pt)
    # ref 'a': token ids with -1 (absent) segment boundary columns
    torch.save(
        torch.cat(
            [
                torch.randint(10, (11, 1), dtype=torch.long),
                torch.full((11, 2), -1, dtype=torch.long),
            ],
            -1,
        ),
        ref_a_pt,
    )
    torch.save(torch.tensor([[0, 3, 4], [1, 1, 2]]), ref_b_pt)
    data_set = data.SpectDataSet(temp_dir, eos=eos)
    data.validate_spect_data_set(data_set)
    # each stanza below overwrites one file with a defect, checks validation
    # raises, then (where applicable) that fix=True repairs it with a warning
    torch.save(torch.rand(4, 4).long(), feats_b_pt)
    with pytest.raises(ValueError, match="not the same tensor type"):
        data.validate_spect_data_set(data_set)
    torch.save(
        torch.rand(
            4,
        ),
        feats_b_pt,
    )
    with pytest.raises(ValueError, match="does not have two dimensions"):
        data.validate_spect_data_set(data_set)
    torch.save(torch.rand(4, 3), feats_b_pt)
    with pytest.raises(ValueError, match="has second dimension of size 3.*"):
        data.validate_spect_data_set(data_set)
    torch.save(torch.rand(4, 4), feats_b_pt)
    data.validate_spect_data_set(data_set)
    torch.save(torch.randint(10, (4,)).int(), ali_b_pt)
    with pytest.raises(ValueError, match="is not a long tensor"):
        data.validate_spect_data_set(data_set)
    with pytest.warns(UserWarning):
        data.validate_spect_data_set(data_set, True)  # will fix bad type
    data.validate_spect_data_set(data_set)  # fine after correction
    torch.save(torch.randint(10, (4, 1), dtype=torch.long), ali_b_pt)
    with pytest.raises(ValueError, match="does not have one dimension"):
        data.validate_spect_data_set(data_set)
    torch.save(torch.randint(10, (3,), dtype=torch.long), ali_b_pt)
    with pytest.raises(ValueError, match="does not have the same first"):
        data.validate_spect_data_set(data_set)
    torch.save(torch.randint(10, (4,), dtype=torch.long), ali_b_pt)
    data.validate_spect_data_set(data_set)
    torch.save(torch.Tensor([[0, 1, 2]]).int(), ref_b_pt)
    with pytest.raises(ValueError, match="is not a long tensor"):
        data.validate_spect_data_set(data_set)
    with pytest.warns(UserWarning):
        data.validate_spect_data_set(data_set, True)  # convert to long
    data.validate_spect_data_set(data_set)
    torch.save(torch.tensor([[0, -1, 2], [1, 1, 2]]), ref_b_pt)
    with pytest.raises(ValueError, match="invalid boundaries"):
        data.validate_spect_data_set(data_set)
    with pytest.warns(UserWarning):
        data.validate_spect_data_set(data_set, True)  # will remove end bound
    data.validate_spect_data_set(data_set)
    torch.save(torch.tensor([[0, 0, 1], [1, 3, 5]]), ref_b_pt)
    with pytest.raises(ValueError, match="invalid boundaries"):
        data.validate_spect_data_set(data_set)
    with pytest.warns(UserWarning):
        data.validate_spect_data_set(data_set, True)  # will trim 5 to 4
    data.validate_spect_data_set(data_set)
    torch.save(torch.tensor([[0, 0, 1], [1, 4, 5]]), ref_b_pt)
    with pytest.raises(ValueError, match="invalid boundaries"):
        data.validate_spect_data_set(data_set, True)  # will not trim b/c causes s == e
    torch.save(torch.tensor([1, 2, 3]), ref_b_pt)
    with pytest.raises(ValueError, match="were 2D"):
        data.validate_spect_data_set(data_set)
    # both refs 1D (no segment info) is valid again
    torch.save(torch.tensor([10, 4, 2, 5]), ref_a_pt)
    data.validate_spect_data_set(data_set)
@pytest.mark.gpu
def test_validate_spect_data_set_cuda(temp_dir):
    """CUDA-resident tensors should fail validation; fix=True moves them to CPU."""
    torch.manual_seed(29)
    feat_dir = os.path.join(temp_dir, "feat")
    ali_dir = os.path.join(temp_dir, "ali")
    ref_dir = os.path.join(temp_dir, "ref")
    feats_pt = os.path.join(feat_dir, "a.pt")
    ali_pt = os.path.join(ali_dir, "a.pt")
    ref_pt = os.path.join(ref_dir, "a.pt")
    os.makedirs(feat_dir)
    os.makedirs(ali_dir)
    os.makedirs(ref_dir)
    # all-CPU baseline passes
    torch.save(torch.rand(10, 5), feats_pt)
    torch.save(torch.randint(10, (10,), dtype=torch.long), ali_pt)
    torch.save(torch.tensor([1, 2, 3]), ref_pt)
    data_set = data.SpectDataSet(temp_dir)
    data.validate_spect_data_set(data_set)
    # a single CUDA tensor is enough to raise
    torch.save(torch.rand(10, 5).cuda(), feats_pt)
    with pytest.raises(ValueError, match="cuda"):
        data.validate_spect_data_set(data_set)
    with pytest.warns(UserWarning):
        data.validate_spect_data_set(data_set, True)  # to CPU
    data.validate_spect_data_set(data_set)
    # all three tensors on CUDA are likewise repaired in one pass
    torch.save(torch.rand(10, 5).cuda(), feats_pt)
    torch.save(torch.randint(10, (10,), dtype=torch.long).cuda(), ali_pt)
    torch.save(torch.tensor([1, 2, 3]).cuda(), ref_pt)
    with pytest.raises(ValueError, match="cuda"):
        data.validate_spect_data_set(data_set)
    with pytest.warns(UserWarning):
        data.validate_spect_data_set(data_set, True)  # to CPU
    data.validate_spect_data_set(data_set)
@pytest.mark.cpu
@pytest.mark.parametrize("processes", [0, 2])
def test_read_trn(processes):
    """read_trn should parse NIST trn lines, including {a / b} alternate blocks."""
    trn = StringIO()
    # simple case: whitespace-delimited tokens with trailing (utt_id)
    trn.write(
        """\
here is a simple example (a)
nothing should go wrong (b)
"""
    )
    trn.seek(0)
    act = data.read_trn(trn, processes=processes, chunk_size=1)
    assert act == [
        ("a", ["here", "is", "a", "simple", "example"]),
        ("b", ["nothing", "should", "go", "wrong"]),
    ]
    # harder case: nested alternates, stray braces/slashes, empty transcript
    trn.seek(0)
    trn.write(
        """\
here is an { example /with} some alternates (a)
} and /here/ is {something really / {really}} (stupid) { ignore this (b)
(c)
a11 (d)
"""
    )
    trn.seek(0)
    act = data.read_trn(trn, warn=False, processes=processes)
    # alternates become ([branch, branch, ...], -1, -1) entries; unmatched
    # braces and slashes outside a block are kept as literal tokens
    assert act == [
        (
            "a",
            [
                "here",
                "is",
                "an",
                ([["example"], ["with"]], -1, -1),
                "some",
                "alternates",
            ],
        ),
        (
            "b",
            [
                "}",
                "and",
                "/here/",
                "is",
                ([["something", "really"], [[["really"]]]], -1, -1),
                "(stupid)",
            ],
        ),
        ("c", []),
        ("d", ["a11"]),
    ]
@pytest.mark.cpu
def test_read_ctm():
    """read_ctm should parse (utt, chan, start, dur, word) lines into sorted segments."""
    ctm = StringIO()
    # ';;' starts a comment; entries may arrive out of order
    ctm.write(
        """\
utt1 A 0.0 0.1 a
utt1 A 0.5 0.1 c ;; ctm files should always be ordered, but we tolerate
;; different orders
utt2 B 0.1 1.0 d
utt1 B 0.4 0.3 b
;; utt2 A 0.2 1.0 f
"""
    )
    ctm.seek(0)
    act = data.read_ctm(ctm)
    # durations are converted to (word, start, end) and sorted per utterance
    assert act == [
        ("utt1", [("a", 0.0, 0.1), ("b", 0.4, 0.7), ("c", 0.5, 0.6)]),
        ("utt2", [("d", 0.1, 1.1)]),
    ]
    ctm.seek(0)
    # a (wav, channel) -> utt_id map splits channels into separate utterances
    act = data.read_ctm(
        ctm, {("utt1", "A"): "foo", ("utt1", "B"): "bar", ("utt2", "B"): "baz"}
    )
    assert act == [
        ("foo", [("a", 0.0, 0.1), ("c", 0.5, 0.6)]),
        ("baz", [("d", 0.1, 1.1)]),
        ("bar", [("b", 0.4, 0.7)]),
    ]
    # a negative start time is rejected
    with pytest.raises(ValueError):
        ctm.write("utt3 -0.1 1.0 woop\n")
        ctm.seek(0)
        data.read_ctm(ctm)
@pytest.mark.cpu
def test_write_trn():
    """write_trn should serialize transcripts back to trn lines, re-emitting alternates."""
    trn = StringIO()
    transcripts = [
        ("a", ["again", "a", "simple", "example"]),
        ("b", ["should", "get", "right", "no", "prob"]),
    ]
    data.write_trn(transcripts, trn)
    trn.seek(0)
    assert (
        """\
again a simple example (a)
should get right no prob (b)
"""
        == trn.read()
    )
    trn.seek(0)
    trn.truncate()
    # segment times on tokens/alternates are dropped on write; nested
    # alternate lists become nested { ... / ... } blocks
    transcripts = [
        (
            " c ",
            [
                ("unnecessary", -1, -1),
                ([["complexity", [["can"]]], ["also", "be"]], 10, 4),
                "handled",
            ],
        ),
        ("d", []),
        ("e", ["a11"]),
    ]
    data.write_trn(transcripts, trn)
    trn.seek(0)
    assert (
        """\
unnecessary { complexity { can } / also be } handled ( c )
(d)
a11 (e)
"""
        == trn.read()
    )
@pytest.mark.cpu
def test_write_ctm():
    """write_ctm should emit sorted (utt, chan, start, dur, word) lines, mapping ids."""
    ctm = StringIO()
    transcripts = [
        (
            "c",
            [
                ("here", 0.1, 0.2),
                ("are", 0.3, 0.5),
                ("some", 0.2, 0.4),
                ("unordered", 0.5, 0.5),
                ("tokens", 10.0, 1000),
            ],
        ),
        ("b", []),
        ("a", [("hullo", 0.0, 10.0111)]),
    ]
    data.write_ctm(transcripts, ctm)
    ctm.seek(0)
    # output is sorted by utterance then start time; (start, end) becomes
    # (start, duration); default channel is "A"
    assert (
        """\
a A 0.0 10.0111 hullo
c A 0.1 0.1 here
c A 0.2 0.2 some
c A 0.3 0.2 are
c A 0.5 0.0 unordered
c A 10.0 990.0 tokens
"""
        == ctm.read()
    )
    ctm.seek(0)
    ctm.truncate()
    # utt_id -> (wav, channel) map renames utterances in the output
    data.write_ctm(
        transcripts,
        ctm,
        {"a": ("last", "A"), "b": ("middle", "B"), "c": ("first", "C")},
    )
    ctm.seek(0)
    assert (
        """\
first C 0.1 0.1 here
first C 0.2 0.2 some
first C 0.3 0.2 are
first C 0.5 0.0 unordered
first C 10.0 990.0 tokens
last A 0.0 10.0111 hullo
"""
        == ctm.read()
    )
    # tokens without segment info (end < start here) cannot be written as ctm
    transcripts.append(("foo", [("a", 0.1, 0.2), ("b", 0.2, 0.1)]))
    with pytest.raises(ValueError):
        data.write_ctm(transcripts, ctm)
@pytest.mark.cpu
@pytest.mark.parametrize(
    "transcript,token2id,unk,skip_frame_times,exp",
    [
        # empty transcript -> empty (0, 3) tensor
        ([], None, None, False, torch.LongTensor(0, 3)),
        # ids passed through; skip_frame_times drops the boundary columns
        (
            [1, 2, 3, 4],
            None,
            None,
            True,
            torch.LongTensor([1, 2, 3, 4]),
        ),
        # token2id maps strings; (token, start, end) keeps its boundaries
        (
            [1, ("a", 4, 10), "a", 3],
            {"a": 2},
            None,
            False,
            torch.LongTensor([[1, -1, -1], [2, 4, 10], [2, -1, -1], [3, -1, -1]]),
        ),
        # out-of-vocabulary tokens fall back to the unk id
        (
            ["foo", 1, "bar"],
            {"foo": 0, "baz": 3},
            "baz",
            False,
            torch.LongTensor([[0, -1, -1], [3, -1, -1], [3, -1, -1]]),
        ),
    ],
)
def test_transcript_to_token(transcript, token2id, unk, skip_frame_times, exp):
    """Transcripts should map to token tensors; unmappable tokens without unk raise."""
    act = data.transcript_to_token(
        transcript, token2id, unk=unk, skip_frame_times=skip_frame_times
    )
    assert torch.all(exp == act)
    # "foo" is unmappable in every case once unk isn't in play
    transcript = ["foo"] + transcript
    with pytest.raises(Exception):
        data.transcript_to_token(transcript, token2id)
@pytest.mark.cpu
def test_transcript_to_token_frame_shift():
    """Segment times in seconds should become frame indices per the frame shift."""
    # (id, start sec, end sec) triples; a bare id carries no segment info
    transcript = [
        (12, 0.5, 0.81),
        420,
        (1, 2.1, 2.2),
        (3, 2.8, 2.815),
        (12, 2.9, 3.0025),
    ]
    # typical 10ms shift: a frame every hundredth of a second, so seconds are
    # multiplied by 100; half-frames round up and quarter-frames round down
    expected = torch.LongTensor(
        [[12, 50, 81], [420, -1, -1], [1, 210, 220], [3, 280, 282], [12, 290, 300]]
    )
    assert torch.allclose(
        data.transcript_to_token(transcript, frame_shift_ms=10), expected
    )
    # raw waveform at 8000Hz: every sample is a "frame", so frames/msec =
    # 1000 / sample_rate_hz = 1 / 8
    expected = torch.LongTensor(
        [
            [12, 4000, 6480],
            [420, -1, -1],
            [1, 16800, 17600],
            [3, 22400, 22520],
            [12, 23200, 24020],
        ]
    )
    assert torch.allclose(
        data.transcript_to_token(transcript, frame_shift_ms=1 / 8), expected
    )
@pytest.mark.cpu
@pytest.mark.parametrize(
    "tok,id2token,exp",
    [
        # empty (0, 3) tensor -> empty transcript
        (torch.LongTensor(0, 3), None, []),
        # -1 boundary columns are dropped, leaving bare ids
        (
            torch.LongTensor([[1, -1, -1], [2, -1, -1], [3, -1, -1], [4, -1, -1]]),
            None,
            [1, 2, 3, 4],
        ),
        # id2token maps known ids to strings; boundaries survive as tuples
        (
            torch.LongTensor([[1, 3, 4], [3, 4, 5], [2, -1, -1]]),
            {1: "a", 2: "b"},
            [("a", 3, 4), (3, 4, 5), "b"],
        ),
        # 1D tensors and (N, 1) tensors both decode to plain id lists
        (torch.tensor(range(10)), None, list(range(10))),
        (torch.tensor(range(5)).unsqueeze(-1), None, list(range(5))),
    ],
)
def test_token_to_transcript(tok, id2token, exp):
    """Token tensors should decode back into transcripts, mapping ids where possible."""
    act = data.token_to_transcript(tok, id2token)
    assert exp == act
@pytest.mark.cpu
def test_token_to_transcript_frame_shift():
    """Frame boundaries should convert back to seconds per the frame shift."""
    # rows are (id, start frame, end frame); a -1 start means no segment info
    token_tensor = torch.LongTensor([[1, -1, 10], [2, 1000, 2000], [3, 12345, 678910]])
    # typical 10ms shift: 100 frames per second, so frame counts divide by 100
    actual = data.token_to_transcript(token_tensor, frame_shift_ms=10)
    expected = [1, (2, 10.0, 20.0), (3, 123.45, 6789.10)]
    assert actual == expected
    # raw samples at 8000Hz: 8 samples per millisecond, so the shift is 1/8 ms
    actual = data.token_to_transcript(token_tensor, frame_shift_ms=1 / 8)
    expected = [
        1,
        (2, 1000 / 8000, 2000 / 8000),
        (3, 12345 / 8000, 678910 / 8000),
    ]
    assert actual == expected
@pytest.mark.cpu
@pytest.mark.parametrize("reverse", [True, False])
def test_context_window_data_set(temp_dir, reverse):
    """Each frame becomes a (left + 1 + right) context window, optionally reversed."""
    torch.manual_seed(1)
    feat_dir = os.path.join(temp_dir, "feat")
    os.makedirs(feat_dir)
    # 2 frames x 10 filters; with left=1, right=1 each window has 3 frames
    a = torch.rand(2, 10)
    torch.save(a, os.path.join(feat_dir, "a.pt"))
    data_set = data.ContextWindowDataSet(temp_dir, 1, 1, reverse=reverse)
    windowed, _ = data_set[0]
    assert tuple(windowed.shape) == (2, 3, 10)
    # edge frames are padded by repeating the boundary frame
    if reverse:
        # [[a1, a0, a0], [a1, a1, a0]]
        assert torch.allclose(a[0], windowed[0, 1:])
        assert torch.allclose(a[1], windowed[0, 0])
        assert torch.allclose(a[0], windowed[1, 2])
        assert torch.allclose(a[1], windowed[1, :2])
    else:
        # [[a0, a0, a1], [a0, a1, a1]]
        assert torch.allclose(a[0], windowed[0, :2])
        assert torch.allclose(a[1], windowed[0, 2])
        assert torch.allclose(a[0], windowed[1, 0])
        assert torch.allclose(a[1], windowed[1, 1:])
@pytest.mark.cpu
def test_epoch_random_sampler(temp_dir):
    """EpochRandomSampler should permute deterministically per (base seed, epoch)."""
    source = torch.utils.data.TensorDataset(torch.arange(100))
    sampler = data.EpochRandomSampler(source, base_seed=1)
    first_epoch = tuple(sampler)
    second_epoch = tuple(sampler)
    # successive epochs shuffle differently, but each is a full permutation
    assert first_epoch != second_epoch
    assert sorted(first_epoch) == list(range(100))
    assert sorted(second_epoch) == list(range(100))
    # any past epoch can be replayed exactly
    assert first_epoch == tuple(sampler.get_samples_for_epoch(0))
    assert second_epoch == tuple(sampler.get_samples_for_epoch(1))
    # the initial epoch doesn't change what a given epoch samples
    sampler = data.EpochRandomSampler(source, init_epoch=10, base_seed=1)
    assert first_epoch == tuple(sampler.get_samples_for_epoch(0))
    assert second_epoch == tuple(sampler.get_samples_for_epoch(1))
    # without an explicit base seed, torch's manual seed makes it reproducible
    torch.manual_seed(5)
    sampler = data.EpochRandomSampler(source)
    first_epoch = tuple(sampler)
    torch.manual_seed(5)
    sampler = data.EpochRandomSampler(source)
    assert first_epoch == tuple(sampler)
@pytest.mark.cpu
@pytest.mark.parametrize(
    "feat_sizes",
    [((3, 5, 4), (4, 5, 4), (1, 5, 4)), ((2, 10, 5),) * 10],
    ids=["short", "long"],
)
@pytest.mark.parametrize("include_ali", [True, False])
def test_context_window_seq_to_batch(feat_sizes, include_ali):
    """Batching context-window utterances should concatenate along the frame axis."""
    torch.manual_seed(1)
    feats = tuple(torch.rand(*x) for x in feat_sizes)
    if include_ali:
        # one alignment label per frame (first dim of each feat tensor)
        alis = tuple(torch.randint(10, (x[0],), dtype=torch.long) for x in feat_sizes)
    else:
        alis = repeat(None)
    seq = zip(feats, alis)
    batch_feats, batch_ali = data.context_window_seq_to_batch(seq)
    # batching is a plain concatenation: no padding is needed because every
    # frame already carries its own fixed-size context window
    assert torch.allclose(torch.cat(feats), batch_feats)
    if include_ali:
        assert torch.all(torch.cat(alis) == batch_ali)
    else:
        assert batch_ali is None
@pytest.mark.cpu
@pytest.mark.parametrize("include_ali", [True, False])
@pytest.mark.parametrize(
    "include_ref,include_frame_shift", [(True, True), (True, False), (False, None)]
)
@pytest.mark.parametrize("batch_first", [True, False])
def test_spect_seq_to_batch(include_ali, include_ref, batch_first, include_frame_shift):
    """spect_seq_to_batch should sort by length, pad, and optionally batch time-first."""
    torch.manual_seed(1)
    # random number of utterances (3-9), each with a random length (1-29)
    feat_sizes = tuple(
        torch.randint(1, 30, (1,)).item()
        for _ in range(torch.randint(3, 10, (1,)).item())
    )
    feats = tuple(torch.randn(x, 5) for x in feat_sizes)
    if include_ali:
        alis = tuple(torch.randint(100, (x,), dtype=torch.long) for x in feat_sizes)
    else:
        alis = repeat(None)
    if include_ref:
        ref_sizes = tuple(
            torch.randint(1, 30, (1,)).item() for _ in range(len(feat_sizes))
        )
        # refs are (R,) token ids or (R, 3) rows with segment boundaries
        extra_dim = (3,) if include_frame_shift else tuple()
        refs = tuple(
            torch.randint(100, (x,) + extra_dim, dtype=torch.long) for x in ref_sizes
        )
    else:
        ref_sizes = repeat(None)
        refs = repeat(None)
    (
        batch_feats,
        batch_ali,
        batch_ref,
        batch_feat_sizes,
        batch_ref_sizes,
    ) = data.spect_seq_to_batch(zip(feats, alis, refs), batch_first=batch_first)
    # re-sort the inputs the way the batch function does: descending length
    feat_sizes, feats, alis, refs, ref_sizes = zip(
        *sorted(zip(feat_sizes, feats, alis, refs, ref_sizes), key=lambda x: -x[0])
    )
    assert torch.all(torch.tensor(feat_sizes) == batch_feat_sizes)
    if not batch_first:
        # normalize to batch-first so the per-utterance checks below apply
        batch_feats = batch_feats.transpose(0, 1)
        if include_ali:
            batch_ali = batch_ali.transpose(0, 1)
        if include_ref:
            batch_ref = batch_ref.transpose(0, 1)
    # feats are zero-padded past each utterance's true length
    assert all(
        torch.allclose(a[: b.shape[0]], b)
        and torch.allclose(a[b.shape[0] :], torch.tensor([0.0]))
        for (a, b) in zip(batch_feats, feats)
    )
    if include_ali:
        # alignments are padded with INDEX_PAD_VALUE
        assert all(
            torch.all(a[: b.shape[0]] == b)
            and torch.all(a[b.shape[0] :] == torch.tensor([INDEX_PAD_VALUE]))
            for (a, b) in zip(batch_ali, alis)
        )
    else:
        assert batch_ali is None
    if include_ref:
        # references are likewise padded with INDEX_PAD_VALUE
        assert torch.all(torch.tensor(ref_sizes) == batch_ref_sizes)
        assert all(
            torch.all(a[: b.shape[0]] == b)
            and torch.all(a[b.shape[0] :] == torch.tensor([INDEX_PAD_VALUE]))
            for (a, b) in zip(batch_ref, refs)
        )
    else:
        assert batch_ref is None
        assert batch_ref_sizes is None
@pytest.mark.cpu
@pytest.mark.parametrize("eos", [None, -1])
@pytest.mark.parametrize("sos", [None, -2])
@pytest.mark.parametrize("split_params", [True, False])
@pytest.mark.parametrize("include_frame_shift", [True, False])
@pytest.mark.parametrize("feat_dtype", [torch.float, torch.int])
def test_spect_training_data_loader(
    temp_dir,
    populate_torch_dir,
    sos,
    eos,
    split_params,
    include_frame_shift,
    feat_dtype,
):
    """SpectTrainingDataLoader shuffles per epoch but is reproducible.

    Checks that missing ali/ref sub-directories yield ``None`` batch entries,
    that two consecutive epochs (almost surely) differ in order but not in
    content, and that a given epoch replays identically when re-selected via
    ``epoch``/``seed`` — with workers, and with ``batch_first=False``.
    """
    torch.manual_seed(40)
    num_utts, batch_size, num_filts = 20, 5, 11
    populate_torch_dir(
        temp_dir,
        num_utts,
        num_filts=num_filts,
        include_frame_shift=include_frame_shift,
        feat_dtype=feat_dtype,
    )
    if split_params:
        # same configuration, split across two parameter objects
        params = data.DataSetParams(batch_size=batch_size)
        data_params = data.SpectDataParams(sos=sos, eos=eos)
    else:
        params = data.SpectDataSetParams(batch_size=batch_size, sos=sos, eos=eos)
        data_params = None
    # check missing either ali or ref gives None in batches
    data_loader = data.SpectTrainingDataLoader(
        temp_dir, params, data_params=data_params, ali_subdir=None, seed=2
    )
    assert next(iter(data_loader))[1] is None
    data_loader = data.SpectTrainingDataLoader(
        temp_dir, params, data_params=data_params, ref_subdir=None, seed=2
    )
    assert next(iter(data_loader))[2] is None
    assert next(iter(data_loader))[4] is None
    data_loader = data.SpectTrainingDataLoader(
        temp_dir, params, data_params=data_params, seed=2
    )
    def _get_epoch(sort):
        # Drain one epoch, padding feats/ali/ref out to the epoch-wide maxima
        # so two epochs can later be stacked and compared tensor-wise.
        # sort=True puts utterances into a canonical order, making shuffled
        # epochs comparable for content equality.
        ep_feats, ep_ali, ep_ref = [], [], []
        ep_feat_sizes, ep_ref_sizes = [], []
        max_T = 0
        max_R = 0
        batch_first = data_loader.batch_first
        for b_feats, b_ali, b_ref, b_feat_sizes, b_ref_sizes in data_loader:
            if not batch_first:
                b_feats = b_feats.transpose(0, 1)
                b_ali = b_ali.transpose(0, 1)
                b_ref = b_ref.transpose(0, 1)
            max_T = max(max_T, b_feat_sizes[0])
            R_star = max(b_ref_sizes)
            max_R = max(max_R, R_star)
            assert b_feats.shape[0] == batch_size
            assert b_ali.shape[0] == batch_size
            assert b_ref.shape[0] == batch_size
            assert b_feats.shape[-1] == num_filts
            # batches are padded to their longest utterance (first feat size)
            assert b_feats.shape[1] == b_feat_sizes[0]
            assert b_ali.shape[1] == b_feat_sizes[0]
            assert b_ref.shape[1] == R_star
            assert b_ref.dim() == (3 if include_frame_shift else 2)
            ep_feats += tuple(b_feats)
            ep_ali += tuple(b_ali)
            ep_ref += tuple(b_ref)
            ep_feat_sizes += tuple(b_feat_sizes)
            ep_ref_sizes += tuple(b_ref_sizes)
        assert len(ep_feats) == num_utts
        assert len(ep_ali) == num_utts
        for i in range(num_utts):
            ep_feats[i] = torch.nn.functional.pad(
                ep_feats[i], (0, 0, 0, max_T - ep_ali[i].shape[0])
            )
            ep_ali[i] = torch.nn.functional.pad(
                ep_ali[i], (0, max_T - ep_ali[i].shape[0]), value=INDEX_PAD_VALUE
            )
            if include_frame_shift:
                ep_ref[i] = torch.nn.functional.pad(
                    ep_ref[i],
                    (0, 0, 0, max_R - ep_ref[i].shape[0]),
                    value=INDEX_PAD_VALUE,
                )
            else:
                ep_ref[i] = torch.nn.functional.pad(
                    ep_ref[i], (0, max_R - ep_ref[i].shape[0]), value=INDEX_PAD_VALUE
                )
        if sort:
            ep_feats, ep_ali, ep_ref, ep_feat_sizes, ep_ref_sizes = zip(
                *sorted(
                    zip(ep_feats, ep_ali, ep_ref, ep_feat_sizes, ep_ref_sizes),
                    key=lambda x: (-x[3], -x[4], x[0][0, 0]),
                )
            )
        return ep_feats, ep_ali, ep_ref, ep_feat_sizes, ep_ref_sizes
    def _compare_epochs(ep_a, ep_b, same):
        # same=True asserts the two epochs are identical; same=False asserts
        # they differ (i.e. the shuffle produced a different ordering)
        a_feats, a_ali, a_ref, a_feat_sizes, a_ref_sizes = ep_a
        b_feats, b_ali, b_ref, b_feat_sizes, b_ref_sizes = ep_b
        a_feats, b_feats = torch.stack(a_feats), torch.stack(b_feats)
        a_ali, b_ali = torch.stack(a_ali), torch.stack(b_ali)
        a_ref, b_ref = torch.stack(a_ref), torch.stack(b_ref)
        if same:
            assert a_feat_sizes == b_feat_sizes
            assert a_ref_sizes == b_ref_sizes
            assert torch.allclose(a_feats, b_feats)
            assert torch.all(a_ali == b_ali)
            assert torch.all(a_ref == b_ref)
        else:
            assert a_feat_sizes != b_feat_sizes
            assert a_ref_sizes != b_ref_sizes
            assert not torch.allclose(a_feats, b_feats)
            assert torch.any(a_ali != b_ali)
            assert torch.any(a_ref != b_ref)
    ep0 = _get_epoch(False)
    ep1 = _get_epoch(False)
    _compare_epochs(ep0, ep1, False)  # could be same by fluke
    # sorted epochs must agree in content even though orders differ
    _compare_epochs(_get_epoch(True), _get_epoch(True), True)
    data_loader.epoch = 1
    _compare_epochs(ep1, _get_epoch(False), True)
    # XXX(sdrobert): warning spit out on CI if num_workers > 2
    data_loader = data.SpectTrainingDataLoader(
        temp_dir, params, data_params=data_params, num_workers=2, seed=2
    )
    _compare_epochs(ep0, _get_epoch(False), True)
    _compare_epochs(ep1, _get_epoch(False), True)
    data_loader.batch_first = False
    data_loader.epoch = 0
    _compare_epochs(ep0, _get_epoch(False), True)
    _compare_epochs(ep1, _get_epoch(False), True)
@pytest.mark.cpu
@pytest.mark.parametrize("eos", [None, -1])
@pytest.mark.parametrize("sos", [None, -2])
@pytest.mark.parametrize("split_params", [True, False])
@pytest.mark.parametrize("include_frame_shift", [True, False])
@pytest.mark.parametrize("feat_dtype", [torch.float, torch.int])
def test_spect_evaluation_data_loader(
    temp_dir,
    populate_torch_dir,
    sos,
    eos,
    split_params,
    include_frame_shift,
    feat_dtype,
):
    """SpectEvaluationDataLoader preserves utterance order and pads correctly.

    Builds the expected references by wrapping the on-disk transcripts with
    sos/eos rows, then checks every batch against the corresponding slice of
    the master lists — twice (order must not change), with workers, and with
    ``batch_first=False``.
    """
    torch.manual_seed(41)
    feat_dir = os.path.join(temp_dir, "feat")
    ali_dir = os.path.join(temp_dir, "ali")
    os.makedirs(feat_dir)
    os.makedirs(ali_dir)
    batch_size = 5
    if split_params:
        params = data.DataSetParams(batch_size=batch_size)
        data_params = data.SpectDataParams(sos=sos, eos=eos)
    else:
        params = data.SpectDataSetParams(batch_size=batch_size, sos=sos, eos=eos)
        data_params = None
    feats, ali, ref, feat_sizes, ref_sizes, utt_ids = populate_torch_dir(
        temp_dir, 20, include_frame_shift=include_frame_shift, feat_dtype=feat_dtype
    )
    if sos is not None:
        if include_frame_shift:
            # a token row is (id, start, end); sos carries no segment times
            sos_sym = torch.full((3,), -1, dtype=torch.long)
            sos_sym[0] = sos
            sos_sym = sos_sym.unsqueeze(0)
        else:
            sos_sym = torch.full((1,), sos, dtype=torch.long)
        ref = [torch.cat([sos_sym, x], 0) for x in ref]
        ref_sizes = [x + 1 for x in ref_sizes]
    if eos is not None:
        if include_frame_shift:
            # Fixed: fill the segment columns with -1 (was erroneously filled
            # with eos, then redundantly set [0] = eos). This matches the sos
            # branch above and the eos construction in test_valid_spect_data_set.
            eos_sym = torch.full((3,), -1, dtype=torch.long)
            eos_sym[0] = eos
            eos_sym = eos_sym.unsqueeze(0)
        else:
            eos_sym = torch.full((1,), eos, dtype=torch.long)
        ref = [torch.cat([x, eos_sym], 0) for x in ref]
        ref_sizes = [x + 1 for x in ref_sizes]
    # check that ali and ref can be missing
    data_loader = data.SpectEvaluationDataLoader(
        temp_dir, params, data_params=data_params, ali_subdir=None, ref_subdir=None
    )
    assert next(iter(data_loader))[1:3] == (None, None)
    assert next(iter(data_loader))[4] is None
    data_loader = data.SpectEvaluationDataLoader(
        temp_dir, params, data_params=data_params
    )
    def _compare_data_loader():
        # one full pass over the loader, checking each batch of 5 against the
        # matching slice of the master lists
        batch_first = data_loader.batch_first
        assert len(data_loader) == 4
        cur_idx = 0
        for (
            b_feats,
            b_ali,
            b_ref,
            b_feat_sizes,
            b_ref_sizes,
            b_utt_ids,
        ) in data_loader:
            if not batch_first:
                b_feats = b_feats.transpose(0, 1)
                b_ali = b_ali.transpose(0, 1)
                b_ref = b_ref.transpose(0, 1)
            R_star = max(b_ref_sizes)
            assert tuple(b_feats.shape) == (5, b_feat_sizes[0], 5)
            assert tuple(b_ali.shape) == (5, b_feat_sizes[0])
            if include_frame_shift:
                assert tuple(b_ref.shape) == (5, R_star, 3)
            else:
                assert tuple(b_ref.shape) == (5, R_star)
            # sort the sub-section of the master list by feature size, the
            # same way the loader sorts within a batch
            s_feats, s_ali, s_ref, s_feat_sizes, s_ref_sizes, s_utt_ids = zip(
                *sorted(
                    zip(
                        feats[cur_idx : cur_idx + 5],
                        ali[cur_idx : cur_idx + 5],
                        ref[cur_idx : cur_idx + 5],
                        feat_sizes[cur_idx : cur_idx + 5],
                        ref_sizes[cur_idx : cur_idx + 5],
                        utt_ids[cur_idx : cur_idx + 5],
                    ),
                    key=lambda x: -x[3],
                )
            )
            assert b_utt_ids == s_utt_ids
            assert tuple(b_feat_sizes) == s_feat_sizes
            assert tuple(b_ref_sizes) == s_ref_sizes
            # feats zero-padded; ali/ref padded with INDEX_PAD_VALUE
            for a, b in zip(b_feats, s_feats):
                assert torch.allclose(a[: b.shape[0]], b)
                assert torch.allclose(
                    a[b.shape[0] :], torch.tensor([0], dtype=feat_dtype)
                )
            for a, b in zip(b_ali, s_ali):
                assert torch.all(a[: b.shape[0]] == b)
                assert torch.all(a[b.shape[0] :] == torch.tensor([INDEX_PAD_VALUE]))
            for a, b in zip(b_ref, s_ref):
                assert torch.all(a[: b.shape[0]] == b)
                assert torch.all(a[b.shape[0] :] == torch.tensor([INDEX_PAD_VALUE]))
            cur_idx += 5
    _compare_data_loader()
    _compare_data_loader()  # order should not change
    data_loader = data.SpectEvaluationDataLoader(
        temp_dir, params, data_params=data_params, num_workers=2
    )
    _compare_data_loader()  # order should still not change
    data_loader.batch_first = False
    _compare_data_loader()
@pytest.mark.cpu
@pytest.mark.parametrize("split_params", [True, False])
def test_window_training_data_loader(temp_dir, populate_torch_dir, split_params):
    """ContextWindowTrainingDataLoader replays epoch 1 identically.

    Epoch 1 must be the same whether reached by iterating twice, by building
    a fresh loader with ``init_epoch=1`` (and workers), or by resetting
    ``epoch`` on an existing loader.
    """
    populate_torch_dir(temp_dir, 5, num_filts=2)
    seed, batch_size, context_left, context_right = 2, 5, 1, 1
    if split_params:
        params = data.DataSetParams(batch_size=batch_size, drop_last=True)
        data_params = data.ContextWindowDataParams(
            context_left=context_left, context_right=context_right
        )
    else:
        params = data.ContextWindowDataSetParams(
            context_left=context_left,
            context_right=context_right,
            batch_size=batch_size,
            drop_last=True,
        )
        data_params = None
    data_loader = data.ContextWindowTrainingDataLoader(
        temp_dir, params, data_params=data_params, seed=seed
    )
    total_windows_ep0 = 0
    for feat, ali in data_loader:
        # each batch is (windows, 1 + left + right, num_filts) = (windows, 3, 2)
        windows = feat.shape[0]
        assert tuple(feat.shape) == (windows, 3, 2)
        assert tuple(ali.shape) == (windows,)
        total_windows_ep0 += windows
    assert total_windows_ep0 >= batch_size
    feats_ep1_a, alis_ep1_a = [], []
    total_windows_ep1 = 0
    for feats, alis in data_loader:
        # Fixed: this loop previously inspected the stale `feat`/`ali`
        # variables left over from the epoch-0 loop, so epoch 1's shapes and
        # window counts went effectively unchecked.
        windows = feats.shape[0]
        assert tuple(feats.shape) == (windows, 3, 2)
        assert tuple(alis.shape) == (windows,)
        feats_ep1_a.append(feats)
        alis_ep1_a.append(alis)
        total_windows_ep1 += windows
    assert total_windows_ep0 == total_windows_ep1
    # a fresh loader starting at epoch 1 (with workers) must reproduce it
    data_loader = data.ContextWindowTrainingDataLoader(
        temp_dir,
        params,
        init_epoch=1,
        data_params=data_params,
        num_workers=2,
        seed=seed,
    )
    feats_ep1_b, alis_ep1_b = [], []
    for feats, alis in data_loader:
        feats_ep1_b.append(feats)
        alis_ep1_b.append(alis)
    assert all(
        torch.allclose(feats_a, feats_b)
        for (feats_a, feats_b) in zip(feats_ep1_a, feats_ep1_b)
    )
    assert all(
        torch.all(alis_a == alis_b) for (alis_a, alis_b) in zip(alis_ep1_a, alis_ep1_b)
    )
    # rewinding the epoch attribute must also reproduce epoch 1
    data_loader.epoch = 1
    feats_ep1_c, alis_ep1_c = [], []
    for feats, alis in data_loader:
        feats_ep1_c.append(feats)
        alis_ep1_c.append(alis)
    assert all(
        torch.allclose(feats_a, feats_c)
        for (feats_a, feats_c) in zip(feats_ep1_a, feats_ep1_c)
    )
    assert all(
        torch.all(alis_a == alis_c) for (alis_a, alis_c) in zip(alis_ep1_a, alis_ep1_c)
    )
@pytest.mark.cpu
@pytest.mark.parametrize("split_params", [True, False])
def test_window_evaluation_data_loader(temp_dir, populate_torch_dir, split_params):
    """ContextWindowEvaluationDataLoader keeps the on-disk utterance order."""
    torch.manual_seed(1)
    feat_dir = os.path.join(temp_dir, "feat")
    ali_dir = os.path.join(temp_dir, "ali")
    os.makedirs(feat_dir)
    os.makedirs(ali_dir)
    if split_params:
        params = data.DataSetParams(batch_size=5)
        data_params = data.ContextWindowDataParams(context_left=1, context_right=1)
    else:
        params = data.ContextWindowDataSetParams(
            context_left=1, context_right=1, batch_size=5
        )
        data_params = None
    feats, alis, _, feat_sizes, _, utt_ids = populate_torch_dir(
        temp_dir, 20, include_ref=False
    )
    def _compare_data_loader(data_loader):
        # 20 utterances at batch_size 5 -> 4 batches, in utterance order
        assert len(data_loader) == 4
        cur_idx = 0
        for b_feats, b_alis, b_feat_sizes, b_utt_ids in data_loader:
            assert tuple(b_feats.shape[1:]) == (3, 5)
            # windows are flattened across the batch's utterances
            assert b_feats.shape[0] == sum(b_feat_sizes)
            assert tuple(b_utt_ids) == tuple(utt_ids[cur_idx : cur_idx + 5])
            # the centre frame of each width-3 window is the original frame
            assert torch.allclose(
                b_feats[:, 1], torch.cat(feats[cur_idx : cur_idx + 5])
            )
            assert torch.all(b_alis == torch.cat(alis[cur_idx : cur_idx + 5]))
            cur_idx += 5
    data_loader = data.ContextWindowEvaluationDataLoader(
        temp_dir, params, data_params=data_params, ali_subdir=None
    )
    # check batching works when alignments are empty
    assert next(iter(data_loader))[1] is None
    data_loader = data.ContextWindowEvaluationDataLoader(
        temp_dir, params, data_params=data_params
    )
    _compare_data_loader(data_loader)
    _compare_data_loader(data_loader)  # order should not change
    data_loader = data.ContextWindowEvaluationDataLoader(
        temp_dir, params, data_params=data_params, num_workers=2
    )
    _compare_data_loader(data_loader)  # order should still not change
@pytest.mark.cpu
def test_pydrobert_param_optuna_hooks():
    """Param classes plug into pydrobert.param.optuna hyperparameter tuning."""
    # both packages are optional; skip if absent
    poptuna = pytest.importorskip("pydrobert.param.optuna")
    optuna = pytest.importorskip("optuna")
    for class_ in (
        data.DataSetParams,
        data.SpectDataSetParams,
        data.ContextWindowDataParams,
        data.ContextWindowDataSetParams,
    ):
        assert issubclass(class_, poptuna.TunableParameterized)
    global_dict = {
        "data_set": data.DataSetParams(),
        "spect_data": data.SpectDataParams(),
        "spect_data_set": data.SpectDataSetParams(),
        "context_window_data": data.ContextWindowDataParams(),
        "context_window_data_set": data.ContextWindowDataSetParams(),
    }
    # of the names listed, only spect_data.eos should NOT be tunable
    assert {
        "data_set.batch_size",
        "spect_data.eos",
        "spect_data_set.batch_size",
        "context_window_data.reverse",
        "context_window_data_set.batch_size",
    } - poptuna.get_param_dict_tunable(global_dict) == {"spect_data.eos"}
    def objective(trial):
        # minimize the suggested batch size; exercises suggest_param_dict
        param_dict = poptuna.suggest_param_dict(trial, global_dict)
        return param_dict["data_set"].batch_size
    sampler = optuna.samplers.RandomSampler(seed=5)
    study = optuna.create_study(sampler=sampler)
    study.optimize(objective, n_trials=10)
    # all tunable names must show up in the best trial's parameters
    assert (
        not {
            "data_set.batch_size",
            "spect_data_set.batch_size",
            "context_window_data.reverse",
            "context_window_data_set.batch_size",
        }
        - set(study.best_params)
    )
    assert study.best_params["data_set.batch_size"] < 7
| 36.027188 | 88 | 0.603905 |
import os
from itertools import repeat
from io import StringIO
import pytest
import torch
import torch.utils.data
import pydrobert.torch.data as data
from pydrobert.torch import INDEX_PAD_VALUE
@pytest.mark.cpu
@pytest.mark.parametrize("left", [0, 1, 100])
@pytest.mark.parametrize("right", [0, 1, 100])
@pytest.mark.parametrize("T", [1, 5, 10])
def test_extract_window(left, right, T):
    """extract_window returns a fixed-width window around each frame.

    The signal's frame values equal their indices, so the in-bounds part of
    a window must be a consecutive index range; out-of-bounds positions on
    the left hold 0 and on the right hold T - 1.
    """
    width = 1 + left + right
    signal = torch.arange(T).view(-1, 1).expand(-1, 10)
    for t in range(T):
        win = data.extract_window(signal, t, left, right)
        assert tuple(win.shape) == (width, 10)
        n_left = max(0, left - t)
        n_right = max(0, t + right + 1 - T)
        if n_left:
            assert torch.all(win[:n_left] == torch.tensor([0]))
        if n_right:
            assert torch.all(win[-n_right:] == torch.tensor([T - 1]))
        lo = t - left + n_left
        hi = t + right - n_right + 1
        expected = torch.arange(lo, hi).view(-1, 1).expand(-1, 10)
        assert torch.all(win[n_left : width - n_right] == expected)
@pytest.mark.cpu
@pytest.mark.parametrize("num_utts", [1, 2, 10])
@pytest.mark.parametrize("file_prefix", ["prefix_", ""])
@pytest.mark.parametrize("eos", [1000, None])
@pytest.mark.parametrize("sos", [2000, None])
@pytest.mark.parametrize("feat_dtype", [torch.float, torch.int])
def test_valid_spect_data_set(
    temp_dir, num_utts, file_prefix, populate_torch_dir, sos, eos, feat_dtype
):
    """SpectDataSet indexes matching utterances in order, with sos/eos applied.

    Covers feat-only layouts, full feat/ali/ref layouts, and ``subset_ids``
    restriction; the assertions only compare against the first populate call,
    so the extra files written below must be ignored by the data set.
    """
    feats, _, _, _, _, utt_ids = populate_torch_dir(
        temp_dir,
        num_utts,
        file_prefix=file_prefix,
        include_ali=False,
        include_ref=False,
        feat_dtype=feat_dtype,
    )
    # extra content that must not disturb indexing: a second populate call
    # (fixture default prefix) and a tensor inside a nested subdirectory
    populate_torch_dir(
        temp_dir, num_utts, include_ali=False, include_ref=False, feat_dtype=feat_dtype
    )
    if not os.path.isdir(os.path.join(temp_dir, "feat", "fake")):
        os.makedirs(os.path.join(temp_dir, "feat", "fake"))
    torch.save(
        torch.randint(100, (10, 5), dtype=feat_dtype),
        os.path.join(temp_dir, "feat", "fake", file_prefix + "fake.pt"),
    )
    data_set = data.SpectDataSet(temp_dir, file_prefix=file_prefix, eos=eos)
    assert not data_set.has_ali and not data_set.has_ref
    assert len(utt_ids) == len(data_set.utt_ids)
    assert all(utt_a == utt_b for (utt_a, utt_b) in zip(utt_ids, data_set.utt_ids))
    assert all(
        ali_b is None and ref_b is None and torch.allclose(feat_a, feat_b)
        for (feat_a, (feat_b, ali_b, ref_b)) in zip(feats, data_set)
    )
    feats, alis, refs, _, _, utt_ids = populate_torch_dir(
        temp_dir, num_utts, file_prefix=file_prefix, feat_dtype=feat_dtype
    )
    if sos is not None:
        # expected sos row: token id in column 0, -1 segment boundaries
        sos_sym = torch.full((3,), -1, dtype=torch.long)
        sos_sym[0] = sos
        sos_sym = sos_sym.unsqueeze(0)
        refs = [torch.cat([sos_sym, x]) for x in refs]
    if eos is not None:
        # expected eos row, appended rather than prepended
        eos_sym = torch.full((3,), -1, dtype=torch.long)
        eos_sym[0] = eos
        eos_sym = eos_sym.unsqueeze(0)
        refs = [torch.cat([x, eos_sym]) for x in refs]
    data_set = data.SpectDataSet(temp_dir, file_prefix=file_prefix, sos=sos, eos=eos)
    assert data_set.has_ali and data_set.has_ref
    assert len(utt_ids) == len(data_set.utt_ids)
    assert all(utt_a == utt_b for (utt_a, utt_b) in zip(utt_ids, data_set.utt_ids))
    assert all(
        torch.all(ali_a == ali_b)
        and torch.all(ref_a == ref_b)
        and feat_a.dtype == feat_b.dtype
        and torch.allclose(feat_a, feat_b)
        for ((feat_a, ali_a, ref_a), (feat_b, ali_b, ref_b)) in zip(
            zip(feats, alis, refs), data_set
        )
    )
    # restricting to subset_ids keeps exactly those utterances, in order
    subset_ids = data_set.utt_ids[: num_utts // 2]
    data_set = data.SpectDataSet(
        temp_dir, file_prefix=file_prefix, subset_ids=set(subset_ids), sos=sos, eos=eos
    )
    assert all(utt_a == utt_b for (utt_a, utt_b) in zip(subset_ids, data_set.utt_ids))
    assert all(
        torch.all(ali_a == ali_b)
        and torch.all(ref_a == ref_b)
        and torch.allclose(feat_a, feat_b)
        for ((feat_a, ali_a, ref_a), (feat_b, ali_b, ref_b)) in zip(
            zip(feats[: num_utts // 2], alis[: num_utts // 2], refs[: num_utts // 2]),
            data_set,
        )
    )
@pytest.mark.cpu
def test_spect_data_set_warnings(temp_dir):
    """Utterances missing either feat or ali are dropped, warning only if asked."""
    torch.manual_seed(1)
    feat_dir = os.path.join(temp_dir, "feat")
    ali_dir = os.path.join(temp_dir, "ali")
    os.makedirs(feat_dir)
    os.makedirs(ali_dir)
    # 'a' has feat only, 'b' has both, 'c' has ali only
    torch.save(torch.rand(3, 3), os.path.join(feat_dir, "a.pt"))
    torch.save(torch.rand(4, 3), os.path.join(feat_dir, "b.pt"))
    torch.save(torch.randint(10, (4,), dtype=torch.long), os.path.join(ali_dir, "b.pt"))
    torch.save(torch.randint(10, (5,), dtype=torch.long), os.path.join(ali_dir, "c.pt"))
    data_set = data.SpectDataSet(temp_dir, warn_on_missing=False)
    assert data_set.has_ali
    # only the complete utterance survives
    assert data_set.utt_ids == ("b",)
    # default behaviour: one warning per incomplete utterance
    with pytest.warns(UserWarning) as warnings:
        data_set = data.SpectDataSet(temp_dir)
    assert len(warnings) == 2
    assert any(str(x.message) == "Missing ali for uttid: 'a'" for x in warnings)
    assert any(str(x.message) == "Missing feat for uttid: 'c'" for x in warnings)
def test_spect_data_write_pdf(temp_dir, device):
    """write_pdf stores a float copy of the tensor under <dir>/pdfs/<uttid>.pt."""
    torch.manual_seed(1)
    feat_dir = os.path.join(temp_dir, "feat")
    os.makedirs(feat_dir)
    torch.save(torch.rand(3, 3), os.path.join(feat_dir, "a.pt"))
    data_set = data.SpectDataSet(temp_dir)
    z = torch.randint(10, (4, 5), dtype=torch.long)
    if device == "cuda":
        data_set.write_pdf("b", z.cuda())
    else:
        data_set.write_pdf("b", z)
    # regardless of input dtype/device, the saved tensor is a CPU FloatTensor
    zp = torch.load(os.path.join(temp_dir, "pdfs", "b.pt"))
    assert isinstance(zp, torch.FloatTensor)
    assert torch.allclose(zp, z.float())
    # an integer first argument indexes the data set's utt_ids ("a" is index 0)
    data_set.write_pdf(0, torch.rand(10, 4))
    assert os.path.exists(os.path.join(temp_dir, "pdfs", "a.pt"))
    # pdfs_dir overrides the default output directory
    data_set.write_pdf("c", z, pdfs_dir=os.path.join(temp_dir, "foop"))
    assert os.path.exists(os.path.join(temp_dir, "foop", "c.pt"))
@pytest.mark.parametrize("eos", [None, -1])
@pytest.mark.parametrize("sos", [None, -2])
def test_spect_data_write_hyp(temp_dir, device, sos, eos):
    """write_hyp strips sos/eos padding and stores a long copy under hyp/."""
    torch.manual_seed(1)
    feat_dir = os.path.join(temp_dir, "feat")
    os.makedirs(feat_dir)
    torch.save(torch.rand(3, 3), os.path.join(feat_dir, "a.pt"))
    data_set = data.SpectDataSet(temp_dir, sos=sos, eos=eos)
    z = torch.randint(10, (4, 3), dtype=torch.float)
    zz = z
    # Fixed: compare against None instead of relying on truthiness, so a
    # legitimate sos/eos id of 0 would not silently skip the wrapping step.
    # (Behaviour is unchanged for the parametrized values None/-2/-1.)
    if sos is not None:
        zz = torch.cat([torch.full_like(zz, sos), zz])
    if eos is not None:
        zz = torch.cat([zz, torch.full_like(z, eos)])
    if device == "cuda":
        data_set.write_hyp("b", zz.cuda())
    else:
        data_set.write_hyp("b", zz)
    # saved tensor equals the original z: sos/eos blocks removed, cast to long
    zp = torch.load(os.path.join(temp_dir, "hyp", "b.pt"))
    assert isinstance(zp, torch.LongTensor)
    assert torch.all(zp == z.long())
    # an integer first argument indexes the data set's utt_ids ("a" is index 0)
    data_set.write_hyp(0, torch.randint(10, (11, 3)))
    assert os.path.exists(os.path.join(temp_dir, "hyp", "a.pt"))
    # hyp_dir overrides the default output directory
    data_set.write_hyp("c", z, hyp_dir=os.path.join(temp_dir, "foop"))
    assert os.path.exists(os.path.join(temp_dir, "foop", "c.pt"))
@pytest.mark.cpu
@pytest.mark.parametrize("eos", [None, 10000])
def test_spect_data_set_validity(temp_dir, eos):
    """validate_spect_data_set rejects each kind of malformed feat/ali/ref.

    Repeatedly overwrites one file with a bad tensor and checks the expected
    ``ValueError``. For repairable defects, calling with a truthy second
    argument warns instead, after which plain validation passes again.
    """
    torch.manual_seed(1)
    feat_dir = os.path.join(temp_dir, "feat")
    ali_dir = os.path.join(temp_dir, "ali")
    ref_dir = os.path.join(temp_dir, "ref")
    feats_a_pt = os.path.join(feat_dir, "a.pt")
    feats_b_pt = os.path.join(feat_dir, "b.pt")
    ali_a_pt = os.path.join(ali_dir, "a.pt")
    ali_b_pt = os.path.join(ali_dir, "b.pt")
    ref_a_pt = os.path.join(ref_dir, "a.pt")
    ref_b_pt = os.path.join(ref_dir, "b.pt")
    os.makedirs(feat_dir)
    os.makedirs(ali_dir)
    os.makedirs(ref_dir)
    # start from a fully valid pair of utterances
    torch.save(torch.rand(10, 4), feats_a_pt)
    torch.save(torch.rand(4, 4), feats_b_pt)
    torch.save(torch.randint(10, (10,), dtype=torch.long), ali_a_pt)
    torch.save(torch.randint(10, (4,), dtype=torch.long), ali_b_pt)
    torch.save(
        torch.cat(
            [
                torch.randint(10, (11, 1), dtype=torch.long),
                torch.full((11, 2), -1, dtype=torch.long),
            ],
            -1,
        ),
        ref_a_pt,
    )
    torch.save(torch.tensor([[0, 3, 4], [1, 1, 2]]), ref_b_pt)
    data_set = data.SpectDataSet(temp_dir, eos=eos)
    data.validate_spect_data_set(data_set)
    # feats: wrong tensor type
    torch.save(torch.rand(4, 4).long(), feats_b_pt)
    with pytest.raises(ValueError, match="not the same tensor type"):
        data.validate_spect_data_set(data_set)
    # feats: wrong number of dimensions
    torch.save(
        torch.rand(
            4,
        ),
        feats_b_pt,
    )
    with pytest.raises(ValueError, match="does not have two dimensions"):
        data.validate_spect_data_set(data_set)
    # feats: filter count disagrees with the other utterance
    torch.save(torch.rand(4, 3), feats_b_pt)
    with pytest.raises(ValueError, match="has second dimension of size 3.*"):
        data.validate_spect_data_set(data_set)
    torch.save(torch.rand(4, 4), feats_b_pt)
    data.validate_spect_data_set(data_set)
    # ali: wrong dtype — repairable (warns, then validates cleanly)
    torch.save(torch.randint(10, (4,)).int(), ali_b_pt)
    with pytest.raises(ValueError, match="is not a long tensor"):
        data.validate_spect_data_set(data_set)
    with pytest.warns(UserWarning):
        data.validate_spect_data_set(data_set, True)
    data.validate_spect_data_set(data_set)
    # ali: wrong number of dimensions
    torch.save(torch.randint(10, (4, 1), dtype=torch.long), ali_b_pt)
    with pytest.raises(ValueError, match="does not have one dimension"):
        data.validate_spect_data_set(data_set)
    # ali: frame count disagrees with feats
    torch.save(torch.randint(10, (3,), dtype=torch.long), ali_b_pt)
    with pytest.raises(ValueError, match="does not have the same first"):
        data.validate_spect_data_set(data_set)
    torch.save(torch.randint(10, (4,), dtype=torch.long), ali_b_pt)
    data.validate_spect_data_set(data_set)
    # ref: wrong dtype — repairable
    torch.save(torch.Tensor([[0, 1, 2]]).int(), ref_b_pt)
    with pytest.raises(ValueError, match="is not a long tensor"):
        data.validate_spect_data_set(data_set)
    with pytest.warns(UserWarning):
        data.validate_spect_data_set(data_set, True)
    data.validate_spect_data_set(data_set)
    # ref: -1 boundary among real segment bounds — repairable
    torch.save(torch.tensor([[0, -1, 2], [1, 1, 2]]), ref_b_pt)
    with pytest.raises(ValueError, match="invalid boundaries"):
        data.validate_spect_data_set(data_set)
    with pytest.warns(UserWarning):
        data.validate_spect_data_set(data_set, True)
    data.validate_spect_data_set(data_set)
    # ref: end boundary past the utterance's 4 frames — repairable
    torch.save(torch.tensor([[0, 0, 1], [1, 3, 5]]), ref_b_pt)
    with pytest.raises(ValueError, match="invalid boundaries"):
        data.validate_spect_data_set(data_set)
    with pytest.warns(UserWarning):
        data.validate_spect_data_set(data_set, True)
    data.validate_spect_data_set(data_set)
    # ref: these boundaries raise even with repairing enabled
    torch.save(torch.tensor([[0, 0, 1], [1, 4, 5]]), ref_b_pt)
    with pytest.raises(ValueError, match="invalid boundaries"):
        data.validate_spect_data_set(data_set, True)
    # ref: cannot mix 1D and 2D refs across utterances
    torch.save(torch.tensor([1, 2, 3]), ref_b_pt)
    with pytest.raises(ValueError, match="were 2D"):
        data.validate_spect_data_set(data_set)
    # once both refs are 1D, the set validates again
    torch.save(torch.tensor([10, 4, 2, 5]), ref_a_pt)
    data.validate_spect_data_set(data_set)
@pytest.mark.gpu
def test_validate_spect_data_set_cuda(temp_dir):
    """CUDA-resident tensors fail validation but are repairable."""
    torch.manual_seed(29)
    feat_dir = os.path.join(temp_dir, "feat")
    ali_dir = os.path.join(temp_dir, "ali")
    ref_dir = os.path.join(temp_dir, "ref")
    feats_pt = os.path.join(feat_dir, "a.pt")
    ali_pt = os.path.join(ali_dir, "a.pt")
    ref_pt = os.path.join(ref_dir, "a.pt")
    os.makedirs(feat_dir)
    os.makedirs(ali_dir)
    os.makedirs(ref_dir)
    torch.save(torch.rand(10, 5), feats_pt)
    torch.save(torch.randint(10, (10,), dtype=torch.long), ali_pt)
    torch.save(torch.tensor([1, 2, 3]), ref_pt)
    data_set = data.SpectDataSet(temp_dir)
    data.validate_spect_data_set(data_set)
    # a single cuda tensor raises; the truthy second argument warns and
    # repairs, so plain validation then passes
    torch.save(torch.rand(10, 5).cuda(), feats_pt)
    with pytest.raises(ValueError, match="cuda"):
        data.validate_spect_data_set(data_set)
    with pytest.warns(UserWarning):
        data.validate_spect_data_set(data_set, True)
    data.validate_spect_data_set(data_set)
    # all three files on cuda: same raise/repair cycle
    torch.save(torch.rand(10, 5).cuda(), feats_pt)
    torch.save(torch.randint(10, (10,), dtype=torch.long).cuda(), ali_pt)
    torch.save(torch.tensor([1, 2, 3]).cuda(), ref_pt)
    with pytest.raises(ValueError, match="cuda"):
        data.validate_spect_data_set(data_set)
    with pytest.warns(UserWarning):
        data.validate_spect_data_set(data_set, True)
    data.validate_spect_data_set(data_set)
@pytest.mark.cpu
@pytest.mark.parametrize("processes", [0, 2])
def test_read_trn(processes):
    """read_trn parses trn lines, including ``{ a / b }`` alternate blocks."""
    trn = StringIO()
    trn.write(
        """\
here is a simple example (a)
nothing should go wrong (b)
"""
    )
    trn.seek(0)
    act = data.read_trn(trn, processes=processes, chunk_size=1)
    assert act == [
        ("a", ["here", "is", "a", "simple", "example"]),
        ("b", ["nothing", "should", "go", "wrong"]),
    ]
    # overwrite with trickier content: nested and unbalanced alternates, an
    # empty transcript, and a letter-digit token
    trn.seek(0)
    trn.write(
        """\
here is an { example /with} some alternates (a)
} and /here/ is {something really / {really}} (stupid) { ignore this (b)
(c)
a11 (d)
"""
    )
    trn.seek(0)
    # warn=False suppresses complaints about the malformed alternate syntax
    act = data.read_trn(trn, warn=False, processes=processes)
    assert act == [
        (
            "a",
            [
                "here",
                "is",
                "an",
                ([["example"], ["with"]], -1, -1),
                "some",
                "alternates",
            ],
        ),
        (
            "b",
            [
                "}",
                "and",
                "/here/",
                "is",
                ([["something", "really"], [[["really"]]]], -1, -1),
                "(stupid)",
            ],
        ),
        ("c", []),
        ("d", ["a11"]),
    ]
@pytest.mark.cpu
def test_read_ctm():
    """read_ctm groups CTM rows by utterance and sorts tokens by time."""
    ctm = StringIO()
    ctm.write(
        """\
utt1 A 0.0 0.1 a
utt1 A 0.5 0.1 c ;; ctm files should always be ordered, but we tolerate
;; different orders
utt2 B 0.1 1.0 d
utt1 B 0.4 0.3 b
;; utt2 A 0.2 1.0 f
"""
    )
    ctm.seek(0)
    act = data.read_ctm(ctm)
    # (start, duration) columns become (start, start + duration) pairs;
    # ';;' comments are ignored
    assert act == [
        ("utt1", [("a", 0.0, 0.1), ("b", 0.4, 0.7), ("c", 0.5, 0.6)]),
        ("utt2", [("d", 0.1, 1.1)]),
    ]
    ctm.seek(0)
    # a (wavefile, channel) -> id map splits utt1's two channels apart
    act = data.read_ctm(
        ctm, {("utt1", "A"): "foo", ("utt1", "B"): "bar", ("utt2", "B"): "baz"}
    )
    assert act == [
        ("foo", [("a", 0.0, 0.1), ("c", 0.5, 0.6)]),
        ("baz", [("d", 0.1, 1.1)]),
        ("bar", [("b", 0.4, 0.7)]),
    ]
    # a malformed row (negative start, missing channel) makes read_ctm raise
    with pytest.raises(ValueError):
        ctm.write("utt3 -0.1 1.0 woop\n")
        ctm.seek(0)
        data.read_ctm(ctm)
@pytest.mark.cpu
def test_write_trn():
    """write_trn emits one 'tokens (uttid)' line per transcript."""
    trn = StringIO()
    transcripts = [
        ("a", ["again", "a", "simple", "example"]),
        ("b", ["should", "get", "right", "no", "prob"]),
    ]
    data.write_trn(transcripts, trn)
    trn.seek(0)
    assert (
        """\
again a simple example (a)
should get right no prob (b)
"""
        == trn.read()
    )
    trn.seek(0)
    trn.truncate()
    # (token, start, end) entries drop their times; nested alternate lists
    # are serialized back into { ... / ... } syntax; ids written verbatim
    transcripts = [
        (
            " c ",
            [
                ("unnecessary", -1, -1),
                ([["complexity", [["can"]]], ["also", "be"]], 10, 4),
                "handled",
            ],
        ),
        ("d", []),
        ("e", ["a11"]),
    ]
    data.write_trn(transcripts, trn)
    trn.seek(0)
    assert (
        """\
unnecessary { complexity { can } / also be } handled ( c )
(d)
a11 (e)
"""
        == trn.read()
    )
@pytest.mark.cpu
def test_write_ctm():
    """write_ctm emits '<wav> <chan> <start> <dur> <token>' rows in sorted order."""
    ctm = StringIO()
    transcripts = [
        (
            "c",
            [
                ("here", 0.1, 0.2),
                ("are", 0.3, 0.5),
                ("some", 0.2, 0.4),
                ("unordered", 0.5, 0.5),
                ("tokens", 10.0, 1000),
            ],
        ),
        ("b", []),
        ("a", [("hullo", 0.0, 10.0111)]),
    ]
    data.write_ctm(transcripts, ctm)
    ctm.seek(0)
    # default channel is A; (start, end) pairs become (start, duration);
    # rows sorted by utterance then start time; empty "b" emits nothing
    assert (
        """\
a A 0.0 10.0111 hullo
c A 0.1 0.1 here
c A 0.2 0.2 some
c A 0.3 0.2 are
c A 0.5 0.0 unordered
c A 10.0 990.0 tokens
"""
        == ctm.read()
    )
    ctm.seek(0)
    ctm.truncate()
    # an id -> (wavefile, channel) map renames the rows
    data.write_ctm(
        transcripts,
        ctm,
        {"a": ("last", "A"), "b": ("middle", "B"), "c": ("first", "C")},
    )
    ctm.seek(0)
    assert (
        """\
first C 0.1 0.1 here
first C 0.2 0.2 some
first C 0.3 0.2 are
first C 0.5 0.0 unordered
first C 10.0 990.0 tokens
last A 0.0 10.0111 hullo
"""
        == ctm.read()
    )
    # an end time before the start time ('b': 0.2 -> 0.1) is rejected
    transcripts.append(("foo", [("a", 0.1, 0.2), ("b", 0.2, 0.1)]))
    with pytest.raises(ValueError):
        data.write_ctm(transcripts, ctm)
@pytest.mark.cpu
@pytest.mark.parametrize(
    "transcript,token2id,unk,skip_frame_times,exp",
    [
        # empty transcript -> empty (0, 3) token tensor
        ([], None, None, False, torch.LongTensor(0, 3)),
        # skip_frame_times drops the start/end columns entirely
        (
            [1, 2, 3, 4],
            None,
            None,
            True,
            torch.LongTensor([1, 2, 3, 4]),
        ),
        # mix of (token, start, end) and bare tokens, mapped through token2id;
        # bare tokens get (-1, -1) frame columns
        (
            [1, ("a", 4, 10), "a", 3],
            {"a": 2},
            None,
            False,
            torch.LongTensor([[1, -1, -1], [2, 4, 10], [2, -1, -1], [3, -1, -1]]),
        ),
        # tokens absent from token2id fall back to the id of `unk`
        (
            ["foo", 1, "bar"],
            {"foo": 0, "baz": 3},
            "baz",
            False,
            torch.LongTensor([[0, -1, -1], [3, -1, -1], [3, -1, -1]]),
        ),
    ],
)
def test_transcript_to_token(transcript, token2id, unk, skip_frame_times, exp):
    """transcript_to_token maps transcripts to (id, start, end) token tensors."""
    act = data.transcript_to_token(
        transcript, token2id, unk=unk, skip_frame_times=skip_frame_times
    )
    assert torch.all(exp == act)
    # without an unk fallback, an unresolvable leading token must raise
    transcript = ["foo"] + transcript
    with pytest.raises(Exception):
        data.transcript_to_token(transcript, token2id)
@pytest.mark.cpu
def test_transcript_to_token_frame_shift():
    """frame_shift_ms converts token (start, end) seconds into frame indices."""
    transcript = [
        (12, 0.5, 0.81),
        420,
        (1, 2.1, 2.2),
        (3, 2.8, 2.815),
        (12, 2.9, 3.0025),
    ]
    # 10 ms frames: 0.5 s -> frame 50, etc.; bare ids get (-1, -1) columns
    exp_10ms = torch.LongTensor(
        [[12, 50, 81], [420, -1, -1], [1, 210, 220], [3, 280, 282], [12, 290, 300]]
    )
    assert torch.allclose(
        data.transcript_to_token(transcript, frame_shift_ms=10), exp_10ms
    )
    # 1/8 ms frames: second boundaries scale by a factor of 8000
    exp_eighth_ms = torch.LongTensor(
        [
            [12, 4000, 6480],
            [420, -1, -1],
            [1, 16800, 17600],
            [3, 22400, 22520],
            [12, 23200, 24020],
        ]
    )
    assert torch.allclose(
        data.transcript_to_token(transcript, frame_shift_ms=1 / 8), exp_eighth_ms
    )
@pytest.mark.cpu
@pytest.mark.parametrize(
    "tok,id2token,exp",
    [
        # empty (0, 3) tensor -> empty transcript
        (torch.LongTensor(0, 3), None, []),
        # all-(-1) frame columns collapse to bare ids
        (
            torch.LongTensor([[1, -1, -1], [2, -1, -1], [3, -1, -1], [4, -1, -1]]),
            None,
            [1, 2, 3, 4],
        ),
        # ids found in id2token are translated; missing ids stay numeric
        (
            torch.LongTensor([[1, 3, 4], [3, 4, 5], [2, -1, -1]]),
            {1: "a", 2: "b"},
            [("a", 3, 4), (3, 4, 5), "b"],
        ),
        # 1D input and single-column 2D input are both accepted
        (torch.tensor(range(10)), None, list(range(10))),
        (torch.tensor(range(5)).unsqueeze(-1), None, list(range(5))),
    ],
)
def test_token_to_transcript(tok, id2token, exp):
    """token_to_transcript converts token tensors back into transcripts."""
    act = data.token_to_transcript(tok, id2token)
    assert exp == act
@pytest.mark.cpu
def test_token_to_transcript_frame_shift():
    """frame_shift_ms converts frame indices back into seconds; -1 start drops times."""
    tok = torch.LongTensor([[1, -1, 10], [2, 1000, 2000], [3, 12345, 678910]])
    # 10 ms frames: frame 1000 -> 10 s; the row with start == -1 yields a bare id
    assert data.token_to_transcript(tok, frame_shift_ms=10) == [
        1,
        (2, 10.0, 20.0),
        (3, 123.45, 6789.10),
    ]
    # 1/8 ms frames: divide frame indices by 8000 to recover seconds
    expected = [1, (2, 0.125, 0.25), (3, 12345 / 8000, 678910 / 8000)]
    assert data.token_to_transcript(tok, frame_shift_ms=1 / 8) == expected
@pytest.mark.cpu
@pytest.mark.parametrize("reverse", [True, False])
def test_context_window_data_set(temp_dir, reverse):
    """ContextWindowDataSet yields (T, 1 + left + right, F) windows per utterance."""
    torch.manual_seed(1)
    feat_dir = os.path.join(temp_dir, "feat")
    os.makedirs(feat_dir)
    a = torch.rand(2, 10)
    torch.save(a, os.path.join(feat_dir, "a.pt"))
    data_set = data.ContextWindowDataSet(temp_dir, 1, 1, reverse=reverse)
    windowed, _ = data_set[0]
    # 2 frames, window width 3 (left=1, right=1), 10 filters
    assert tuple(windowed.shape) == (2, 3, 10)
    if reverse:
        # reverse=True flips the frame order inside each window
        assert torch.allclose(a[0], windowed[0, 1:])
        assert torch.allclose(a[1], windowed[0, 0])
        assert torch.allclose(a[0], windowed[1, 2])
        assert torch.allclose(a[1], windowed[1, :2])
    else:
        assert torch.allclose(a[0], windowed[0, :2])
        assert torch.allclose(a[1], windowed[0, 2])
        assert torch.allclose(a[0], windowed[1, 0])
        assert torch.allclose(a[1], windowed[1, 1:])
@pytest.mark.cpu
def test_epoch_random_sampler(temp_dir):
    """EpochRandomSampler permutes differently per epoch, reproducibly by seed."""
    data_source = torch.utils.data.TensorDataset(torch.arange(100))
    sampler = data.EpochRandomSampler(data_source, base_seed=1)
    samples_ep0 = tuple(sampler)
    samples_ep1 = tuple(sampler)
    # two different permutations of the same 100 indices
    assert samples_ep0 != samples_ep1
    assert sorted(samples_ep0) == list(range(100))
    assert sorted(samples_ep1) == list(range(100))
    assert samples_ep0 == tuple(sampler.get_samples_for_epoch(0))
    assert samples_ep1 == tuple(sampler.get_samples_for_epoch(1))
    # past epochs are recoverable regardless of the sampler's current epoch
    sampler = data.EpochRandomSampler(data_source, init_epoch=10, base_seed=1)
    assert samples_ep0 == tuple(sampler.get_samples_for_epoch(0))
    assert samples_ep1 == tuple(sampler.get_samples_for_epoch(1))
    # without base_seed, the global torch seed determines the permutation
    torch.manual_seed(5)
    sampler = data.EpochRandomSampler(data_source)
    samples_ep0 = tuple(sampler)
    torch.manual_seed(5)
    sampler = data.EpochRandomSampler(data_source)
    assert samples_ep0 == tuple(sampler)
@pytest.mark.cpu
@pytest.mark.parametrize(
    "feat_sizes",
    [((3, 5, 4), (4, 5, 4), (1, 5, 4)), ((2, 10, 5),) * 10],
    ids=["short", "long"],
)
@pytest.mark.parametrize("include_ali", [True, False])
def test_context_window_seq_to_batch(feat_sizes, include_ali):
    """context_window_seq_to_batch concatenates windows along the first axis."""
    torch.manual_seed(1)
    feats = tuple(torch.rand(*x) for x in feat_sizes)
    if include_ali:
        alis = tuple(torch.randint(10, (x[0],), dtype=torch.long) for x in feat_sizes)
    else:
        alis = repeat(None)
    seq = zip(feats, alis)
    batch_feats, batch_ali = data.context_window_seq_to_batch(seq)
    assert torch.allclose(torch.cat(feats), batch_feats)
    if include_ali:
        assert torch.all(torch.cat(alis) == batch_ali)
    else:
        # missing alignments make the batched alignment None
        assert batch_ali is None
@pytest.mark.cpu
@pytest.mark.parametrize("include_ali", [True, False])
@pytest.mark.parametrize(
    "include_ref,include_frame_shift", [(True, True), (True, False), (False, None)]
)
@pytest.mark.parametrize("batch_first", [True, False])
def test_spect_seq_to_batch(include_ali, include_ref, batch_first, include_frame_shift):
    """spect_seq_to_batch sorts by feature length and pads to batch maxima."""
    torch.manual_seed(1)
    # random number (3-9) of utterances with random lengths (1-29)
    feat_sizes = tuple(
        torch.randint(1, 30, (1,)).item()
        for _ in range(torch.randint(3, 10, (1,)).item())
    )
    feats = tuple(torch.randn(x, 5) for x in feat_sizes)
    if include_ali:
        alis = tuple(torch.randint(100, (x,), dtype=torch.long) for x in feat_sizes)
    else:
        alis = repeat(None)
    if include_ref:
        ref_sizes = tuple(
            torch.randint(1, 30, (1,)).item() for _ in range(len(feat_sizes))
        )
        # refs are (R,) token ids, or (R, 3) rows when frame shifts included
        extra_dim = (3,) if include_frame_shift else tuple()
        refs = tuple(
            torch.randint(100, (x,) + extra_dim, dtype=torch.long) for x in ref_sizes
        )
    else:
        ref_sizes = repeat(None)
        refs = repeat(None)
    (
        batch_feats,
        batch_ali,
        batch_ref,
        batch_feat_sizes,
        batch_ref_sizes,
    ) = data.spect_seq_to_batch(zip(feats, alis, refs), batch_first=batch_first)
    # mirror the batch's descending-feature-length ordering on the inputs
    feat_sizes, feats, alis, refs, ref_sizes = zip(
        *sorted(zip(feat_sizes, feats, alis, refs, ref_sizes), key=lambda x: -x[0])
    )
    assert torch.all(torch.tensor(feat_sizes) == batch_feat_sizes)
    if not batch_first:
        batch_feats = batch_feats.transpose(0, 1)
        if include_ali:
            batch_ali = batch_ali.transpose(0, 1)
        if include_ref:
            batch_ref = batch_ref.transpose(0, 1)
    # feats are zero-padded beyond each utterance's length
    assert all(
        torch.allclose(a[: b.shape[0]], b)
        and torch.allclose(a[b.shape[0] :], torch.tensor([0.0]))
        for (a, b) in zip(batch_feats, feats)
    )
    if include_ali:
        # ali/ref are padded with INDEX_PAD_VALUE instead
        assert all(
            torch.all(a[: b.shape[0]] == b)
            and torch.all(a[b.shape[0] :] == torch.tensor([INDEX_PAD_VALUE]))
            for (a, b) in zip(batch_ali, alis)
        )
    else:
        assert batch_ali is None
    if include_ref:
        assert torch.all(torch.tensor(ref_sizes) == batch_ref_sizes)
        assert all(
            torch.all(a[: b.shape[0]] == b)
            and torch.all(a[b.shape[0] :] == torch.tensor([INDEX_PAD_VALUE]))
            for (a, b) in zip(batch_ref, refs)
        )
    else:
        assert batch_ref is None
        assert batch_ref_sizes is None
@pytest.mark.cpu
@pytest.mark.parametrize("eos", [None, -1])
@pytest.mark.parametrize("sos", [None, -2])
@pytest.mark.parametrize("split_params", [True, False])
@pytest.mark.parametrize("include_frame_shift", [True, False])
@pytest.mark.parametrize("feat_dtype", [torch.float, torch.int])
def test_spect_training_data_loader(
    temp_dir,
    populate_torch_dir,
    sos,
    eos,
    split_params,
    include_frame_shift,
    feat_dtype,
):
    """SpectTrainingDataLoader shuffles per epoch but is seed-reproducible.

    Epochs drawn with the same seed and epoch index must match exactly
    (including across ``num_workers`` settings and ``batch_first`` layout),
    while two different epochs must differ when compared in iteration order
    yet contain the same utterances overall.
    """
    torch.manual_seed(40)
    num_utts, batch_size, num_filts = 20, 5, 11
    populate_torch_dir(
        temp_dir,
        num_utts,
        num_filts=num_filts,
        include_frame_shift=include_frame_shift,
        feat_dtype=feat_dtype,
    )
    if split_params:
        # parameters split across two objects vs. one combined object below
        params = data.DataSetParams(batch_size=batch_size)
        data_params = data.SpectDataParams(sos=sos, eos=eos)
    else:
        params = data.SpectDataSetParams(batch_size=batch_size, sos=sos, eos=eos)
        data_params = None
    # with no alignment subdirectory, the ali element of each batch is None
    data_loader = data.SpectTrainingDataLoader(
        temp_dir, params, data_params=data_params, ali_subdir=None, seed=2
    )
    assert next(iter(data_loader))[1] is None
    # with no reference subdirectory, ref and ref_sizes are both None
    data_loader = data.SpectTrainingDataLoader(
        temp_dir, params, data_params=data_params, ref_subdir=None, seed=2
    )
    assert next(iter(data_loader))[2] is None
    assert next(iter(data_loader))[4] is None
    data_loader = data.SpectTrainingDataLoader(
        temp_dir, params, data_params=data_params, seed=2
    )
    def _get_epoch(sort):
        # Drain one full epoch, checking per-batch shapes, and return every
        # utterance padded to the epoch-wide maximum lengths. With sort=True
        # the utterances are put into a canonical order so two differently
        # shuffled epochs can still be compared for identical contents.
        ep_feats, ep_ali, ep_ref = [], [], []
        ep_feat_sizes, ep_ref_sizes = [], []
        max_T = 0
        max_R = 0
        batch_first = data_loader.batch_first
        for b_feats, b_ali, b_ref, b_feat_sizes, b_ref_sizes in data_loader:
            if not batch_first:
                # normalize to batch-first for the shape checks below
                b_feats = b_feats.transpose(0, 1)
                b_ali = b_ali.transpose(0, 1)
                b_ref = b_ref.transpose(0, 1)
            max_T = max(max_T, b_feat_sizes[0])
            R_star = max(b_ref_sizes)
            max_R = max(max_R, R_star)
            assert b_feats.shape[0] == batch_size
            assert b_ali.shape[0] == batch_size
            assert b_ref.shape[0] == batch_size
            assert b_feats.shape[-1] == num_filts
            # b_feat_sizes[0] is the longest length in the batch
            assert b_feats.shape[1] == b_feat_sizes[0]
            assert b_ali.shape[1] == b_feat_sizes[0]
            assert b_ref.shape[1] == R_star
            # refs gain a trailing dimension when frame shifts are included
            assert b_ref.dim() == (3 if include_frame_shift else 2)
            ep_feats += tuple(b_feats)
            ep_ali += tuple(b_ali)
            ep_ref += tuple(b_ref)
            ep_feat_sizes += tuple(b_feat_sizes)
            ep_ref_sizes += tuple(b_ref_sizes)
        assert len(ep_feats) == num_utts
        assert len(ep_ali) == num_utts
        for i in range(num_utts):
            # re-pad every utterance to the epoch-wide maxima so tensors from
            # different batches can later be stacked and compared
            ep_feats[i] = torch.nn.functional.pad(
                ep_feats[i], (0, 0, 0, max_T - ep_ali[i].shape[0])
            )
            ep_ali[i] = torch.nn.functional.pad(
                ep_ali[i], (0, max_T - ep_ali[i].shape[0]), value=INDEX_PAD_VALUE
            )
            if include_frame_shift:
                ep_ref[i] = torch.nn.functional.pad(
                    ep_ref[i],
                    (0, 0, 0, max_R - ep_ref[i].shape[0]),
                    value=INDEX_PAD_VALUE,
                )
            else:
                ep_ref[i] = torch.nn.functional.pad(
                    ep_ref[i], (0, max_R - ep_ref[i].shape[0]), value=INDEX_PAD_VALUE
                )
        if sort:
            ep_feats, ep_ali, ep_ref, ep_feat_sizes, ep_ref_sizes = zip(
                *sorted(
                    zip(ep_feats, ep_ali, ep_ref, ep_feat_sizes, ep_ref_sizes),
                    key=lambda x: (-x[3], -x[4], x[0][0, 0]),
                )
            )
        return ep_feats, ep_ali, ep_ref, ep_feat_sizes, ep_ref_sizes
    def _compare_epochs(ep_a, ep_b, same):
        # assert that two epochs have identical (same=True) or differing
        # (same=False) contents in iteration order
        a_feats, a_ali, a_ref, a_feat_sizes, a_ref_sizes = ep_a
        b_feats, b_ali, b_ref, b_feat_sizes, b_ref_sizes = ep_b
        a_feats, b_feats = torch.stack(a_feats), torch.stack(b_feats)
        a_ali, b_ali = torch.stack(a_ali), torch.stack(b_ali)
        a_ref, b_ref = torch.stack(a_ref), torch.stack(b_ref)
        if same:
            assert a_feat_sizes == b_feat_sizes
            assert a_ref_sizes == b_ref_sizes
            assert torch.allclose(a_feats, b_feats)
            assert torch.all(a_ali == b_ali)
            assert torch.all(a_ref == b_ref)
        else:
            assert a_feat_sizes != b_feat_sizes
            assert a_ref_sizes != b_ref_sizes
            assert not torch.allclose(a_feats, b_feats)
            assert torch.any(a_ali != b_ali)
            assert torch.any(a_ref != b_ref)
    # consecutive epochs should be shuffled differently...
    ep0 = _get_epoch(False)
    ep1 = _get_epoch(False)
    _compare_epochs(ep0, ep1, False)
    # ...but contain the same utterances when put in canonical order
    _compare_epochs(_get_epoch(True), _get_epoch(True), True)
    # rewinding to epoch 1 must reproduce it exactly
    data_loader.epoch = 1
    _compare_epochs(ep1, _get_epoch(False), True)
    # a fresh loader with the same seed (and workers) reproduces both epochs
    data_loader = data.SpectTrainingDataLoader(
        temp_dir, params, data_params=data_params, num_workers=2, seed=2
    )
    _compare_epochs(ep0, _get_epoch(False), True)
    _compare_epochs(ep1, _get_epoch(False), True)
    # batch_first only changes the layout, not the shuffling order
    data_loader.batch_first = False
    data_loader.epoch = 0
    _compare_epochs(ep0, _get_epoch(False), True)
    _compare_epochs(ep1, _get_epoch(False), True)
@pytest.mark.cpu
@pytest.mark.parametrize("eos", [None, -1])
@pytest.mark.parametrize("sos", [None, -2])
@pytest.mark.parametrize("split_params", [True, False])
@pytest.mark.parametrize("include_frame_shift", [True, False])
@pytest.mark.parametrize("feat_dtype", [torch.float, torch.int])
def test_spect_evaluation_data_loader(
    temp_dir,
    populate_torch_dir,
    sos,
    eos,
    split_params,
    include_frame_shift,
    feat_dtype,
):
    """SpectEvaluationDataLoader yields deterministic, in-order batches."""
    torch.manual_seed(41)
    # pre-create the expected subdirectory layout under temp_dir
    feat_dir = os.path.join(temp_dir, "feat")
    ali_dir = os.path.join(temp_dir, "ali")
    os.makedirs(feat_dir)
    os.makedirs(ali_dir)
    batch_size = 5
    if split_params:
        # parameters split across two objects vs. one combined object below
        params = data.DataSetParams(batch_size=batch_size)
        data_params = data.SpectDataParams(sos=sos, eos=eos)
    else:
        params = data.SpectDataSetParams(batch_size=batch_size, sos=sos, eos=eos)
        data_params = None
    feats, ali, ref, feat_sizes, ref_sizes, utt_ids = populate_torch_dir(
        temp_dir, 20, include_frame_shift=include_frame_shift, feat_dtype=feat_dtype
    )
    if sos is not None:
        # prepend the start-of-sequence token to every reference copy; with
        # frame shifts a token is a triple whose non-token columns are -1
        if include_frame_shift:
            sos_sym = torch.full((3,), -1, dtype=torch.long)
            sos_sym[0] = sos
            sos_sym = sos_sym.unsqueeze(0)
        else:
            sos_sym = torch.full((1,), sos, dtype=torch.long)
        ref = [torch.cat([sos_sym, x], 0) for x in ref]
        ref_sizes = [x + 1 for x in ref_sizes]
    if eos is not None:
        # append the end-of-sequence token to every reference copy.
        # NOTE(review): unlike the sos branch, the non-token columns here are
        # filled with eos rather than -1 -- confirm this mirrors the
        # loader's own eos handling
        if include_frame_shift:
            eos_sym = torch.full((3,), eos, dtype=torch.long)
            eos_sym[0] = eos
            eos_sym = eos_sym.unsqueeze(0)
        else:
            eos_sym = torch.full((1,), eos, dtype=torch.long)
        ref = [torch.cat([x, eos_sym], 0) for x in ref]
        ref_sizes = [x + 1 for x in ref_sizes]
    # without ali/ref subdirectories the corresponding elements are None
    data_loader = data.SpectEvaluationDataLoader(
        temp_dir, params, data_params=data_params, ali_subdir=None, ref_subdir=None
    )
    assert next(iter(data_loader))[1:3] == (None, None)
    assert next(iter(data_loader))[4] is None
    data_loader = data.SpectEvaluationDataLoader(
        temp_dir, params, data_params=data_params
    )
    def _compare_data_loader():
        # walk the loader once, checking each batch of 5 utterances against
        # the reference data produced by populate_torch_dir
        batch_first = data_loader.batch_first
        assert len(data_loader) == 4
        cur_idx = 0
        for (
            b_feats,
            b_ali,
            b_ref,
            b_feat_sizes,
            b_ref_sizes,
            b_utt_ids,
        ) in data_loader:
            if not batch_first:
                # normalize to batch-first for the comparisons below
                b_feats = b_feats.transpose(0, 1)
                b_ali = b_ali.transpose(0, 1)
                b_ref = b_ref.transpose(0, 1)
            R_star = max(b_ref_sizes)
            assert tuple(b_feats.shape) == (5, b_feat_sizes[0], 5)
            assert tuple(b_ali.shape) == (5, b_feat_sizes[0])
            if include_frame_shift:
                assert tuple(b_ref.shape) == (5, R_star, 3)
            else:
                assert tuple(b_ref.shape) == (5, R_star)
            # batches preserve file order overall, but the 5 utterances
            # within each batch are sorted by decreasing feature length
            s_feats, s_ali, s_ref, s_feat_sizes, s_ref_sizes, s_utt_ids = zip(
                *sorted(
                    zip(
                        feats[cur_idx : cur_idx + 5],
                        ali[cur_idx : cur_idx + 5],
                        ref[cur_idx : cur_idx + 5],
                        feat_sizes[cur_idx : cur_idx + 5],
                        ref_sizes[cur_idx : cur_idx + 5],
                        utt_ids[cur_idx : cur_idx + 5],
                    ),
                    key=lambda x: -x[3],
                )
            )
            assert b_utt_ids == s_utt_ids
            assert tuple(b_feat_sizes) == s_feat_sizes
            assert tuple(b_ref_sizes) == s_ref_sizes
            for a, b in zip(b_feats, s_feats):
                # features match up to their true length, zero-padded past it
                assert torch.allclose(a[: b.shape[0]], b)
                assert torch.allclose(
                    a[b.shape[0] :], torch.tensor([0], dtype=feat_dtype)
                )
            for a, b in zip(b_ali, s_ali):
                assert torch.all(a[: b.shape[0]] == b)
                assert torch.all(a[b.shape[0] :] == torch.tensor([INDEX_PAD_VALUE]))
            for a, b in zip(b_ref, s_ref):
                assert torch.all(a[: b.shape[0]] == b)
                assert torch.all(a[b.shape[0] :] == torch.tensor([INDEX_PAD_VALUE]))
            cur_idx += 5
    # evaluation order must be deterministic across repeated iteration,
    # worker counts, and batch_first layout
    _compare_data_loader()
    _compare_data_loader()
    data_loader = data.SpectEvaluationDataLoader(
        temp_dir, params, data_params=data_params, num_workers=2
    )
    _compare_data_loader()
    data_loader.batch_first = False
    _compare_data_loader()
@pytest.mark.cpu
@pytest.mark.parametrize("split_params", [True, False])
def test_window_training_data_loader(temp_dir, populate_torch_dir, split_params):
    """ContextWindowTrainingDataLoader epochs are seed- and epoch-reproducible.

    Checks window/alignment shapes over two consecutive epochs, then verifies
    that a fresh loader started at ``init_epoch=1`` (with workers) and a
    manual ``epoch = 1`` rewind both reproduce epoch 1 exactly.
    """
    populate_torch_dir(temp_dir, 5, num_filts=2)
    seed, batch_size, context_left, context_right = 2, 5, 1, 1
    if split_params:
        # parameters split across two objects vs. one combined object below
        params = data.DataSetParams(batch_size=batch_size, drop_last=True)
        data_params = data.ContextWindowDataParams(
            context_left=context_left, context_right=context_right
        )
    else:
        params = data.ContextWindowDataSetParams(
            context_left=context_left,
            context_right=context_right,
            batch_size=batch_size,
            drop_last=True,
        )
        data_params = None
    data_loader = data.ContextWindowTrainingDataLoader(
        temp_dir, params, data_params=data_params, seed=seed
    )
    total_windows_ep0 = 0
    for feats, alis in data_loader:
        windows = feats.shape[0]
        # each window is (left + center + right) = 3 frames of 2 filters
        assert tuple(feats.shape) == (windows, 3, 2)
        assert tuple(alis.shape) == (windows,)
        total_windows_ep0 += windows
    assert total_windows_ep0 >= batch_size
    feats_ep1_a, alis_ep1_a = [], []
    total_windows_ep1 = 0
    for feats, alis in data_loader:
        # fixed: this loop previously read the stale loop variables from the
        # first epoch ("feat"/"ali"), so epoch 1's shapes were never actually
        # checked and its window count was computed from the wrong tensor
        windows = feats.shape[0]
        assert tuple(feats.shape) == (windows, 3, 2)
        assert tuple(alis.shape) == (windows,)
        feats_ep1_a.append(feats)
        alis_ep1_a.append(alis)
        total_windows_ep1 += windows
    # both epochs cover the same data, so window totals must agree
    assert total_windows_ep0 == total_windows_ep1
    data_loader = data.ContextWindowTrainingDataLoader(
        temp_dir,
        params,
        init_epoch=1,
        data_params=data_params,
        num_workers=2,
        seed=seed,
    )
    feats_ep1_b, alis_ep1_b = [], []
    for feats, alis in data_loader:
        feats_ep1_b.append(feats)
        alis_ep1_b.append(alis)
    # a fresh loader with the same seed, started at epoch 1, matches epoch 1
    assert all(
        torch.allclose(feats_a, feats_b)
        for (feats_a, feats_b) in zip(feats_ep1_a, feats_ep1_b)
    )
    assert all(
        torch.all(alis_a == alis_b) for (alis_a, alis_b) in zip(alis_ep1_a, alis_ep1_b)
    )
    # rewinding the same loader back to epoch 1 also reproduces it
    data_loader.epoch = 1
    feats_ep1_c, alis_ep1_c = [], []
    for feats, alis in data_loader:
        feats_ep1_c.append(feats)
        alis_ep1_c.append(alis)
    assert all(
        torch.allclose(feats_a, feats_c)
        for (feats_a, feats_c) in zip(feats_ep1_a, feats_ep1_c)
    )
    assert all(
        torch.all(alis_a == alis_c) for (alis_a, alis_c) in zip(alis_ep1_a, alis_ep1_c)
    )
@pytest.mark.cpu
@pytest.mark.parametrize("split_params", [True, False])
def test_window_evaluation_data_loader(temp_dir, populate_torch_dir, split_params):
    """ContextWindowEvaluationDataLoader yields deterministic, in-order windows."""
    torch.manual_seed(1)
    # pre-create the expected subdirectory layout under temp_dir
    feat_dir = os.path.join(temp_dir, "feat")
    ali_dir = os.path.join(temp_dir, "ali")
    os.makedirs(feat_dir)
    os.makedirs(ali_dir)
    if split_params:
        # parameters split across two objects vs. one combined object below
        params = data.DataSetParams(batch_size=5)
        data_params = data.ContextWindowDataParams(context_left=1, context_right=1)
    else:
        params = data.ContextWindowDataSetParams(
            context_left=1, context_right=1, batch_size=5
        )
        data_params = None
    feats, alis, _, feat_sizes, _, utt_ids = populate_torch_dir(
        temp_dir, 20, include_ref=False
    )
    def _compare_data_loader(data_loader):
        # 20 utterances / 5 per batch = 4 batches, yielded in file order
        assert len(data_loader) == 4
        cur_idx = 0
        for b_feats, b_alis, b_feat_sizes, b_utt_ids in data_loader:
            # each window is (left + center + right) = 3 frames of 5 filters
            assert tuple(b_feats.shape[1:]) == (3, 5)
            # one window per frame across the 5 utterances in the batch
            assert b_feats.shape[0] == sum(b_feat_sizes)
            assert tuple(b_utt_ids) == tuple(utt_ids[cur_idx : cur_idx + 5])
            # the center frame of each window is the original frame
            assert torch.allclose(
                b_feats[:, 1], torch.cat(feats[cur_idx : cur_idx + 5])
            )
            assert torch.all(b_alis == torch.cat(alis[cur_idx : cur_idx + 5]))
            cur_idx += 5
    # with no alignment subdirectory, the ali element is None
    data_loader = data.ContextWindowEvaluationDataLoader(
        temp_dir, params, data_params=data_params, ali_subdir=None
    )
    assert next(iter(data_loader))[1] is None
    data_loader = data.ContextWindowEvaluationDataLoader(
        temp_dir, params, data_params=data_params
    )
    # results must be identical across repeated iteration and worker counts
    _compare_data_loader(data_loader)
    _compare_data_loader(data_loader)
    data_loader = data.ContextWindowEvaluationDataLoader(
        temp_dir, params, data_params=data_params, num_workers=2
    )
    _compare_data_loader(data_loader)
@pytest.mark.cpu
def test_pydrobert_param_optuna_hooks():
    """The data param classes plug into pydrobert.param.optuna tuning."""
    # both packages are optional; skip the test if either is missing
    poptuna = pytest.importorskip("pydrobert.param.optuna")
    optuna = pytest.importorskip("optuna")
    for class_ in (
        data.DataSetParams,
        data.SpectDataSetParams,
        data.ContextWindowDataParams,
        data.ContextWindowDataSetParams,
    ):
        assert issubclass(class_, poptuna.TunableParameterized)
    global_dict = {
        "data_set": data.DataSetParams(),
        "spect_data": data.SpectDataParams(),
        "spect_data_set": data.SpectDataSetParams(),
        "context_window_data": data.ContextWindowDataParams(),
        "context_window_data_set": data.ContextWindowDataSetParams(),
    }
    # every listed parameter except spect_data.eos should be tunable
    assert {
        "data_set.batch_size",
        "spect_data.eos",
        "spect_data_set.batch_size",
        "context_window_data.reverse",
        "context_window_data_set.batch_size",
    } - poptuna.get_param_dict_tunable(global_dict) == {"spect_data.eos"}
    def objective(trial):
        # minimize the suggested batch size
        param_dict = poptuna.suggest_param_dict(trial, global_dict)
        return param_dict["data_set"].batch_size
    sampler = optuna.samplers.RandomSampler(seed=5)
    study = optuna.create_study(sampler=sampler)
    study.optimize(objective, n_trials=10)
    # every tunable parameter should appear in the best trial's parameters
    assert (
        not {
            "data_set.batch_size",
            "spect_data_set.batch_size",
            "context_window_data.reverse",
            "context_window_data_set.batch_size",
        }
        - set(study.best_params)
    )
    # with 10 random trials the best batch size should have dropped below 7
    assert study.best_params["data_set.batch_size"] < 7
| true | true |
f731b8a53d9c151e77995f19a47fbebe678c89cd | 769 | py | Python | create.py | keatonkraiger/pysot-mot | b1447dc9569339592a09b25b097b363cff9f6de4 | [
"Apache-2.0"
] | null | null | null | create.py | keatonkraiger/pysot-mot | b1447dc9569339592a09b25b097b363cff9f6de4 | [
"Apache-2.0"
] | null | null | null | create.py | keatonkraiger/pysot-mot | b1447dc9569339592a09b25b097b363cff9f6de4 | [
"Apache-2.0"
] | null | null | null | import os
import re
import imageio
from glob import glob
from PIL import Image
SAVE_FORMAT = 'gif'
video_name = 'ants1'
image_folder = os.path.join(os.getcwd(), 'demo/demo_images/')
#images = [img for img in os.listdir(image_folder) if img.endswith(".jpg")]
#images.sort(key=lambda var:[int(x) if x.isdigit() else x for
# x in re.findall(r'[^0-9]|[0-9]+', var)])
if SAVE_FORMAT == 'gif':
images = []
images = glob("demo/demo_images/*.jpg")
images.sort(key=lambda var:[int(x) if x.isdigit() else x for
x in re.findall(r'[^0-9]|[0-9]+', var)])
img, *imgs = [Image.open(f) for f in images]
img.save(fp='demo/output/{}.gif'.format(video_name), format='GIF', append_images=imgs,
save_all=True, duration=10, loop=0)
| 24.03125 | 90 | 0.638492 | import os
import re
import imageio
from glob import glob
from PIL import Image
SAVE_FORMAT = 'gif'
video_name = 'ants1'
image_folder = os.path.join(os.getcwd(), 'demo/demo_images/')
if SAVE_FORMAT == 'gif':
images = []
images = glob("demo/demo_images/*.jpg")
images.sort(key=lambda var:[int(x) if x.isdigit() else x for
x in re.findall(r'[^0-9]|[0-9]+', var)])
img, *imgs = [Image.open(f) for f in images]
img.save(fp='demo/output/{}.gif'.format(video_name), format='GIF', append_images=imgs,
save_all=True, duration=10, loop=0)
| true | true |
f731b9052df0f2dc9c9b090455306ea466ce939c | 253 | py | Python | cmdline_provenance/__init__.py | znicholls/cmdline_provenance | c9df55a9e4b0e7435499993c39eb9ff7e360b1c6 | [
"MIT"
] | null | null | null | cmdline_provenance/__init__.py | znicholls/cmdline_provenance | c9df55a9e4b0e7435499993c39eb9ff7e360b1c6 | [
"MIT"
] | null | null | null | cmdline_provenance/__init__.py | znicholls/cmdline_provenance | c9df55a9e4b0e7435499993c39eb9ff7e360b1c6 | [
"MIT"
] | null | null | null | """Utilities for capturing the history of commands used to produce a given output"""
from .cmdline_provenance import new_log
from .cmdline_provenance import read_log
from .cmdline_provenance import write_log
# __all__ entries must be the public names as strings, not the function
# objects themselves; otherwise tools that inspect __all__ (and strict
# star-import checks) misbehave.
__all__ = ["new_log", "read_log", "write_log"]
| 25.3 | 84 | 0.810277 |
from .cmdline_provenance import new_log
from .cmdline_provenance import read_log
from .cmdline_provenance import write_log
__all__ = [new_log, read_log, write_log]
| true | true |
f731b912d65c6127ea588331a16ed36a53def65b | 3,387 | py | Python | homeassistant/components/tmb/sensor.py | tbarbette/core | 8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c | [
"Apache-2.0"
] | 6 | 2020-07-18T16:33:25.000Z | 2021-09-26T09:52:04.000Z | homeassistant/components/tmb/sensor.py | tbarbette/core | 8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c | [
"Apache-2.0"
] | 58 | 2020-08-03T07:33:02.000Z | 2022-03-31T06:02:05.000Z | homeassistant/components/tmb/sensor.py | tbarbette/core | 8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c | [
"Apache-2.0"
] | 14 | 2018-08-19T16:28:26.000Z | 2021-09-02T18:26:53.000Z | """Support for TMB (Transports Metropolitans de Barcelona) Barcelona public transport."""
from datetime import timedelta
import logging
from requests import HTTPError
from tmb import IBus
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import ATTR_ATTRIBUTION, CONF_NAME, TIME_MINUTES
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Data provided by Transport Metropolitans de Barcelona"
ICON = "mdi:bus-clock"
CONF_APP_ID = "app_id"
CONF_APP_KEY = "app_key"
CONF_LINE = "line"
CONF_BUS_STOP = "stop"
CONF_BUS_STOPS = "stops"
ATTR_BUS_STOP = "stop"
ATTR_LINE = "line"
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=60)
LINE_STOP_SCHEMA = vol.Schema(
{
vol.Required(CONF_BUS_STOP): cv.string,
vol.Required(CONF_LINE): cv.string,
vol.Optional(CONF_NAME): cv.string,
}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_APP_ID): cv.string,
vol.Required(CONF_APP_KEY): cv.string,
vol.Required(CONF_BUS_STOPS): vol.All(cv.ensure_list, [LINE_STOP_SCHEMA]),
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the sensors."""
    client = IBus(config[CONF_APP_ID], config[CONF_APP_KEY])
    entities = []
    for entry in config.get(CONF_BUS_STOPS):
        line = entry[CONF_LINE]
        stop = entry[CONF_BUS_STOP]
        custom_name = entry.get(CONF_NAME)
        if custom_name:
            name = f"{line} - {custom_name} ({stop})"
        else:
            name = f"{line} - {stop}"
        entities.append(TMBSensor(client, stop, line, name))
    add_entities(entities, True)
class TMBSensor(Entity):
    """Representation of the next-bus forecast for one TMB line/stop pair."""

    def __init__(self, ibus_client, stop, line, name):
        """Set up the sensor for the given iBus client, stop and line."""
        self._client = ibus_client
        self._stop_code = stop
        self._line_code = line.upper()
        self._display_name = name
        self._measure_unit = TIME_MINUTES
        self._forecast = None

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._display_name

    @property
    def icon(self):
        """Return the icon for the frontend."""
        return ICON

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement."""
        return self._measure_unit

    @property
    def unique_id(self):
        """Return a unique, HASS-friendly identifier for this entity."""
        return f"{self._stop_code}_{self._line_code}"

    @property
    def state(self):
        """Return the next departure time."""
        return self._forecast

    @property
    def device_state_attributes(self):
        """Return the state attributes of the last update."""
        return {
            ATTR_ATTRIBUTION: ATTRIBUTION,
            ATTR_BUS_STOP: self._stop_code,
            ATTR_LINE: self._line_code,
        }

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update(self):
        """Get the next bus information."""
        try:
            self._forecast = self._client.get_stop_forecast(
                self._stop_code, self._line_code
            )
        except HTTPError:
            _LOGGER.error(
                "Unable to fetch data from TMB API. Please check your API keys are valid"
            )
| 27.991736 | 89 | 0.660171 | from datetime import timedelta
import logging
from requests import HTTPError
from tmb import IBus
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import ATTR_ATTRIBUTION, CONF_NAME, TIME_MINUTES
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Data provided by Transport Metropolitans de Barcelona"
ICON = "mdi:bus-clock"
CONF_APP_ID = "app_id"
CONF_APP_KEY = "app_key"
CONF_LINE = "line"
CONF_BUS_STOP = "stop"
CONF_BUS_STOPS = "stops"
ATTR_BUS_STOP = "stop"
ATTR_LINE = "line"
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=60)
LINE_STOP_SCHEMA = vol.Schema(
{
vol.Required(CONF_BUS_STOP): cv.string,
vol.Required(CONF_LINE): cv.string,
vol.Optional(CONF_NAME): cv.string,
}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_APP_ID): cv.string,
vol.Required(CONF_APP_KEY): cv.string,
vol.Required(CONF_BUS_STOPS): vol.All(cv.ensure_list, [LINE_STOP_SCHEMA]),
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
ibus_client = IBus(config[CONF_APP_ID], config[CONF_APP_KEY])
sensors = []
for line_stop in config.get(CONF_BUS_STOPS):
line = line_stop[CONF_LINE]
stop = line_stop[CONF_BUS_STOP]
if line_stop.get(CONF_NAME):
name = f"{line} - {line_stop[CONF_NAME]} ({stop})"
else:
name = f"{line} - {stop}"
sensors.append(TMBSensor(ibus_client, stop, line, name))
add_entities(sensors, True)
class TMBSensor(Entity):
def __init__(self, ibus_client, stop, line, name):
self._ibus_client = ibus_client
self._stop = stop
self._line = line.upper()
self._name = name
self._unit = TIME_MINUTES
self._state = None
@property
def name(self):
return self._name
@property
def icon(self):
return ICON
@property
def unit_of_measurement(self):
return self._unit
@property
def unique_id(self):
return f"{self._stop}_{self._line}"
@property
def state(self):
return self._state
@property
def device_state_attributes(self):
return {
ATTR_ATTRIBUTION: ATTRIBUTION,
ATTR_BUS_STOP: self._stop,
ATTR_LINE: self._line,
}
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
try:
self._state = self._ibus_client.get_stop_forecast(self._stop, self._line)
except HTTPError:
_LOGGER.error(
"Unable to fetch data from TMB API. Please check your API keys are valid"
)
| true | true |
f731b91eec02eb0ee49fd3752978d19259b17ce2 | 5,140 | py | Python | my_memory_card.py | all0ws/memorecard | ba90d4fd0663f11c963907290c19ad1305ba67cb | [
"CC0-1.0"
] | null | null | null | my_memory_card.py | all0ws/memorecard | ba90d4fd0663f11c963907290c19ad1305ba67cb | [
"CC0-1.0"
] | null | null | null | my_memory_card.py | all0ws/memorecard | ba90d4fd0663f11c963907290c19ad1305ba67cb | [
"CC0-1.0"
] | null | null | null |
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import (QApplication, QWidget, QHBoxLayout, QVBoxLayout, QGroupBox, QButtonGroup, QRadioButton, QPushButton, QLabel)
from random import shuffle,randint
class Question():
    """A single quiz question with one correct and three wrong answers."""

    def __init__(self, question, right_answer, wrong1, wrong2, wrong3):
        """Store the question text and its four answer options."""
        self.question = question
        self.right_answer = right_answer
        self.wrong1 = wrong1
        self.wrong2 = wrong2
        self.wrong3 = wrong3
question_list=[]
question_list.append (Question('Когда основали nvidia?','1993','2000','1874','2007'))
question_list.append (Question('Как зовут Путина?',' Вова','Владимер','Путин','Вася'))
question_list.append (Question('Государственный язык Кореи','Корейский ','Японский','Китайский','Русский'))
question_list.append (Question('Кто написал стих Ода','Ломоносов','Толстой','Царь батюшка','Я'))
question_list.append (Question('какой iphone последний?',' 12','11','xr','13'))
question_list.append (Question('Когда умер Стив Джобс?',' 2007','2020','1999','2000'))
question_list.append (Question('Кто такой Путин?','Призедент','Сталин','человек','что-то'))
question_list.append (Question('Сколько букв в Русском алфавите','33','32','26','23'))
question_list.append (Question('Сколько букв в английком алфавите?','26','33','22','31'))
question_list.append (Question('Сколько символов в Корейском алфавите','51','21','33','41'))
app = QApplication([])
btn_OK = QPushButton('Ответить')
lb_Question = QLabel('Самый сложный вопрос в мире!')
RadioGroupBox = QGroupBox("Варианты ответов")
rbtn_1 = QRadioButton('Вариант 1')
rbtn_2 = QRadioButton('Вариант 2')
rbtn_3 = QRadioButton('Вариант 3')
rbtn_4 = QRadioButton('Вариант 4')
RadioGroup = QButtonGroup()
RadioGroup.addButton(rbtn_1)
RadioGroup.addButton(rbtn_2)
RadioGroup.addButton(rbtn_3)
RadioGroup.addButton(rbtn_4)
layout_ans1 = QHBoxLayout()
layout_ans2 = QVBoxLayout()
layout_ans3 = QVBoxLayout()
layout_ans2.addWidget(rbtn_1)
layout_ans2.addWidget(rbtn_2)
layout_ans3.addWidget(rbtn_3)
layout_ans3.addWidget(rbtn_4)
layout_ans1.addLayout(layout_ans2)
layout_ans1.addLayout(layout_ans3)
RadioGroupBox.setLayout(layout_ans1)
AnsGroupBox = QGroupBox("Результат теста")
lb_Result = QLabel('прав ты или нет?')
lb_Correct = QLabel('ответ будет тут!')
layout_res = QVBoxLayout()
layout_res.addWidget(lb_Result, alignment=(Qt.AlignLeft | Qt.AlignTop))
layout_res.addWidget(lb_Correct, alignment=Qt.AlignHCenter, stretch=2)
AnsGroupBox.setLayout(layout_res)
layout_line1 = QHBoxLayout()
layout_line2 = QHBoxLayout()
layout_line3 = QHBoxLayout()
layout_line1.addWidget(lb_Question, alignment=(Qt.AlignHCenter | Qt.AlignVCenter))
layout_line2.addWidget(RadioGroupBox)
layout_line2.addWidget(AnsGroupBox)
AnsGroupBox.hide()
layout_line3.addStretch(1)
layout_line3.addWidget(btn_OK, stretch=2)
layout_line3.addStretch(1)
layout_card = QVBoxLayout()
layout_card.addLayout(layout_line1, stretch=2)
layout_card.addLayout(layout_line2, stretch=8)
layout_card.addStretch(1)
layout_card.addLayout(layout_line3, stretch=1)
layout_card.addStretch(1)
layout_card.setSpacing(5)
def show_result():
    """Show the answer panel and switch the button to next-question mode."""
    RadioGroupBox.hide()
    AnsGroupBox.show()
    btn_OK.setText('Следующий вопрос')
def show_question():
    """Show the question panel, clear all radio buttons, reset the button."""
    RadioGroupBox.show()
    AnsGroupBox.hide()
    btn_OK.setText('Ответить')
    # exclusivity must be dropped temporarily so that every radio button
    # can be unchecked at once, then restored
    RadioGroup.setExclusive(False)
    rbtn_1.setChecked(False)
    rbtn_2.setChecked(False)
    rbtn_3.setChecked(False)
    rbtn_4.setChecked(False)
    RadioGroup.setExclusive(True)
answers = [rbtn_1, rbtn_2, rbtn_3, rbtn_4]
def ask(q: Question):
    """Write the question and its answer options into the widgets.

    The answer widgets are shuffled first, so the correct option ends up at
    a random position on screen (but always at answers[0] in the list).
    """
    shuffle(answers)
    answers[0].setText(q.right_answer)
    answers[1].setText(q.wrong1)
    answers[2].setText(q.wrong2)
    answers[3].setText(q.wrong3)
    lb_Question.setText(q.question)
    lb_Correct.setText(q.right_answer)
    show_question()
def show_correct(res):
    """Display *res* in the result label and switch to the answer panel."""
    lb_Result.setText(res)
    show_result()
def check_answer():
    """If any option is selected, grade it and show the answer panel."""
    # answers[0] always holds the correct option after ask() shuffles
    if answers[0].isChecked():
        show_correct('Правильно!')
    else:
        if answers[1].isChecked() or answers[2].isChecked() or answers[3].isChecked():
            show_correct('Неверно!')
def next_question():
    """Pick a random question from question_list and display it."""
    cur_question = randint(0,len(question_list)-1)
    q= question_list[cur_question]
    ask(q)
def click_OK():
    """Button handler: grade the current answer or advance to the next one.

    The button's label doubles as the UI mode flag ('Ответить' means the
    question panel is showing and the answer should be checked).
    """
    if btn_OK.text()=='Ответить':
        check_answer()
    else:
        next_question()
window = QWidget()
window.setLayout(layout_card)
window.setWindowTitle('Memo Card')
btn_OK.clicked.connect(click_OK)
next_question()
window.show()
app.exec() | 31.533742 | 138 | 0.706809 |
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import (QApplication, QWidget, QHBoxLayout, QVBoxLayout, QGroupBox, QButtonGroup, QRadioButton, QPushButton, QLabel)
from random import shuffle,randint
class Question():
def __init__(self,question,right_answer,wrong1,wrong2 ,wrong3):
self.right_answer =right_answer
self.wrong1 =wrong1
self.wrong2 =wrong2
self.wrong3 = wrong3
self.question =question
question_list=[]
question_list.append (Question('Когда основали nvidia?','1993','2000','1874','2007'))
question_list.append (Question('Как зовут Путина?',' Вова','Владимер','Путин','Вася'))
question_list.append (Question('Государственный язык Кореи','Корейский ','Японский','Китайский','Русский'))
question_list.append (Question('Кто написал стих Ода','Ломоносов','Толстой','Царь батюшка','Я'))
question_list.append (Question('какой iphone последний?',' 12','11','xr','13'))
question_list.append (Question('Когда умер Стив Джобс?',' 2007','2020','1999','2000'))
question_list.append (Question('Кто такой Путин?','Призедент','Сталин','человек','что-то'))
question_list.append (Question('Сколько букв в Русском алфавите','33','32','26','23'))
question_list.append (Question('Сколько букв в английком алфавите?','26','33','22','31'))
question_list.append (Question('Сколько символов в Корейском алфавите','51','21','33','41'))
app = QApplication([])
btn_OK = QPushButton('Ответить')
lb_Question = QLabel('Самый сложный вопрос в мире!')
RadioGroupBox = QGroupBox("Варианты ответов")
rbtn_1 = QRadioButton('Вариант 1')
rbtn_2 = QRadioButton('Вариант 2')
rbtn_3 = QRadioButton('Вариант 3')
rbtn_4 = QRadioButton('Вариант 4')
RadioGroup = QButtonGroup()
RadioGroup.addButton(rbtn_1)
RadioGroup.addButton(rbtn_2)
RadioGroup.addButton(rbtn_3)
RadioGroup.addButton(rbtn_4)
layout_ans1 = QHBoxLayout()
layout_ans2 = QVBoxLayout()
layout_ans3 = QVBoxLayout()
layout_ans2.addWidget(rbtn_1)
layout_ans2.addWidget(rbtn_2)
layout_ans3.addWidget(rbtn_3)
layout_ans3.addWidget(rbtn_4)
layout_ans1.addLayout(layout_ans2)
layout_ans1.addLayout(layout_ans3)
RadioGroupBox.setLayout(layout_ans1)
AnsGroupBox = QGroupBox("Результат теста")
lb_Result = QLabel('прав ты или нет?')
lb_Correct = QLabel('ответ будет тут!')
layout_res = QVBoxLayout()
layout_res.addWidget(lb_Result, alignment=(Qt.AlignLeft | Qt.AlignTop))
layout_res.addWidget(lb_Correct, alignment=Qt.AlignHCenter, stretch=2)
AnsGroupBox.setLayout(layout_res)
layout_line1 = QHBoxLayout()
layout_line2 = QHBoxLayout()
layout_line3 = QHBoxLayout()
layout_line1.addWidget(lb_Question, alignment=(Qt.AlignHCenter | Qt.AlignVCenter))
layout_line2.addWidget(RadioGroupBox)
layout_line2.addWidget(AnsGroupBox)
AnsGroupBox.hide()
layout_line3.addStretch(1)
layout_line3.addWidget(btn_OK, stretch=2)
layout_line3.addStretch(1)
layout_card = QVBoxLayout()
layout_card.addLayout(layout_line1, stretch=2)
layout_card.addLayout(layout_line2, stretch=8)
layout_card.addStretch(1)
layout_card.addLayout(layout_line3, stretch=1)
layout_card.addStretch(1)
layout_card.setSpacing(5)
def show_result():
RadioGroupBox.hide()
AnsGroupBox.show()
btn_OK.setText('Следующий вопрос')
def show_question():
RadioGroupBox.show()
AnsGroupBox.hide()
btn_OK.setText('Ответить')
RadioGroup.setExclusive(False)
rbtn_1.setChecked(False)
rbtn_2.setChecked(False)
rbtn_3.setChecked(False)
rbtn_4.setChecked(False)
RadioGroup.setExclusive(True)
answers = [rbtn_1, rbtn_2, rbtn_3, rbtn_4]
def ask(q: Question):
shuffle(answers)
answers[0].setText(q.right_answer)
answers[1].setText(q.wrong1)
answers[2].setText(q.wrong2)
answers[3].setText(q.wrong3)
lb_Question.setText(q.question)
lb_Correct.setText(q.right_answer)
show_question()
def show_correct(res):
lb_Result.setText(res)
show_result()
def check_answer():
if answers[0].isChecked():
show_correct('Правильно!')
else:
if answers[1].isChecked() or answers[2].isChecked() or answers[3].isChecked():
show_correct('Неверно!')
def next_question():
cur_question = randint(0,len(question_list)-1)
q= question_list[cur_question]
ask(q)
def click_OK():
if btn_OK.text()=='Ответить':
check_answer()
else:
next_question()
window = QWidget()
window.setLayout(layout_card)
window.setWindowTitle('Memo Card')
btn_OK.clicked.connect(click_OK)
next_question()
window.show()
app.exec() | true | true |
f731baf2c2d2eefe7ec99e5e55ef74d57635e126 | 1,624 | py | Python | tools/mo/unit_tests/mo/ops/dft_signal_size_canonicalization_test.py | ryanloney/openvino-1 | 4e0a740eb3ee31062ba0df88fcf438564f67edb7 | [
"Apache-2.0"
] | 1,127 | 2018-10-15T14:36:58.000Z | 2020-04-20T09:29:44.000Z | tools/mo/unit_tests/mo/ops/dft_signal_size_canonicalization_test.py | ryanloney/openvino-1 | 4e0a740eb3ee31062ba0df88fcf438564f67edb7 | [
"Apache-2.0"
] | 439 | 2018-10-20T04:40:35.000Z | 2020-04-19T05:56:25.000Z | tools/mo/unit_tests/mo/ops/dft_signal_size_canonicalization_test.py | ryanloney/openvino-1 | 4e0a740eb3ee31062ba0df88fcf438564f67edb7 | [
"Apache-2.0"
] | 414 | 2018-10-17T05:53:46.000Z | 2020-04-16T17:29:53.000Z | # Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import unittest
import numpy as np
from generator import generator, generate
from openvino.tools.mo.ops.dft import FFTBase
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
@generator
class DFTSignalSizeCanonicalizationTest(unittest.TestCase):
    """Tests for FFTBase.canonicalize_signal_size."""
    @generate(*[
        # each case: (signal_size, axes, input_shape, expected_result);
        # a -1 entry in signal_size is resolved to the input dimension along
        # the corresponding axis, other entries pass through unchanged
        (int64_array([-1, 77]), int64_array([1, 2]), int64_array([2, 180, 180, 2]), int64_array([180, 77])),
        (int64_array([390, 87]), int64_array([2, 0]), int64_array([2, 180, 180, 2]), int64_array([390, 87])),
        (int64_array([600, -1, 40]),
         int64_array([3, 0, 1]),
         int64_array([7, 50, 130, 400, 2]),
         int64_array([600, 7, 40])),
        (int64_array([-1, 16, -1]),
         int64_array([3, 0, 2]),
         int64_array([7, 50, 130, 400, 2]),
         int64_array([400, 16, 130])),
        (int64_array([16, -1, -1]),
         int64_array([3, 0, 2]),
         int64_array([7, 50, 130, 400, 2]),
         int64_array([16, 7, 130])),
        (int64_array([-1, -1, 16]),
         int64_array([3, 0, 2]),
         int64_array([7, 50, 130, 400, 2]),
         int64_array([400, 7, 16])),
        (int64_array([-1, -1, -1]),
         int64_array([3, 0, 2]),
         int64_array([7, 50, 130, 400, 2]),
         int64_array([400, 7, 130])),
    ])
    def test_canonicalization(self, signal_size, axes, input_shape, expected_result):
        """The canonicalized signal_size must equal the expected array."""
        canonicalized_signal_size = FFTBase.canonicalize_signal_size(signal_size, axes, input_shape)
        self.assertTrue(np.array_equal(canonicalized_signal_size, expected_result))
| 38.666667 | 109 | 0.606527 |
import unittest
import numpy as np
from generator import generator, generate
from openvino.tools.mo.ops.dft import FFTBase
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
@generator
class DFTSignalSizeCanonicalizationTest(unittest.TestCase):
@generate(*[
(int64_array([-1, 77]), int64_array([1, 2]), int64_array([2, 180, 180, 2]), int64_array([180, 77])),
(int64_array([390, 87]), int64_array([2, 0]), int64_array([2, 180, 180, 2]), int64_array([390, 87])),
(int64_array([600, -1, 40]),
int64_array([3, 0, 1]),
int64_array([7, 50, 130, 400, 2]),
int64_array([600, 7, 40])),
(int64_array([-1, 16, -1]),
int64_array([3, 0, 2]),
int64_array([7, 50, 130, 400, 2]),
int64_array([400, 16, 130])),
(int64_array([16, -1, -1]),
int64_array([3, 0, 2]),
int64_array([7, 50, 130, 400, 2]),
int64_array([16, 7, 130])),
(int64_array([-1, -1, 16]),
int64_array([3, 0, 2]),
int64_array([7, 50, 130, 400, 2]),
int64_array([400, 7, 16])),
(int64_array([-1, -1, -1]),
int64_array([3, 0, 2]),
int64_array([7, 50, 130, 400, 2]),
int64_array([400, 7, 130])),
])
def test_canonicalization(self, signal_size, axes, input_shape, expected_result):
canonicalized_signal_size = FFTBase.canonicalize_signal_size(signal_size, axes, input_shape)
self.assertTrue(np.array_equal(canonicalized_signal_size, expected_result))
| true | true |
f731bcca089c5e02b3aa0fbb5699fe5db61f89d1 | 30,419 | py | Python | back of code/RSCFN/rsden/models/rsn_cluster # without 0 fuse cluster.py | lidongyv/Monocular-depth-esitimation-with-region-support-cvpr | 7715c91b9c9f88de5c0233923c3a073edf9b2ca8 | [
"Apache-2.0"
] | null | null | null | back of code/RSCFN/rsden/models/rsn_cluster # without 0 fuse cluster.py | lidongyv/Monocular-depth-esitimation-with-region-support-cvpr | 7715c91b9c9f88de5c0233923c3a073edf9b2ca8 | [
"Apache-2.0"
] | null | null | null | back of code/RSCFN/rsden/models/rsn_cluster # without 0 fuse cluster.py | lidongyv/Monocular-depth-esitimation-with-region-support-cvpr | 7715c91b9c9f88de5c0233923c3a073edf9b2ca8 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# @Author: lidong
# @Date: 2018-03-20 18:01:52
# @Last Modified by: yulidong
# @Last Modified time: 2018-11-06 20:45:11
import torch
import numpy as np
import torch.nn as nn
import math
from math import ceil
from torch.autograd import Variable
from rsden.cluster_loss import *
from rsden import caffe_pb2
from rsden.models.utils import *
import time
# GPU ordinal every tensor in this module is explicitly moved to via .cuda().
cuda_id=3
# Number of groups used by every GroupNorm layer below (1 == LayerNorm-like).
group_dim=1
def mean_shift(feature, mean, bandwidth):
    """Run a fixed 10 iterations of mean shift on a (C, H, W) feature map.

    mean is a (C, 1, 1) mode estimate; at each step it is replaced by the
    average embedding of all pixels whose distance to it is < bandwidth.
    Returns the converged mode with shape (C, 1, 1).
    """
    channels = feature.shape[0]
    for _ in range(10):
        # Hard window: 1.0 for pixels inside the bandwidth ball, else 0.0.
        distance = torch.norm(feature - mean, dim=0)
        window = torch.where(distance < bandwidth,
                             torch.tensor(1).cuda(cuda_id),
                             torch.tensor(0).cuda(cuda_id)).float()
        flattened = (feature * window).view(channels, feature.shape[1] * feature.shape[2])
        mean = (torch.sum(flattened, dim=1) / torch.sum(window)).view([channels, 1, 1])
    return mean
def get_mask(feature, mean, bandwidth):
    """Binary (float) mask of pixels whose embedding is within bandwidth of mean.

    feature is (C, H, W); mean is reshaped to (C, 1, 1) and broadcast.
    Returns a float (H, W) tensor of 0/1 values.
    """
    mode = mean.view([mean.shape[0], 1, 1])
    distance = torch.norm(feature - mode, dim=0)
    inside = torch.where(distance < bandwidth,
                         torch.tensor(1).cuda(cuda_id),
                         torch.tensor(0).cuda(cuda_id))
    return inside.float()
def re_label(mask, area, bandwidth):
    """Offset per-slice labels so region ids are unique across the mask stack.

    mask: (N, H, W) stack where each slice holds positive region labels;
    area: per-slice label counts used as the running offset.
    Returns (relabelled mask stack, area, fused 2-D label image / 255).

    NOTE(review): ``bandwidth`` is accepted but never used -- kept only for
    interface compatibility with callers.
    (Fix: removed a leftover debug ``print(torch.sum(area))``.)
    """
    count = torch.tensor(0).float().cuda(cuda_id)
    for i in range(area.shape[0]):
        # Shift this slice's positive labels past every label used so far.
        mask[i, :, :] = torch.where(mask[i, :, :] > 0, mask[i, :, :] + count, mask[i, :, :])
        count += area[i]
    # Collapse the stack: average the labels over slices that claim each pixel.
    segment = torch.where(mask > 0, torch.tensor(1).cuda(cuda_id), torch.tensor(0).cuda(cuda_id)).float()
    final = torch.sum(mask, dim=0) / torch.sum(segment, dim=0)
    final = torch.squeeze(final)
    final = final / 255
    return mask, area, final
def refine_mask(mask):
    """Split an oversized binary region into <=80x80 grid cells with new labels.

    Regions with fewer than 400 positive pixels are returned untouched.
    Otherwise the region's bounding box is tiled in 80-pixel steps; each tile
    containing more than 400 region pixels is multiplied by its grid index,
    and the distinct values are finally renumbered 1..K.

    (Fix: the early-return size guard now runs before ``mask.nonzero()``, so
    small masks skip the expensive coordinate extraction.)
    NOTE(review): the tile label ``i*j`` collides for e.g. (2,3) and (3,2),
    merging those tiles into one region -- confirm this is intended.
    """
    if torch.sum(mask) < 400:
        return mask
    pixels = mask.nonzero()
    # Bounding box of the region.
    minx = torch.min(pixels[:, 0])
    maxx = torch.max(pixels[:, 0])
    miny = torch.min(pixels[:, 1])
    maxy = torch.max(pixels[:, 1])
    for i in range(1, torch.ceil((maxx - minx).float() / 80).int() + 1):
        for j in range(1, torch.ceil((maxy - miny).float() / 80).int() + 1):
            if torch.sum(mask[minx + 80 * (i - 1):minx + 80 * i, miny + 80 * (j - 1):miny + 80 * j]) > 400:
                mask[minx + 80 * (i - 1):minx + 80 * i, miny + 80 * (j - 1):miny + 80 * j] *= i * j
    # Renumber the distinct (positive) tile labels to consecutive ids 1..K,
    # using negative temporaries so new ids cannot clash with old values.
    areas = torch.unique(mask).sort()[0]
    for i in range(1, len(areas)):
        mask = torch.where(mask == areas[i], -torch.ones(1).float().cuda(cuda_id) * i, mask)
    mask = -mask
    return mask.float()
def fuse_mask(n_mask, r_mask):
    """Merge the single new region in ``r_mask`` into the running label map.

    n_mask: (H, W) label map with ids 1..max; r_mask: binary mask of the
    newly clustered region (its pixels equal 1).  Pixels of the new region
    not yet labelled in n_mask either open a fresh label (when they are more
    than 40% of the region) or are absorbed into the existing label that
    dominates the overlap.  Returns the updated label map.

    (Fix: removed dead ``base`` re-assignments after each branch and ~30
    lines of commented-out experimental code.)
    """
    base = torch.where(n_mask > 0, torch.tensor(1).cuda(cuda_id), torch.tensor(0).cuda(cuda_id)).float()
    areas = torch.max(n_mask)
    # Only region id 1 of r_mask is fused; get_mask produces a binary region.
    i = 1
    shift = torch.where(r_mask == i, torch.tensor(1).cuda(cuda_id), torch.tensor(0).cuda(cuda_id)).float()
    # Pixels of the new region that have no label yet.
    non_overlap = torch.where(base - shift == -1, torch.tensor(1).cuda(cuda_id), torch.tensor(0).cuda(cuda_id)).float()
    overlap = shift - non_overlap
    if torch.sum(non_overlap) / torch.sum(shift) > 0.4:
        # Mostly new pixels: open a fresh label for them.
        areas += 1
        n_mask = torch.where(non_overlap == 1, areas, n_mask)
    else:
        # Mostly overlapping: absorb the new pixels into the most frequent
        # existing label under the overlap.
        # NOTE(review): ``bincount(...)[1:]`` is empty when there is no
        # overlap at all, which would make argmax raise -- presumably
        # unreachable given the 40% branch above; confirm.
        area_num = torch.argmax(torch.bincount(torch.where(overlap.long() == 1, n_mask.long(), torch.tensor(0).cuda(cuda_id)).view(-1))[1:]).float() + 1
        n_mask = torch.where(non_overlap == 1, area_num, n_mask)
    return n_mask
def fast_cluster(feature, bandwidth=0.16):
    """Mean-shift-style clustering of a dense embedding map into region labels.

    feature: (B, C, H, W) embedding tensor (on GPU ``cuda_id``).  For each
    batch element, repeatedly seed a mean shift from a random unlabelled
    pixel, turn the converged mode into a binary region (get_mask) and fuse
    it into a running label map (fuse_mask) until every pixel is labelled.
    The bandwidth is widened by 10% whenever progress stalls or the region
    count hits 50/60, and clustering aborts above 70 regions.

    Returns the (H, W) label map.
    NOTE(review): only the *last* batch element's map is returned -- confirm
    callers always pass B == 1.  With B == 0 this would raise NameError.

    (Fix: removed the unused ``masks``/``areas``/``segments`` accumulators
    and the ``n_mask`` assignment duplicated in both branches.)
    """
    for i in range(feature.shape[0]):
        n_mask = 0
        n_feature = feature[i, ...]
        label = torch.zeros(n_feature.shape[1], n_feature.shape[2]).cuda(cuda_id).float()
        check = 0
        count = 0
        while (torch.min(label) == 0):
            # Seed the next mean shift from a random still-unlabelled pixel.
            candidate = torch.where(label == 0, torch.tensor(1).float().cuda(cuda_id), torch.tensor(0).float().cuda(cuda_id)).nonzero()
            seed = torch.randint(len(candidate), (1,))[0].long()
            mean = n_feature[:, candidate[seed][0].long(), candidate[seed][1].long()].view(n_feature.shape[0], 1, 1)
            mean = mean_shift(n_feature, mean, bandwidth)
            t_masks = get_mask(n_feature, mean, bandwidth)
            label = label + t_masks
            if n_mask == 0:
                n_masks = t_masks
            else:
                n_masks = fuse_mask(n_masks, t_masks)
            n_mask = torch.max(n_masks)
            # Track stalls: if the unlabelled count stops shrinking 4 times
            # in a row, widen the bandwidth and reset the stall counter.
            if len(candidate) == check:
                count += 1
            else:
                check = len(candidate)
            if count > 3:
                bandwidth = bandwidth * 1.1
                count = 0
            # Widen the bandwidth as the region count grows; give up past 70.
            if n_mask == 50:
                bandwidth = bandwidth * 1.1
            if n_mask == 60:
                bandwidth = bandwidth * 1.1
            if n_mask > 70:
                break
    return n_masks
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution preceded by replication padding (instead of zero pad).

    Returns an nn.Sequential of ReplicationPad2d(1) followed by a bias-free
    Conv2d, so spatial size behaves like a padding=1 convolution.
    """
    replicate = nn.ReplicationPad2d(1)
    conv = nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=0,
        bias=False,
    )
    return nn.Sequential(replicate, conv)
class BasicBlock(nn.Module):
    """Residual block: two replication-padded 3x3 convs with GroupNorm.

    The first conv may change stride/width; ``downsample`` (if given)
    projects the skip branch to the matching shape.
    """

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.gn1 = nn.GroupNorm(group_dim, planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.gn2 = nn.GroupNorm(group_dim, planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """conv-gn-relu, conv-gn, add skip, final relu."""
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.gn1(self.conv1(x)))
        out = self.gn2(self.conv2(out))
        out += shortcut
        return self.relu(out)
class Bottleneck(nn.Module):
    """Residual bottleneck: 1x1 reduce, 3x3, 1x1 expand (x4), all GroupNorm.

    ``downsample`` (if given) projects the skip branch to the expanded width.
    """

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.gn1 = nn.GroupNorm(group_dim, planes)
        # Only the middle 3x3 conv carries the stride.
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.gn2 = nn.GroupNorm(group_dim, planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.gn3 = nn.GroupNorm(group_dim, planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """1x1 -> 3x3 -> 1x1 with GroupNorm/ReLU, plus the skip connection."""
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.gn1(self.conv1(x)))
        out = self.relu(self.gn2(self.conv2(out)))
        out = self.gn3(self.conv3(out))
        out += shortcut
        return self.relu(out)
class rsn_cluster(nn.Module):
    """Region-support depth network: ResNet-style encoder + pyramid pooling
    that jointly predicts a per-pixel depth map and a clustering embedding
    used to segment the image into regions, whose per-region statistics
    then refine the depth.

    NOTE(review): the ``else`` branches of ``forward`` reference ``y`` and
    ``depth``, which are only bound on the ``flag == 0`` path -- those
    branches would raise NameError if taken; confirm callers always pass
    flag == 0.
    """
    def __init__(self,
                 n_classes=64,
                 block_config=[3, 16, 3, 3],
                 input_size= (480, 640),
                 version='scene'):
        """Build encoder, pyramid pooling, decoder and all prediction heads.

        NOTE(review): n_classes, block_config, input_size and version are
        accepted but never read here -- the layer widths below are
        hard-coded; confirm before removing them.
        """
        super(rsn_cluster, self).__init__()
        self.inplanes = 64
        layers=[4, 10, 5, 5]
        block=BasicBlock
        # Encoder
        self.conv1=conv2DGroupNormRelu(3, 32, k_size=3,
                                         padding=1, stride=1, bias=False)
        self.conv2=conv2DGroupNormRelu(32, 64, k_size=3,
                                         padding=1, stride=1, bias=False)
        self.layer1 = self._make_layer(block, 64, layers[0],stride=1)
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 256, layers[3], stride=1)
        # Pyramid Pooling Module (multi-scale context at 1/4 resolution).
        self.pyramid_pooling = pyramidPoolingGroupNorm(256, [[30,40],[12,16],[3,4],[1,1]],group_dim=group_dim)
        # Decoder: fuse pooled context, then upsample twice back to input
        # resolution, concatenating matching encoder features each time.
        self.fuse0 = conv2DGroupNormRelu(in_channels=512, k_size=3, n_filters=256,
                                         padding=1, stride=1, bias=False,group_dim=group_dim)
        self.fuse1 = conv2DGroupNormRelu(in_channels=256, k_size=3, n_filters=128,
                                         padding=1, stride=1, bias=False,group_dim=group_dim)
        self.deconv1 = deconv2DGroupNormRelu(in_channels=128, n_filters=128, k_size=4,
                                         stride=2, padding=1,output_padding=0, bias=False,group_dim=group_dim)
        self.fuse2 = conv2DGroupNormRelu(in_channels=256, k_size=3, n_filters=192,
                                         padding=1, stride=1, bias=False,group_dim=group_dim)
        self.deconv2 = deconv2DGroupNormRelu(in_channels=192, n_filters=192, k_size=4,
                                         stride=2, padding=1,output_padding=0, bias=False,group_dim=group_dim)
        self.fuse3 = conv2DGroupNormRelu(in_channels=256, k_size=3, n_filters=256,
                                         padding=1, stride=1, bias=False,group_dim=group_dim)
        # Depth-regression head: 256 shared channels + 1 region-mask channel.
        self.inplanes = 257
        self.regress1 = self._make_layer(block,128, 4, stride=1)
        self.regress2 = conv2DGroupNormRelu(in_channels=128, k_size=3, n_filters=64,
                                         padding=1, stride=1, bias=False,group_dim=group_dim)
        self.regress3 = conv2DGroupNormRelu(in_channels=64, k_size=3, n_filters=32,
                                         padding=1, stride=1, bias=False,group_dim=group_dim)
        self.regress4 = conv2DRelu(in_channels=32, k_size=3, n_filters=1,
                                         padding=1, stride=1, bias=False)
        # Clustering-embedding head: 256 shared channels + 2 location channels
        # in, 16-dimensional embedding out.
        self.class0= conv2DGroupNormRelu(in_channels=258, k_size=1, n_filters=128,
                                         padding=0, stride=1, bias=False,group_dim=group_dim)
        self.class1= conv2DGroupNormRelu(in_channels=128, k_size=3, n_filters=64,
                                         padding=1, stride=1, bias=False,group_dim=group_dim)
        self.class2= conv2DRelu(in_channels=64, k_size=3, n_filters=64,
                                         padding=1, stride=1, bias=False)
        self.class3= conv2DRelu(in_channels=64, k_size=3, n_filters=32,
                                         padding=1, stride=1, bias=False)
        self.class4= conv2D(in_channels=32, k_size=1, n_filters=16,
                                         padding=0, stride=1, bias=False)
        # Inner refinement head: refines depth from (depth, shared features,
        # per-region mean features) -- 1 + 256 + 256 = 513 input channels.
        self.inrefine1=conv2DGroupNormRelu(in_channels=513, k_size=3, n_filters=128,
                                         padding=1, stride=1, bias=False,group_dim=group_dim)
        self.inrefine2=conv2DGroupNormRelu(in_channels=128, k_size=3, n_filters=64,
                                         padding=1, stride=1, bias=False,group_dim=group_dim)
        self.inrefine3=conv2DGroupNormRelu(in_channels=64, k_size=3, n_filters=32,
                                         padding=1, stride=1, bias=False,group_dim=group_dim)
        self.inrefine4= conv2DRelu(in_channels=32, k_size=1, n_filters=16,
                                         padding=0, stride=1, bias=False)
        self.inrefine5= conv2D(in_channels=16, k_size=1, n_filters=1,
                                         padding=0, stride=1, bias=False)
        # Reliability head: declared but never called by the active code in
        # forward (only referenced in commented-out experiments); presumably
        # kept for checkpoint compatibility -- confirm.
        self.reliable1=conv2DGroupNormRelu(in_channels=513, k_size=3, n_filters=128,
                                         padding=1, stride=1, bias=False,group_dim=group_dim)
        self.reliable2=conv2DGroupNormRelu(in_channels=128, k_size=3, n_filters=64,
                                         padding=1, stride=1, bias=False,group_dim=group_dim)
        self.reliable3= conv2DGroupNormRelu(in_channels=64, k_size=1, n_filters=32,
                                         padding=0, stride=1, bias=False,group_dim=group_dim)
        self.reliable4= conv2DGroupNormRelu(in_channels=32, k_size=1, n_filters=16,
                                         padding=0, stride=1, bias=False)
        self.reliable5= conv2D(in_channels=16, k_size=1, n_filters=1,
                                         padding=0, stride=1, bias=False)
        self.output=nn.ReLU(inplace=True)
        # Weight init: scaled normal for convs, unit scale / zero shift for
        # GroupNorm, transposed convs filled with ones.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.GroupNorm):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.ConvTranspose2d):
                m.weight.data.fill_(1)
    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack ``blocks`` residual blocks; the first one may change stride
        or width, in which case a 1x1 projection is used on the skip path."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.ReplicationPad2d(0),
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False,padding=0),
                nn.GroupNorm(group_dim,planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x,segments,labels,flag,task):
        """Predict depth (and region masks) for a batch of images.

        x: (B, 3, H, W) input; segments/labels: ground-truth region maps fed
        to the clustering loss; flag selects the code path; task is one of
        'train'/'test'/'eval' on the non-zero-flag path.

        NOTE(review): the ``else`` path references ``y``/``depth`` which are
        only bound when flag == 0 and would raise NameError -- confirm that
        only flag == 0 is used in practice.
        """
        # Two extra channels with normalised (x, y) pixel coordinates.
        location_map=torch.cat([(torch.arange(x.shape[-1])/x.shape[-1]).unsqueeze(0).expand(x.shape[-2],x.shape[-1]).unsqueeze(0), \
            (torch.arange(x.shape[-2])/x.shape[-2]).unsqueeze(0).transpose(1,0).expand(x.shape[-2],x.shape[-1]).unsqueeze(0)],0).unsqueeze(0).float().cuda(cuda_id)
        zero=torch.zeros(1).cuda(cuda_id)
        one=torch.ones(1).cuda(cuda_id)
        # Encoder.
        x = self.conv1(x)
        x=self.conv2(x)
        x1 = self.layer1(x)
        #half resolution
        x2 = self.layer2(x1)
        x = self.layer3(x2)
        x = self.layer4(x)
        # H, W -> H/2, W/2
        x = self.pyramid_pooling(x)
        # Decoder with skip connections back to full resolution; x_share is
        # the 256-channel feature map shared by all heads.
        x = self.fuse0(x)
        x = self.fuse1(x)
        x = self.deconv1(x)
        x = self.fuse2(torch.cat((x,x2),1))
        x = self.deconv2(x)
        x_share = self.fuse3(torch.cat((x,x1),1))
        if flag==0:
            # Clustering embedding from shared features + pixel coordinates.
            x_fuse=torch.cat([x_share,location_map],1)
            y=self.class0(x_fuse)
            y=self.class1(y)
            y=self.class2(y)
            y=self.class3(y)
            y=self.class4(y)
            with torch.no_grad():
                # Region labels from mean-shift clustering of the embedding.
                masks=fast_cluster(y).view(1,1,x_share.shape[-2],x_share.shape[-1])
            # Depth regression conditioned on the region masks.
            x=self.regress1(torch.cat([x_share,masks],1))
            x=self.regress2(x)
            x=self.regress3(x)
            depth=self.regress4(x)
            with torch.no_grad():
                labels=labels.view_as(depth)
            # Per-region mean depth / mean feature ("coarse" prediction).
            coarse_depth=depth+0
            coarse_feature=x_share+0
            mean_features=torch.zeros(1,x_share.shape[1],torch.max(masks).long()+1).cuda(cuda_id)
            mean_depth=torch.zeros(torch.max(masks).long()+1).cuda(cuda_id)
            for i in range(1,torch.max(masks).int()+1):
                index_r=torch.where(masks==i,one,zero)
                mean_d=torch.sum(index_r*depth)/torch.sum(index_r)
                mean_depth[i]=mean_d
                coarse_depth=torch.where(masks==i,mean_d,coarse_depth)
                mean_f=torch.sum((index_r*x_share).view(x_share.shape[0],x_share.shape[1],-1),dim=-1)/torch.sum(index_r)
                mean_features[...,i]=mean_f
                coarse_feature=torch.where(masks==i,mean_f.view(x_share.shape[0],x_share.shape[1],1,1),coarse_feature)
            # Inner refinement of the region-mean ("coarse") depth.
            inner_feature= torch.cat([coarse_depth,x_share,coarse_feature],1)
            inner=self.inrefine1(inner_feature)
            inner=self.inrefine2(inner)
            inner=self.inrefine3(inner)
            inner=self.inrefine4(inner)
            inner_variance=self.inrefine5(inner)
            # The same refinement stack is reused on the per-pixel depth.
            reliable_feature= torch.cat([depth,x_share,coarse_feature],1)
            reliable=self.inrefine1(reliable_feature)
            reliable=self.inrefine2(reliable)
            reliable=self.inrefine3(reliable)
            reliable=self.inrefine4(reliable)
            reliable_variance=self.inrefine5(reliable)
            # Discriminative clustering loss on the embedding.
            loss_var,loss_dis,loss_reg = cluster_loss(y,segments.long(),device_id=cuda_id)
            loss_var=loss_var.reshape((y.shape[0],1))
            loss_dis=loss_dis.reshape((y.shape[0],1))
            loss_reg=loss_reg.reshape((y.shape[0],1))
            # Fuse refined coarse depth and refined per-pixel depth; outside
            # any region (mask 0), fall back to the per-pixel prediction.
            accurate_depth=self.output(inner_variance+coarse_depth)
            depth=self.output(reliable_variance+depth)
            accurate_depth=torch.where(masks>0,(depth+accurate_depth)/2,depth)
            return masks,accurate_depth,loss_var,loss_dis,loss_reg
        else:
            # NOTE(review): ``y`` and ``depth`` are unbound on this path.
            if task=='train':
                with torch.no_grad():
                    masks=fast_cluster(y).view_as(depth)
                    print(torch.max(masks))
                loss_var,loss_dis,loss_reg = cluster_loss(y,segments.long())
                loss_var=loss_var.reshape((y.shape[0],1))
                loss_dis=loss_dis.reshape((y.shape[0],1))
                loss_reg=loss_reg.reshape((y.shape[0],1))
                return depth,masks,loss_var,loss_dis,loss_reg
            elif task=='test':
                loss_var,loss_dis,loss_reg = cluster_loss(y,segments.long())
                loss_var=loss_var.reshape((y.shape[0],1))
                loss_dis=loss_dis.reshape((y.shape[0],1))
                loss_reg=loss_reg.reshape((y.shape[0],1))
                return depth,loss_var,loss_dis,loss_reg
            elif task=='eval':
                x_fuse=torch.cat([x_share,location_map],1)
                # Ground-truth segments are used as the region map here.
                masks=segments.view_as(depth)
                #coarse depth
                coarse_depth=depth+0
                coarse_feature=x_fuse+0
                mean_features=torch.zeros(1,x_fuse.shape[1],torch.max(masks).long()+1).cuda(cuda_id)
                mean_depth=torch.zeros(torch.max(masks).long()+1).cuda(cuda_id)
                for i in range(torch.min(masks).int(),torch.max(masks).int()+1):
                    index_r=torch.where(masks==i,one,zero)
                    mean_d=torch.sum(index_r*depth)/torch.sum(index_r)
                    mean_depth[i]=mean_d+0
                    coarse_depth=torch.where(masks==i,mean_depth[i],coarse_depth)
                    mean_f=torch.sum((index_r*x_fuse).view(x_fuse.shape[0],x_fuse.shape[1],-1),dim=-1)/torch.sum(index_r)
                    mean_features[...,i]=mean_f
                    coarse_feature=torch.where(masks==i,mean_f.view(x_fuse.shape[0],x_fuse.shape[1],1,1),coarse_feature)
                # Inner refinement on the residual between features and their
                # per-region means.
                inner_feature= torch.cat([coarse_depth,x_fuse-coarse_feature],1)
                inner=self.inrefine1(inner_feature)
                inner=self.inrefine2(inner)
                inner=self.inrefine3(inner)
                inner=self.inrefine4(inner)
                inner_variance=self.inrefine5(inner)
                accurate_depth=inner_variance
            # NOTE(review): this overwrites the refinement result computed in
            # the 'eval' branch above, so the refined depth is discarded --
            # presumably a debug leftover; confirm intent.
            accurate_depth=depth
            return depth,accurate_depth
| 49.704248 | 180 | 0.571715 |
import torch
import numpy as np
import torch.nn as nn
import math
from math import ceil
from torch.autograd import Variable
from rsden.cluster_loss import *
from rsden import caffe_pb2
from rsden.models.utils import *
import time
# GPU ordinal every tensor in this module is moved to via .cuda().
cuda_id=3
# Number of groups used by every GroupNorm layer below.
group_dim=1
def mean_shift(feature,mean,bandwidth):
    """Run 10 fixed mean-shift iterations of ``mean`` over a (C, H, W) map.

    Each step replaces the mode with the average embedding of all pixels
    whose distance to it is below ``bandwidth``.  Returns a (C, 1, 1) mode.
    """
    for t in range(10):
        dis=feature-mean
        dis=torch.norm(dis,dim=0)
        # Hard window: 1.0 inside the bandwidth ball, else 0.0.
        mask=torch.where(dis<bandwidth,torch.tensor(1).cuda(cuda_id),torch.tensor(0).cuda(cuda_id)).float()
        mean=torch.sum((feature*mask).view(feature.shape[0],feature.shape[1]*feature.shape[2]),dim=1)/torch.sum(mask)
        mean=mean.view([feature.shape[0],1,1])
    return mean
def get_mask(feature,mean,bandwidth):
    """Float 0/1 mask of pixels whose embedding is within ``bandwidth`` of
    ``mean``; feature is (C, H, W), the result is (H, W)."""
    mean=mean.view([mean.shape[0],1,1])
    dis=feature-mean
    dis=torch.norm(dis,dim=0)
    mask=torch.where(dis<bandwidth,torch.tensor(1).cuda(cuda_id),torch.tensor(0).cuda(cuda_id))
    return mask.float()
def re_label(mask,area,bandwidth):
    """Offset per-slice labels so region ids are unique across the stack.

    mask: (N, H, W) stack of positive region labels; area: per-slice label
    counts used as running offsets.  Returns (relabelled stack, area, fused
    2-D label image / 255).
    NOTE(review): ``bandwidth`` is never used here -- confirm before removal.
    """
    index=torch.sum(area)
    print(index)  # debug leftover
    count=torch.tensor(0).float().cuda(cuda_id)
    for i in range(area.shape[0]):
        # Shift this slice's positive labels past every label used so far.
        mask[i,:,:]=torch.where(mask[i,:,:]>0,mask[i,:,:]+count,mask[i,:,:])
        count+=area[i]
    # Collapse the stack: average labels over slices that claim each pixel.
    segment=torch.where(mask>0,torch.tensor(1).cuda(cuda_id),torch.tensor(0).cuda(cuda_id)).float()
    final=torch.sum(mask,dim=0)/torch.sum(segment,dim=0)
    final=torch.squeeze(final)
    final=final/255
    return mask,area,final
def refine_mask(mask):
    """Split an oversized binary region into <=80x80 grid cells with new
    labels; regions under 400 pixels are returned untouched.

    NOTE(review): the tile label ``i*j`` collides for e.g. (2,3)/(3,2),
    merging those tiles -- confirm intended.
    """
    pixels=mask.nonzero()
    if torch.sum(mask)<400:
        return mask
    # Bounding box of the region.
    minx=torch.min(pixels[:,0])
    maxx=torch.max(pixels[:,0])
    miny=torch.min(pixels[:,1])
    maxy=torch.max(pixels[:,1])
    for i in range(1,torch.ceil((maxx-minx).float()/80).int()+1):
        for j in range(1,torch.ceil((maxy-miny).float()/80).int()+1):
            if torch.sum(mask[minx+80*(i-1):minx+80*i,miny+80*(j-1):miny+80*j])>400:
                mask[minx+80*(i-1):minx+80*i,miny+80*(j-1):miny+80*j]*=i*j
    # Renumber distinct positive labels to 1..K via negative temporaries.
    areas=torch.unique(mask).sort()[0]
    for i in range(1,len(areas)):
        mask=torch.where(mask==areas[i],-torch.ones(1).float().cuda(cuda_id)*i,mask)
    mask=-mask
    return mask.float()
def fuse_mask(n_mask,r_mask):
    """Merge the single new binary region in ``r_mask`` into the running
    label map ``n_mask``.

    Unlabelled pixels of the new region either open a fresh label (when they
    exceed 40% of the region) or are absorbed into the existing label that
    dominates the overlap.  Returns the updated label map.
    NOTE(review): ``bincount(...)[1:]`` is empty when there is no overlap at
    all, which would make argmax raise -- presumably unreachable; confirm.
    """
    base=torch.where(n_mask>0,torch.tensor(1).cuda(cuda_id),torch.tensor(0).cuda(cuda_id)).float()
    areas=torch.max(n_mask)
    # Only region id 1 of r_mask is fused (get_mask yields a binary region).
    i=1
    shift=torch.where(r_mask==i,torch.tensor(1).cuda(cuda_id),torch.tensor(0).cuda(cuda_id)).float()
    non_overlap=torch.where(base-shift==-1,torch.tensor(1).cuda(cuda_id),torch.tensor(0).cuda(cuda_id)).float()
    overlap=shift-non_overlap
    if torch.sum(non_overlap)/torch.sum(shift)>0.4:
        # Mostly new pixels: open a fresh label.
        areas+=1
        n_mask=torch.where(non_overlap==1,areas,n_mask)
        base=torch.where(n_mask>0,torch.tensor(1).cuda(cuda_id),torch.tensor(0).cuda(cuda_id)).float()
    else:
        # Mostly overlapping: absorb into the dominant existing label.
        area_num=torch.argmax(torch.bincount(torch.where(overlap.long()==1,n_mask.long(),torch.tensor(0).cuda(cuda_id)).view(-1))[1:]).float()+1
        n_mask=torch.where(non_overlap==1,area_num,n_mask)
        base=torch.where(n_mask>0,torch.tensor(1).cuda(cuda_id),torch.tensor(0).cuda(cuda_id)).float()
    return n_mask
def fast_cluster(feature,bandwidth=0.16):
    """Mean-shift-style clustering of a (B, C, H, W) embedding map.

    For each batch element: seed a mean shift from a random unlabelled
    pixel, turn the converged mode into a binary region and fuse it into a
    running label map until every pixel is labelled.  Bandwidth is widened
    by 10% on stalls or when the region count reaches 50/60; clustering
    aborts above 70 regions.

    NOTE(review): only the *last* batch element's label map is returned --
    confirm callers always pass B == 1 (B == 0 would raise NameError).
    """
    masks=[]
    areas=[]
    segments=[]
    for i in range(feature.shape[0]):
        n_mask=0
        n_feature=feature[i,...]
        label=torch.zeros(n_feature.shape[1],n_feature.shape[2]).cuda(cuda_id).float()
        check=0
        count=0
        while(torch.min(label)==0):
            # Seed from a random still-unlabelled pixel.
            candidate=torch.where(label==0,torch.tensor(1).float().cuda(cuda_id),torch.tensor(0).float().cuda(cuda_id)).nonzero()
            seed=torch.randint(len(candidate),(1,))[0].long()
            mean=n_feature[:,candidate[seed][0].long(),candidate[seed][1].long()].view(n_feature.shape[0],1,1)
            mean=mean_shift(n_feature, mean, bandwidth)
            t_masks=get_mask(n_feature, mean, bandwidth)
            label=label+t_masks
            if n_mask==0:
                n_masks=t_masks
                n_mask=torch.max(n_masks)
            else:
                n_masks=fuse_mask(n_masks,t_masks)
                n_mask=torch.max(n_masks)
            # Stall detection: widen the bandwidth if the unlabelled count
            # stops shrinking four times in a row.
            if len(candidate)==check:
                count+=1
            else:
                check=len(candidate)
            if count>3:
                bandwidth=bandwidth*1.1
                count=0
            if n_mask==50:
                bandwidth=bandwidth*1.1
            if n_mask==60:
                bandwidth=bandwidth*1.1
            if n_mask>70:
                break
    return n_masks
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 bias-free convolution with replication padding (behaves like a
    padding=1 conv spatially)."""
    pad=nn.ReplicationPad2d(1)
    padding=0
    conv_mod = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                         padding=padding, bias=False)
    return nn.Sequential(pad,conv_mod)
class BasicBlock(nn.Module):
    """Residual block: two replication-padded 3x3 convs with GroupNorm;
    ``downsample`` projects the skip branch when shape changes."""
    expansion = 1
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        # Only the first conv may downsample.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.gn1 = nn.GroupNorm(group_dim,planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.gn2 = nn.GroupNorm(group_dim,planes)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        """conv-gn-relu, conv-gn, add skip, final relu."""
        residual = x
        out = self.conv1(x)
        out = self.gn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.gn2(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class Bottleneck(nn.Module):
    """Residual bottleneck: 1x1 reduce, strided 3x3, 1x1 expand (x4), all
    with GroupNorm; ``downsample`` projects the skip branch when needed."""
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.gn1 = nn.GroupNorm(group_dim,planes)
        # Only the middle 3x3 conv carries the stride.
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.gn2 = nn.GroupNorm(group_dim,planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.gn3 = nn.GroupNorm(group_dim,planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        """1x1 -> 3x3 -> 1x1 with GroupNorm/ReLU, plus the skip connection."""
        residual = x
        out = self.conv1(x)
        out = self.gn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.gn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.gn3(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class rsn_cluster(nn.Module):
    """Scene network with a shared encoder/decoder trunk feeding a
    clustering (segmentation-embedding) head and depth regression/refinement
    heads.

    Depends on names defined elsewhere in this file: ``conv2DGroupNormRelu``,
    ``deconv2DGroupNormRelu``, ``conv2DRelu``, ``conv2D``,
    ``pyramidPoolingGroupNorm``, ``fast_cluster``, ``cluster_loss``,
    ``group_dim`` and ``cuda_id``.

    NOTE(review): several lines of ``forward`` are truncated in this copy of
    the source (marked below) and the ``else`` branch uses ``y``/``depth``
    that are only assigned in the ``flag==0`` branch — this file appears to
    be a corrupted extraction; confirm against the original repository.
    """
    def __init__(self,
                 n_classes=64,
                 block_config=[3, 16, 3, 3],
                 input_size= (480, 640),
                 version='scene'):
        # NOTE(review): n_classes, block_config, input_size and version are
        # accepted but never used below; block_config is also a mutable
        # default argument (kept as-is to preserve the interface).
        super(rsn_cluster, self).__init__()
        self.inplanes = 64
        # Residual trunk: four stages of BasicBlocks with these depths.
        layers=[4, 10, 5, 5]
        block=BasicBlock
        self.conv1=conv2DGroupNormRelu(3, 32, k_size=3,
                                 padding=1, stride=1, bias=False)
        self.conv2=conv2DGroupNormRelu(32, 64, k_size=3,
                                 padding=1, stride=1, bias=False)
        self.layer1 = self._make_layer(block, 64, layers[0],stride=1)
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 256, layers[3], stride=1)
        # Multi-scale context pooling over the deepest feature map.
        self.pyramid_pooling = pyramidPoolingGroupNorm(256, [[30,40],[12,16],[3,4],[1,1]],group_dim=group_dim)
        # Decoder: fuse pooled features and upsample twice, with skip
        # connections from layer2 (x2) and layer1 (x1).
        self.fuse0 = conv2DGroupNormRelu(in_channels=512, k_size=3, n_filters=256,
                                 padding=1, stride=1, bias=False,group_dim=group_dim)
        self.fuse1 = conv2DGroupNormRelu(in_channels=256, k_size=3, n_filters=128,
                                 padding=1, stride=1, bias=False,group_dim=group_dim)
        self.deconv1 = deconv2DGroupNormRelu(in_channels=128, n_filters=128, k_size=4,
                                 stride=2, padding=1,output_padding=0, bias=False,group_dim=group_dim)
        self.fuse2 = conv2DGroupNormRelu(in_channels=256, k_size=3, n_filters=192,
                                 padding=1, stride=1, bias=False,group_dim=group_dim)
        self.deconv2 = deconv2DGroupNormRelu(in_channels=192, n_filters=192, k_size=4,
                                 stride=2, padding=1,output_padding=0, bias=False,group_dim=group_dim)
        self.fuse3 = conv2DGroupNormRelu(in_channels=256, k_size=3, n_filters=256,
                                 padding=1, stride=1, bias=False,group_dim=group_dim)
        # Depth regression head (input: 256 shared channels + 1 mask channel).
        self.inplanes = 257
        self.regress1 = self._make_layer(block,128, 4, stride=1)
        self.regress2 = conv2DGroupNormRelu(in_channels=128, k_size=3, n_filters=64,
                                 padding=1, stride=1, bias=False,group_dim=group_dim)
        self.regress3 = conv2DGroupNormRelu(in_channels=64, k_size=3, n_filters=32,
                                 padding=1, stride=1, bias=False,group_dim=group_dim)
        self.regress4 = conv2DRelu(in_channels=32, k_size=3, n_filters=1,
                                 padding=1, stride=1, bias=False)
        # Clustering-embedding head (input: 256 shared + 2 location channels).
        self.class0= conv2DGroupNormRelu(in_channels=258, k_size=1, n_filters=128,
                                 padding=0, stride=1, bias=False,group_dim=group_dim)
        self.class1= conv2DGroupNormRelu(in_channels=128, k_size=3, n_filters=64,
                                 padding=1, stride=1, bias=False,group_dim=group_dim)
        self.class2= conv2DRelu(in_channels=64, k_size=3, n_filters=64,
                                 padding=1, stride=1, bias=False)
        self.class3= conv2DRelu(in_channels=64, k_size=3, n_filters=32,
                                 padding=1, stride=1, bias=False)
        self.class4= conv2D(in_channels=32, k_size=1, n_filters=16,
                                 padding=0, stride=1, bias=False)
        # Inner-refinement head: predicts a per-pixel correction/variance.
        self.inrefine1=conv2DGroupNormRelu(in_channels=513, k_size=3, n_filters=128,
                                 padding=1, stride=1, bias=False,group_dim=group_dim)
        self.inrefine2=conv2DGroupNormRelu(in_channels=128, k_size=3, n_filters=64,
                                 padding=1, stride=1, bias=False,group_dim=group_dim)
        self.inrefine3=conv2DGroupNormRelu(in_channels=64, k_size=3, n_filters=32,
                                 padding=1, stride=1, bias=False,group_dim=group_dim)
        self.inrefine4= conv2DRelu(in_channels=32, k_size=1, n_filters=16,
                                 padding=0, stride=1, bias=False)
        self.inrefine5= conv2D(in_channels=16, k_size=1, n_filters=1,
                                 padding=0, stride=1, bias=False)
        # Reliability head. NOTE(review): these reliable1..reliable5 modules
        # are registered but forward() below calls inrefine1..inrefine5 for
        # the "reliable" path — possibly a bug in the original code.
        self.reliable1=conv2DGroupNormRelu(in_channels=513, k_size=3, n_filters=128,
                                 padding=1, stride=1, bias=False,group_dim=group_dim)
        self.reliable2=conv2DGroupNormRelu(in_channels=128, k_size=3, n_filters=64,
                                 padding=1, stride=1, bias=False,group_dim=group_dim)
        self.reliable3= conv2DGroupNormRelu(in_channels=64, k_size=1, n_filters=32,
                                 padding=0, stride=1, bias=False,group_dim=group_dim)
        self.reliable4= conv2DGroupNormRelu(in_channels=32, k_size=1, n_filters=16,
                                 padding=0, stride=1, bias=False)
        self.reliable5= conv2D(in_channels=16, k_size=1, n_filters=1,
                                 padding=0, stride=1, bias=False)
        self.output=nn.ReLU(inplace=True)
        # He-style weight init for convs; GroupNorm to identity;
        # ConvTranspose weights filled with 1.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.GroupNorm):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.ConvTranspose2d):
                m.weight.data.fill_(1)
    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack ``blocks`` residual blocks; the first may downsample.

        A 1x1 conv + GroupNorm projection is applied to the shortcut when
        the stride or channel count changes.
        """
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.ReplicationPad2d(0),
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False,padding=0),
                nn.GroupNorm(group_dim,planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x,segments,labels,flag,task):
        """Run the network.

        Args:
            x: input batch — assumed NCHW RGB on GPU ``cuda_id`` (TODO confirm).
            segments: precomputed segment labels (used for losses / eval masks).
            labels: depth labels; only reshaped here.
            flag: 0 selects the full cluster-then-refine path; any other
                value selects the ``task``-dispatched path.
            task: 'train' | 'test' | 'eval' (only used when flag != 0).
        """
        # Normalized (x, y) coordinate channels appended to the shared features.
        location_map=torch.cat([(torch.arange(x.shape[-1])/x.shape[-1]).unsqueeze(0).expand(x.shape[-2],x.shape[-1]).unsqueeze(0), \
         (torch.arange(x.shape[-2])/x.shape[-2]).unsqueeze(0).transpose(1,0).expand(x.shape[-2],x.shape[-1]).unsqueeze(0)],0).unsqueeze(0).float().cuda(cuda_id)
        zero=torch.zeros(1).cuda(cuda_id)
        one=torch.ones(1).cuda(cuda_id)
        # Shared encoder/decoder trunk with skip connections.
        x = self.conv1(x)
        x=self.conv2(x)
        x1 = self.layer1(x)
        x2 = self.layer2(x1)
        x = self.layer3(x2)
        x = self.layer4(x)
        x = self.pyramid_pooling(x)
        x = self.fuse0(x)
        x = self.fuse1(x)
        x = self.deconv1(x)
        x = self.fuse2(torch.cat((x,x2),1))
        x = self.deconv2(x)
        x_share = self.fuse3(torch.cat((x,x1),1))
        if flag==0:
            # Clustering embedding from shared features + pixel locations.
            x_fuse=torch.cat([x_share,location_map],1)
            y=self.class0(x_fuse)
            y=self.class1(y)
            y=self.class2(y)
            y=self.class3(y)
            y=self.class4(y)
            with torch.no_grad():
                masks=fast_cluster(y).view(1,1,x_share.shape[-2],x_share.shape[-1])
            # Per-pixel depth conditioned on the cluster masks.
            x=self.regress1(torch.cat([x_share,masks],1))
            x=self.regress2(x)
            x=self.regress3(x)
            depth=self.regress4(x)
            with torch.no_grad():
                labels=labels.view_as(depth)
            # Per-segment mean depth / mean feature (the `+0` forces a copy).
            coarse_depth=depth+0
            coarse_feature=x_share+0
            mean_features=torch.zeros(1,x_share.shape[1],torch.max(masks).long()+1).cuda(cuda_id)
            mean_depth=torch.zeros(torch.max(masks).long()+1).cuda(cuda_id)
            for i in range(1,torch.max(masks).int()+1):
                index_r=torch.where(masks==i,one,zero)
                mean_d=torch.sum(index_r*depth)/torch.sum(index_r)
                mean_depth[i]=mean_d
                coarse_depth=torch.where(masks==i,mean_d,coarse_depth)
                mean_f=torch.sum((index_r*x_share).view(x_share.shape[0],x_share.shape[1],-1),dim=-1)/torch.sum(index_r)
                mean_features[...,i]=mean_f
                coarse_feature=torch.where(masks==i,mean_f.view(x_share.shape[0],x_share.shape[1],1,1),coarse_feature)
# NOTE(review): the next line is truncated in the source — presumably the
# tail of `inner_feature= torch.cat([coarse_depth,x_share,coarse_feature],1)`.
ure],1)
            inner=self.inrefine1(inner_feature)
            inner=self.inrefine2(inner)
            inner=self.inrefine3(inner)
            inner=self.inrefine4(inner)
            inner_variance=self.inrefine5(inner)
            # NOTE(review): the "reliable" path reuses inrefine1..5 instead of
            # the reliable1..5 modules built in __init__ — verify intent.
            reliable_feature= torch.cat([depth,x_share,coarse_feature],1)
            reliable=self.inrefine1(reliable_feature)
            reliable=self.inrefine2(reliable)
            reliable=self.inrefine3(reliable)
            reliable=self.inrefine4(reliable)
            reliable_variance=self.inrefine5(reliable)
# NOTE(review): truncated line — presumably the tail of
# `loss_var,loss_dis,loss_reg = cluster_loss(y,segments.long(),device_id=cuda_id)`.
gments.long(),device_id=cuda_id)
            loss_var=loss_var.reshape((y.shape[0],1))
            loss_dis=loss_dis.reshape((y.shape[0],1))
            loss_reg=loss_reg.reshape((y.shape[0],1))
            # Final depths: ReLU-clamped residual refinements, averaged
            # inside segments (masks>0), plain refined depth elsewhere.
            accurate_depth=self.output(inner_variance+coarse_depth)
            depth=self.output(reliable_variance+depth)
            accurate_depth=torch.where(masks>0,(depth+accurate_depth)/2,depth)
            return masks,accurate_depth,loss_var,loss_dis,loss_reg
        else:
            # NOTE(review): this branch references `y` and `depth`, which are
            # only assigned in the flag==0 branch above — likely lost lines.
            if task=='train':
                with torch.no_grad():
                    masks=fast_cluster(y).view_as(depth)
                    print(torch.max(masks))
                loss_var,loss_dis,loss_reg = cluster_loss(y,segments.long())
                loss_var=loss_var.reshape((y.shape[0],1))
                loss_dis=loss_dis.reshape((y.shape[0],1))
                loss_reg=loss_reg.reshape((y.shape[0],1))
                return depth,masks,loss_var,loss_dis,loss_reg
            elif task=='test':
                loss_var,loss_dis,loss_reg = cluster_loss(y,segments.long())
                loss_var=loss_var.reshape((y.shape[0],1))
                loss_dis=loss_dis.reshape((y.shape[0],1))
                loss_reg=loss_reg.reshape((y.shape[0],1))
                return depth,loss_var,loss_dis,loss_reg
            elif task=='eval':
                # Use ground-truth segments as masks and pool per segment.
                x_fuse=torch.cat([x_share,location_map],1)
                masks=segments.view_as(depth)
                coarse_depth=depth+0
                coarse_feature=x_fuse+0
                mean_features=torch.zeros(1,x_fuse.shape[1],torch.max(masks).long()+1).cuda(cuda_id)
                mean_depth=torch.zeros(torch.max(masks).long()+1).cuda(cuda_id)
                for i in range(torch.min(masks).int(),torch.max(masks).int()+1):
                    index_r=torch.where(masks==i,one,zero)
                    mean_d=torch.sum(index_r*depth)/torch.sum(index_r)
                    mean_depth[i]=mean_d+0
                    coarse_depth=torch.where(masks==i,mean_depth[i],coarse_depth)
                    mean_f=torch.sum((index_r*x_fuse).view(x_fuse.shape[0],x_fuse.shape[1],-1),dim=-1)/torch.sum(index_r)
                    mean_features[...,i]=mean_f
                    coarse_feature=torch.where(masks==i,mean_f.view(x_fuse.shape[0],x_fuse.shape[1],1,1),coarse_feature)
# NOTE(review): truncated line — presumably the tail of an
# `inner_feature= torch.cat([...],1)` statement.
],1)
                inner=self.inrefine1(inner_feature)
                inner=self.inrefine2(inner)
                inner=self.inrefine3(inner)
                inner=self.inrefine4(inner)
                inner_variance=self.inrefine5(inner)
                # NOTE(review): accurate_depth is immediately overwritten by
                # depth on the next line — the refinement result is discarded.
                accurate_depth=inner_variance
                accurate_depth=depth
                return depth,accurate_depth
| true | true |
f731bcccc4e6d8aaa6addf7cc5d2ba13d7d25f7c | 907 | py | Python | feed_generator.py | chand1012/static-rss-generator | 931ae40b156232d783202520c34bd175f615d4a4 | [
"MIT"
] | 1 | 2021-07-31T14:55:05.000Z | 2021-07-31T14:55:05.000Z | feed_generator.py | chand1012/static-rss-generator | 931ae40b156232d783202520c34bd175f615d4a4 | [
"MIT"
] | null | null | null | feed_generator.py | chand1012/static-rss-generator | 931ae40b156232d783202520c34bd175f615d4a4 | [
"MIT"
] | null | null | null | from datetime import datetime
import random
import json
import arrow
import feedparser
from rfeed import Feed, Item
# returns the feed string given the JSON object
def generate_feed(link_data: list[dict], rss_link: str) -> str:
    """Build an aggregate RSS feed from several source feeds.

    ``link_data`` maps feed URL -> number of newest entries to take; only
    the FIRST key/value pair of each dict is used. Returns rendered RSS XML.
    """
    data = []
    for link in link_data:
        feed = feedparser.parse(list(link.keys())[0])
        for i in range(list(link.values())[0]):
            # NOTE(review): raises IndexError when the feed has fewer
            # entries than requested — consider clamping to len(entries).
            newest = feed['entries'][i]
            data.append(Item(
                title=newest['title'],
                # NOTE(review): RSS 'published' is normally RFC 822, e.g.
                # "Sat, 31 Jul 2021 14:55:05 GMT"; this arrow format omits
                # the weekday and timezone — verify it parses real feeds.
                pubDate=arrow.get(newest['published'], 'DD MMM YYYY HH:mm:ss'),
                description=newest['summary'],
                link=newest['link']
            ))
    # Interleave items from the different sources.
    random.shuffle(data)
    return_feed = Feed(
        title='Aggregate RSS Feed',
        description='Aggregate RSS Feed',
        link=rss_link,
        lastBuildDate=datetime.now(),
        items=data,
    )
    # NOTE(review): everything after `.rss()` on the next line is dataset-row
    # residue (stat columns + start of the next record), not Python code.
    return return_feed.rss() | 25.914286 | 79 | 0.594267 | from datetime import datetime
import random
import json
import arrow
import feedparser
from rfeed import Feed, Item
def generate_feed(link_data: list[dict], rss_link: str) -> str:
    """Comment-stripped duplicate of generate_feed (dataset's
    content_no_comment column): aggregates the newest entries of several
    feeds into one shuffled RSS document."""
    data = []
    for link in link_data:
        # Only the first url -> count pair of each dict is consumed.
        feed = feedparser.parse(list(link.keys())[0])
        for i in range(list(link.values())[0]):
            newest = feed['entries'][i]
            data.append(Item(
                title=newest['title'],
                pubDate=arrow.get(newest['published'], 'DD MMM YYYY HH:mm:ss'),
                description=newest['summary'],
                link=newest['link']
            ))
    random.shuffle(data)
    return_feed = Feed(
        title='Aggregate RSS Feed',
        description='Aggregate RSS Feed',
        link=rss_link,
        lastBuildDate=datetime.now(),
        items=data,
    )
    # NOTE(review): the trailing "| true | true" below is dataset-row
    # residue (boolean flag columns), not Python code.
    return return_feed.rss() | true | true
f731bd9216657521387add8186e360b1be8b9acc | 20,089 | py | Python | tests/unit/containers/test_declarative_py2_py3.py | whysage/python-dependency-injector | cef6d35cfdf5f39438a89f000d11a21860bc8c5f | [
"BSD-3-Clause"
] | 1,997 | 2016-04-26T13:41:45.000Z | 2022-03-31T16:17:53.000Z | tests/unit/containers/test_declarative_py2_py3.py | whysage/python-dependency-injector | cef6d35cfdf5f39438a89f000d11a21860bc8c5f | [
"BSD-3-Clause"
] | 399 | 2016-05-16T07:20:07.000Z | 2022-03-31T18:23:49.000Z | tests/unit/containers/test_declarative_py2_py3.py | whysage/python-dependency-injector | cef6d35cfdf5f39438a89f000d11a21860bc8c5f | [
"BSD-3-Clause"
] | 162 | 2016-05-16T09:21:43.000Z | 2022-03-30T23:00:26.000Z | """Dependency injector declarative container unit tests."""
import collections
import unittest
from dependency_injector import (
containers,
providers,
errors,
)
class ContainerA(containers.DeclarativeContainer):
    """Base fixture container with two plain providers."""
    p11 = providers.Provider()
    p12 = providers.Provider()
class ContainerB(ContainerA):
    """Fixture container inheriting p11/p12 and adding p21/p22."""
    p21 = providers.Provider()
    p22 = providers.Provider()
class ContainerC(ContainerB):
    """Fixture container inheriting p11..p22 and adding p31/p32."""
    p31 = providers.Provider()
    p32 = providers.Provider()
class DeclarativeContainerTests(unittest.TestCase):
    """Tests for DeclarativeContainer class behaviour: provider collection
    attributes, provider-type restrictions, overriding, copying,
    initialization with overrides, and parent/name resolution."""
    # --- provider collection attributes ---------------------------------
    def test_providers_attribute(self):
        self.assertEqual(ContainerA.providers, dict(p11=ContainerA.p11,
                                                    p12=ContainerA.p12))
        self.assertEqual(ContainerB.providers, dict(p11=ContainerA.p11,
                                                    p12=ContainerA.p12,
                                                    p21=ContainerB.p21,
                                                    p22=ContainerB.p22))
        self.assertEqual(ContainerC.providers, dict(p11=ContainerA.p11,
                                                    p12=ContainerA.p12,
                                                    p21=ContainerB.p21,
                                                    p22=ContainerB.p22,
                                                    p31=ContainerC.p31,
                                                    p32=ContainerC.p32))
    def test_providers_attribute_with_redefinition(self):
        p1 = providers.Provider()
        p2 = providers.Provider()
        class ContainerA2(ContainerA):
            p11 = p1
            p12 = p2
        self.assertEqual(
            ContainerA.providers,
            {
                'p11': ContainerA.p11,
                'p12': ContainerA.p12,
            },
        )
        self.assertEqual(
            ContainerA2.providers,
            {
                'p11': p1,
                'p12': p2,
            },
        )
    def test_cls_providers_attribute(self):
        self.assertEqual(ContainerA.cls_providers, dict(p11=ContainerA.p11,
                                                        p12=ContainerA.p12))
        self.assertEqual(ContainerB.cls_providers, dict(p21=ContainerB.p21,
                                                        p22=ContainerB.p22))
        self.assertEqual(ContainerC.cls_providers, dict(p31=ContainerC.p31,
                                                        p32=ContainerC.p32))
    def test_inherited_providers_attribute(self):
        self.assertEqual(ContainerA.inherited_providers, dict())
        self.assertEqual(ContainerB.inherited_providers,
                         dict(p11=ContainerA.p11,
                              p12=ContainerA.p12))
        self.assertEqual(ContainerC.inherited_providers,
                         dict(p11=ContainerA.p11,
                              p12=ContainerA.p12,
                              p21=ContainerB.p21,
                              p22=ContainerB.p22))
    def test_dependencies_attribute(self):
        class ContainerD(ContainerC):
            p41 = providers.Dependency()
            p42 = providers.DependenciesContainer()
        class ContainerE(ContainerD):
            p51 = providers.Dependency()
            p52 = providers.DependenciesContainer()
        self.assertEqual(
            ContainerD.dependencies,
            {
                'p41': ContainerD.p41,
                'p42': ContainerD.p42,
            },
        )
        self.assertEqual(
            ContainerE.dependencies,
            {
                'p41': ContainerD.p41,
                'p42': ContainerD.p42,
                'p51': ContainerE.p51,
                'p52': ContainerE.p52,
            },
        )
    def test_set_get_del_providers(self):
        a_p13 = providers.Provider()
        b_p23 = providers.Provider()
        ContainerA.p13 = a_p13
        ContainerB.p23 = b_p23
        self.assertEqual(ContainerA.providers, dict(p11=ContainerA.p11,
                                                    p12=ContainerA.p12,
                                                    p13=a_p13))
        self.assertEqual(ContainerB.providers, dict(p11=ContainerA.p11,
                                                    p12=ContainerA.p12,
                                                    p21=ContainerB.p21,
                                                    p22=ContainerB.p22,
                                                    p23=b_p23))
        self.assertEqual(ContainerA.cls_providers, dict(p11=ContainerA.p11,
                                                        p12=ContainerA.p12,
                                                        p13=a_p13))
        self.assertEqual(ContainerB.cls_providers, dict(p21=ContainerB.p21,
                                                        p22=ContainerB.p22,
                                                        p23=b_p23))
        del ContainerA.p13
        del ContainerB.p23
        self.assertEqual(ContainerA.providers, dict(p11=ContainerA.p11,
                                                    p12=ContainerA.p12))
        self.assertEqual(ContainerB.providers, dict(p11=ContainerA.p11,
                                                    p12=ContainerA.p12,
                                                    p21=ContainerB.p21,
                                                    p22=ContainerB.p22))
        self.assertEqual(ContainerA.cls_providers, dict(p11=ContainerA.p11,
                                                        p12=ContainerA.p12))
        self.assertEqual(ContainerB.cls_providers, dict(p21=ContainerB.p21,
                                                        p22=ContainerB.p22))
    # --- provider_type restriction --------------------------------------
    def test_declare_with_valid_provider_type(self):
        class _Container(containers.DeclarativeContainer):
            provider_type = providers.Object
            px = providers.Object(object())
        self.assertIsInstance(_Container.px, providers.Object)
    def test_declare_with_invalid_provider_type(self):
        with self.assertRaises(errors.Error):
            class _Container(containers.DeclarativeContainer):
                provider_type = providers.Object
                px = providers.Provider()
    # NOTE(review): "seth" below looks like a typo for "set"; renaming would
    # change the test id, so it is only flagged here.
    def test_seth_valid_provider_type(self):
        class _Container(containers.DeclarativeContainer):
            provider_type = providers.Object
        _Container.px = providers.Object(object())
        self.assertIsInstance(_Container.px, providers.Object)
    def test_set_invalid_provider_type(self):
        class _Container(containers.DeclarativeContainer):
            provider_type = providers.Object
        with self.assertRaises(errors.Error):
            _Container.px = providers.Provider()
    # --- overriding ------------------------------------------------------
    def test_override(self):
        class _Container(containers.DeclarativeContainer):
            p11 = providers.Provider()
        class _OverridingContainer1(containers.DeclarativeContainer):
            p11 = providers.Provider()
        class _OverridingContainer2(containers.DeclarativeContainer):
            p11 = providers.Provider()
            p12 = providers.Provider()
        _Container.override(_OverridingContainer1)
        _Container.override(_OverridingContainer2)
        self.assertEqual(_Container.overridden,
                         (_OverridingContainer1,
                          _OverridingContainer2))
        self.assertEqual(_Container.p11.overridden,
                         (_OverridingContainer1.p11,
                          _OverridingContainer2.p11))
    def test_override_with_itself(self):
        with self.assertRaises(errors.Error):
            ContainerA.override(ContainerA)
    def test_override_with_parent(self):
        with self.assertRaises(errors.Error):
            ContainerB.override(ContainerA)
    def test_override_decorator(self):
        class _Container(containers.DeclarativeContainer):
            p11 = providers.Provider()
        @containers.override(_Container)
        class _OverridingContainer1(containers.DeclarativeContainer):
            p11 = providers.Provider()
        @containers.override(_Container)
        class _OverridingContainer2(containers.DeclarativeContainer):
            p11 = providers.Provider()
            p12 = providers.Provider()
        self.assertEqual(_Container.overridden,
                         (_OverridingContainer1,
                          _OverridingContainer2))
        self.assertEqual(_Container.p11.overridden,
                         (_OverridingContainer1.p11,
                          _OverridingContainer2.p11))
    def test_reset_last_overriding(self):
        class _Container(containers.DeclarativeContainer):
            p11 = providers.Provider()
        class _OverridingContainer1(containers.DeclarativeContainer):
            p11 = providers.Provider()
        class _OverridingContainer2(containers.DeclarativeContainer):
            p11 = providers.Provider()
            p12 = providers.Provider()
        _Container.override(_OverridingContainer1)
        _Container.override(_OverridingContainer2)
        _Container.reset_last_overriding()
        self.assertEqual(_Container.overridden,
                         (_OverridingContainer1,))
        self.assertEqual(_Container.p11.overridden,
                         (_OverridingContainer1.p11,))
    def test_reset_last_overriding_when_not_overridden(self):
        with self.assertRaises(errors.Error):
            ContainerA.reset_last_overriding()
    def test_reset_override(self):
        class _Container(containers.DeclarativeContainer):
            p11 = providers.Provider()
        class _OverridingContainer1(containers.DeclarativeContainer):
            p11 = providers.Provider()
        class _OverridingContainer2(containers.DeclarativeContainer):
            p11 = providers.Provider()
            p12 = providers.Provider()
        _Container.override(_OverridingContainer1)
        _Container.override(_OverridingContainer2)
        _Container.reset_override()
        self.assertEqual(_Container.overridden, tuple())
        self.assertEqual(_Container.p11.overridden, tuple())
    # --- copying ---------------------------------------------------------
    def test_copy(self):
        @containers.copy(ContainerA)
        class _Container1(ContainerA):
            pass
        @containers.copy(ContainerA)
        class _Container2(ContainerA):
            pass
        self.assertIsNot(ContainerA.p11, _Container1.p11)
        self.assertIsNot(ContainerA.p12, _Container1.p12)
        self.assertIsNot(ContainerA.p11, _Container2.p11)
        self.assertIsNot(ContainerA.p12, _Container2.p12)
        self.assertIsNot(_Container1.p11, _Container2.p11)
        self.assertIsNot(_Container1.p12, _Container2.p12)
    def test_copy_with_replacing(self):
        class _Container(containers.DeclarativeContainer):
            p11 = providers.Object(0)
            p12 = providers.Factory(dict, p11=p11)
        @containers.copy(_Container)
        class _Container1(_Container):
            p11 = providers.Object(1)
            p13 = providers.Object(11)
        @containers.copy(_Container)
        class _Container2(_Container):
            p11 = providers.Object(2)
            p13 = providers.Object(22)
        self.assertIsNot(_Container.p11, _Container1.p11)
        self.assertIsNot(_Container.p12, _Container1.p12)
        self.assertIsNot(_Container.p11, _Container2.p11)
        self.assertIsNot(_Container.p12, _Container2.p12)
        self.assertIsNot(_Container1.p11, _Container2.p11)
        self.assertIsNot(_Container1.p12, _Container2.p12)
        self.assertEqual(_Container.p12(), {'p11': 0})
        self.assertEqual(_Container1.p12(), {'p11': 1})
        self.assertEqual(_Container2.p12(), {'p11': 2})
        self.assertEqual(_Container1.p13(), 11)
        self.assertEqual(_Container2.p13(), 22)
    def test_copy_with_parent_dependency(self):
        # See: https://github.com/ets-labs/python-dependency-injector/issues/477
        class Base(containers.DeclarativeContainer):
            p11 = providers.Object(0)
            p12 = providers.Factory(dict, p11=p11)
        @containers.copy(Base)
        class New(Base):
            p13 = providers.Factory(dict, p12=Base.p12)
        new1 = New()
        new2 = New(p11=1)
        new3 = New(p11=2)
        self.assertEqual(new1.p13(), {'p12': {'p11': 0}})
        self.assertEqual(new2.p13(), {'p12': {'p11': 1}})
        self.assertEqual(new3.p13(), {'p12': {'p11': 2}})
    def test_copy_with_replacing_subcontainer_providers(self):
        # See: https://github.com/ets-labs/python-dependency-injector/issues/374
        class X(containers.DeclarativeContainer):
            foo = providers.Dependency(instance_of=str)
        def build_x():
            return X(foo='1')
        class A(containers.DeclarativeContainer):
            x = providers.DependenciesContainer(**X.providers)
            y = x.foo
        @containers.copy(A)
        class B1(A):
            x = providers.Container(build_x)
        b1 = B1()
        self.assertEqual(b1.y(), '1')
    def test_containers_attribute(self):
        class Container(containers.DeclarativeContainer):
            class Container1(containers.DeclarativeContainer):
                pass
            class Container2(containers.DeclarativeContainer):
                pass
            Container3 = containers.DynamicContainer()
        self.assertEqual(Container.containers,
                         dict(Container1=Container.Container1,
                              Container2=Container.Container2,
                              Container3=Container.Container3))
    # --- instantiation with overriding providers -------------------------
    def test_init_with_overriding_providers(self):
        p1 = providers.Provider()
        p2 = providers.Provider()
        container = ContainerA(p11=p1, p12=p2)
        self.assertIs(container.p11.last_overriding, p1)
        self.assertIs(container.p12.last_overriding, p2)
    def test_init_with_overridden_dependency(self):
        # Bug:
        # https://github.com/ets-labs/python-dependency-injector/issues/198
        class _Container(containers.DeclarativeContainer):
            p1 = providers.Dependency(instance_of=int)
            p2 = providers.Dependency(object)
            p2.override(providers.Factory(dict, p1=p1))
        container = _Container(p1=1)
        self.assertEqual(container.p2(), {'p1': 1})
        self.assertIs(
            container.p2.last_overriding.kwargs['p1'],
            container.p1,
        )
        self.assertIsNot(
            container.p2.last_overriding.kwargs['p1'],
            _Container.p1,
        )
        self.assertIs(
            _Container.p2.last_overriding.kwargs['p1'],
            _Container.p1,
        )
    def test_init_with_chained_dependency(self):
        # Bug:
        # https://github.com/ets-labs/python-dependency-injector/issues/200
        class _Container(containers.DeclarativeContainer):
            p1 = providers.Dependency(instance_of=int)
            p2 = providers.Factory(p1)
        container = _Container(p1=1)
        self.assertEqual(container.p2(), 1)
        self.assertIs(container.p2.cls, container.p1)
        self.assertIs(_Container.p2.cls, _Container.p1)
        self.assertIsNot(container.p2.cls, _Container.p1)
    def test_init_with_dependency_delegation(self):
        # Bug:
        # https://github.com/ets-labs/python-dependency-injector/issues/235
        A = collections.namedtuple('A', [])
        B = collections.namedtuple('B', ['fa'])
        C = collections.namedtuple('B', ['a'])
        class Services(containers.DeclarativeContainer):
            a = providers.Dependency()
            c = providers.Factory(C, a=a)
            b = providers.Factory(B, fa=a.provider)
        a = providers.Factory(A)
        assert isinstance(Services(a=a).c().a, A)  # ok
        Services(a=a).b().fa()
    def test_init_with_grand_child_provider(self):
        # Bug:
        # https://github.com/ets-labs/python-dependency-injector/issues/350
        provider = providers.Provider()
        container = ContainerC(p11=provider)
        self.assertIsInstance(container.p11, providers.Provider)
        self.assertIsInstance(container.p12, providers.Provider)
        self.assertIsInstance(container.p21, providers.Provider)
        self.assertIsInstance(container.p22, providers.Provider)
        self.assertIsInstance(container.p31, providers.Provider)
        self.assertIsInstance(container.p32, providers.Provider)
        self.assertIs(container.p11.last_overriding, provider)
    # --- parent wiring and provider-name resolution ----------------------
    def test_parent_set_in__new__(self):
        class Container(containers.DeclarativeContainer):
            dependency = providers.Dependency()
            dependencies_container = providers.DependenciesContainer()
            container = providers.Container(ContainerA)
        self.assertIs(Container.dependency.parent, Container)
        self.assertIs(Container.dependencies_container.parent, Container)
        self.assertIs(Container.container.parent, Container)
    def test_parent_set_in__setattr__(self):
        class Container(containers.DeclarativeContainer):
            pass
        Container.dependency = providers.Dependency()
        Container.dependencies_container = providers.DependenciesContainer()
        Container.container = providers.Container(ContainerA)
        self.assertIs(Container.dependency.parent, Container)
        self.assertIs(Container.dependencies_container.parent, Container)
        self.assertIs(Container.container.parent, Container)
    def test_resolve_provider_name(self):
        self.assertEqual(ContainerA.resolve_provider_name(ContainerA.p11), 'p11')
    def test_resolve_provider_name_no_provider(self):
        with self.assertRaises(errors.Error):
            ContainerA.resolve_provider_name(providers.Provider())
    def test_child_dependency_parent_name(self):
        class Container(containers.DeclarativeContainer):
            dependency = providers.Dependency()
        with self.assertRaises(errors.Error) as context:
            Container.dependency()
        self.assertEqual(
            str(context.exception),
            'Dependency "Container.dependency" is not defined',
        )
    def test_child_dependencies_container_parent_name(self):
        class Container(containers.DeclarativeContainer):
            dependencies_container = providers.DependenciesContainer()
        with self.assertRaises(errors.Error) as context:
            Container.dependencies_container.dependency()
        self.assertEqual(
            str(context.exception),
            'Dependency "Container.dependencies_container.dependency" is not defined',
        )
    def test_child_container_parent_name(self):
        class ChildContainer(containers.DeclarativeContainer):
            dependency = providers.Dependency()
        class Container(containers.DeclarativeContainer):
            child_container = providers.Container(ChildContainer)
        with self.assertRaises(errors.Error) as context:
            Container.child_container.dependency()
        self.assertEqual(
            str(context.exception),
            'Dependency "Container.child_container.dependency" is not defined',
        )
class DeclarativeContainerWithCustomStringTests(unittest.TestCase):
    """Attribute access on a container must work with ``str`` subclasses.

    Regression tests for:
    https://github.com/ets-labs/python-dependency-injector/issues/479
    """
    class CustomString(str):
        pass
    class CustomClass:
        thing = None
    class CustomContainer(containers.DeclarativeContainer):
        pass
    def setUp(self):
        self.container = self.CustomContainer
        self.provider = providers.Provider()
    def test_setattr(self):
        attr_name = self.CustomString('test_attr')
        setattr(self.container, attr_name, self.provider)
        self.assertIs(self.container.test_attr, self.provider)
    def test_delattr(self):
        attr_name = self.CustomString('test_attr')
        setattr(self.container, attr_name, self.provider)
        delattr(self.container, attr_name)
        with self.assertRaises(AttributeError):
            self.container.test_attr
| 37.201852 | 86 | 0.603116 |
import collections
import unittest
from dependency_injector import (
containers,
providers,
errors,
)
class ContainerA(containers.DeclarativeContainer):
    # Base fixture container (comment-stripped duplicate copy).
    p11 = providers.Provider()
    p12 = providers.Provider()
class ContainerB(ContainerA):
    # Inherits p11/p12, adds p21/p22 (duplicate copy).
    p21 = providers.Provider()
    p22 = providers.Provider()
class ContainerC(ContainerB):
    # Inherits p11..p22, adds p31/p32 (duplicate copy).
    p31 = providers.Provider()
    p32 = providers.Provider()
class DeclarativeContainerTests(unittest.TestCase):
def test_providers_attribute(self):
self.assertEqual(ContainerA.providers, dict(p11=ContainerA.p11,
p12=ContainerA.p12))
self.assertEqual(ContainerB.providers, dict(p11=ContainerA.p11,
p12=ContainerA.p12,
p21=ContainerB.p21,
p22=ContainerB.p22))
self.assertEqual(ContainerC.providers, dict(p11=ContainerA.p11,
p12=ContainerA.p12,
p21=ContainerB.p21,
p22=ContainerB.p22,
p31=ContainerC.p31,
p32=ContainerC.p32))
def test_providers_attribute_with_redefinition(self):
p1 = providers.Provider()
p2 = providers.Provider()
class ContainerA2(ContainerA):
p11 = p1
p12 = p2
self.assertEqual(
ContainerA.providers,
{
'p11': ContainerA.p11,
'p12': ContainerA.p12,
},
)
self.assertEqual(
ContainerA2.providers,
{
'p11': p1,
'p12': p2,
},
)
def test_cls_providers_attribute(self):
self.assertEqual(ContainerA.cls_providers, dict(p11=ContainerA.p11,
p12=ContainerA.p12))
self.assertEqual(ContainerB.cls_providers, dict(p21=ContainerB.p21,
p22=ContainerB.p22))
self.assertEqual(ContainerC.cls_providers, dict(p31=ContainerC.p31,
p32=ContainerC.p32))
def test_inherited_providers_attribute(self):
self.assertEqual(ContainerA.inherited_providers, dict())
self.assertEqual(ContainerB.inherited_providers,
dict(p11=ContainerA.p11,
p12=ContainerA.p12))
self.assertEqual(ContainerC.inherited_providers,
dict(p11=ContainerA.p11,
p12=ContainerA.p12,
p21=ContainerB.p21,
p22=ContainerB.p22))
def test_dependencies_attribute(self):
class ContainerD(ContainerC):
p41 = providers.Dependency()
p42 = providers.DependenciesContainer()
class ContainerE(ContainerD):
p51 = providers.Dependency()
p52 = providers.DependenciesContainer()
self.assertEqual(
ContainerD.dependencies,
{
'p41': ContainerD.p41,
'p42': ContainerD.p42,
},
)
self.assertEqual(
ContainerE.dependencies,
{
'p41': ContainerD.p41,
'p42': ContainerD.p42,
'p51': ContainerE.p51,
'p52': ContainerE.p52,
},
)
def test_set_get_del_providers(self):
a_p13 = providers.Provider()
b_p23 = providers.Provider()
ContainerA.p13 = a_p13
ContainerB.p23 = b_p23
self.assertEqual(ContainerA.providers, dict(p11=ContainerA.p11,
p12=ContainerA.p12,
p13=a_p13))
self.assertEqual(ContainerB.providers, dict(p11=ContainerA.p11,
p12=ContainerA.p12,
p21=ContainerB.p21,
p22=ContainerB.p22,
p23=b_p23))
self.assertEqual(ContainerA.cls_providers, dict(p11=ContainerA.p11,
p12=ContainerA.p12,
p13=a_p13))
self.assertEqual(ContainerB.cls_providers, dict(p21=ContainerB.p21,
p22=ContainerB.p22,
p23=b_p23))
del ContainerA.p13
del ContainerB.p23
self.assertEqual(ContainerA.providers, dict(p11=ContainerA.p11,
p12=ContainerA.p12))
self.assertEqual(ContainerB.providers, dict(p11=ContainerA.p11,
p12=ContainerA.p12,
p21=ContainerB.p21,
p22=ContainerB.p22))
self.assertEqual(ContainerA.cls_providers, dict(p11=ContainerA.p11,
p12=ContainerA.p12))
self.assertEqual(ContainerB.cls_providers, dict(p21=ContainerB.p21,
p22=ContainerB.p22))
def test_declare_with_valid_provider_type(self):
class _Container(containers.DeclarativeContainer):
provider_type = providers.Object
px = providers.Object(object())
self.assertIsInstance(_Container.px, providers.Object)
def test_declare_with_invalid_provider_type(self):
with self.assertRaises(errors.Error):
class _Container(containers.DeclarativeContainer):
provider_type = providers.Object
px = providers.Provider()
def test_seth_valid_provider_type(self):
class _Container(containers.DeclarativeContainer):
provider_type = providers.Object
_Container.px = providers.Object(object())
self.assertIsInstance(_Container.px, providers.Object)
def test_set_invalid_provider_type(self):
class _Container(containers.DeclarativeContainer):
provider_type = providers.Object
with self.assertRaises(errors.Error):
_Container.px = providers.Provider()
def test_override(self):
class _Container(containers.DeclarativeContainer):
p11 = providers.Provider()
class _OverridingContainer1(containers.DeclarativeContainer):
p11 = providers.Provider()
class _OverridingContainer2(containers.DeclarativeContainer):
p11 = providers.Provider()
p12 = providers.Provider()
_Container.override(_OverridingContainer1)
_Container.override(_OverridingContainer2)
self.assertEqual(_Container.overridden,
(_OverridingContainer1,
_OverridingContainer2))
self.assertEqual(_Container.p11.overridden,
(_OverridingContainer1.p11,
_OverridingContainer2.p11))
def test_override_with_itself(self):
with self.assertRaises(errors.Error):
ContainerA.override(ContainerA)
def test_override_with_parent(self):
with self.assertRaises(errors.Error):
ContainerB.override(ContainerA)
def test_override_decorator(self):
class _Container(containers.DeclarativeContainer):
p11 = providers.Provider()
@containers.override(_Container)
class _OverridingContainer1(containers.DeclarativeContainer):
p11 = providers.Provider()
@containers.override(_Container)
class _OverridingContainer2(containers.DeclarativeContainer):
p11 = providers.Provider()
p12 = providers.Provider()
self.assertEqual(_Container.overridden,
(_OverridingContainer1,
_OverridingContainer2))
self.assertEqual(_Container.p11.overridden,
(_OverridingContainer1.p11,
_OverridingContainer2.p11))
def test_reset_last_overriding(self):
class _Container(containers.DeclarativeContainer):
p11 = providers.Provider()
class _OverridingContainer1(containers.DeclarativeContainer):
p11 = providers.Provider()
class _OverridingContainer2(containers.DeclarativeContainer):
p11 = providers.Provider()
p12 = providers.Provider()
_Container.override(_OverridingContainer1)
_Container.override(_OverridingContainer2)
_Container.reset_last_overriding()
self.assertEqual(_Container.overridden,
(_OverridingContainer1,))
self.assertEqual(_Container.p11.overridden,
(_OverridingContainer1.p11,))
def test_reset_last_overriding_when_not_overridden(self):
with self.assertRaises(errors.Error):
ContainerA.reset_last_overriding()
def test_reset_override(self):
class _Container(containers.DeclarativeContainer):
p11 = providers.Provider()
class _OverridingContainer1(containers.DeclarativeContainer):
p11 = providers.Provider()
class _OverridingContainer2(containers.DeclarativeContainer):
p11 = providers.Provider()
p12 = providers.Provider()
_Container.override(_OverridingContainer1)
_Container.override(_OverridingContainer2)
_Container.reset_override()
self.assertEqual(_Container.overridden, tuple())
self.assertEqual(_Container.p11.overridden, tuple())
def test_copy(self):
@containers.copy(ContainerA)
class _Container1(ContainerA):
pass
@containers.copy(ContainerA)
class _Container2(ContainerA):
pass
self.assertIsNot(ContainerA.p11, _Container1.p11)
self.assertIsNot(ContainerA.p12, _Container1.p12)
self.assertIsNot(ContainerA.p11, _Container2.p11)
self.assertIsNot(ContainerA.p12, _Container2.p12)
self.assertIsNot(_Container1.p11, _Container2.p11)
self.assertIsNot(_Container1.p12, _Container2.p12)
def test_copy_with_replacing(self):
class _Container(containers.DeclarativeContainer):
p11 = providers.Object(0)
p12 = providers.Factory(dict, p11=p11)
@containers.copy(_Container)
class _Container1(_Container):
p11 = providers.Object(1)
p13 = providers.Object(11)
@containers.copy(_Container)
class _Container2(_Container):
p11 = providers.Object(2)
p13 = providers.Object(22)
self.assertIsNot(_Container.p11, _Container1.p11)
self.assertIsNot(_Container.p12, _Container1.p12)
self.assertIsNot(_Container.p11, _Container2.p11)
self.assertIsNot(_Container.p12, _Container2.p12)
self.assertIsNot(_Container1.p11, _Container2.p11)
self.assertIsNot(_Container1.p12, _Container2.p12)
self.assertEqual(_Container.p12(), {'p11': 0})
self.assertEqual(_Container1.p12(), {'p11': 1})
self.assertEqual(_Container2.p12(), {'p11': 2})
self.assertEqual(_Container1.p13(), 11)
self.assertEqual(_Container2.p13(), 22)
def test_copy_with_parent_dependency(self):
class Base(containers.DeclarativeContainer):
p11 = providers.Object(0)
p12 = providers.Factory(dict, p11=p11)
@containers.copy(Base)
class New(Base):
p13 = providers.Factory(dict, p12=Base.p12)
new1 = New()
new2 = New(p11=1)
new3 = New(p11=2)
self.assertEqual(new1.p13(), {'p12': {'p11': 0}})
self.assertEqual(new2.p13(), {'p12': {'p11': 1}})
self.assertEqual(new3.p13(), {'p12': {'p11': 2}})
def test_copy_with_replacing_subcontainer_providers(self):
class X(containers.DeclarativeContainer):
foo = providers.Dependency(instance_of=str)
def build_x():
return X(foo='1')
class A(containers.DeclarativeContainer):
x = providers.DependenciesContainer(**X.providers)
y = x.foo
@containers.copy(A)
class B1(A):
x = providers.Container(build_x)
b1 = B1()
self.assertEqual(b1.y(), '1')
def test_containers_attribute(self):
class Container(containers.DeclarativeContainer):
class Container1(containers.DeclarativeContainer):
pass
class Container2(containers.DeclarativeContainer):
pass
Container3 = containers.DynamicContainer()
self.assertEqual(Container.containers,
dict(Container1=Container.Container1,
Container2=Container.Container2,
Container3=Container.Container3))
def test_init_with_overriding_providers(self):
p1 = providers.Provider()
p2 = providers.Provider()
container = ContainerA(p11=p1, p12=p2)
self.assertIs(container.p11.last_overriding, p1)
self.assertIs(container.p12.last_overriding, p2)
def test_init_with_overridden_dependency(self):
class _Container(containers.DeclarativeContainer):
p1 = providers.Dependency(instance_of=int)
p2 = providers.Dependency(object)
p2.override(providers.Factory(dict, p1=p1))
container = _Container(p1=1)
self.assertEqual(container.p2(), {'p1': 1})
self.assertIs(
container.p2.last_overriding.kwargs['p1'],
container.p1,
)
self.assertIsNot(
container.p2.last_overriding.kwargs['p1'],
_Container.p1,
)
self.assertIs(
_Container.p2.last_overriding.kwargs['p1'],
_Container.p1,
)
def test_init_with_chained_dependency(self):
class _Container(containers.DeclarativeContainer):
p1 = providers.Dependency(instance_of=int)
p2 = providers.Factory(p1)
container = _Container(p1=1)
self.assertEqual(container.p2(), 1)
self.assertIs(container.p2.cls, container.p1)
self.assertIs(_Container.p2.cls, _Container.p1)
self.assertIsNot(container.p2.cls, _Container.p1)
def test_init_with_dependency_delegation(self):
A = collections.namedtuple('A', [])
B = collections.namedtuple('B', ['fa'])
C = collections.namedtuple('B', ['a'])
class Services(containers.DeclarativeContainer):
a = providers.Dependency()
c = providers.Factory(C, a=a)
b = providers.Factory(B, fa=a.provider)
a = providers.Factory(A)
assert isinstance(Services(a=a).c().a, A)
Services(a=a).b().fa()
def test_init_with_grand_child_provider(self):
provider = providers.Provider()
container = ContainerC(p11=provider)
self.assertIsInstance(container.p11, providers.Provider)
self.assertIsInstance(container.p12, providers.Provider)
self.assertIsInstance(container.p21, providers.Provider)
self.assertIsInstance(container.p22, providers.Provider)
self.assertIsInstance(container.p31, providers.Provider)
self.assertIsInstance(container.p32, providers.Provider)
self.assertIs(container.p11.last_overriding, provider)
def test_parent_set_in__new__(self):
class Container(containers.DeclarativeContainer):
dependency = providers.Dependency()
dependencies_container = providers.DependenciesContainer()
container = providers.Container(ContainerA)
self.assertIs(Container.dependency.parent, Container)
self.assertIs(Container.dependencies_container.parent, Container)
self.assertIs(Container.container.parent, Container)
def test_parent_set_in__setattr__(self):
class Container(containers.DeclarativeContainer):
pass
Container.dependency = providers.Dependency()
Container.dependencies_container = providers.DependenciesContainer()
Container.container = providers.Container(ContainerA)
self.assertIs(Container.dependency.parent, Container)
self.assertIs(Container.dependencies_container.parent, Container)
self.assertIs(Container.container.parent, Container)
    def test_resolve_provider_name(self):
        # A provider attached to the container resolves to its member name.
        self.assertEqual(ContainerA.resolve_provider_name(ContainerA.p11), 'p11')
def test_resolve_provider_name_no_provider(self):
with self.assertRaises(errors.Error):
ContainerA.resolve_provider_name(providers.Provider())
def test_child_dependency_parent_name(self):
class Container(containers.DeclarativeContainer):
dependency = providers.Dependency()
with self.assertRaises(errors.Error) as context:
Container.dependency()
self.assertEqual(
str(context.exception),
'Dependency "Container.dependency" is not defined',
)
def test_child_dependencies_container_parent_name(self):
class Container(containers.DeclarativeContainer):
dependencies_container = providers.DependenciesContainer()
with self.assertRaises(errors.Error) as context:
Container.dependencies_container.dependency()
self.assertEqual(
str(context.exception),
'Dependency "Container.dependencies_container.dependency" is not defined',
)
def test_child_container_parent_name(self):
class ChildContainer(containers.DeclarativeContainer):
dependency = providers.Dependency()
class Container(containers.DeclarativeContainer):
child_container = providers.Container(ChildContainer)
with self.assertRaises(errors.Error) as context:
Container.child_container.dependency()
self.assertEqual(
str(context.exception),
'Dependency "Container.child_container.dependency" is not defined',
)
class DeclarativeContainerWithCustomStringTests(unittest.TestCase):
    """Regression tests: attribute names that are ``str`` *subclasses* must
    work with ``DeclarativeContainer``'s ``__setattr__``/``__delattr__``.
    """

    class CustomString(str):
        pass

    class CustomClass:
        thing = None

    class CustomContainer(containers.DeclarativeContainer):
        pass

    def setUp(self):
        # Aliases refreshed per test; the container class itself is shared.
        self.container = self.CustomContainer
        self.provider = providers.Provider()

    def _set_via_custom_string(self, name):
        # Assign ``self.provider`` under *name* wrapped in CustomString.
        setattr(self.container, self.CustomString(name), self.provider)

    def test_setattr(self):
        self._set_via_custom_string('test_attr')
        self.assertIs(getattr(self.container, 'test_attr'), self.provider)

    def test_delattr(self):
        self._set_via_custom_string('test_attr')
        delattr(self.container, self.CustomString('test_attr'))
        with self.assertRaises(AttributeError):
            self.container.test_attr
| true | true |
f731bddc26f0944e091eb4201d1fed5699a2d7df | 359 | py | Python | sdk/python/pulumi_azure/notificationhub/__init__.py | kenny-wealth/pulumi-azure | e57e3a81f95bf622e7429c53f0bff93e33372aa1 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure/notificationhub/__init__.py | kenny-wealth/pulumi-azure | e57e3a81f95bf622e7429c53f0bff93e33372aa1 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure/notificationhub/__init__.py | kenny-wealth/pulumi-azure | e57e3a81f95bf622e7429c53f0bff93e33372aa1 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from .hub import *
from .authorization_rule import *
from .namespace import *
from .get_hub import *
from .get_namespace import *
| 32.636364 | 87 | 0.727019 |
# Export this package's modules as members:
from .hub import *
from .authorization_rule import *
from .namespace import *
from .get_hub import *
from .get_namespace import *
| true | true |
f731be3829420df3473b875de15a7e7c4ab77ba2 | 653 | py | Python | synlib/descriptions/ADDFXL.py | vhnatyk/vlsistuff | 0981097bd19a0c482728dcc5048a3615ac9a9a90 | [
"MIT"
] | 26 | 2018-03-17T18:14:22.000Z | 2022-03-14T07:23:13.000Z | synlib/descriptions/ADDFXL.py | psumesh/vlsistuff | 1fe64b093d0581d99c7d826b74c31b8655fa0b31 | [
"MIT"
] | 1 | 2019-10-16T10:31:11.000Z | 2019-10-17T04:14:53.000Z | synlib/descriptions/ADDFXL.py | psumesh/vlsistuff | 1fe64b093d0581d99c7d826b74c31b8655fa0b31 | [
"MIT"
] | 7 | 2018-07-16T07:51:25.000Z | 2022-02-15T14:22:54.000Z | Desc = cellDescClass("ADDFXL")
# Cell-library description for "ADDFXL" — a full adder with inputs A, B,
# carry-in CI, and outputs S (sum) and CO (carry-out).
# ``Desc`` is the cellDescClass instance created on the preceding line.
Desc.properties["cell_footprint"] = "addf"
Desc.properties["area"] = "69.854400"
Desc.properties["cell_leakage_power"] = "3632.360760"
Desc.pinOrder = ['A', 'B', 'CI', 'CO', 'S']
# Combinational timing arcs: every input drives both outputs.
Desc.add_arc("A","S","combi")
Desc.add_arc("B","S","combi")
Desc.add_arc("CI","S","combi")
Desc.add_arc("A","CO","combi")
Desc.add_arc("B","CO","combi")
Desc.add_arc("CI","CO","combi")
Desc.add_param("area",69.854400);
Desc.add_pin("A","input")
Desc.add_pin("B","input")
Desc.add_pin("CI","input")
Desc.add_pin("S","output")
Desc.add_pin_func("S","unknown")
Desc.add_pin("CO","output")
Desc.add_pin_func("CO","unknown")
# Register the cell in the global library table (defined elsewhere).
CellLib["ADDFXL"]=Desc
| 31.095238 | 53 | 0.666156 | Desc = cellDescClass("ADDFXL")
Desc.properties["cell_footprint"] = "addf"
Desc.properties["area"] = "69.854400"
Desc.properties["cell_leakage_power"] = "3632.360760"
Desc.pinOrder = ['A', 'B', 'CI', 'CO', 'S']
Desc.add_arc("A","S","combi")
Desc.add_arc("B","S","combi")
Desc.add_arc("CI","S","combi")
Desc.add_arc("A","CO","combi")
Desc.add_arc("B","CO","combi")
Desc.add_arc("CI","CO","combi")
Desc.add_param("area",69.854400);
Desc.add_pin("A","input")
Desc.add_pin("B","input")
Desc.add_pin("CI","input")
Desc.add_pin("S","output")
Desc.add_pin_func("S","unknown")
Desc.add_pin("CO","output")
Desc.add_pin_func("CO","unknown")
CellLib["ADDFXL"]=Desc
| true | true |
f731beda637d2a5569a79ee173f7c5968ddc0fe2 | 1,688 | py | Python | base/log.py | testtuantuan/appTest | 2717b30b2cc63080cb0c68d72f4a772daf49e5c3 | [
"BSD-3-Clause"
] | null | null | null | base/log.py | testtuantuan/appTest | 2717b30b2cc63080cb0c68d72f4a772daf49e5c3 | [
"BSD-3-Clause"
] | null | null | null | base/log.py | testtuantuan/appTest | 2717b30b2cc63080cb0c68d72f4a772daf49e5c3 | [
"BSD-3-Clause"
] | null | null | null | # !/uer/bin/env python3
# coding=utf-8
import datetime
import logging
import functools
import os
import traceback
import inspect
# Ensure the per-run log directory exists next to the project root.
# makedirs(exist_ok=True) replaces the original listdir()/mkdir() pair,
# which was racy and raised if '../logs' was created between the calls.
os.makedirs('../logs', exist_ok=True)
now = datetime.datetime.now().strftime('%Y-%m-%d_%H_%M_%S')
_log_fp = "../logs/" + now + ".log"
# File handler (via basicConfig): everything from DEBUG up goes into a
# timestamped log file, overwritten per run (filemode='w').
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
                    datefmt='%a, %d %b %Y %H:%M:%S',
                    filename=_log_fp,
                    filemode='w')
# Console handler: only INFO and above is echoed to the terminal.
_console = logging.StreamHandler()
_console.setLevel(logging.INFO)
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
_console.setFormatter(formatter)
# Shared module-level logger used by the ``logged`` decorator below.
LOGGER = logging.getLogger('czb test')
LOGGER.addHandler(_console)
def logged(method):
    """Decorator that logs calls to *method*.

    Logs the call arguments and return value at INFO level, the start
    time and elapsed time at WARNING level, and any exception (with full
    traceback) at ERROR level.  Exceptions are swallowed, preserving the
    original best-effort behaviour, in which case ``None`` is returned.
    """
    @functools.wraps(method)
    def inner(*args, **kwargs):
        # Local, re-initialised per call: the original used a ``nonlocal``
        # shared across calls, so a failing call leaked the return value
        # of the previous successful one.
        return_value = None
        start = datetime.datetime.now()
        try:
            return_value = method(*args, **kwargs)
        except Exception:
            LOGGER.error('Exception:{}'.format(traceback.format_exc()))
        delta = datetime.datetime.now() - start
        # Use the wrapped function's own name: inspect.stack()[1][3]
        # reported the *caller's* frame name and is much slower.
        LOGGER.info('调用 {}函数;\n 传入参数: {}\n 或许还有: {},\n 返回结果: {} ;\n'
                    .format(method.__name__, str(args), str(kwargs), return_value))
        # Original passed a 4th argument for only 3 placeholders; dropped.
        LOGGER.warning('调用 {}函数;\n 时间 {};\n 执行时间 {} ;\n'
                       .format(method.__name__, start, delta))
        return return_value
    return inner
| 28.133333 | 97 | 0.584716 |
import datetime
import logging
import functools
import os
import traceback
import inspect
if "logs" in os.listdir('../'):
pass
else:
os.mkdir('../logs')
now = datetime.datetime.now().strftime('%Y-%m-%d_%H_%M_%S')
_log_fp = "../logs/" + now + ".log"
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S',
filename=_log_fp,
filemode='w')
_console = logging.StreamHandler()
_console.setLevel(logging.INFO)
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
_console.setFormatter(formatter)
LOGGER = logging.getLogger('czb test')
LOGGER.addHandler(_console)
def logged(method):
return_value = None
@functools.wraps(method)
def inner(*args, **kwargs):
start = datetime.datetime.now()
try:
nonlocal return_value
return_value = method(*args, **kwargs)
except Exception:
e = traceback.format_exc()
LOGGER.error('Exception:{}'.format(e))
finally:
pass
end = datetime.datetime.now()
delta = end - start
LOGGER.info('调用 {}函数;\n 传入参数: {}\n 或许还有: {},\n 返回结果: {} ;\n'
.format(inspect.stack()[1][3], str(args), str(kwargs), return_value))
LOGGER.warning('调用 {}函数;\n 时间 {};\n 执行时间 {} ;\n'
.format(inspect.stack()[1][3], start, delta, return_value))
return return_value
return inner
| true | true |
f731bfab7f7356124c08783189c0a2d6c9d964d1 | 4,425 | py | Python | test/functional/wallet_part_segwit_scripts.py | bleach86/ghost-core | 59824a5e00fbc500eeec28950999a05967bad608 | [
"MIT"
] | null | null | null | test/functional/wallet_part_segwit_scripts.py | bleach86/ghost-core | 59824a5e00fbc500eeec28950999a05967bad608 | [
"MIT"
] | null | null | null | test/functional/wallet_part_segwit_scripts.py | bleach86/ghost-core | 59824a5e00fbc500eeec28950999a05967bad608 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2019 The Particl Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_particl import GhostTestFramework
class SegwitScriptsTest(GhostTestFramework):
    """Functional test for wallet handling of segwit script types:
    native bech32 p2wpkh, p2sh-embedded segwit, the three 2-of-2
    multisig variants built from those keys, and using a p2wpkh address
    as the wallet change address.
    """
    def set_test_params(self):
        # Three nodes on a fresh regtest chain; the very high reserve
        # balance keeps automatic staking off so block creation below
        # stays explicit (stakeBlocks).
        self.setup_clean_chain = True
        self.num_nodes = 3
        self.extra_args = [ ['-debug','-noacceptnonstdtxn','-reservebalance=10000000'] for i in range(self.num_nodes)]
    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()
    def setup_network(self, split=False):
        # Star topology around node 0.
        self.add_nodes(self.num_nodes, extra_args=self.extra_args)
        self.start_nodes()
        self.connect_nodes_bi(0, 1)
        self.connect_nodes_bi(0, 2)
        self.sync_all()
    def run_test(self):
        nodes = self.nodes
        # Nodes 0/1 receive the genesis coins; node 2 starts empty and
        # is the recipient of all test payments below.
        self.import_genesis_coins_a(nodes[0])
        self.import_genesis_coins_b(nodes[1])
        nodes[2].extkeyimportmaster(nodes[2].mnemonic('new')['master'])
        # Payment 1: plain (non-witness) address.
        addr_part_native = nodes[2].getnewaddress('addr_part_native')
        nodes[1].sendtoaddress(addr_part_native, 1)
        self.log.info('Test Bitcoin native segwit, p2wpkh')
        # Payment 2: native segwit (bech32) address.
        addr_sw_bech32 = nodes[2].getnewaddress('segwit script', False, False, False, 'bech32')
        nodes[2].manageaddressbook('newsend', addr_sw_bech32)
        nodes[1].sendtoaddress(addr_sw_bech32, 2)
        self.log.info('Test Bitcoin embedded segwit')
        # Payment 3: segwit wrapped in p2sh.
        addr_sw_p2sh = nodes[2].getnewaddress('segwit script', False, False, False, 'p2sh-segwit')
        nodes[2].manageaddressbook('newsend', addr_sw_p2sh)
        nodes[1].sendtoaddress(addr_sw_p2sh, 3)
        # Collect the three pubkeys and sanity-check each address type.
        ro = nodes[2].getaddressinfo(addr_part_native)
        assert(ro['iswitness'] == False)
        pk0 = ro['pubkey']
        ro = nodes[2].getaddressinfo(addr_sw_bech32)
        assert(ro['witness_version'] == 0)
        pk1 = ro['pubkey']
        ro = nodes[2].getaddressinfo(addr_sw_p2sh)
        assert(ro['script'] == 'witness_v0_keyhash')
        pk2 = ro['pubkey']
        self.log.info('Test P2SH')
        # 2-of-2 multisig in each flavour; verify each address matches
        # what decodescript derives from its own redeem script.
        ms_standard = nodes[2].addmultisigaddress_part(2, [pk0, pk1])
        ms_p2shsegwit = nodes[2].addmultisigaddress_part(2, [pk0, pk2], 'ms_p2shsegwit', False, False, 'p2sh-segwit')
        ms_btcnative = nodes[2].addmultisigaddress_part(2, [pk1, pk2], 'ms_btcnative', False, False, 'bech32')
        ro = nodes[2].getaddressinfo(ms_standard['address'])
        assert(ro['iswitness'] == False)
        script = nodes[2].decodescript(ms_standard['redeemScript'])
        assert(ms_standard['address'] == script['p2sh'])
        script = nodes[2].decodescript(ms_p2shsegwit['redeemScript'])
        assert(ms_p2shsegwit['address'] == script['segwit']['p2sh-segwit'])
        script = nodes[2].decodescript(ms_btcnative['redeemScript'])
        assert(ms_btcnative['address'] in script['segwit']['addresses'])
        # Payments 4-6: fund the three multisig addresses.
        nodes[1].sendtoaddress(ms_standard['address'], 4)
        nodes[1].sendtoaddress(ms_p2shsegwit['address'], 5)
        nodes[1].sendtoaddress(ms_btcnative['address'], 6)
        self.sync_all()
        # All six payments (1+2+3+4+5+6 = 21) are visible but unconfirmed
        # until a block is staked.
        txns = nodes[2].filtertransactions()
        assert(len(txns) == 6)
        walletinfo = nodes[2].getwalletinfo()
        assert(walletinfo['balance'] == 0.0)
        assert(walletinfo['unconfirmed_balance'] == 21.0)
        self.stakeBlocks(1)
        walletinfo = nodes[2].getwalletinfo()
        assert(walletinfo['balance'] == 21.0)
        assert(walletinfo['unconfirmed_balance'] == 0.0)
        self.log.info('Test p2wpkh changeaddress')
        # Configure a bech32 p2wpkh address as node 1's change address,
        # then spend and confirm the change lands on it.
        addr_p2wpkh = nodes[1].getnewaddress('p2wpkh change addr', False, False, False, 'bech32')
        assert(addr_p2wpkh.startswith('rtpw1'))
        rv = nodes[1].walletsettings('changeaddress', {'address_standard': addr_p2wpkh})
        assert(rv['changeaddress']['address_standard'] == addr_p2wpkh)
        txid = nodes[1].sendtoaddress(ms_standard['address'], 7)
        wtx = nodes[1].gettransaction(txid)
        # addr_p2wpkh was derived from the external chain and won't be
        # seen as change, so it appears among the transaction details.
        assert(len(wtx['details']) == 3)
        addrs = set()
        for i in range(3):
            addrs.add(wtx['details'][i]['address'])
        assert(len(addrs) == 2)
        assert(ms_standard['address'] in addrs)
        assert(addr_p2wpkh in addrs)
if __name__ == '__main__':
SegwitScriptsTest().main()
| 40.59633 | 118 | 0.654011 |
from test_framework.test_particl import GhostTestFramework
class SegwitScriptsTest(GhostTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [ ['-debug','-noacceptnonstdtxn','-reservebalance=10000000'] for i in range(self.num_nodes)]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self, split=False):
self.add_nodes(self.num_nodes, extra_args=self.extra_args)
self.start_nodes()
self.connect_nodes_bi(0, 1)
self.connect_nodes_bi(0, 2)
self.sync_all()
def run_test(self):
nodes = self.nodes
self.import_genesis_coins_a(nodes[0])
self.import_genesis_coins_b(nodes[1])
nodes[2].extkeyimportmaster(nodes[2].mnemonic('new')['master'])
addr_part_native = nodes[2].getnewaddress('addr_part_native')
nodes[1].sendtoaddress(addr_part_native, 1)
self.log.info('Test Bitcoin native segwit, p2wpkh')
addr_sw_bech32 = nodes[2].getnewaddress('segwit script', False, False, False, 'bech32')
nodes[2].manageaddressbook('newsend', addr_sw_bech32)
nodes[1].sendtoaddress(addr_sw_bech32, 2)
self.log.info('Test Bitcoin embedded segwit')
addr_sw_p2sh = nodes[2].getnewaddress('segwit script', False, False, False, 'p2sh-segwit')
nodes[2].manageaddressbook('newsend', addr_sw_p2sh)
nodes[1].sendtoaddress(addr_sw_p2sh, 3)
ro = nodes[2].getaddressinfo(addr_part_native)
assert(ro['iswitness'] == False)
pk0 = ro['pubkey']
ro = nodes[2].getaddressinfo(addr_sw_bech32)
assert(ro['witness_version'] == 0)
pk1 = ro['pubkey']
ro = nodes[2].getaddressinfo(addr_sw_p2sh)
assert(ro['script'] == 'witness_v0_keyhash')
pk2 = ro['pubkey']
self.log.info('Test P2SH')
ms_standard = nodes[2].addmultisigaddress_part(2, [pk0, pk1])
ms_p2shsegwit = nodes[2].addmultisigaddress_part(2, [pk0, pk2], 'ms_p2shsegwit', False, False, 'p2sh-segwit')
ms_btcnative = nodes[2].addmultisigaddress_part(2, [pk1, pk2], 'ms_btcnative', False, False, 'bech32')
ro = nodes[2].getaddressinfo(ms_standard['address'])
assert(ro['iswitness'] == False)
script = nodes[2].decodescript(ms_standard['redeemScript'])
assert(ms_standard['address'] == script['p2sh'])
script = nodes[2].decodescript(ms_p2shsegwit['redeemScript'])
assert(ms_p2shsegwit['address'] == script['segwit']['p2sh-segwit'])
script = nodes[2].decodescript(ms_btcnative['redeemScript'])
assert(ms_btcnative['address'] in script['segwit']['addresses'])
nodes[1].sendtoaddress(ms_standard['address'], 4)
nodes[1].sendtoaddress(ms_p2shsegwit['address'], 5)
nodes[1].sendtoaddress(ms_btcnative['address'], 6)
self.sync_all()
txns = nodes[2].filtertransactions()
assert(len(txns) == 6)
walletinfo = nodes[2].getwalletinfo()
assert(walletinfo['balance'] == 0.0)
assert(walletinfo['unconfirmed_balance'] == 21.0)
self.stakeBlocks(1)
walletinfo = nodes[2].getwalletinfo()
assert(walletinfo['balance'] == 21.0)
assert(walletinfo['unconfirmed_balance'] == 0.0)
self.log.info('Test p2wpkh changeaddress')
addr_p2wpkh = nodes[1].getnewaddress('p2wpkh change addr', False, False, False, 'bech32')
assert(addr_p2wpkh.startswith('rtpw1'))
rv = nodes[1].walletsettings('changeaddress', {'address_standard': addr_p2wpkh})
assert(rv['changeaddress']['address_standard'] == addr_p2wpkh)
txid = nodes[1].sendtoaddress(ms_standard['address'], 7)
wtx = nodes[1].gettransaction(txid)
assert(len(wtx['details']) == 3)
addrs = set()
for i in range(3):
addrs.add(wtx['details'][i]['address'])
assert(len(addrs) == 2)
assert(ms_standard['address'] in addrs)
assert(addr_p2wpkh in addrs)
if __name__ == '__main__':
SegwitScriptsTest().main()
| true | true |
f731c0b87e19e1774f95606b9d89e1dd2dc40b0a | 15,296 | py | Python | venv/lib/python3.6/site-packages/feedgen/ext/dc.py | jannahuang/blog | e1d8cfa9d79ac06097a0e55531bba9421fcbf283 | [
"MIT"
] | null | null | null | venv/lib/python3.6/site-packages/feedgen/ext/dc.py | jannahuang/blog | e1d8cfa9d79ac06097a0e55531bba9421fcbf283 | [
"MIT"
] | null | null | null | venv/lib/python3.6/site-packages/feedgen/ext/dc.py | jannahuang/blog | e1d8cfa9d79ac06097a0e55531bba9421fcbf283 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
'''
feedgen.ext.dc
~~~~~~~~~~~~~~~~~~~
Extends the FeedGenerator to add Dublin Core Elements to the feeds.
Descriptions partly taken from
http://dublincore.org/documents/dcmi-terms/#elements-coverage
:copyright: 2013-2017, Lars Kiesow <lkiesow@uos.de>
:license: FreeBSD and LGPL, see license.* for more details.
'''
from feedgen.ext.base import BaseExtension
from feedgen.util import xml_elem
class DcBaseExtension(BaseExtension):
'''Dublin Core Elements extension for podcasts.
'''
def __init__(self):
# http://dublincore.org/documents/usageguide/elements.shtml
# http://dublincore.org/documents/dces/
# http://dublincore.org/documents/dcmi-terms/
self._dcelem_contributor = None
self._dcelem_coverage = None
self._dcelem_creator = None
self._dcelem_date = None
self._dcelem_description = None
self._dcelem_format = None
self._dcelem_identifier = None
self._dcelem_language = None
self._dcelem_publisher = None
self._dcelem_relation = None
self._dcelem_rights = None
self._dcelem_source = None
self._dcelem_subject = None
self._dcelem_title = None
self._dcelem_type = None
def extend_ns(self):
return {'dc': 'http://purl.org/dc/elements/1.1/'}
def _extend_xml(self, xml_element):
'''Extend xml_element with set DC fields.
:param xml_element: etree element
'''
DCELEMENTS_NS = 'http://purl.org/dc/elements/1.1/'
for elem in ['contributor', 'coverage', 'creator', 'date',
'description', 'language', 'publisher', 'relation',
'rights', 'source', 'subject', 'title', 'type', 'format',
'identifier']:
if hasattr(self, '_dcelem_%s' % elem):
for val in getattr(self, '_dcelem_%s' % elem) or []:
node = xml_elem('{%s}%s' % (DCELEMENTS_NS, elem),
xml_element)
node.text = val
    def extend_atom(self, atom_feed):
        '''Extend an Atom feed with the set DC fields.

        DC nodes are attached directly to the feed root element.

        :param atom_feed: The feed root element
        :returns: The feed root element, extended in place
        '''
        self._extend_xml(atom_feed)
        return atom_feed
    def extend_rss(self, rss_feed):
        '''Extend a RSS feed with the set DC fields.

        DC nodes are attached to the channel element, which is assumed to
        be the first child of the RSS root.

        :param rss_feed: The feed root element
        :returns: The feed root element, extended in place
        '''
        channel = rss_feed[0]
        self._extend_xml(channel)
        return rss_feed
def dc_contributor(self, contributor=None, replace=False):
'''Get or set the dc:contributor which is an entity responsible for
making contributions to the resource.
For more information see:
http://dublincore.org/documents/dcmi-terms/#elements-contributor
:param contributor: Contributor or list of contributors.
:param replace: Replace alredy set contributors (deault: False).
:returns: List of contributors.
'''
if contributor is not None:
if not isinstance(contributor, list):
contributor = [contributor]
if replace or not self._dcelem_contributor:
self._dcelem_contributor = []
self._dcelem_contributor += contributor
return self._dcelem_contributor
def dc_coverage(self, coverage=None, replace=True):
'''Get or set the dc:coverage which indicated the spatial or temporal
topic of the resource, the spatial applicability of the resource, or
the jurisdiction under which the resource is relevant.
Spatial topic and spatial applicability may be a named place or a
location specified by its geographic coordinates. Temporal topic may be
a named period, date, or date range. A jurisdiction may be a named
administrative entity or a geographic place to which the resource
applies. Recommended best practice is to use a controlled vocabulary
such as the Thesaurus of Geographic Names [TGN]. Where appropriate,
named places or time periods can be used in preference to numeric
identifiers such as sets of coordinates or date ranges.
References:
[TGN] http://www.getty.edu/research/tools/vocabulary/tgn/index.html
:param coverage: Coverage of the feed.
:param replace: Replace already set coverage (default: True).
:returns: Coverage of the feed.
'''
if coverage is not None:
if not isinstance(coverage, list):
coverage = [coverage]
if replace or not self._dcelem_coverage:
self._dcelem_coverage = []
self._dcelem_coverage = coverage
return self._dcelem_coverage
def dc_creator(self, creator=None, replace=False):
'''Get or set the dc:creator which is an entity primarily responsible
for making the resource.
For more information see:
http://dublincore.org/documents/dcmi-terms/#elements-creator
:param creator: Creator or list of creators.
:param replace: Replace alredy set creators (deault: False).
:returns: List of creators.
'''
if creator is not None:
if not isinstance(creator, list):
creator = [creator]
if replace or not self._dcelem_creator:
self._dcelem_creator = []
self._dcelem_creator += creator
return self._dcelem_creator
    def dc_date(self, date=None, replace=True):
        '''Get or set the dc:date which describes a point or period of time
        associated with an event in the lifecycle of the resource.

        For more information see:
        http://dublincore.org/documents/dcmi-terms/#elements-date

        :param date: Date or list of dates.
        :param replace: Replace already set dates (default: True).
        :returns: List of dates.
        '''
        if date is not None:
            if not isinstance(date, list):
                date = [date]
            if replace or not self._dcelem_date:
                self._dcelem_date = []
            self._dcelem_date += date
        return self._dcelem_date
    def dc_description(self, description=None, replace=True):
        '''Get or set the dc:description which is an account of the resource.

        For more information see:
        http://dublincore.org/documents/dcmi-terms/#elements-description

        :param description: Description or list of descriptions.
        :param replace: Replace already set descriptions (default: True).
        :returns: List of descriptions.
        '''
        if description is not None:
            if not isinstance(description, list):
                description = [description]
            if replace or not self._dcelem_description:
                self._dcelem_description = []
            self._dcelem_description += description
        return self._dcelem_description
    def dc_format(self, format=None, replace=True):
        '''Get or set the dc:format which describes the file format, physical
        medium, or dimensions of the resource.

        For more information see:
        http://dublincore.org/documents/dcmi-terms/#elements-format

        :param format: Format of the resource or list of formats.
        :param replace: Replace already set formats (default: True).
        :returns: List of formats of the resource.
        '''
        if format is not None:
            if not isinstance(format, list):
                format = [format]
            if replace or not self._dcelem_format:
                self._dcelem_format = []
            self._dcelem_format += format
        return self._dcelem_format
    def dc_identifier(self, identifier=None, replace=True):
        '''Get or set the dc:identifier which should be an unambiguous
        reference to the resource within a given context.

        For more information see:
        http://dublincore.org/documents/dcmi-terms/#elements-identifier

        :param identifier: Identifier of the resource or list of identifiers.
        :param replace: Replace already set identifiers (default: True).
        :returns: Identifiers of the resource.
        '''
        if identifier is not None:
            if not isinstance(identifier, list):
                identifier = [identifier]
            if replace or not self._dcelem_identifier:
                self._dcelem_identifier = []
            self._dcelem_identifier += identifier
        return self._dcelem_identifier
    def dc_language(self, language=None, replace=True):
        '''Get or set the dc:language which describes a language of the
        resource.

        For more information see:
        http://dublincore.org/documents/dcmi-terms/#elements-language

        :param language: Language or list of languages.
        :param replace: Replace already set languages (default: True).
        :returns: List of languages.
        '''
        if language is not None:
            if not isinstance(language, list):
                language = [language]
            if replace or not self._dcelem_language:
                self._dcelem_language = []
            self._dcelem_language += language
        return self._dcelem_language
    def dc_publisher(self, publisher=None, replace=False):
        '''Get or set the dc:publisher which is an entity responsible for
        making the resource available.

        For more information see:
        http://dublincore.org/documents/dcmi-terms/#elements-publisher

        :param publisher: Publisher or list of publishers.
        :param replace: Replace already set publishers (default: False).
        :returns: List of publishers.
        '''
        if publisher is not None:
            if not isinstance(publisher, list):
                publisher = [publisher]
            if replace or not self._dcelem_publisher:
                self._dcelem_publisher = []
            self._dcelem_publisher += publisher
        return self._dcelem_publisher
    def dc_relation(self, relation=None, replace=False):
        '''Get or set the dc:relation which describes a related resource.

        For more information see:
        http://dublincore.org/documents/dcmi-terms/#elements-relation

        :param relation: Relation or list of relations.
        :param replace: Replace already set relations (default: False).
        :returns: List of relations.
        '''
        if relation is not None:
            if not isinstance(relation, list):
                relation = [relation]
            if replace or not self._dcelem_relation:
                self._dcelem_relation = []
            self._dcelem_relation += relation
        return self._dcelem_relation
def dc_rights(self, rights=None, replace=False):
'''Get or set the dc:rights which may contain information about rights
held in and over the resource.
For more information see:
http://dublincore.org/documents/dcmi-terms/#elements-rights
:param rights: Rights information or list of rights information.
:param replace: Replace alredy set rightss (deault: False).
:returns: List of rights information.
'''
if rights is not None:
if not isinstance(rights, list):
rights = [rights]
if replace or not self._dcelem_rights:
self._dcelem_rights = []
self._dcelem_rights += rights
return self._dcelem_rights
def dc_source(self, source=None, replace=False):
'''Get or set the dc:source which is a related resource from which the
described resource is derived.
The described resource may be derived from the related resource in
whole or in part. Recommended best practice is to identify the related
resource by means of a string conforming to a formal identification
system.
For more information see:
http://dublincore.org/documents/dcmi-terms/#elements-source
:param source: Source or list of sources.
:param replace: Replace alredy set sources (deault: False).
:returns: List of sources.
'''
if source is not None:
if not isinstance(source, list):
source = [source]
if replace or not self._dcelem_source:
self._dcelem_source = []
self._dcelem_source += source
return self._dcelem_source
def dc_subject(self, subject=None, replace=False):
'''Get or set the dc:subject which describes the topic of the resource.
For more information see:
http://dublincore.org/documents/dcmi-terms/#elements-subject
:param subject: Subject or list of subjects.
:param replace: Replace alredy set subjects (deault: False).
:returns: List of subjects.
'''
if subject is not None:
if not isinstance(subject, list):
subject = [subject]
if replace or not self._dcelem_subject:
self._dcelem_subject = []
self._dcelem_subject += subject
return self._dcelem_subject
def dc_title(self, title=None, replace=True):
'''Get or set the dc:title which is a name given to the resource.
For more information see:
http://dublincore.org/documents/dcmi-terms/#elements-title
:param title: Title or list of titles.
:param replace: Replace alredy set titles (deault: False).
:returns: List of titles.
'''
if title is not None:
if not isinstance(title, list):
title = [title]
if replace or not self._dcelem_title:
self._dcelem_title = []
self._dcelem_title += title
return self._dcelem_title
def dc_type(self, type=None, replace=False):
'''Get or set the dc:type which describes the nature or genre of the
resource.
For more information see:
http://dublincore.org/documents/dcmi-terms/#elements-type
:param type: Type or list of types.
:param replace: Replace alredy set types (deault: False).
:returns: List of types.
'''
if type is not None:
if not isinstance(type, list):
type = [type]
if replace or not self._dcelem_type:
self._dcelem_type = []
self._dcelem_type += type
return self._dcelem_type
class DcExtension(DcBaseExtension):
'''Dublin Core Elements extension for podcasts.
'''
class DcEntryExtension(DcBaseExtension):
'''Dublin Core Elements extension for podcasts.
'''
def extend_atom(self, entry):
'''Add dc elements to an atom item. Alters the item itself.
:param entry: An atom entry element.
:returns: The entry element.
'''
self._extend_xml(entry)
return entry
def extend_rss(self, item):
'''Add dc elements to a RSS item. Alters the item itself.
:param item: A RSS item element.
:returns: The item element.
'''
self._extend_xml(item)
return item
| 37.490196 | 79 | 0.622973 |
from feedgen.ext.base import BaseExtension
from feedgen.util import xml_elem
class DcBaseExtension(BaseExtension):
def __init__(self):
self._dcelem_contributor = None
self._dcelem_coverage = None
self._dcelem_creator = None
self._dcelem_date = None
self._dcelem_description = None
self._dcelem_format = None
self._dcelem_identifier = None
self._dcelem_language = None
self._dcelem_publisher = None
self._dcelem_relation = None
self._dcelem_rights = None
self._dcelem_source = None
self._dcelem_subject = None
self._dcelem_title = None
self._dcelem_type = None
def extend_ns(self):
return {'dc': 'http://purl.org/dc/elements/1.1/'}
def _extend_xml(self, xml_element):
DCELEMENTS_NS = 'http://purl.org/dc/elements/1.1/'
for elem in ['contributor', 'coverage', 'creator', 'date',
'description', 'language', 'publisher', 'relation',
'rights', 'source', 'subject', 'title', 'type', 'format',
'identifier']:
if hasattr(self, '_dcelem_%s' % elem):
for val in getattr(self, '_dcelem_%s' % elem) or []:
node = xml_elem('{%s}%s' % (DCELEMENTS_NS, elem),
xml_element)
node.text = val
def extend_atom(self, atom_feed):
self._extend_xml(atom_feed)
return atom_feed
def extend_rss(self, rss_feed):
channel = rss_feed[0]
self._extend_xml(channel)
return rss_feed
def dc_contributor(self, contributor=None, replace=False):
if contributor is not None:
if not isinstance(contributor, list):
contributor = [contributor]
if replace or not self._dcelem_contributor:
self._dcelem_contributor = []
self._dcelem_contributor += contributor
return self._dcelem_contributor
def dc_coverage(self, coverage=None, replace=True):
if coverage is not None:
if not isinstance(coverage, list):
coverage = [coverage]
if replace or not self._dcelem_coverage:
self._dcelem_coverage = []
self._dcelem_coverage = coverage
return self._dcelem_coverage
def dc_creator(self, creator=None, replace=False):
if creator is not None:
if not isinstance(creator, list):
creator = [creator]
if replace or not self._dcelem_creator:
self._dcelem_creator = []
self._dcelem_creator += creator
return self._dcelem_creator
def dc_date(self, date=None, replace=True):
if date is not None:
if not isinstance(date, list):
date = [date]
if replace or not self._dcelem_date:
self._dcelem_date = []
self._dcelem_date += date
return self._dcelem_date
def dc_description(self, description=None, replace=True):
if description is not None:
if not isinstance(description, list):
description = [description]
if replace or not self._dcelem_description:
self._dcelem_description = []
self._dcelem_description += description
return self._dcelem_description
def dc_format(self, format=None, replace=True):
if format is not None:
if not isinstance(format, list):
format = [format]
if replace or not self._dcelem_format:
self._dcelem_format = []
self._dcelem_format += format
return self._dcelem_format
def dc_identifier(self, identifier=None, replace=True):
if identifier is not None:
if not isinstance(identifier, list):
identifier = [identifier]
if replace or not self._dcelem_identifier:
self._dcelem_identifier = []
self._dcelem_identifier += identifier
return self._dcelem_identifier
def dc_language(self, language=None, replace=True):
if language is not None:
if not isinstance(language, list):
language = [language]
if replace or not self._dcelem_language:
self._dcelem_language = []
self._dcelem_language += language
return self._dcelem_language
def dc_publisher(self, publisher=None, replace=False):
if publisher is not None:
if not isinstance(publisher, list):
publisher = [publisher]
if replace or not self._dcelem_publisher:
self._dcelem_publisher = []
self._dcelem_publisher += publisher
return self._dcelem_publisher
def dc_relation(self, relation=None, replace=False):
if relation is not None:
if not isinstance(relation, list):
relation = [relation]
if replace or not self._dcelem_relation:
self._dcelem_relation = []
self._dcelem_relation += relation
return self._dcelem_relation
def dc_rights(self, rights=None, replace=False):
if rights is not None:
if not isinstance(rights, list):
rights = [rights]
if replace or not self._dcelem_rights:
self._dcelem_rights = []
self._dcelem_rights += rights
return self._dcelem_rights
def dc_source(self, source=None, replace=False):
if source is not None:
if not isinstance(source, list):
source = [source]
if replace or not self._dcelem_source:
self._dcelem_source = []
self._dcelem_source += source
return self._dcelem_source
def dc_subject(self, subject=None, replace=False):
if subject is not None:
if not isinstance(subject, list):
subject = [subject]
if replace or not self._dcelem_subject:
self._dcelem_subject = []
self._dcelem_subject += subject
return self._dcelem_subject
def dc_title(self, title=None, replace=True):
if title is not None:
if not isinstance(title, list):
title = [title]
if replace or not self._dcelem_title:
self._dcelem_title = []
self._dcelem_title += title
return self._dcelem_title
def dc_type(self, type=None, replace=False):
if type is not None:
if not isinstance(type, list):
type = [type]
if replace or not self._dcelem_type:
self._dcelem_type = []
self._dcelem_type += type
return self._dcelem_type
class DcExtension(DcBaseExtension):
class DcEntryExtension(DcBaseExtension):
def extend_atom(self, entry):
self._extend_xml(entry)
return entry
def extend_rss(self, item):
self._extend_xml(item)
return item
| true | true |
f731c242461116c49eda6d5115c4af06fc3b920c | 602 | py | Python | var/spack/repos/builtin/packages/opendx/package.py | kkauder/spack | 6ae8d5c380c1f42094b05d38be26b03650aafb39 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2,360 | 2017-11-06T08:47:01.000Z | 2022-03-31T14:45:33.000Z | var/spack/repos/builtin/packages/opendx/package.py | kkauder/spack | 6ae8d5c380c1f42094b05d38be26b03650aafb39 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 13,838 | 2017-11-04T07:49:45.000Z | 2022-03-31T23:38:39.000Z | var/spack/repos/builtin/packages/opendx/package.py | kkauder/spack | 6ae8d5c380c1f42094b05d38be26b03650aafb39 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1,793 | 2017-11-04T07:45:50.000Z | 2022-03-30T14:31:53.000Z | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
class Opendx(AutotoolsPackage):
"""Open Visualization Data Explorer."""
homepage = "https://github.com/Mwoolsey/OpenDX"
git = "https://github.com/Mwoolsey/OpenDX.git"
version('master', branch='master')
depends_on('motif') # lesstif also works, but exhibits odd behaviors
depends_on('gl')
@run_before('autoreconf')
def distclean(self):
make('distclean')
| 28.666667 | 73 | 0.697674 |
class Opendx(AutotoolsPackage):
homepage = "https://github.com/Mwoolsey/OpenDX"
git = "https://github.com/Mwoolsey/OpenDX.git"
version('master', branch='master')
depends_on('motif')
depends_on('gl')
@run_before('autoreconf')
def distclean(self):
make('distclean')
| true | true |
f731c2a3b3050eec2782a2b33492c47117028a37 | 2,974 | py | Python | hatanaka/test/test_general_compression.py | valgur/hatanaka | 2f2f413050a922a507841546ba9ac5610a0dd882 | [
"MIT"
] | 5 | 2021-04-14T15:37:48.000Z | 2022-02-11T08:59:34.000Z | hatanaka/test/test_general_compression.py | valgur/hatanaka | 2f2f413050a922a507841546ba9ac5610a0dd882 | [
"MIT"
] | null | null | null | hatanaka/test/test_general_compression.py | valgur/hatanaka | 2f2f413050a922a507841546ba9ac5610a0dd882 | [
"MIT"
] | 1 | 2021-11-10T22:28:50.000Z | 2021-11-10T22:28:50.000Z | import gzip
import io
import shutil
import pytest
from hatanaka import compress, compress_on_disk, decompress, decompress_on_disk
from .conftest import clean, compress_pairs, decompress_pairs, get_data_path
@pytest.mark.parametrize(
'input_suffix, expected_suffix',
decompress_pairs
)
def test_decompress(tmp_path, crx_sample, rnx_bytes, input_suffix, expected_suffix):
# prepare
sample_path = tmp_path / ('sample' + input_suffix)
in_file = 'sample' + input_suffix
shutil.copy(get_data_path(in_file), sample_path)
# decompress
converted = decompress(sample_path)
# check
assert clean(converted) == clean(rnx_bytes)
converted = decompress(sample_path.read_bytes())
assert clean(converted) == clean(rnx_bytes)
def make_nav(txt):
return txt.replace(b'OBSERVATION', b'NAVIGATION ')
@pytest.mark.parametrize(
'input_suffix',
['.rnx', '.RNX', '.21n']
)
def test_decompress_non_obs(tmp_path, rnx_bytes, input_suffix):
# prepare
txt = make_nav(rnx_bytes)
sample_path = tmp_path / ('sample' + input_suffix + '.gz')
sample_path.write_bytes(gzip.compress(txt))
# decompress
out_path = decompress_on_disk(sample_path)
# check
assert out_path.exists()
assert out_path == tmp_path / ('sample' + input_suffix)
assert clean(out_path.read_bytes()) == clean(txt)
@pytest.mark.parametrize(
'input_suffix, compression, expected_suffix',
compress_pairs
)
def test_compress(tmp_path, crx_sample, rnx_bytes, input_suffix, compression, expected_suffix):
# prepare
in_file = 'sample' + input_suffix
sample_path = tmp_path / in_file
shutil.copy(get_data_path(in_file), sample_path)
# compress
converted = compress(sample_path, compression=compression)
# check
assert clean(decompress(converted)) == clean(rnx_bytes)
converted = compress(sample_path.read_bytes(), compression=compression)
assert clean(decompress(converted)) == clean(rnx_bytes)
@pytest.mark.parametrize(
'input_suffix',
['.rnx', '.RNX', '.21n']
)
def test_compress_non_obs(tmp_path, rnx_bytes, input_suffix):
# prepare
txt = make_nav(rnx_bytes)
sample_path = tmp_path / ('sample' + input_suffix)
sample_path.write_bytes(txt)
# compress
out_path = compress_on_disk(sample_path)
# check
assert out_path.exists()
assert out_path == tmp_path / ('sample' + input_suffix + '.gz')
assert clean(decompress(out_path)) == clean(txt)
def test_invalid_input(crx_str, rnx_bytes):
with pytest.raises(ValueError):
decompress(io.BytesIO(rnx_bytes))
with pytest.raises(ValueError):
compress(io.BytesIO(rnx_bytes))
def test_invalid_name(tmp_path, rnx_sample):
sample_path = tmp_path / 'sample'
shutil.copy(rnx_sample, sample_path)
with pytest.raises(ValueError) as excinfo:
decompress_on_disk(sample_path)
msg = excinfo.value.args[0]
assert msg.endswith('is not a valid RINEX file name')
| 30.659794 | 95 | 0.718225 | import gzip
import io
import shutil
import pytest
from hatanaka import compress, compress_on_disk, decompress, decompress_on_disk
from .conftest import clean, compress_pairs, decompress_pairs, get_data_path
@pytest.mark.parametrize(
'input_suffix, expected_suffix',
decompress_pairs
)
def test_decompress(tmp_path, crx_sample, rnx_bytes, input_suffix, expected_suffix):
sample_path = tmp_path / ('sample' + input_suffix)
in_file = 'sample' + input_suffix
shutil.copy(get_data_path(in_file), sample_path)
converted = decompress(sample_path)
assert clean(converted) == clean(rnx_bytes)
converted = decompress(sample_path.read_bytes())
assert clean(converted) == clean(rnx_bytes)
def make_nav(txt):
return txt.replace(b'OBSERVATION', b'NAVIGATION ')
@pytest.mark.parametrize(
'input_suffix',
['.rnx', '.RNX', '.21n']
)
def test_decompress_non_obs(tmp_path, rnx_bytes, input_suffix):
txt = make_nav(rnx_bytes)
sample_path = tmp_path / ('sample' + input_suffix + '.gz')
sample_path.write_bytes(gzip.compress(txt))
out_path = decompress_on_disk(sample_path)
assert out_path.exists()
assert out_path == tmp_path / ('sample' + input_suffix)
assert clean(out_path.read_bytes()) == clean(txt)
@pytest.mark.parametrize(
'input_suffix, compression, expected_suffix',
compress_pairs
)
def test_compress(tmp_path, crx_sample, rnx_bytes, input_suffix, compression, expected_suffix):
in_file = 'sample' + input_suffix
sample_path = tmp_path / in_file
shutil.copy(get_data_path(in_file), sample_path)
converted = compress(sample_path, compression=compression)
assert clean(decompress(converted)) == clean(rnx_bytes)
converted = compress(sample_path.read_bytes(), compression=compression)
assert clean(decompress(converted)) == clean(rnx_bytes)
@pytest.mark.parametrize(
'input_suffix',
['.rnx', '.RNX', '.21n']
)
def test_compress_non_obs(tmp_path, rnx_bytes, input_suffix):
txt = make_nav(rnx_bytes)
sample_path = tmp_path / ('sample' + input_suffix)
sample_path.write_bytes(txt)
out_path = compress_on_disk(sample_path)
assert out_path.exists()
assert out_path == tmp_path / ('sample' + input_suffix + '.gz')
assert clean(decompress(out_path)) == clean(txt)
def test_invalid_input(crx_str, rnx_bytes):
with pytest.raises(ValueError):
decompress(io.BytesIO(rnx_bytes))
with pytest.raises(ValueError):
compress(io.BytesIO(rnx_bytes))
def test_invalid_name(tmp_path, rnx_sample):
sample_path = tmp_path / 'sample'
shutil.copy(rnx_sample, sample_path)
with pytest.raises(ValueError) as excinfo:
decompress_on_disk(sample_path)
msg = excinfo.value.args[0]
assert msg.endswith('is not a valid RINEX file name')
| true | true |
f731c31f75ea7cd476db15705457c015ff3032c0 | 17,905 | py | Python | python/apogee/aspcap/teff.py | sdss/apogee | e134409dc14b20f69e68a0d4d34b2c1b5056a901 | [
"BSD-3-Clause"
] | 5 | 2019-04-11T13:35:24.000Z | 2019-11-14T06:12:51.000Z | python/apogee/aspcap/teff.py | sdss/apogee | e134409dc14b20f69e68a0d4d34b2c1b5056a901 | [
"BSD-3-Clause"
] | null | null | null | python/apogee/aspcap/teff.py | sdss/apogee | e134409dc14b20f69e68a0d4d34b2c1b5056a901 | [
"BSD-3-Clause"
] | 5 | 2018-09-20T22:07:43.000Z | 2021-01-15T07:13:38.000Z | # routines for calibrating/comparing effective temperatures with photometric sample
from apogee.utils import apload
from apogee.utils import apselect
from astropy.io import fits, ascii
from tools import match
from tools import plots
from tools import fit
from apogee.utils import bitmask
from apogee.aspcap import err
import pdb
import matplotlib.pyplot as plt
import numpy as np
import os
import matplotlib
def bindata(xdata,ydata,bins,median=True) :
    """
    Return the median (or mean) of ydata within each bin in x.

    Args:
        xdata : array of x values
        ydata : array of y values, same length as xdata
        bins  : array of bin edges; statistic i is computed over points with
                bins[i] < x < bins[i+1] (both edges exclusive)
        median (bool) : if True (default) use the median, else the mean

    Returns:
        array of same length as bins; element i holds the statistic for
        bin i, NaN for empty bins; the final element is unused (stays 0.)
    """
    mean=bins*0.
    for i in range(len(bins)-1) :
        j=np.where((xdata>bins[i]) & (xdata<bins[i+1]))[0]
        if len(j) == 0 :
            # empty bin: record NaN explicitly instead of letting
            # median/mean of an empty array emit a RuntimeWarning
            mean[i]=np.nan
        elif median :
            mean[i]=np.median(ydata[j])
        else :
            mean[i]=ydata[j].mean()
    return mean
def ghb(allstar,glatmin=30.,ebvmax=0.03,trange=[3750,5500],loggrange=[-1,6],mhrange=[-2.5,0.75],alpha=False,out='teffcomp',yr=[-500,500],
        calib=False,dr13=False,grid=None,cmap='rainbow',doerr=True) :
    """
    Compares allstar ASPCAP Teff with photometric Teff from the Gonzalez
    Hernandez & Bonifacio (GHB) color-temperature relation for a low-extinction
    sample (|GLAT|>glatmin, SFD_EBV<ebvmax), makes diagnostic plots, and fits
    the Teff difference as a function of [M/H] (1D) and of ([M/H],Teff) (2D).

    Args:
        allstar   : allStar structure

    Keyword args:
        glatmin (float) : minimum |GLAT| for sample (default=30)
        ebvmax (float) : maximum SFD_EBV for sample (default=0.03)
        trange, loggrange, mhrange : Teff/logg/[M/H] selection ranges
        alpha (bool) : color-code by [alpha/M] and do separate low/high-alpha fits
        out (str) : root name for output plot files (default='teffcomp')
        yr : y-axis (delta Teff) plot range
        calib (bool) : use calibrated PARAM instead of raw FPARAM
        dr13 (bool) : DR13-style comparison overlays
        grid : if set, use FPARAM_CLASS values for the given grid index
        cmap (str) : matplotlib colormap name
        doerr (bool) : fit an uncertainty model via err.errfit (default=True)

    Returns:
        dict with calibration validity ranges, 1D ('par') and 2D ('par2d')
        fit coefficients, residual 'rms', and error-model 'errpar'
    """
    # select data to use
    badtarg=['YOUNG','EMBEDDED','EXTENDED','M31','M33','EMISSION','RRLYR','DSPH','MAGCLOUD']
    # plots using Berger isochrone Teff for informational purposes only
    if calib : param='PARAM'
    else : param = 'FPARAM'
    berger=fits.open(os.environ['APOGEE_DIR']+'/data/calib/teff_berger.fits')[1].data
    gd=apselect.select(allstar,badval=['STAR_BAD'],badstar=['MULTIPLE_SUSPECT'],badtarg=badtarg,raw=True)
    i1,i2=match.match(allstar['APOGEE_ID'][gd],berger['APOGEE_ID'])
    fig,ax=plots.multi(1,1,figsize=(12,6))
    plots.plotc(ax,allstar[param][gd[i1],3],allstar[param][gd[i1],0]-berger['TEFF'][i2],allstar[param][gd[i1],0],
                xt='[M/H]',yt='ASPCAP-Berger',zt='Teff',xr=[-3,1],yr=[-500,500],zr=[4500,7000],colorbar=True)
    ax.grid()
    fig.savefig(out+'_berger_mh.png')
    plt.close()
    fig,ax=plots.multi(1,1,figsize=(12,6))
    plots.plotc(ax,allstar[param][gd[i1],0],allstar[param][gd[i1],1],allstar[param][gd[i1],0]-berger['TEFF'][i2],
                xt='Teff',yt='log ',zt='ASPCAP-Berger',xr=[8000,3000],yr=[6,-1],zr=[-250,250],colorbar=True)
    ax.grid()
    fig.savefig(out+'_berger_hr.png')
    plt.close()

    # main calibration sample: good stars within the requested parameter ranges
    gd=apselect.select(allstar,badval=['STAR_BAD'],badstar=['MULTIPLE_SUSPECT'],badtarg=badtarg,teff=trange,mh=mhrange,logg=loggrange,raw=True)
    allstar=allstar[gd]
    #if dr13 :
    #  j=np.where((abs(allstar['GLAT'])>glatmin)&(allstar['SFD_EBV']<ebvmax))[0]
    #else :
    # low-extinction, high-latitude subsample with valid J and K photometry
    j=np.where((abs(allstar['GLAT'])>glatmin)&(allstar['SFD_EBV']>-0.01)&(allstar['SFD_EBV']<ebvmax)&(abs(allstar['J'])<90)&(abs(allstar['K'])<90))[0]
    # remove second gen GC stars
    #if not dr13 :
    gcstars = ascii.read(os.environ['APOGEE_DIR']+'/data/calib/gc_szabolcs.dat')
    bd=np.where(gcstars['pop'] != 1)[0]
    j = [x for x in j if allstar[x]['APOGEE_ID'] not in gcstars['id'][bd]]
    allstar=allstar[j]

    # photometric Teff and its color derivative from the GHB relation
    # NOTE(review): the local variable 'ghb' shadows this function's name
    ghb,dtdjk=cte_ghb(allstar['J']-allstar['K'],allstar['FPARAM'][:,3],dwarf=False)
    ghb_dwarf,dtdjk_dwarf=cte_ghb(allstar['J']-allstar['K'],allstar['FPARAM'][:,3],dwarf=True)
    # use dwarf relation for dwarfs
    dw=np.where(allstar['FPARAM'][:,1] > 3.8)[0]
    ghb[dw]=ghb_dwarf[dw]
    dtdjk[dw]=dtdjk_dwarf[dw]
    # reject gross outliers (>500 K from photometric Teff)
    gd=np.where(abs(allstar['FPARAM'][:,0]-ghb) < 500)[0]
    ghb=ghb[gd]
    dtdjk=dtdjk[gd]
    allstar=allstar[gd]
    print('Teff calibration, number of stars: ', len(allstar))

    # pull working arrays from the requested parameter set
    if calib :
        param='PARAM'
        teff=allstar[param][:,0]
        logg=allstar[param][:,1]
        mh=allstar[param][:,3]
        am=allstar[param][:,6]
    elif grid is None :
        param='FPARAM'
        teff=allstar[param][:,0]
        logg=allstar[param][:,1]
        mh=allstar[param][:,3]
        am=allstar[param][:,6]
    else :
        param='FPARAM_CLASS'
        teff=allstar[param][:,grid,0]
        logg=allstar[param][:,grid,1]
        mh=allstar[param][:,grid,3]
        am=allstar[param][:,grid,6]
        out=out+'_grid{:1d}'.format(grid)

    # HR diagram plot of differences
    fig,ax=plots.multi(1,1,figsize=(12,6))
    plots.plotc(ax,teff,logg,teff-ghb, xt='Teff',yt='log ',zt='ASPCAP-GHB',xr=[8000,3000],yr=[6,-1],zr=[-250,250],colorbar=True)
    ax.grid()
    fig.savefig(out+'_ghb_hr.png')
    plt.close()

    # plot Teff difference against metallicity, color-code by temperature
    fig,ax=plots.multi(1,1,hspace=0.001,wspace=0.001,figsize=(12,6))
    xr=[-3.0,1.0]
    zr=trange
    if dr13: zr=[3500,5500]
    binsize=0.25
    bins=np.arange(-2.5,0.75,binsize)
    # diff color-coded by gravity as f([M/H])
    if alpha :
        plots.plotc(ax,mh,teff-ghb,am,zr=[-0.1,0.4],xr=xr,yr=yr,xt='[M/H]',yt='ASPCAP-photometric Teff',
                    colorbar=True,zt=r'[$\alpha$/M]',rasterized=True,cmap=cmap)
    else :
        plots.plotc(ax,mh,teff-ghb,teff,xr=xr,yr=yr,xt='[M/H]',yt='ASPCAP-photometric Teff',
                    colorbar=True,zt='$T_{eff}$',rasterized=True,zr=trange,cmap=cmap)
    ax.grid()
    # overplot binned mean (white) and median (blue) of the difference
    mean=bindata(mh,teff-ghb,bins,median=False)
    if not dr13: plots.plotp(ax,bins+binsize/2.,mean,marker='o',size=40)
    mean=bindata(mh,teff-ghb,bins,median=True)
    if not dr13: plots.plotp(ax,bins+binsize/2.,mean,marker='o',size=40,color='b')
    ax.text(0.1,0.9,'E(B-V)<{:6.2f}'.format(ebvmax),transform=ax.transAxes)
    # fit the binned medians (only finite bins) with a quadratic in [M/H]
    gd=np.where(np.isfinite(mean))[0]
    tefit = fit.fit1d(bins[gd]+binsize/2.,mean[gd],degree=2,reject=0)
    # 1D quadratic fit as a function of metallicity
    allfit = fit.fit1d(mh,teff-ghb,ydata=teff,degree=2,reject=0)
    fig2,ax2=plots.multi(1,1)
    # 2D fit of the difference as a function of ([M/H], Teff)
    tefit2 = fit.fit2d(mh,teff,teff-ghb,reject=0,plot=ax2,zr=[-500,200],xt='[M/H]',yt=['Teff'],zt='$\Delta Teff$')
    #pfit = fit.fit2d(allstar[param][:,3],allstar[param][:,0],allstar[param][:,0]-ghb,plot=ax[0,0],zr=[-500,200],xt='[M/H]',yt=['Teff'],zt='$\Delta Teff$')
    #ejk=np.clip(np.sqrt(allstar['J_ERR']**2+allstar['K_ERR']**2),0.,0.02)
    #errpar = err.errfit(teff,allstar['SNR'],mh,teff-tefit(mh)-ghb,title='Teff',out=out+'_phot',zr=[0,250],meanerr=abs(dtdjk)*ejk)
    if doerr:
        errpar = err.errfit(teff,allstar['SNR'],mh,teff-tefit(mh)-ghb,title='Teff',out=out,zr=[0,150])
    else: errpar=0.

    x=np.linspace(-3,1,200)
    # rms of residuals after applying the 1D calibration
    rms = (teff-tefit(mh)-ghb).std()
    if dr13:
        # overlay the published DR13 relation
        plots.plotl(ax,x,-36.17+95.97*x-15.09*x**2,color='k')
        print(allfit)
    else :
        plots.plotl(ax,x,tefit(x),color='k')
        ax.text(0.98,0.9,'rms: {:6.1f}'.format(rms),transform=ax.transAxes,ha='right')
    # overlay the 2D fit evaluated at a grid of fixed temperatures
    cmap = matplotlib.cm.get_cmap(cmap)
    for t in np.arange(trange[0],trange[1],500.) :
        rgba=cmap((t-trange[0])/(trange[1]-trange[0]))
        y=x*0.+t
        plots.plotl(ax,x,tefit2(x,y),color=rgba)
    # hook data into the interactive point-identification event handler
    plots._data_x = mh
    plots._data_y = teff-ghb
    plots._data = allstar
    plots.event(fig)
    # separate fits for low/hi alpha/M if requested
    if alpha :
        gdlo=apselect.select(allstar,badval=['STAR_BAD'],teff=trange,mh=mhrange,logg=[0,3.8],alpha=[-0.1,0.1],raw=True)
        mean=bindata(mh[gdlo],teff[gdlo]-ghb[gdlo],bins)
        plots.plotp(ax,bins,mean,marker='o',size=40,color='g')
        tmpfit = fit.fit1d(mh[gdlo],teff[gdlo]-ghb[gdlo],ydata=teff[gdlo],degree=2)
        plots.plotl(ax,x,tmpfit(x))
        print('low alpha: ', len(gdlo))
        gdhi=apselect.select(allstar,badval=['STAR_BAD'],teff=trange,mh=mhrange,logg=[0,3.8],alpha=[0.1,0.5],raw=True)
        mean=bindata(mh[gdhi],teff[gdhi]-ghb[gdhi],bins)
        plots.plotp(ax,bins,mean,marker='o',size=40,color='b')
        tmpfit = fit.fit1d(mh[gdhi],teff[gdhi]-ghb[gdhi],ydata=teff[gdhi],degree=2)
        plots.plotl(ax,x,tmpfit(x))
        print('hi alpha: ', len(gdhi))

    fig.tight_layout()
    fig.savefig(out+'.png')
    plt.close()
    # re-save with larger fonts for the PDF version
    plt.rc('font',size=14)
    plt.rc('axes',titlesize=14)
    plt.rc('axes',labelsize=14)
    fig.savefig(out+'.pdf')
    plt.close()

    # auxiliary plots with different color-codings
    try:
        meanfib=allstar['MEANFIB']
    except:
        meanfib=teff*0.
    fig,ax=plots.multi(2,2,hspace=0.001,wspace=0.001,figsize=(12,8))
    plots.plotc(ax[0,0],mh,teff-ghb,logg,zr=[0,5],xr=xr,yr=yr,xt='[M/H]',yt='ASPCAP-photometric Teff',colorbar=True,zt='log g',size=2)
    plots.plotc(ax[0,1],mh,teff-ghb,meanfib,zr=[0,300],xr=xr,yr=yr,xt='[M/H]',yt='ASPCAP-photometric Teff',colorbar=True,zt='mean fiber',size=2)
    pfit = fit.fit1d(mh,teff-ghb,ydata=teff,plot=ax[1,0],zr=[-500,200],xt='[M/H]',yt='$\Delta Teff$',xr=[-2.7,0.9],yr=[3500,5000],colorbar=True,zt='Teff')
    pfit = fit.fit1d(teff,teff-ghb,ydata=mh,plot=ax[1,1],zr=[-500,200],xt='Teff',yt='$\Delta Teff$',xr=trange,yr=[-2.5,0.5],colorbar=True,zt='[M/H]')
    fig.tight_layout()
    fig.savefig(out+'_b.png')
    plt.close()

    # do some test 2D and 1D fits and plots
    #fig,ax=plots.multi(2,2,hspace=0.5,wspace=0.001)
    #ax[0,1].xaxis.set_visible(False)
    #ax[0,1].yaxis.set_visible(False)
    #pfit = fit.fit2d(allstar[param][:,3],allstar[param][:,0],allstar[param][:,0]-ghb,plot=ax[0,0],zr=[-500,200],xt='[M/H]',yt=['Teff'],zt='$\Delta Teff$')
    #pfit = fit.fit1d(allstar[param][:,3],allstar[param][:,0]-ghb,ydata=allstar[param][:,0],plot=ax[1,0],zr=[-500,200],xt='[M/H]',yt='$\Delta Teff$',xr=[-2.7,0.9],yr=[3500,5000])
    #pfit = fit.fit1d(allstar[param][:,0],allstar[param][:,0]-ghb,ydata=allstar[param][:,3],plot=ax[1,1],zr=[-500,200],xt='Teff',xr=[3900,5100],yr=[-2.5,0.5])
    plt.draw()

    return {'caltemin': 3000., 'caltemax': 100000., 'temin' : trange[0], 'temax': trange[1],
            'mhmin': mhrange[0], 'mhmax' : mhrange[1],
            'par': tefit.parameters, 'rms' :rms, 'par2d': tefit2.parameters, 'errpar' : errpar}
def irfm(allstar,trange=[4000,5000],mhrange=[-2.5,0.75],out='dteff') :
    '''
    Compares allstar ASPCAP Teff with various photometric Teff from the JAJ
    IRFM compilation (SAGA, CL, TH, SFD subsamples), makes comparison plots,
    and fits the Teff difference for the SFD subsample.

    Args:
        allstar : allStar structure

    Keyword args:
        trange  : Teff selection range (default=[4000,5000])
        mhrange : [M/H] selection range (default=[-2.5,0.75])
        out (str) : root name for output plot files (default='dteff')

    Returns:
        the last 1D fit of ASPCAP-IRFM Teff for the SFD subsample
    '''
    # select stars
    gd=apselect.select(allstar,badval=['STAR_BAD'],teff=trange,mh=mhrange,raw=True)
    allstar=allstar[gd]

    # get IRFM data
    irfm=fits.open(os.environ['APOGEE_DIR']+'/data/calib/irfm_temp.fits')[1].data

    # get the subsamples and match. Note that we have to do this separately for each subsample because some
    #   stars appear in more than one subsample
    saga=np.where(irfm['SOURCE'] == 'SAGA')[0]
    saga1,saga2=match.match(np.chararray.strip(allstar['APOGEE_ID']),np.chararray.strip(irfm['2MASS ID'][saga]))
    cl=np.where(irfm['SOURCE'] == 'CL')[0]
    cl1,cl2=match.match(np.chararray.strip(allstar['APOGEE_ID']),np.chararray.strip(irfm['2MASS ID'][cl]))
    th=np.where(irfm['SOURCE'] == 'TH')[0]
    th1,th2=match.match(np.chararray.strip(allstar['APOGEE_ID']),np.chararray.strip(irfm['2MASS ID'][th]))
    sfd=np.where(irfm['SOURCE'] == 'SFD')[0]
    sfd1,sfd2=match.match(np.chararray.strip(allstar['APOGEE_ID']),np.chararray.strip(irfm['2MASS ID'][sfd]))

    # plot diff color-coded by Teff as f([M/H]), one panel per subsample
    fig,ax=plots.multi(2,2,hspace=0.001,wspace=0.001)
    xr=[-3.0,1.0]
    yr=[-400,300]
    zr=[3500,6000]
    bins=np.arange(-2.5,0.75,0.25)
    # SAGA
    plots.plotc(ax[0,0],allstar['FPARAM'][saga1,3],allstar['FPARAM'][saga1,0]-irfm['IRFM TEFF'][saga[saga2]],allstar['FPARAM'][saga1,0],zr=zr,xr=xr,yr=yr,xt='[M/H]',yt='ASPCAP-photometric Teff')
    mean=bindata(allstar['FPARAM'][saga1,3],allstar['FPARAM'][saga1,0]-irfm['IRFM TEFF'][saga[saga2]],bins)
    plots.plotp(ax[0,0],bins,mean,marker='o',size=40)
    ax[0,0].text(0.1,0.9,'SAGA',transform=ax[0,0].transAxes)
    # CL
    plots.plotc(ax[0,1],allstar['FPARAM'][cl1,3],allstar['FPARAM'][cl1,0]-irfm['IRFM TEFF'][cl[cl2]],allstar['FPARAM'][cl1,0],zr=zr,xr=xr,yr=yr,xt='[M/H]')
    mean=bindata(allstar['FPARAM'][cl1,3],(allstar['FPARAM'][cl1,0]-irfm['IRFM TEFF'][cl[cl2]]),bins)
    plots.plotp(ax[0,1],bins,mean,marker='o',size=40)
    ax[0,1].text(0.1,0.9,'CL',transform=ax[0,1].transAxes)
    # TH
    plots.plotc(ax[1,0],allstar['FPARAM'][th1,3],allstar['FPARAM'][th1,0]-irfm['IRFM TEFF'][th[th2]],allstar['FPARAM'][th1,0],zr=zr,xr=xr,yr=yr,xt='[M/H]',yt='ASPCAP-photometric Teff')
    mean=bindata(allstar['FPARAM'][th1,3],(allstar['FPARAM'][th1,0]-irfm['IRFM TEFF'][th[th2]]),bins)
    plots.plotp(ax[1,0],bins,mean,marker='o',size=40)
    ax[1,0].text(0.1,0.9,'TH',transform=ax[1,0].transAxes)
    # SFD
    plots.plotc(ax[1,1],allstar['FPARAM'][sfd1,3],allstar['FPARAM'][sfd1,0]-irfm['IRFM TEFF'][sfd[sfd2]],allstar['FPARAM'][sfd1,0],zr=zr,xr=xr,yr=yr,xt='[M/H]')
    mean=bindata(allstar['FPARAM'][sfd1,3],(allstar['FPARAM'][sfd1,0]-irfm['IRFM TEFF'][sfd[sfd2]]),bins)
    plots.plotp(ax[1,1],bins,mean,marker='o',size=40)
    ax[1,1].text(0.1,0.9,'SFD',transform=ax[1,1].transAxes)
    fig.savefig(out+'_mh.png')

    # same four panels, but diff color-coded by [M/H] as f(Teff)
    fig,ax=plots.multi(2,2,hspace=0.001,wspace=0.001)
    zr=[-2.0,0.5]
    yr=[-400,300]
    xr=[6000,3500]
    bins=np.arange(3500,5500,250)
    # SAGA
    plots.plotc(ax[0,0],allstar['FPARAM'][saga1,0],allstar['FPARAM'][saga1,0]-irfm['IRFM TEFF'][saga[saga2]],allstar['FPARAM'][saga1,3],zr=zr,xr=xr,yr=yr,xt='Teff',yt='ASPCAP-photometric Teff')
    mean=bindata(allstar['FPARAM'][saga1,0],(allstar['FPARAM'][saga1,0]-irfm['IRFM TEFF'][saga[saga2]]),bins)
    plots.plotp(ax[0,0],bins,mean,marker='o',size=40)
    ax[0,0].text(0.1,0.9,'SAGA',transform=ax[0,0].transAxes)
    # CL
    plots.plotc(ax[0,1],allstar['FPARAM'][cl1,0],allstar['FPARAM'][cl1,0]-irfm['IRFM TEFF'][cl[cl2]],allstar['FPARAM'][cl1,3],zr=zr,xr=xr,yr=yr,xt='Teff')
    mean=bindata(allstar['FPARAM'][cl1,0],(allstar['FPARAM'][cl1,0]-irfm['IRFM TEFF'][cl[cl2]]),bins)
    plots.plotp(ax[0,1],bins,mean,marker='o',size=40)
    ax[0,1].text(0.1,0.9,'CL',transform=ax[0,1].transAxes)
    # TH
    plots.plotc(ax[1,0],allstar['FPARAM'][th1,0],allstar['FPARAM'][th1,0]-irfm['IRFM TEFF'][th[th2]],allstar['FPARAM'][th1,3],zr=zr,xr=xr,yr=yr,xt='Teff',yt='ASPCAP-photometric Teff')
    mean=bindata(allstar['FPARAM'][th1,0],(allstar['FPARAM'][th1,0]-irfm['IRFM TEFF'][th[th2]]),bins)
    plots.plotp(ax[1,0],bins,mean,marker='o',size=40)
    ax[1,0].text(0.1,0.9,'TH',transform=ax[1,0].transAxes)
    # SFD
    plots.plotc(ax[1,1],allstar['FPARAM'][sfd1,0],allstar['FPARAM'][sfd1,0]-irfm['IRFM TEFF'][sfd[sfd2]],allstar['FPARAM'][sfd1,3],zr=zr,xr=xr,yr=yr,xt='Teff')
    mean=bindata(allstar['FPARAM'][sfd1,0],(allstar['FPARAM'][sfd1,0]-irfm['IRFM TEFF'][sfd[sfd2]]),bins)
    plots.plotp(ax[1,1],bins,mean,marker='o',size=40)
    ax[1,1].text(0.1,0.9,'SFD',transform=ax[1,1].transAxes)
    fig.savefig(out+'_teff.png')

    # do 2D fits with Teff and [M/H], and 1D fits with each
    # NOTE(review): these pass the axis as the first positional argument with
    # plot=True, unlike ghb() which passes plot=<axis> — confirm against the
    # fit.fit1d/fit2d signatures
    fig,ax=plots.multi(2,2,hspace=0.5,wspace=0.001)
    ax[0,1].xaxis.set_visible(False)
    ax[0,1].yaxis.set_visible(False)
    pfit = fit.fit2d(ax[0,0],allstar['FPARAM'][sfd1,3],allstar['FPARAM'][sfd1,0],allstar['FPARAM'][sfd1,0]-irfm['IRFM TEFF'][sfd[sfd2]],plot=True,zr=[-500,200],xt='[M/H]',yt=['Teff'],zt='$\Delta Teff$')
    pfit = fit.fit1d(ax[1,0],allstar['FPARAM'][sfd1,3],allstar['FPARAM'][sfd1,0]-irfm['IRFM TEFF'][sfd[sfd2]],ydata=allstar['FPARAM'][sfd1,0],plot=True,zr=[-500,200],xt='[M/H]',yt='$\Delta Teff$',xr=[-2.7,0.9],yr=[3500,5000])
    pfit = fit.fit1d(ax[1,1],allstar['FPARAM'][sfd1,0],allstar['FPARAM'][sfd1,0]-irfm['IRFM TEFF'][sfd[sfd2]],ydata=allstar['FPARAM'][sfd1,3],plot=True,zr=[-500,200],xt='Teff',xr=[3900,5100],yr=[-2.5,0.5])
    # removed leftover pdb.set_trace() debugger breakpoint that halted
    # non-interactive runs here
    return pfit
def cte_ghb(jk0,feh,dwarf=False) :
    """
    Gonzalez Hernandez & Bonifacio (2009) color-temperature relation,
    converting dereddened (J-K)_0 color and [Fe/H] into an effective
    temperature via the polynomial fit to theta = 5040/Teff.

    Args:
        jk0 : (J-K)_0 color (scalar or array)
        feh : metallicity [Fe/H] (scalar or array)
        dwarf (bool) : use the dwarf coefficients instead of the giant ones

    Returns:
        (Teff, dTeff/d(J-K)) : temperature and its derivative with respect
        to color (useful for propagating photometric errors)
    """
    if dwarf :
        # dwarf coefficients
        b0, b1, b2, b3, b4, b5 = 0.6524, 0.5813, 0.1225, -0.0646, 0.0370, 0.0016
    else :
        # giant coefficients
        b0, b1, b2, b3, b4, b5 = 0.6517, 0.6312, 0.0168, -0.0381, 0.0256, 0.0013
    theta = b0 + b1*jk0 + b2*jk0**2 + b3*jk0*feh + b4*feh + b5*feh**2
    # chain rule: dTeff/d(J-K) = -5040/theta^2 * dtheta/d(J-K)
    dtheta = b1 + 2*b2*jk0 + b3*feh
    dteff_djk = -5040./theta**2*dtheta
    return 5040./theta, dteff_djk
def cal(a,caldir='cal/'):
    """ Apply Teff calibration

        Populates a['PARAM'][:,0] in place from a['FPARAM'][:,0] using the
        2D ([M/H], Teff) calibration stored in caldir+'/tecal.fits', and
        sets/clears the CALRANGE_BAD bit in a['PARAMFLAG'][:,0].

        Args:
            a : allStar-like structure, modified in place
            caldir (str) : directory containing tecal.fits, or 'none' to
                           copy FPARAM to PARAM with no correction
    """
    aspcapmask=bitmask.AspcapBitMask()
    parammask=bitmask.ParamBitMask()
    starmask=bitmask.StarBitMask()
    #populate PARAM[0] for stars w/o STAR_BAD (change to ALL with >=0)
    # NOTE(review): the first gd assignment below is immediately overwritten
    # by the second, so only the NO_ASPCAP_RESULT selection takes effect
    gd=np.where( ((a['ASPCAPFLAG']&aspcapmask.badval()) >= 0) )[0]
    gd=np.where( ((a['ASPCAPFLAG']&aspcapmask.getval('NO_ASPCAP_RESULT')) == 0) )[0]
    #initial values: everything NaN and flagged bad until calibrated below
    a['PARAM'][:,0] = np.nan
    a['PARAMFLAG'][gd,0] |= parammask.getval('CALRANGE_BAD')
    if caldir == 'none' :
        # no calibration requested: pass raw values through unchanged
        a['PARAM'][gd,0] = a['FPARAM'][gd,0]
        a['PARAMFLAG'][gd,0] &= ~parammask.getval('CALRANGE_BAD')
        return

    # load calibration coefficients and clip inputs to their fitted ranges
    calpars=fits.open(caldir+'/tecal.fits')[1].data[0]
    calteffmin=calpars['caltemin']
    calteffmax=calpars['caltemax']
    teff=np.clip(a['FPARAM'][gd,0],calpars['temin'],calpars['temax'])
    mh=np.clip(a['FPARAM'][gd,3],calpars['mhmin'],calpars['mhmax'])
    try: snr=np.clip(a['SNREV'][gd],0,200.)
    except:
        # interactive fallback when SNREV is missing (drops into the debugger)
        print('No SNREV, continnue with SNR?')
        pdb.set_trace()
        snr=np.clip(a['SNR'][gd],0,200.)
    # apply the 2D ([M/H], Teff) correction within the valid Teff range
    ok =np.where((a['FPARAM'][gd,0] >= calteffmin) & (a['FPARAM'][gd,0] <= calteffmax) )[0]
    a['PARAM'][gd[ok],0] = a['FPARAM'][gd[ok],0] - (calpars['par2d'][0]+calpars['par2d'][1]*mh[ok]+calpars['par2d'][2]*teff[ok])
    # populate uncertainties with err.apply()
    #a['PARAM_COV'][gd[ok],0,0] = err.elemerr(calpars['errpar'],a['FPARAM'][gd[ok],0]-4500.,snr[ok]-100.,a['FPARAM'][gd[ok],3])**2
    a['PARAMFLAG'][gd[ok],0] &= ~parammask.getval('CALRANGE_BAD')
    return
| 46.26615 | 225 | 0.628093 |
from apogee.utils import apload
from apogee.utils import apselect
from astropy.io import fits, ascii
from tools import match
from tools import plots
from tools import fit
from apogee.utils import bitmask
from apogee.aspcap import err
import pdb
import matplotlib.pyplot as plt
import numpy as np
import os
import matplotlib
def bindata(xdata,ydata,bins,median=True) :
    """Compute the median (or mean) of ydata within bins of xdata.

    Points falling strictly between consecutive bin edges are grouped.
    The returned array has the same length as bins; its last element is
    never filled (there are only len(bins)-1 intervals) and stays 0.

    Args:
        xdata : values used to assign points to bins
        ydata : values to be averaged per bin
        bins : array of bin edges
        median : if True use np.median, otherwise the mean

    Returns:
        array of per-bin statistics, same length as bins
    """
    stat = bins * 0.
    for idx, (lo, hi) in enumerate(zip(bins[:-1], bins[1:])):
        sel = np.where((xdata > lo) & (xdata < hi))[0]
        stat[idx] = np.median(ydata[sel]) if median else ydata[sel].mean()
    return stat
def ghb(allstar,glatmin=30.,ebvmax=0.03,trange=[3750,5500],loggrange=[-1,6],mhrange=[-2.5,0.75],alpha=False,out='teffcomp',yr=[-500,500],
        calib=False,dr13=False,grid=None,cmap='rainbow',doerr=True) :
    """Compare ASPCAP temperatures with photometric (GHB) temperatures.

    Selects low-extinction, high-latitude stars, computes photometric Teff
    from (J-K) with the Gonzalez Hernandez & Bonifacio relation, produces a
    series of comparison plots, fits the ASPCAP-photometric difference as a
    function of [M/H] (1D) and of ([M/H], Teff) (2D), and returns the
    calibration parameters.

    Args:
        allstar : structured array of ASPCAP results
        glatmin : minimum |Galactic latitude| for the comparison sample
        ebvmax : maximum SFD E(B-V) for the comparison sample
        trange, loggrange, mhrange : parameter selection ranges
        alpha : color-code / split sample by [alpha/M]
        out : root name for output plot files
        calib / grid : select PARAM, FPARAM, or FPARAM_CLASS[grid] values
        dr13 : overplot the DR13 relation instead of the new fit
        doerr : also fit the uncertainty model

    Returns:
        dict with calibration ranges, 1D/2D fit parameters, rms, and error fit
    """
    badtarg=['YOUNG','EMBEDDED','EXTENDED','M31','M33','EMISSION','RRLYR','DSPH','MAGCLOUD']
    if calib : param='PARAM'
    else : param = 'FPARAM'
    # comparison against the Berger et al. temperature catalog
    berger=fits.open(os.environ['APOGEE_DIR']+'/data/calib/teff_berger.fits')[1].data
    gd=apselect.select(allstar,badval=['STAR_BAD'],badstar=['MULTIPLE_SUSPECT'],badtarg=badtarg,raw=True)
    i1,i2=match.match(allstar['APOGEE_ID'][gd],berger['APOGEE_ID'])
    fig,ax=plots.multi(1,1,figsize=(12,6))
    plots.plotc(ax,allstar[param][gd[i1],3],allstar[param][gd[i1],0]-berger['TEFF'][i2],allstar[param][gd[i1],0],
                xt='[M/H]',yt='ASPCAP-Berger',zt='Teff',xr=[-3,1],yr=[-500,500],zr=[4500,7000],colorbar=True)
    ax.grid()
    fig.savefig(out+'_berger_mh.png')
    plt.close()
    fig,ax=plots.multi(1,1,figsize=(12,6))
    plots.plotc(ax,allstar[param][gd[i1],0],allstar[param][gd[i1],1],allstar[param][gd[i1],0]-berger['TEFF'][i2],
                xt='Teff',yt='log ',zt='ASPCAP-Berger',xr=[8000,3000],yr=[6,-1],zr=[-250,250],colorbar=True)
    ax.grid()
    fig.savefig(out+'_berger_hr.png')
    plt.close()
    # main calibration sample: parameter cuts plus latitude/extinction cuts
    gd=apselect.select(allstar,badval=['STAR_BAD'],badstar=['MULTIPLE_SUSPECT'],badtarg=badtarg,teff=trange,mh=mhrange,logg=loggrange,raw=True)
    allstar=allstar[gd]
    j=np.where((abs(allstar['GLAT'])>glatmin)&(allstar['SFD_EBV']>-0.01)&(allstar['SFD_EBV']<ebvmax)&(abs(allstar['J'])<90)&(abs(allstar['K'])<90))[0]
    # remove globular-cluster stars not in population 1
    gcstars = ascii.read(os.environ['APOGEE_DIR']+'/data/calib/gc_szabolcs.dat')
    bd=np.where(gcstars['pop'] != 1)[0]
    j = [x for x in j if allstar[x]['APOGEE_ID'] not in gcstars['id'][bd]]
    allstar=allstar[j]
    # photometric temperatures: giant relation, then dwarf relation for logg > 3.8
    ghb,dtdjk=cte_ghb(allstar['J']-allstar['K'],allstar['FPARAM'][:,3],dwarf=False)
    ghb_dwarf,dtdjk_dwarf=cte_ghb(allstar['J']-allstar['K'],allstar['FPARAM'][:,3],dwarf=True)
    dw=np.where(allstar['FPARAM'][:,1] > 3.8)[0]
    ghb[dw]=ghb_dwarf[dw]
    dtdjk[dw]=dtdjk_dwarf[dw]
    # reject outliers with |ASPCAP - photometric| >= 500 K
    gd=np.where(abs(allstar['FPARAM'][:,0]-ghb) < 500)[0]
    ghb=ghb[gd]
    dtdjk=dtdjk[gd]
    allstar=allstar[gd]
    print('Teff calibration, number of stars: ', len(allstar))
    # choose which parameter array to calibrate against
    if calib :
        param='PARAM'
        teff=allstar[param][:,0]
        logg=allstar[param][:,1]
        mh=allstar[param][:,3]
        am=allstar[param][:,6]
    elif grid is None :
        param='FPARAM'
        teff=allstar[param][:,0]
        logg=allstar[param][:,1]
        mh=allstar[param][:,3]
        am=allstar[param][:,6]
    else :
        param='FPARAM_CLASS'
        teff=allstar[param][:,grid,0]
        logg=allstar[param][:,grid,1]
        mh=allstar[param][:,grid,3]
        am=allstar[param][:,grid,6]
        out=out+'_grid{:1d}'.format(grid)
    fig,ax=plots.multi(1,1,figsize=(12,6))
    plots.plotc(ax,teff,logg,teff-ghb, xt='Teff',yt='log ',zt='ASPCAP-GHB',xr=[8000,3000],yr=[6,-1],zr=[-250,250],colorbar=True)
    ax.grid()
    fig.savefig(out+'_ghb_hr.png')
    plt.close()
    # difference vs [M/H], with binned means/medians overplotted
    fig,ax=plots.multi(1,1,hspace=0.001,wspace=0.001,figsize=(12,6))
    xr=[-3.0,1.0]
    zr=trange
    if dr13: zr=[3500,5500]
    binsize=0.25
    bins=np.arange(-2.5,0.75,binsize)
    if alpha :
        plots.plotc(ax,mh,teff-ghb,am,zr=[-0.1,0.4],xr=xr,yr=yr,xt='[M/H]',yt='ASPCAP-photometric Teff',
                    colorbar=True,zt=r'[$\alpha$/M]',rasterized=True,cmap=cmap)
    else :
        plots.plotc(ax,mh,teff-ghb,teff,xr=xr,yr=yr,xt='[M/H]',yt='ASPCAP-photometric Teff',
                    colorbar=True,zt='$T_{eff}$',rasterized=True,zr=trange,cmap=cmap)
    ax.grid()
    mean=bindata(mh,teff-ghb,bins,median=False)
    if not dr13: plots.plotp(ax,bins+binsize/2.,mean,marker='o',size=40)
    mean=bindata(mh,teff-ghb,bins,median=True)
    if not dr13: plots.plotp(ax,bins+binsize/2.,mean,marker='o',size=40,color='b')
    ax.text(0.1,0.9,'E(B-V)<{:6.2f}'.format(ebvmax),transform=ax.transAxes)
    # 1D fit to the binned medians, 1D fit to all points, and 2D fit in ([M/H], Teff)
    gd=np.where(np.isfinite(mean))[0]
    tefit = fit.fit1d(bins[gd]+binsize/2.,mean[gd],degree=2,reject=0)
    allfit = fit.fit1d(mh,teff-ghb,ydata=teff,degree=2,reject=0)
    fig2,ax2=plots.multi(1,1)
    tefit2 = fit.fit2d(mh,teff,teff-ghb,reject=0,plot=ax2,zr=[-500,200],xt='[M/H]',yt=['Teff'],zt='$\Delta Teff$')
    if doerr:
        errpar = err.errfit(teff,allstar['SNR'],mh,teff-tefit(mh)-ghb,title='Teff',out=out,zr=[0,150])
    else: errpar=0.
    x=np.linspace(-3,1,200)
    rms = (teff-tefit(mh)-ghb).std()
    if dr13:
        # overplot the hard-coded DR13 relation instead of the new fit
        plots.plotl(ax,x,-36.17+95.97*x-15.09*x**2,color='k')
        print(allfit)
    else :
        plots.plotl(ax,x,tefit(x),color='k')
    ax.text(0.98,0.9,'rms: {:6.1f}'.format(rms),transform=ax.transAxes,ha='right')
    # draw the 2D fit evaluated at a range of fixed temperatures
    cmap = matplotlib.cm.get_cmap(cmap)
    for t in np.arange(trange[0],trange[1],500.) :
        rgba=cmap((t-trange[0])/(trange[1]-trange[0]))
        y=x*0.+t
        plots.plotl(ax,x,tefit2(x,y),color=rgba)
    # stash data for interactive point identification in the plots module
    plots._data_x = mh
    plots._data_y = teff-ghb
    plots._data = allstar
    plots.event(fig)
    if alpha :
        # separate binned fits for low- and high-alpha subsamples
        gdlo=apselect.select(allstar,badval=['STAR_BAD'],teff=trange,mh=mhrange,logg=[0,3.8],alpha=[-0.1,0.1],raw=True)
        mean=bindata(mh[gdlo],teff[gdlo]-ghb[gdlo],bins)
        plots.plotp(ax,bins,mean,marker='o',size=40,color='g')
        tmpfit = fit.fit1d(mh[gdlo],teff[gdlo]-ghb[gdlo],ydata=teff[gdlo],degree=2)
        plots.plotl(ax,x,tmpfit(x))
        print('low alpha: ', len(gdlo))
        gdhi=apselect.select(allstar,badval=['STAR_BAD'],teff=trange,mh=mhrange,logg=[0,3.8],alpha=[0.1,0.5],raw=True)
        mean=bindata(mh[gdhi],teff[gdhi]-ghb[gdhi],bins)
        plots.plotp(ax,bins,mean,marker='o',size=40,color='b')
        tmpfit = fit.fit1d(mh[gdhi],teff[gdhi]-ghb[gdhi],ydata=teff[gdhi],degree=2)
        plots.plotl(ax,x,tmpfit(x))
        print('hi alpha: ', len(gdhi))
    fig.tight_layout()
    fig.savefig(out+'.png')
    plt.close()
    plt.rc('font',size=14)
    plt.rc('axes',titlesize=14)
    plt.rc('axes',labelsize=14)
    fig.savefig(out+'.pdf')
    plt.close()
    try:
        meanfib=allstar['MEANFIB']
    except:
        meanfib=teff*0.
    # diagnostic 2x2 panel: difference vs logg, mean fiber, and 1D fits
    fig,ax=plots.multi(2,2,hspace=0.001,wspace=0.001,figsize=(12,8))
    plots.plotc(ax[0,0],mh,teff-ghb,logg,zr=[0,5],xr=xr,yr=yr,xt='[M/H]',yt='ASPCAP-photometric Teff',colorbar=True,zt='log g',size=2)
    plots.plotc(ax[0,1],mh,teff-ghb,meanfib,zr=[0,300],xr=xr,yr=yr,xt='[M/H]',yt='ASPCAP-photometric Teff',colorbar=True,zt='mean fiber',size=2)
    pfit = fit.fit1d(mh,teff-ghb,ydata=teff,plot=ax[1,0],zr=[-500,200],xt='[M/H]',yt='$\Delta Teff$',xr=[-2.7,0.9],yr=[3500,5000],colorbar=True,zt='Teff')
    pfit = fit.fit1d(teff,teff-ghb,ydata=mh,plot=ax[1,1],zr=[-500,200],xt='Teff',yt='$\Delta Teff$',xr=trange,yr=[-2.5,0.5],colorbar=True,zt='[M/H]')
    fig.tight_layout()
    fig.savefig(out+'_b.png')
    plt.close()
    plt.draw()
    return {'caltemin': 3000., 'caltemax': 100000., 'temin' : trange[0], 'temax': trange[1],
            'mhmin': mhrange[0], 'mhmax' : mhrange[1],
            'par': tefit.parameters, 'rms' :rms, 'par2d': tefit2.parameters, 'errpar' : errpar}
def irfm(allstar,trange=[4000,5000],mhrange=[-2.5,0.75],out='dteff') :
    """Compare ASPCAP temperatures with IRFM temperatures.

    Matches stars against the SAGA, CL, TH, and SFD entries of the IRFM
    temperature compilation, plots the ASPCAP-IRFM differences vs [M/H] and
    vs Teff for each source, then fits the SFD differences in 1D and 2D.

    Args:
        allstar : structured array of ASPCAP results
        trange, mhrange : selection ranges for the comparison sample
        out : root name for output plot files

    Returns:
        the last 1D fit (note: drops into pdb before returning)
    """
    gd=apselect.select(allstar,badval=['STAR_BAD'],teff=trange,mh=mhrange,raw=True)
    allstar=allstar[gd]
    # match sample to each IRFM source by 2MASS ID
    irfm=fits.open(os.environ['APOGEE_DIR']+'/data/calib/irfm_temp.fits')[1].data
    saga=np.where(irfm['SOURCE'] == 'SAGA')[0]
    saga1,saga2=match.match(np.chararray.strip(allstar['APOGEE_ID']),np.chararray.strip(irfm['2MASS ID'][saga]))
    cl=np.where(irfm['SOURCE'] == 'CL')[0]
    cl1,cl2=match.match(np.chararray.strip(allstar['APOGEE_ID']),np.chararray.strip(irfm['2MASS ID'][cl]))
    th=np.where(irfm['SOURCE'] == 'TH')[0]
    th1,th2=match.match(np.chararray.strip(allstar['APOGEE_ID']),np.chararray.strip(irfm['2MASS ID'][th]))
    sfd=np.where(irfm['SOURCE'] == 'SFD')[0]
    sfd1,sfd2=match.match(np.chararray.strip(allstar['APOGEE_ID']),np.chararray.strip(irfm['2MASS ID'][sfd]))
    # 2x2 panel: ASPCAP-IRFM difference vs [M/H], one panel per source
    fig,ax=plots.multi(2,2,hspace=0.001,wspace=0.001)
    xr=[-3.0,1.0]
    yr=[-400,300]
    zr=[3500,6000]
    bins=np.arange(-2.5,0.75,0.25)
    plots.plotc(ax[0,0],allstar['FPARAM'][saga1,3],allstar['FPARAM'][saga1,0]-irfm['IRFM TEFF'][saga[saga2]],allstar['FPARAM'][saga1,0],zr=zr,xr=xr,yr=yr,xt='[M/H]',yt='ASPCAP-photometric Teff')
    mean=bindata(allstar['FPARAM'][saga1,3],allstar['FPARAM'][saga1,0]-irfm['IRFM TEFF'][saga[saga2]],bins)
    plots.plotp(ax[0,0],bins,mean,marker='o',size=40)
    ax[0,0].text(0.1,0.9,'SAGA',transform=ax[0,0].transAxes)
    plots.plotc(ax[0,1],allstar['FPARAM'][cl1,3],allstar['FPARAM'][cl1,0]-irfm['IRFM TEFF'][cl[cl2]],allstar['FPARAM'][cl1,0],zr=zr,xr=xr,yr=yr,xt='[M/H]')
    mean=bindata(allstar['FPARAM'][cl1,3],(allstar['FPARAM'][cl1,0]-irfm['IRFM TEFF'][cl[cl2]]),bins)
    plots.plotp(ax[0,1],bins,mean,marker='o',size=40)
    ax[0,1].text(0.1,0.9,'CL',transform=ax[0,1].transAxes)
    plots.plotc(ax[1,0],allstar['FPARAM'][th1,3],allstar['FPARAM'][th1,0]-irfm['IRFM TEFF'][th[th2]],allstar['FPARAM'][th1,0],zr=zr,xr=xr,yr=yr,xt='[M/H]',yt='ASPCAP-photometric Teff')
    mean=bindata(allstar['FPARAM'][th1,3],(allstar['FPARAM'][th1,0]-irfm['IRFM TEFF'][th[th2]]),bins)
    plots.plotp(ax[1,0],bins,mean,marker='o',size=40)
    ax[1,0].text(0.1,0.9,'TH',transform=ax[1,0].transAxes)
    plots.plotc(ax[1,1],allstar['FPARAM'][sfd1,3],allstar['FPARAM'][sfd1,0]-irfm['IRFM TEFF'][sfd[sfd2]],allstar['FPARAM'][sfd1,0],zr=zr,xr=xr,yr=yr,xt='[M/H]')
    mean=bindata(allstar['FPARAM'][sfd1,3],(allstar['FPARAM'][sfd1,0]-irfm['IRFM TEFF'][sfd[sfd2]]),bins)
    plots.plotp(ax[1,1],bins,mean,marker='o',size=40)
    ax[1,1].text(0.1,0.9,'SFD',transform=ax[1,1].transAxes)
    fig.savefig(out+'_mh.png')
    # same differences plotted against Teff
    fig,ax=plots.multi(2,2,hspace=0.001,wspace=0.001)
    zr=[-2.0,0.5]
    yr=[-400,300]
    xr=[6000,3500]
    bins=np.arange(3500,5500,250)
    plots.plotc(ax[0,0],allstar['FPARAM'][saga1,0],allstar['FPARAM'][saga1,0]-irfm['IRFM TEFF'][saga[saga2]],allstar['FPARAM'][saga1,3],zr=zr,xr=xr,yr=yr,xt='Teff',yt='ASPCAP-photometric Teff')
    mean=bindata(allstar['FPARAM'][saga1,0],(allstar['FPARAM'][saga1,0]-irfm['IRFM TEFF'][saga[saga2]]),bins)
    plots.plotp(ax[0,0],bins,mean,marker='o',size=40)
    ax[0,0].text(0.1,0.9,'SAGA',transform=ax[0,0].transAxes)
    plots.plotc(ax[0,1],allstar['FPARAM'][cl1,0],allstar['FPARAM'][cl1,0]-irfm['IRFM TEFF'][cl[cl2]],allstar['FPARAM'][cl1,3],zr=zr,xr=xr,yr=yr,xt='Teff')
    mean=bindata(allstar['FPARAM'][cl1,0],(allstar['FPARAM'][cl1,0]-irfm['IRFM TEFF'][cl[cl2]]),bins)
    plots.plotp(ax[0,1],bins,mean,marker='o',size=40)
    ax[0,1].text(0.1,0.9,'CL',transform=ax[0,1].transAxes)
    plots.plotc(ax[1,0],allstar['FPARAM'][th1,0],allstar['FPARAM'][th1,0]-irfm['IRFM TEFF'][th[th2]],allstar['FPARAM'][th1,3],zr=zr,xr=xr,yr=yr,xt='Teff',yt='ASPCAP-photometric Teff')
    mean=bindata(allstar['FPARAM'][th1,0],(allstar['FPARAM'][th1,0]-irfm['IRFM TEFF'][th[th2]]),bins)
    plots.plotp(ax[1,0],bins,mean,marker='o',size=40)
    ax[1,0].text(0.1,0.9,'TH',transform=ax[1,0].transAxes)
    plots.plotc(ax[1,1],allstar['FPARAM'][sfd1,0],allstar['FPARAM'][sfd1,0]-irfm['IRFM TEFF'][sfd[sfd2]],allstar['FPARAM'][sfd1,3],zr=zr,xr=xr,yr=yr,xt='Teff')
    mean=bindata(allstar['FPARAM'][sfd1,0],(allstar['FPARAM'][sfd1,0]-irfm['IRFM TEFF'][sfd[sfd2]]),bins)
    plots.plotp(ax[1,1],bins,mean,marker='o',size=40)
    ax[1,1].text(0.1,0.9,'SFD',transform=ax[1,1].transAxes)
    fig.savefig(out+'_teff.png')
    # do 2D fits with Teff and [M/H], and 1D fits with each (SFD sample)
    fig,ax=plots.multi(2,2,hspace=0.5,wspace=0.001)
    ax[0,1].xaxis.set_visible(False)
    ax[0,1].yaxis.set_visible(False)
    pfit = fit.fit2d(ax[0,0],allstar['FPARAM'][sfd1,3],allstar['FPARAM'][sfd1,0],allstar['FPARAM'][sfd1,0]-irfm['IRFM TEFF'][sfd[sfd2]],plot=True,zr=[-500,200],xt='[M/H]',yt=['Teff'],zt='$\Delta Teff$')
    pfit = fit.fit1d(ax[1,0],allstar['FPARAM'][sfd1,3],allstar['FPARAM'][sfd1,0]-irfm['IRFM TEFF'][sfd[sfd2]],ydata=allstar['FPARAM'][sfd1,0],plot=True,zr=[-500,200],xt='[M/H]',yt='$\Delta Teff$',xr=[-2.7,0.9],yr=[3500,5000])
    pfit = fit.fit1d(ax[1,1],allstar['FPARAM'][sfd1,0],allstar['FPARAM'][sfd1,0]-irfm['IRFM TEFF'][sfd[sfd2]],ydata=allstar['FPARAM'][sfd1,3],plot=True,zr=[-500,200],xt='Teff',xr=[3900,5100],yr=[-2.5,0.5])
    pdb.set_trace()
    return pfit
def cte_ghb(jk0,feh,dwarf=False) :
    """
    Color-temperature relation from Gonzalez Hernandez & Bonifacio (2009): (J-K)_0 - Teff

    Returns (Teff, dTeff/d(J-K)); theta = 5040/Teff is a polynomial in
    color and metallicity, with separate dwarf and giant coefficients.
    """
    if dwarf :
        b0=0.6524 ; b1=0.5813 ; b2=0.1225 ; b3=-0.0646 ; b4=0.0370 ; b5=0.0016  # dwarfs
    else :
        b0=0.6517 ; b1=0.6312 ; b2=0.0168 ; b3=-0.0381 ; b4=0.0256 ; b5=0.0013  # giants
    theta=b0+b1*jk0+b2*jk0**2+b3*jk0*feh+b4*feh+b5*feh**2
    # chain rule: Teff = 5040/theta  =>  dTeff/d(J-K) = -5040/theta^2 * dtheta/d(J-K)
    dtheta_djk = b1+2*b2*jk0+b3*feh
    dt_djk= -5040./theta**2*dtheta_djk
    return 5040./theta, dt_djk
def cal(a,caldir='cal/'):
    """Apply the Teff calibration from caldir/tecal.fits to a['PARAM'][:,0].

    With caldir='none' the raw FPARAM temperatures are copied through.
    Modifies a in place (PARAM and PARAMFLAG).
    """
    aspcapmask=bitmask.AspcapBitMask()
    parammask=bitmask.ParamBitMask()
    # NOTE(review): starmask is never used in this routine
    starmask=bitmask.StarBitMask()
    # NOTE(review): this first selection (>= 0) matches every star and is
    # immediately overwritten by the NO_ASPCAP_RESULT selection below
    gd=np.where( ((a['ASPCAPFLAG']&aspcapmask.badval()) >= 0) )[0]
    gd=np.where( ((a['ASPCAPFLAG']&aspcapmask.getval('NO_ASPCAP_RESULT')) == 0) )[0]
    # initialize: NaN temperature and CALRANGE_BAD until a calibrated value is set
    a['PARAM'][:,0] = np.nan
    a['PARAMFLAG'][gd,0] |= parammask.getval('CALRANGE_BAD')
    if caldir == 'none' :
        # no calibration requested: copy raw FPARAM through and clear the flag
        a['PARAM'][gd,0] = a['FPARAM'][gd,0]
        a['PARAMFLAG'][gd,0] &= ~parammask.getval('CALRANGE_BAD')
        return
    calpars=fits.open(caldir+'/tecal.fits')[1].data[0]
    # range of raw Teff over which the calibration may be applied
    calteffmin=calpars['caltemin']
    calteffmax=calpars['caltemax']
    # clip inputs to the range over which the calibration fit was derived
    teff=np.clip(a['FPARAM'][gd,0],calpars['temin'],calpars['temax'])
    mh=np.clip(a['FPARAM'][gd,3],calpars['mhmin'],calpars['mhmax'])
    try: snr=np.clip(a['SNREV'][gd],0,200.)
    except:
        print('No SNREV, continnue with SNR?')
        pdb.set_trace()
        snr=np.clip(a['SNR'][gd],0,200.)
    ok =np.where((a['FPARAM'][gd,0] >= calteffmin) & (a['FPARAM'][gd,0] <= calteffmax) )[0]
    # subtract the 2D fit: constant + [M/H] term + Teff term
    a['PARAM'][gd[ok],0] = a['FPARAM'][gd[ok],0] - (calpars['par2d'][0]+calpars['par2d'][1]*mh[ok]+calpars['par2d'][2]*teff[ok])
    a['PARAMFLAG'][gd[ok],0] &= ~parammask.getval('CALRANGE_BAD')
    return
| true | true |
f731c428457d805737a8bac6b920e623e8b9e75c | 34,668 | py | Python | superset/security/manager.py | Visortech-Solutions/incubator-superset | 4b33597e521e07d1ec74cdbda761e103814f60a2 | [
"Apache-2.0"
] | 1 | 2020-08-31T17:22:25.000Z | 2020-08-31T17:22:25.000Z | superset/security/manager.py | Visortech-Solutions/incubator-superset | 4b33597e521e07d1ec74cdbda761e103814f60a2 | [
"Apache-2.0"
] | 1 | 2020-08-02T04:42:57.000Z | 2020-08-02T04:42:57.000Z | superset/security/manager.py | Visortech-Solutions/incubator-superset | 4b33597e521e07d1ec74cdbda761e103814f60a2 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=too-few-public-methods
"""A set of constants and methods to manage permissions and security"""
import logging
from typing import Any, Callable, cast, List, Optional, Set, Tuple, TYPE_CHECKING, Union
from flask import current_app, g
from flask_appbuilder import Model
from flask_appbuilder.security.sqla.manager import SecurityManager
from flask_appbuilder.security.sqla.models import (
assoc_permissionview_role,
assoc_user_role,
PermissionView,
)
from flask_appbuilder.security.views import (
PermissionModelView,
PermissionViewModelView,
RoleModelView,
UserModelView,
ViewMenuModelView,
)
from flask_appbuilder.widgets import ListWidget
from sqlalchemy import or_
from sqlalchemy.engine.base import Connection
from sqlalchemy.orm.mapper import Mapper
from sqlalchemy.orm.query import Query as SqlaQuery
from superset import sql_parse
from superset.connectors.connector_registry import ConnectorRegistry
from superset.constants import RouteMethod
from superset.errors import ErrorLevel, SupersetError, SupersetErrorType
from superset.exceptions import SupersetSecurityException
from superset.utils.core import DatasourceName
if TYPE_CHECKING:
from superset.common.query_context import QueryContext
from superset.connectors.base.models import BaseDatasource
from superset.connectors.druid.models import DruidCluster
from superset.models.core import Database
from superset.models.sql_lab import Query
from superset.sql_parse import Table
from superset.viz import BaseViz
logger = logging.getLogger(__name__)
class SupersetSecurityListWidget(ListWidget):
    """
    Redeclaring to avoid circular imports
    """
    # Superset-styled override of the default FAB list template
    template = "superset/fab_overrides/list.html"
class SupersetRoleListWidget(ListWidget):
    """
    Role model view from FAB already uses a custom list widget override
    So we override the override
    """
    template = "superset/fab_overrides/list_role.html"
    def __init__(self, **kwargs: Any) -> None:
        # inject the current appbuilder instance so it is available to the widget
        kwargs["appbuilder"] = current_app.appbuilder
        super().__init__(**kwargs)
# Use the Superset-themed list widgets on the FAB security model views
UserModelView.list_widget = SupersetSecurityListWidget
RoleModelView.list_widget = SupersetRoleListWidget
PermissionViewModelView.list_widget = SupersetSecurityListWidget
PermissionModelView.list_widget = SupersetSecurityListWidget
# Limiting routes on FAB model views
UserModelView.include_route_methods = RouteMethod.CRUD_SET | {
    RouteMethod.ACTION,
    RouteMethod.API_READ,
    RouteMethod.ACTION_POST,
    "userinfo",
}
RoleModelView.include_route_methods = RouteMethod.CRUD_SET
PermissionViewModelView.include_route_methods = {RouteMethod.LIST}
PermissionModelView.include_route_methods = {RouteMethod.LIST}
ViewMenuModelView.include_route_methods = {RouteMethod.LIST}
# Slim down the role CRUD screens to the essentials
RoleModelView.list_columns = ["name"]
RoleModelView.edit_columns = ["name", "permissions", "user"]
RoleModelView.related_views = []
class SupersetSecurityManager(  # pylint: disable=too-many-public-methods
    SecurityManager
):
    """Superset-specific security manager extending FAB's SecurityManager."""
    userstatschartview = None
    READ_ONLY_MODEL_VIEWS = {"DatabaseAsync", "DatabaseView", "DruidClusterModelView"}
    # all FAB user model views (one per auth backend)
    USER_MODEL_VIEWS = {
        "UserDBModelView",
        "UserLDAPModelView",
        "UserOAuthModelView",
        "UserOIDModelView",
        "UserRemoteUserModelView",
    }
    # model views that Gamma users may only read
    GAMMA_READ_ONLY_MODEL_VIEWS = {
        "SqlMetricInlineView",
        "TableColumnInlineView",
        "TableModelView",
        "DruidColumnInlineView",
        "DruidDatasourceModelView",
        "DruidMetricInlineView",
        "Datasource",
    } | READ_ONLY_MODEL_VIEWS
    # view menus restricted to the Admin role
    ADMIN_ONLY_VIEW_MENUS = {
        "AccessRequestsModelView",
        "SQL Lab",
        "Refresh Druid Metadata",
        "ResetPasswordView",
        "RoleModelView",
        "LogModelView",
        "Security",
        "RowLevelSecurityFiltersModelView",
    } | USER_MODEL_VIEWS
    ALPHA_ONLY_VIEW_MENUS = {
        "Manage",
        "CSS Templates",
        "Queries",
        "Import dashboards",
        "Upload a CSV",
    }
    ADMIN_ONLY_PERMISSIONS = {
        "can_sql_json",  # TODO: move can_sql_json to sql_lab role
        "can_override_role_permissions",
        "can_sync_druid_source",
        "can_override_role_permissions",
        "can_approve",
        "can_update_role",
        "all_query_access",
    }
    READ_ONLY_PERMISSION = {"can_show", "can_list", "can_get", "can_external_metadata"}
    ALPHA_ONLY_PERMISSIONS = {
        "muldelete",
        "all_database_access",
        "all_datasource_access",
    }
    # permission names that are granted per object (database/schema/datasource/metric)
    OBJECT_SPEC_PERMISSIONS = {
        "database_access",
        "schema_access",
        "datasource_access",
        "metric_access",
    }
    # permissions accessible to every authenticated role
    ACCESSIBLE_PERMS = {"can_userinfo"}
def get_schema_perm( # pylint: disable=no-self-use
self, database: Union["Database", str], schema: Optional[str] = None
) -> Optional[str]:
"""
Return the database specific schema permission.
:param database: The Superset database or database name
:param schema: The Superset schema name
:return: The database specific schema permission
"""
if schema:
return f"[{database}].[{schema}]"
return None
def unpack_schema_perm( # pylint: disable=no-self-use
self, schema_permission: str
) -> Tuple[str, str]:
# [database_name].[schema_name]
schema_name = schema_permission.split(".")[1][1:-1]
database_name = schema_permission.split(".")[0][1:-1]
return database_name, schema_name
    def can_access(self, permission_name: str, view_name: str) -> bool:
        """
        Return True if the user can access the FAB permission/view, False otherwise.
        Note this method adds protection from has_access failing from missing
        permission/view entries.
        :param permission_name: The FAB permission name
        :param view_name: The FAB view-menu name
        :returns: Whether the user can access the FAB permission/view
        """
        user = g.user
        if user.is_anonymous:
            # anonymous users only get what FAB exposes as public
            return self.is_item_public(permission_name, view_name)
        return self._has_view_access(user, permission_name, view_name)
    def can_access_all_queries(self) -> bool:
        """
        Return True if the user can access all SQL Lab queries, False otherwise.
        :returns: Whether the user can access all queries
        """
        # self-referential permission/view-menu pair (see create_custom_permissions)
        return self.can_access("all_query_access", "all_query_access")
    def can_access_all_datasources(self) -> bool:
        """
        Return True if the user can fully access all the Superset datasources, False
        otherwise.
        :returns: Whether the user can fully access all Superset datasources
        """
        # self-referential permission/view-menu pair (see create_custom_permissions)
        return self.can_access("all_datasource_access", "all_datasource_access")
    def can_access_all_databases(self) -> bool:
        """
        Return True if the user can fully access all the Superset databases, False
        otherwise.
        :returns: Whether the user can fully access all Superset databases
        """
        # self-referential permission/view-menu pair (see create_custom_permissions)
        return self.can_access("all_database_access", "all_database_access")
    def can_access_database(self, database: Union["Database", "DruidCluster"]) -> bool:
        """
        Return True if the user can fully access the Superset database, False otherwise.
        Note for Druid the database is akin to the Druid cluster.
        :param database: The Superset database
        :returns: Whether the user can fully access the Superset database
        """
        # hierarchical check: any broader grant implies access to this database
        return (
            self.can_access_all_datasources()
            or self.can_access_all_databases()
            or self.can_access("database_access", database.perm)  # type: ignore
        )
    def can_access_schema(self, datasource: "BaseDatasource") -> bool:
        """
        Return True if the user can fully access the schema associated with the Superset
        datasource, False otherwise.
        Note for Druid datasources the database and schema are akin to the Druid cluster
        and datasource name prefix respectively, i.e., [schema.]datasource.
        :param datasource: The Superset datasource
        :returns: Whether the user can fully access the datasource's schema
        """
        # hierarchical check: datasource-wide or database-level grants imply schema access
        return (
            self.can_access_all_datasources()
            or self.can_access_database(datasource.database)
            or self.can_access("schema_access", datasource.schema_perm or "")
        )
def can_access_datasource(self, datasource: "BaseDatasource") -> bool:
"""
Return True if the user can fully access of the Superset datasource, False
otherwise.
:param datasource: The Superset datasource
:returns: Whether the user can fully access the Superset datasource
"""
try:
self.raise_for_access(datasource=datasource)
except SupersetSecurityException:
return False
return True
    @staticmethod
    def get_datasource_access_error_msg(datasource: "BaseDatasource") -> str:
        """
        Return the error message for the denied Superset datasource.
        :param datasource: The denied Superset datasource
        :returns: The error message
        """
        # NOTE(review): the triple-quoted literal embeds a newline and source
        # indentation into the user-facing message — confirm this is intended
        return f"""This endpoint requires the datasource {datasource.name}, database or
            `all_datasource_access` permission"""
    @staticmethod
    def get_datasource_access_link(  # pylint: disable=unused-argument
        datasource: "BaseDatasource",
    ) -> Optional[str]:
        """
        Return the link for the denied Superset datasource.
        :param datasource: The denied Superset datasource
        :returns: The access URL
        """
        from superset import conf
        # deployment-configurable "how to request access" URL; may be None
        return conf.get("PERMISSION_INSTRUCTIONS_LINK")
    def get_datasource_access_error_object(  # pylint: disable=invalid-name
        self, datasource: "BaseDatasource"
    ) -> SupersetError:
        """
        Return the error object for the denied Superset datasource.
        :param datasource: The denied Superset datasource
        :returns: The error object
        """
        # structured error carrying the message plus link/name extras for the UI
        return SupersetError(
            error_type=SupersetErrorType.DATASOURCE_SECURITY_ACCESS_ERROR,
            message=self.get_datasource_access_error_msg(datasource),
            level=ErrorLevel.ERROR,
            extra={
                "link": self.get_datasource_access_link(datasource),
                "datasource": datasource.name,
            },
        )
    def get_table_access_error_msg(  # pylint: disable=no-self-use
        self, tables: Set["Table"]
    ) -> str:
        """
        Return the error message for the denied SQL tables.
        :param tables: The set of denied SQL tables
        :returns: The error message
        """
        quoted_tables = [f"`{table}`" for table in tables]
        # NOTE(review): the triple-quoted literal embeds a newline and source
        # indentation into the user-facing message — confirm this is intended
        return f"""You need access to the following tables: {", ".join(quoted_tables)},
            `all_database_access` or `all_datasource_access` permission"""
    def get_table_access_error_object(self, tables: Set["Table"]) -> SupersetError:
        """
        Return the error object for the denied SQL tables.
        :param tables: The set of denied SQL tables
        :returns: The error object
        """
        # structured error carrying the message plus link/table extras for the UI
        return SupersetError(
            error_type=SupersetErrorType.TABLE_SECURITY_ACCESS_ERROR,
            message=self.get_table_access_error_msg(tables),
            level=ErrorLevel.ERROR,
            extra={
                "link": self.get_table_access_link(tables),
                "tables": [str(table) for table in tables],
            },
        )
    def get_table_access_link(  # pylint: disable=unused-argument,no-self-use
        self, tables: Set["Table"]
    ) -> Optional[str]:
        """
        Return the access link for the denied SQL tables.
        :param tables: The set of denied SQL tables
        :returns: The access URL
        """
        from superset import conf
        # deployment-configurable "how to request access" URL; may be None
        return conf.get("PERMISSION_INSTRUCTIONS_LINK")
def can_access_table(self, database: "Database", table: "Table") -> bool:
"""
Return True if the user can access the SQL table, False otherwise.
:param database: The SQL database
:param table: The SQL table
:returns: Whether the user can access the SQL table
"""
try:
self.raise_for_access(database=database, table=table)
except SupersetSecurityException:
return False
return True
    def get_public_role(self) -> Optional[Any]:  # Optional[self.role_model]
        """
        Return the "Public" role, or None unless PUBLIC_ROLE_LIKE_GAMMA is enabled.
        """
        from superset import conf
        if not conf.get("PUBLIC_ROLE_LIKE_GAMMA", False):
            return None
        return (
            self.get_session.query(self.role_model)
            .filter_by(name="Public")
            .one_or_none()
        )
    def user_view_menu_names(self, permission_name: str) -> Set[str]:
        """
        Return the set of view-menu names on which the current user holds the
        given permission; anonymous users are resolved via the Public role.
        :param permission_name: The FAB permission name to look up
        :returns: The set of accessible view-menu names
        """
        base_query = (
            self.get_session.query(self.viewmenu_model.name)
            .join(self.permissionview_model)
            .join(self.permission_model)
            .join(assoc_permissionview_role)
            .join(self.role_model)
        )
        if not g.user.is_anonymous:
            # filter by user id
            view_menu_names = (
                base_query.join(assoc_user_role)
                .join(self.user_model)
                .filter(self.user_model.id == g.user.id)
                .filter(self.permission_model.name == permission_name)
            ).all()
            return {s.name for s in view_menu_names}
        # Properly treat anonymous user
        public_role = self.get_public_role()
        if public_role:
            # filter by public role
            view_menu_names = (
                base_query.filter(self.role_model.id == public_role.id).filter(
                    self.permission_model.name == permission_name
                )
            ).all()
            return {s.name for s in view_menu_names}
        return set()
    def get_schemas_accessible_by_user(
        self, database: "Database", schemas: List[str], hierarchical: bool = True
    ) -> List[str]:
        """
        Return the list of SQL schemas accessible by the user.
        :param database: The SQL database
        :param schemas: The list of eligible SQL schemas
        :param hierarchical: Whether to check using the hierarchical permission logic
        :returns: The list of accessible SQL schemas
        """
        from superset.connectors.sqla.models import SqlaTable
        if hierarchical and self.can_access_database(database):
            return schemas
        # schema_access: schemas granted directly via [db].[schema] permissions
        accessible_schemas = {
            self.unpack_schema_perm(s)[1]
            for s in self.user_view_menu_names("schema_access")
            if s.startswith(f"[{database}].")
        }
        # datasource_access: schemas implied by access to a table within them
        perms = self.user_view_menu_names("datasource_access")
        if perms:
            tables = (
                self.get_session.query(SqlaTable.schema)
                .filter(SqlaTable.database_id == database.id)
                .filter(SqlaTable.schema.isnot(None))
                .filter(SqlaTable.schema != "")
                .filter(or_(SqlaTable.perm.in_(perms)))
                .distinct()
            )
            accessible_schemas.update([table.schema for table in tables])
        # preserve the order of the eligible schema list
        return [s for s in schemas if s in accessible_schemas]
    def get_datasources_accessible_by_user(  # pylint: disable=invalid-name
        self,
        database: "Database",
        datasource_names: List[DatasourceName],
        schema: Optional[str] = None,
    ) -> List[DatasourceName]:
        """
        Return the list of SQL tables accessible by the user.
        :param database: The SQL database
        :param datasource_names: The list of eligible SQL tables w/ schema
        :param schema: The fallback SQL schema if not present in the table name
        :returns: The list of accessible SQL tables w/ schema
        """
        # hierarchical short-circuits: database- or schema-level grants
        if self.can_access_database(database):
            return datasource_names
        if schema:
            schema_perm = self.get_schema_perm(database, schema)
            if schema_perm and self.can_access("schema_access", schema_perm):
                return datasource_names
        # otherwise intersect with the per-datasource grants
        user_perms = self.user_view_menu_names("datasource_access")
        schema_perms = self.user_view_menu_names("schema_access")
        user_datasources = ConnectorRegistry.query_datasources_by_permissions(
            database, user_perms, schema_perms
        )
        if schema:
            names = {d.table_name for d in user_datasources if d.schema == schema}
            return [d for d in datasource_names if d in names]
        full_names = {d.full_name for d in user_datasources}
        return [d for d in datasource_names if f"[{database}].[{d}]" in full_names]
    def merge_perm(self, permission_name: str, view_menu_name: str) -> None:
        """
        Add the FAB permission/view-menu.
        :param permission_name: The FAB permission name
        :param view_menu_name: The FAB view-menu name
        :see: SecurityManager.add_permission_view_menu
        """
        # deprecated shim retained for backwards compatibility
        logger.warning(
            "This method 'merge_perm' is deprecated use add_permission_view_menu"
        )
        self.add_permission_view_menu(permission_name, view_menu_name)
    def _is_user_defined_permission(self, perm: Model) -> bool:
        """
        Return True if the FAB permission is user defined, False otherwise.
        :param perm: The FAB permission
        :returns: Whether the FAB permission is user defined
        """
        # object-level permissions (database/schema/datasource/metric access)
        return perm.permission.name in self.OBJECT_SPEC_PERMISSIONS
    def create_custom_permissions(self) -> None:
        """
        Create custom FAB permissions.
        """
        # for these grants the permission name doubles as the view-menu name
        self.add_permission_view_menu("all_datasource_access", "all_datasource_access")
        self.add_permission_view_menu("all_database_access", "all_database_access")
        self.add_permission_view_menu("all_query_access", "all_query_access")
def create_missing_perms(self) -> None:
"""
Creates missing FAB permissions for datasources, schemas and metrics.
"""
from superset.connectors.base.models import BaseMetric
from superset.models import core as models
logger.info("Fetching a set of all perms to lookup which ones are missing")
all_pvs = set()
for pv in self.get_session.query(self.permissionview_model).all():
if pv.permission and pv.view_menu:
all_pvs.add((pv.permission.name, pv.view_menu.name))
def merge_pv(view_menu: str, perm: str) -> None:
"""Create permission view menu only if it doesn't exist"""
if view_menu and perm and (view_menu, perm) not in all_pvs:
self.add_permission_view_menu(view_menu, perm)
logger.info("Creating missing datasource permissions.")
datasources = ConnectorRegistry.get_all_datasources()
for datasource in datasources:
merge_pv("datasource_access", datasource.get_perm())
merge_pv("schema_access", datasource.get_schema_perm())
logger.info("Creating missing database permissions.")
databases = self.get_session.query(models.Database).all()
for database in databases:
merge_pv("database_access", database.perm)
logger.info("Creating missing metrics permissions")
metrics: List[BaseMetric] = []
for datasource_class in ConnectorRegistry.sources.values():
metrics += list(self.get_session.query(datasource_class.metric_class).all())
    def clean_perms(self) -> None:
        """
        Clean up the FAB faulty permissions.

        Bulk-deletes any permission/view association row whose permission or
        view menu reference is dangling (NULL), which earlier sync runs can
        leave behind.
        """
        logger.info("Cleaning faulty perms")
        sesh = self.get_session
        # ``== None`` (not ``is None``) is required here so SQLAlchemy emits
        # an ``IS NULL`` comparison in the generated SQL.
        pvms = sesh.query(PermissionView).filter(
            or_(
                PermissionView.permission  # pylint: disable=singleton-comparison
                == None,
                PermissionView.view_menu  # pylint: disable=singleton-comparison
                == None,
            )
        )
        # Bulk delete; returns the number of rows removed.
        deleted_count = pvms.delete()
        sesh.commit()
        if deleted_count:
            logger.info("Deleted %i faulty permissions", deleted_count)
    def sync_role_definitions(self) -> None:
        """
        Initialize the Superset application with security roles and such.

        Creates the custom "all access" permissions, rebuilds the standard
        roles from their permission predicates, optionally mirrors Gamma
        onto the Public role, backfills missing object permissions, and
        finally prunes dangling permission/view rows.
        """
        from superset import conf
        logger.info("Syncing role definition")
        self.create_custom_permissions()
        # Creating default roles
        self.set_role("Admin", self._is_admin_pvm)
        self.set_role("Alpha", self._is_alpha_pvm)
        self.set_role("Gamma", self._is_gamma_pvm)
        self.set_role("granter", self._is_granter_pvm)
        self.set_role("sql_lab", self._is_sql_lab_pvm)
        # Optionally make anonymous (Public) users behave like Gamma users.
        if conf.get("PUBLIC_ROLE_LIKE_GAMMA", False):
            self.set_role("Public", self._is_gamma_pvm)
        self.create_missing_perms()
        # commit role and view menu updates
        self.get_session.commit()
        self.clean_perms()
def set_role(
self, role_name: str, pvm_check: Callable[[PermissionView], bool]
) -> None:
"""
Set the FAB permission/views for the role.
:param role_name: The FAB role name
:param pvm_check: The FAB permission/view check
"""
logger.info("Syncing %s perms", role_name)
sesh = self.get_session
pvms = sesh.query(PermissionView).all()
pvms = [p for p in pvms if p.permission and p.view_menu]
role = self.add_role(role_name)
role_pvms = [p for p in pvms if pvm_check(p)]
role.permissions = role_pvms
sesh.merge(role)
sesh.commit()
def _is_admin_only(self, pvm: Model) -> bool:
"""
Return True if the FAB permission/view is accessible to only Admin users,
False otherwise.
Note readonly operations on read only model views are allowed only for admins.
:param pvm: The FAB permission/view
:returns: Whether the FAB object is accessible to only Admin users
"""
if (
pvm.view_menu.name in self.READ_ONLY_MODEL_VIEWS
and pvm.permission.name not in self.READ_ONLY_PERMISSION
):
return True
return (
pvm.view_menu.name in self.ADMIN_ONLY_VIEW_MENUS
or pvm.permission.name in self.ADMIN_ONLY_PERMISSIONS
)
def _is_alpha_only(self, pvm: PermissionModelView) -> bool:
"""
Return True if the FAB permission/view is accessible to only Alpha users,
False otherwise.
:param pvm: The FAB permission/view
:returns: Whether the FAB object is accessible to only Alpha users
"""
if (
pvm.view_menu.name in self.GAMMA_READ_ONLY_MODEL_VIEWS
and pvm.permission.name not in self.READ_ONLY_PERMISSION
):
return True
return (
pvm.view_menu.name in self.ALPHA_ONLY_VIEW_MENUS
or pvm.permission.name in self.ALPHA_ONLY_PERMISSIONS
)
def _is_accessible_to_all(self, pvm: PermissionModelView) -> bool:
"""
Return True if the FAB permission/view is accessible to all, False
otherwise.
:param pvm: The FAB permission/view
:returns: Whether the FAB object is accessible to all users
"""
return pvm.permission.name in self.ACCESSIBLE_PERMS
def _is_admin_pvm(self, pvm: PermissionModelView) -> bool:
"""
Return True if the FAB permission/view is Admin user related, False
otherwise.
:param pvm: The FAB permission/view
:returns: Whether the FAB object is Admin related
"""
return not self._is_user_defined_permission(pvm)
def _is_alpha_pvm(self, pvm: PermissionModelView) -> bool:
"""
Return True if the FAB permission/view is Alpha user related, False
otherwise.
:param pvm: The FAB permission/view
:returns: Whether the FAB object is Alpha related
"""
return not (
self._is_user_defined_permission(pvm) or self._is_admin_only(pvm)
) or self._is_accessible_to_all(pvm)
def _is_gamma_pvm(self, pvm: PermissionModelView) -> bool:
"""
Return True if the FAB permission/view is Gamma user related, False
otherwise.
:param pvm: The FAB permission/view
:returns: Whether the FAB object is Gamma related
"""
return not (
self._is_user_defined_permission(pvm)
or self._is_admin_only(pvm)
or self._is_alpha_only(pvm)
) or self._is_accessible_to_all(pvm)
def _is_sql_lab_pvm(self, pvm: PermissionModelView) -> bool:
"""
Return True if the FAB permission/view is SQL Lab related, False
otherwise.
:param pvm: The FAB permission/view
:returns: Whether the FAB object is SQL Lab related
"""
return (
pvm.view_menu.name
in {"SQL Lab", "SQL Editor", "Query Search", "Saved Queries"}
or pvm.permission.name
in {
"can_sql_json",
"can_csv",
"can_search_queries",
"can_sqllab_viz",
"can_sqllab_table_viz",
"can_sqllab",
}
or (
pvm.view_menu.name in self.USER_MODEL_VIEWS
and pvm.permission.name == "can_list"
)
)
def _is_granter_pvm( # pylint: disable=no-self-use
self, pvm: PermissionModelView
) -> bool:
"""
Return True if the user can grant the FAB permission/view, False
otherwise.
:param pvm: The FAB permission/view
:returns: Whether the user can grant the FAB permission/view
"""
return pvm.permission.name in {"can_override_role_permissions", "can_approve"}
    def set_perm(  # pylint: disable=no-self-use,unused-argument
        self, mapper: Mapper, connection: Connection, target: "BaseDatasource"
    ) -> None:
        """
        Set the datasource permissions.

        SQLAlchemy mapper-event style hook: keeps the denormalized ``perm``
        and ``schema_perm`` columns of the target row in sync with their
        computed values, and makes sure matching FAB permission / view menu
        rows exist.  All writes go through the raw ``connection`` —
        presumably because this runs inside a flush, where re-entering the
        ORM session is not safe (TODO confirm).

        :param mapper: The table mapper
        :param connection: The DB-API connection
        :param target: The mapped instance being persisted
        """
        link_table = target.__table__  # pylint: disable=no-member
        # Refresh the stored ``perm`` string if it is stale.
        if target.perm != target.get_perm():
            connection.execute(
                link_table.update()
                .where(link_table.c.id == target.id)
                .values(perm=target.get_perm())
            )
        # Not every target type has a ``schema_perm`` attribute.
        if (
            hasattr(target, "schema_perm")
            and target.schema_perm != target.get_schema_perm()
        ):
            connection.execute(
                link_table.update()
                .where(link_table.c.id == target.id)
                .values(schema_perm=target.get_schema_perm())
            )
        # Collect the (permission name, view menu name) pairs this target
        # requires: databases/druid clusters get database_access, everything
        # else gets datasource_access plus schema_access when schema is set.
        pvm_names = []
        if target.__tablename__ in {"dbs", "clusters"}:
            pvm_names.append(("database_access", target.get_perm()))
        else:
            pvm_names.append(("datasource_access", target.get_perm()))
            if target.schema:
                pvm_names.append(("schema_access", target.get_schema_perm()))
        # TODO(bogdan): modify slice permissions as well.
        for permission_name, view_menu_name in pvm_names:
            permission = self.find_permission(permission_name)
            view_menu = self.find_view_menu(view_menu_name)
            pv = None
            # Insert any missing permission row directly on the connection,
            # then re-read it so we have its id.
            if not permission:
                permission_table = (
                    self.permission_model.__table__  # pylint: disable=no-member
                )
                connection.execute(
                    permission_table.insert().values(name=permission_name)
                )
                permission = self.find_permission(permission_name)
            # Same for a missing view menu row.
            if not view_menu:
                view_menu_table = (
                    self.viewmenu_model.__table__  # pylint: disable=no-member
                )
                connection.execute(view_menu_table.insert().values(name=view_menu_name))
                view_menu = self.find_view_menu(view_menu_name)
            # Look up the existing association, if any.
            if permission and view_menu:
                pv = (
                    self.get_session.query(self.permissionview_model)
                    .filter_by(permission=permission, view_menu=view_menu)
                    .first()
                )
            # Create the association only when both sides exist and it is
            # not already present.
            if not pv and permission and view_menu:
                permission_view_table = (
                    self.permissionview_model.__table__  # pylint: disable=no-member
                )
                connection.execute(
                    permission_view_table.insert().values(
                        permission_id=permission.id, view_menu_id=view_menu.id
                    )
                )
    def raise_for_access(  # pylint: disable=too-many-arguments,too-many-branches
        self,
        database: Optional["Database"] = None,
        datasource: Optional["BaseDatasource"] = None,
        query: Optional["Query"] = None,
        query_context: Optional["QueryContext"] = None,
        table: Optional["Table"] = None,
        viz: Optional["BaseViz"] = None,
    ) -> None:
        """
        Raise an exception if the user cannot access the resource.

        Two independent checks are performed: a table-level check (for a
        database+table pair or a SQL Lab query) and a datasource-level check
        (for a datasource, query context, or viz).

        :param database: The Superset database
        :param datasource: The Superset datasource
        :param query: The SQL Lab query
        :param query_context: The query context
        :param table: The Superset table (requires database)
        :param viz: The visualization
        :raises SupersetSecurityException: If the user cannot access the resource
        """
        from superset.connectors.sqla.models import SqlaTable
        from superset.sql_parse import Table
        # Parses as ``(database and table) or query`` — a query carries its
        # own database, a bare table needs one passed in.
        if database and table or query:
            if query:
                database = query.database
            database = cast("Database", database)
            # Full database access short-circuits the per-table checks.
            if self.can_access_database(database):
                return
            if query:
                # Extract the referenced tables from the raw SQL; fall back
                # to the query's default schema when none is qualified.
                tables = {
                    Table(table_.table, table_.schema or query.schema)
                    for table_ in sql_parse.ParsedQuery(query.sql).tables
                }
            elif table:
                tables = {table}
            denied = set()
            for table_ in tables:
                # Schema-level access covers every table in the schema.
                schema_perm = self.get_schema_perm(database, schema=table_.schema)
                if not (schema_perm and self.can_access("schema_access", schema_perm)):
                    datasources = SqlaTable.query_datasources_by_name(
                        database, table_.table, schema=table_.schema
                    )
                    # Access to any one datasource over this table suffices.
                    for datasource_ in datasources:
                        if self.can_access("datasource_access", datasource_.perm):
                            break
                    else:
                        denied.add(table_)
            if denied:
                raise SupersetSecurityException(
                    self.get_table_access_error_object(denied)
                )
        if datasource or query_context or viz:
            # A query context or viz implies a datasource.
            if query_context:
                datasource = query_context.datasource
            elif viz:
                datasource = viz.datasource
            assert datasource
            if not (
                self.can_access_schema(datasource)
                or self.can_access("datasource_access", datasource.perm or "")
            ):
                raise SupersetSecurityException(
                    self.get_datasource_access_error_object(datasource)
                )
    def get_rls_filters(  # pylint: disable=no-self-use
        self, table: "BaseDatasource"
    ) -> List[SqlaQuery]:
        """
        Retrieves the appropriate row level security filters for the current user and
        the passed table.

        A filter applies when it is attached both to the given table and to
        at least one of the current user's roles.  Anonymous users (no
        ``g.user.id``) get no filters.

        :param table: The table to check against
        :returns: A list of filters
        """
        if hasattr(g, "user") and hasattr(g.user, "id"):
            from superset.connectors.sqla.models import (
                RLSFilterRoles,
                RLSFilterTables,
                RowLevelSecurityFilter,
            )
            # Role ids of the current user.
            user_roles = (
                self.get_session.query(assoc_user_role.c.role_id)
                .filter(assoc_user_role.c.user_id == g.user.id)
                .subquery()
            )
            # RLS filter ids attached to any of those roles.
            filter_roles = (
                self.get_session.query(RLSFilterRoles.c.rls_filter_id)
                .filter(RLSFilterRoles.c.role_id.in_(user_roles))
                .subquery()
            )
            # RLS filter ids attached to the given table.
            filter_tables = (
                self.get_session.query(RLSFilterTables.c.rls_filter_id)
                .filter(RLSFilterTables.c.table_id == table.id)
                .subquery()
            )
            # Intersect: filters matching both the table and a user role.
            query = (
                self.get_session.query(
                    RowLevelSecurityFilter.id, RowLevelSecurityFilter.clause
                )
                .filter(RowLevelSecurityFilter.id.in_(filter_tables))
                .filter(RowLevelSecurityFilter.id.in_(filter_roles))
            )
            return query.all()
        return []
def get_rls_ids(self, table: "BaseDatasource") -> List[int]:
"""
Retrieves the appropriate row level security filters IDs for the current user
and the passed table.
:param table: The table to check against
:returns: A list of IDs
"""
ids = [f.id for f in self.get_rls_filters(table)]
ids.sort() # Combinations rather than permutations
return ids
| 35.089069 | 88 | 0.625937 |
import logging
from typing import Any, Callable, cast, List, Optional, Set, Tuple, TYPE_CHECKING, Union
from flask import current_app, g
from flask_appbuilder import Model
from flask_appbuilder.security.sqla.manager import SecurityManager
from flask_appbuilder.security.sqla.models import (
assoc_permissionview_role,
assoc_user_role,
PermissionView,
)
from flask_appbuilder.security.views import (
PermissionModelView,
PermissionViewModelView,
RoleModelView,
UserModelView,
ViewMenuModelView,
)
from flask_appbuilder.widgets import ListWidget
from sqlalchemy import or_
from sqlalchemy.engine.base import Connection
from sqlalchemy.orm.mapper import Mapper
from sqlalchemy.orm.query import Query as SqlaQuery
from superset import sql_parse
from superset.connectors.connector_registry import ConnectorRegistry
from superset.constants import RouteMethod
from superset.errors import ErrorLevel, SupersetError, SupersetErrorType
from superset.exceptions import SupersetSecurityException
from superset.utils.core import DatasourceName
if TYPE_CHECKING:
from superset.common.query_context import QueryContext
from superset.connectors.base.models import BaseDatasource
from superset.connectors.druid.models import DruidCluster
from superset.models.core import Database
from superset.models.sql_lab import Query
from superset.sql_parse import Table
from superset.viz import BaseViz
logger = logging.getLogger(__name__)
class SupersetSecurityListWidget(ListWidget):
template = "superset/fab_overrides/list.html"
class SupersetRoleListWidget(ListWidget):
template = "superset/fab_overrides/list_role.html"
def __init__(self, **kwargs: Any) -> None:
kwargs["appbuilder"] = current_app.appbuilder
super().__init__(**kwargs)
UserModelView.list_widget = SupersetSecurityListWidget
RoleModelView.list_widget = SupersetRoleListWidget
PermissionViewModelView.list_widget = SupersetSecurityListWidget
PermissionModelView.list_widget = SupersetSecurityListWidget
UserModelView.include_route_methods = RouteMethod.CRUD_SET | {
RouteMethod.ACTION,
RouteMethod.API_READ,
RouteMethod.ACTION_POST,
"userinfo",
}
RoleModelView.include_route_methods = RouteMethod.CRUD_SET
PermissionViewModelView.include_route_methods = {RouteMethod.LIST}
PermissionModelView.include_route_methods = {RouteMethod.LIST}
ViewMenuModelView.include_route_methods = {RouteMethod.LIST}
RoleModelView.list_columns = ["name"]
RoleModelView.edit_columns = ["name", "permissions", "user"]
RoleModelView.related_views = []
class SupersetSecurityManager(
SecurityManager
):
userstatschartview = None
READ_ONLY_MODEL_VIEWS = {"DatabaseAsync", "DatabaseView", "DruidClusterModelView"}
USER_MODEL_VIEWS = {
"UserDBModelView",
"UserLDAPModelView",
"UserOAuthModelView",
"UserOIDModelView",
"UserRemoteUserModelView",
}
GAMMA_READ_ONLY_MODEL_VIEWS = {
"SqlMetricInlineView",
"TableColumnInlineView",
"TableModelView",
"DruidColumnInlineView",
"DruidDatasourceModelView",
"DruidMetricInlineView",
"Datasource",
} | READ_ONLY_MODEL_VIEWS
ADMIN_ONLY_VIEW_MENUS = {
"AccessRequestsModelView",
"SQL Lab",
"Refresh Druid Metadata",
"ResetPasswordView",
"RoleModelView",
"LogModelView",
"Security",
"RowLevelSecurityFiltersModelView",
} | USER_MODEL_VIEWS
ALPHA_ONLY_VIEW_MENUS = {
"Manage",
"CSS Templates",
"Queries",
"Import dashboards",
"Upload a CSV",
}
ADMIN_ONLY_PERMISSIONS = {
"can_sql_json",
"can_override_role_permissions",
"can_sync_druid_source",
"can_override_role_permissions",
"can_approve",
"can_update_role",
"all_query_access",
}
READ_ONLY_PERMISSION = {"can_show", "can_list", "can_get", "can_external_metadata"}
ALPHA_ONLY_PERMISSIONS = {
"muldelete",
"all_database_access",
"all_datasource_access",
}
OBJECT_SPEC_PERMISSIONS = {
"database_access",
"schema_access",
"datasource_access",
"metric_access",
}
ACCESSIBLE_PERMS = {"can_userinfo"}
def get_schema_perm(
self, database: Union["Database", str], schema: Optional[str] = None
) -> Optional[str]:
if schema:
return f"[{database}].[{schema}]"
return None
def unpack_schema_perm(
self, schema_permission: str
) -> Tuple[str, str]:
schema_name = schema_permission.split(".")[1][1:-1]
database_name = schema_permission.split(".")[0][1:-1]
return database_name, schema_name
def can_access(self, permission_name: str, view_name: str) -> bool:
user = g.user
if user.is_anonymous:
return self.is_item_public(permission_name, view_name)
return self._has_view_access(user, permission_name, view_name)
def can_access_all_queries(self) -> bool:
return self.can_access("all_query_access", "all_query_access")
def can_access_all_datasources(self) -> bool:
return self.can_access("all_datasource_access", "all_datasource_access")
def can_access_all_databases(self) -> bool:
return self.can_access("all_database_access", "all_database_access")
def can_access_database(self, database: Union["Database", "DruidCluster"]) -> bool:
return (
self.can_access_all_datasources()
or self.can_access_all_databases()
or self.can_access("database_access", database.perm)
)
def can_access_schema(self, datasource: "BaseDatasource") -> bool:
return (
self.can_access_all_datasources()
or self.can_access_database(datasource.database)
or self.can_access("schema_access", datasource.schema_perm or "")
)
def can_access_datasource(self, datasource: "BaseDatasource") -> bool:
try:
self.raise_for_access(datasource=datasource)
except SupersetSecurityException:
return False
return True
@staticmethod
def get_datasource_access_error_msg(datasource: "BaseDatasource") -> str:
return f"""This endpoint requires the datasource {datasource.name}, database or
`all_datasource_access` permission"""
@staticmethod
def get_datasource_access_link(
datasource: "BaseDatasource",
) -> Optional[str]:
from superset import conf
return conf.get("PERMISSION_INSTRUCTIONS_LINK")
def get_datasource_access_error_object(
self, datasource: "BaseDatasource"
) -> SupersetError:
return SupersetError(
error_type=SupersetErrorType.DATASOURCE_SECURITY_ACCESS_ERROR,
message=self.get_datasource_access_error_msg(datasource),
level=ErrorLevel.ERROR,
extra={
"link": self.get_datasource_access_link(datasource),
"datasource": datasource.name,
},
)
def get_table_access_error_msg(
self, tables: Set["Table"]
) -> str:
quoted_tables = [f"`{table}`" for table in tables]
return f"""You need access to the following tables: {", ".join(quoted_tables)},
`all_database_access` or `all_datasource_access` permission"""
def get_table_access_error_object(self, tables: Set["Table"]) -> SupersetError:
return SupersetError(
error_type=SupersetErrorType.TABLE_SECURITY_ACCESS_ERROR,
message=self.get_table_access_error_msg(tables),
level=ErrorLevel.ERROR,
extra={
"link": self.get_table_access_link(tables),
"tables": [str(table) for table in tables],
},
)
def get_table_access_link(
self, tables: Set["Table"]
) -> Optional[str]:
from superset import conf
return conf.get("PERMISSION_INSTRUCTIONS_LINK")
def can_access_table(self, database: "Database", table: "Table") -> bool:
try:
self.raise_for_access(database=database, table=table)
except SupersetSecurityException:
return False
return True
def get_public_role(self) -> Optional[Any]:
from superset import conf
if not conf.get("PUBLIC_ROLE_LIKE_GAMMA", False):
return None
return (
self.get_session.query(self.role_model)
.filter_by(name="Public")
.one_or_none()
)
def user_view_menu_names(self, permission_name: str) -> Set[str]:
base_query = (
self.get_session.query(self.viewmenu_model.name)
.join(self.permissionview_model)
.join(self.permission_model)
.join(assoc_permissionview_role)
.join(self.role_model)
)
if not g.user.is_anonymous:
view_menu_names = (
base_query.join(assoc_user_role)
.join(self.user_model)
.filter(self.user_model.id == g.user.id)
.filter(self.permission_model.name == permission_name)
).all()
return {s.name for s in view_menu_names}
public_role = self.get_public_role()
if public_role:
view_menu_names = (
base_query.filter(self.role_model.id == public_role.id).filter(
self.permission_model.name == permission_name
)
).all()
return {s.name for s in view_menu_names}
return set()
def get_schemas_accessible_by_user(
self, database: "Database", schemas: List[str], hierarchical: bool = True
) -> List[str]:
from superset.connectors.sqla.models import SqlaTable
if hierarchical and self.can_access_database(database):
return schemas
accessible_schemas = {
self.unpack_schema_perm(s)[1]
for s in self.user_view_menu_names("schema_access")
if s.startswith(f"[{database}].")
}
perms = self.user_view_menu_names("datasource_access")
if perms:
tables = (
self.get_session.query(SqlaTable.schema)
.filter(SqlaTable.database_id == database.id)
.filter(SqlaTable.schema.isnot(None))
.filter(SqlaTable.schema != "")
.filter(or_(SqlaTable.perm.in_(perms)))
.distinct()
)
accessible_schemas.update([table.schema for table in tables])
return [s for s in schemas if s in accessible_schemas]
def get_datasources_accessible_by_user(
self,
database: "Database",
datasource_names: List[DatasourceName],
schema: Optional[str] = None,
) -> List[DatasourceName]:
if self.can_access_database(database):
return datasource_names
if schema:
schema_perm = self.get_schema_perm(database, schema)
if schema_perm and self.can_access("schema_access", schema_perm):
return datasource_names
user_perms = self.user_view_menu_names("datasource_access")
schema_perms = self.user_view_menu_names("schema_access")
user_datasources = ConnectorRegistry.query_datasources_by_permissions(
database, user_perms, schema_perms
)
if schema:
names = {d.table_name for d in user_datasources if d.schema == schema}
return [d for d in datasource_names if d in names]
full_names = {d.full_name for d in user_datasources}
return [d for d in datasource_names if f"[{database}].[{d}]" in full_names]
def merge_perm(self, permission_name: str, view_menu_name: str) -> None:
logger.warning(
"This method 'merge_perm' is deprecated use add_permission_view_menu"
)
self.add_permission_view_menu(permission_name, view_menu_name)
def _is_user_defined_permission(self, perm: Model) -> bool:
return perm.permission.name in self.OBJECT_SPEC_PERMISSIONS
def create_custom_permissions(self) -> None:
self.add_permission_view_menu("all_datasource_access", "all_datasource_access")
self.add_permission_view_menu("all_database_access", "all_database_access")
self.add_permission_view_menu("all_query_access", "all_query_access")
def create_missing_perms(self) -> None:
from superset.connectors.base.models import BaseMetric
from superset.models import core as models
logger.info("Fetching a set of all perms to lookup which ones are missing")
all_pvs = set()
for pv in self.get_session.query(self.permissionview_model).all():
if pv.permission and pv.view_menu:
all_pvs.add((pv.permission.name, pv.view_menu.name))
def merge_pv(view_menu: str, perm: str) -> None:
if view_menu and perm and (view_menu, perm) not in all_pvs:
self.add_permission_view_menu(view_menu, perm)
logger.info("Creating missing datasource permissions.")
datasources = ConnectorRegistry.get_all_datasources()
for datasource in datasources:
merge_pv("datasource_access", datasource.get_perm())
merge_pv("schema_access", datasource.get_schema_perm())
logger.info("Creating missing database permissions.")
databases = self.get_session.query(models.Database).all()
for database in databases:
merge_pv("database_access", database.perm)
logger.info("Creating missing metrics permissions")
metrics: List[BaseMetric] = []
for datasource_class in ConnectorRegistry.sources.values():
metrics += list(self.get_session.query(datasource_class.metric_class).all())
def clean_perms(self) -> None:
logger.info("Cleaning faulty perms")
sesh = self.get_session
pvms = sesh.query(PermissionView).filter(
or_(
PermissionView.permission
== None,
PermissionView.view_menu
== None,
)
)
deleted_count = pvms.delete()
sesh.commit()
if deleted_count:
logger.info("Deleted %i faulty permissions", deleted_count)
def sync_role_definitions(self) -> None:
from superset import conf
logger.info("Syncing role definition")
self.create_custom_permissions()
self.set_role("Admin", self._is_admin_pvm)
self.set_role("Alpha", self._is_alpha_pvm)
self.set_role("Gamma", self._is_gamma_pvm)
self.set_role("granter", self._is_granter_pvm)
self.set_role("sql_lab", self._is_sql_lab_pvm)
if conf.get("PUBLIC_ROLE_LIKE_GAMMA", False):
self.set_role("Public", self._is_gamma_pvm)
self.create_missing_perms()
self.get_session.commit()
self.clean_perms()
def set_role(
self, role_name: str, pvm_check: Callable[[PermissionView], bool]
) -> None:
logger.info("Syncing %s perms", role_name)
sesh = self.get_session
pvms = sesh.query(PermissionView).all()
pvms = [p for p in pvms if p.permission and p.view_menu]
role = self.add_role(role_name)
role_pvms = [p for p in pvms if pvm_check(p)]
role.permissions = role_pvms
sesh.merge(role)
sesh.commit()
def _is_admin_only(self, pvm: Model) -> bool:
if (
pvm.view_menu.name in self.READ_ONLY_MODEL_VIEWS
and pvm.permission.name not in self.READ_ONLY_PERMISSION
):
return True
return (
pvm.view_menu.name in self.ADMIN_ONLY_VIEW_MENUS
or pvm.permission.name in self.ADMIN_ONLY_PERMISSIONS
)
def _is_alpha_only(self, pvm: PermissionModelView) -> bool:
if (
pvm.view_menu.name in self.GAMMA_READ_ONLY_MODEL_VIEWS
and pvm.permission.name not in self.READ_ONLY_PERMISSION
):
return True
return (
pvm.view_menu.name in self.ALPHA_ONLY_VIEW_MENUS
or pvm.permission.name in self.ALPHA_ONLY_PERMISSIONS
)
def _is_accessible_to_all(self, pvm: PermissionModelView) -> bool:
return pvm.permission.name in self.ACCESSIBLE_PERMS
def _is_admin_pvm(self, pvm: PermissionModelView) -> bool:
return not self._is_user_defined_permission(pvm)
def _is_alpha_pvm(self, pvm: PermissionModelView) -> bool:
return not (
self._is_user_defined_permission(pvm) or self._is_admin_only(pvm)
) or self._is_accessible_to_all(pvm)
def _is_gamma_pvm(self, pvm: PermissionModelView) -> bool:
return not (
self._is_user_defined_permission(pvm)
or self._is_admin_only(pvm)
or self._is_alpha_only(pvm)
) or self._is_accessible_to_all(pvm)
def _is_sql_lab_pvm(self, pvm: PermissionModelView) -> bool:
return (
pvm.view_menu.name
in {"SQL Lab", "SQL Editor", "Query Search", "Saved Queries"}
or pvm.permission.name
in {
"can_sql_json",
"can_csv",
"can_search_queries",
"can_sqllab_viz",
"can_sqllab_table_viz",
"can_sqllab",
}
or (
pvm.view_menu.name in self.USER_MODEL_VIEWS
and pvm.permission.name == "can_list"
)
)
def _is_granter_pvm(
self, pvm: PermissionModelView
) -> bool:
return pvm.permission.name in {"can_override_role_permissions", "can_approve"}
def set_perm(
self, mapper: Mapper, connection: Connection, target: "BaseDatasource"
) -> None:
link_table = target.__table__
if target.perm != target.get_perm():
connection.execute(
link_table.update()
.where(link_table.c.id == target.id)
.values(perm=target.get_perm())
)
if (
hasattr(target, "schema_perm")
and target.schema_perm != target.get_schema_perm()
):
connection.execute(
link_table.update()
.where(link_table.c.id == target.id)
.values(schema_perm=target.get_schema_perm())
)
pvm_names = []
if target.__tablename__ in {"dbs", "clusters"}:
pvm_names.append(("database_access", target.get_perm()))
else:
pvm_names.append(("datasource_access", target.get_perm()))
if target.schema:
pvm_names.append(("schema_access", target.get_schema_perm()))
for permission_name, view_menu_name in pvm_names:
permission = self.find_permission(permission_name)
view_menu = self.find_view_menu(view_menu_name)
pv = None
if not permission:
permission_table = (
self.permission_model.__table__
)
connection.execute(
permission_table.insert().values(name=permission_name)
)
permission = self.find_permission(permission_name)
if not view_menu:
view_menu_table = (
self.viewmenu_model.__table__
)
connection.execute(view_menu_table.insert().values(name=view_menu_name))
view_menu = self.find_view_menu(view_menu_name)
if permission and view_menu:
pv = (
self.get_session.query(self.permissionview_model)
.filter_by(permission=permission, view_menu=view_menu)
.first()
)
if not pv and permission and view_menu:
permission_view_table = (
self.permissionview_model.__table__
)
connection.execute(
permission_view_table.insert().values(
permission_id=permission.id, view_menu_id=view_menu.id
)
)
def raise_for_access(
self,
database: Optional["Database"] = None,
datasource: Optional["BaseDatasource"] = None,
query: Optional["Query"] = None,
query_context: Optional["QueryContext"] = None,
table: Optional["Table"] = None,
viz: Optional["BaseViz"] = None,
) -> None:
from superset.connectors.sqla.models import SqlaTable
from superset.sql_parse import Table
if database and table or query:
if query:
database = query.database
database = cast("Database", database)
if self.can_access_database(database):
return
if query:
tables = {
Table(table_.table, table_.schema or query.schema)
for table_ in sql_parse.ParsedQuery(query.sql).tables
}
elif table:
tables = {table}
denied = set()
for table_ in tables:
schema_perm = self.get_schema_perm(database, schema=table_.schema)
if not (schema_perm and self.can_access("schema_access", schema_perm)):
datasources = SqlaTable.query_datasources_by_name(
database, table_.table, schema=table_.schema
)
for datasource_ in datasources:
if self.can_access("datasource_access", datasource_.perm):
break
else:
denied.add(table_)
if denied:
raise SupersetSecurityException(
self.get_table_access_error_object(denied)
)
if datasource or query_context or viz:
if query_context:
datasource = query_context.datasource
elif viz:
datasource = viz.datasource
assert datasource
if not (
self.can_access_schema(datasource)
or self.can_access("datasource_access", datasource.perm or "")
):
raise SupersetSecurityException(
self.get_datasource_access_error_object(datasource)
)
def get_rls_filters(
self, table: "BaseDatasource"
) -> List[SqlaQuery]:
if hasattr(g, "user") and hasattr(g.user, "id"):
from superset.connectors.sqla.models import (
RLSFilterRoles,
RLSFilterTables,
RowLevelSecurityFilter,
)
user_roles = (
self.get_session.query(assoc_user_role.c.role_id)
.filter(assoc_user_role.c.user_id == g.user.id)
.subquery()
)
filter_roles = (
self.get_session.query(RLSFilterRoles.c.rls_filter_id)
.filter(RLSFilterRoles.c.role_id.in_(user_roles))
.subquery()
)
filter_tables = (
self.get_session.query(RLSFilterTables.c.rls_filter_id)
.filter(RLSFilterTables.c.table_id == table.id)
.subquery()
)
query = (
self.get_session.query(
RowLevelSecurityFilter.id, RowLevelSecurityFilter.clause
)
.filter(RowLevelSecurityFilter.id.in_(filter_tables))
.filter(RowLevelSecurityFilter.id.in_(filter_roles))
)
return query.all()
return []
def get_rls_ids(self, table: "BaseDatasource") -> List[int]:
ids = [f.id for f in self.get_rls_filters(table)]
ids.sort()
return ids
| true | true |
f731c50ed0f870cecde73f61bbdb8ad20a18c647 | 7,795 | py | Python | api_tests/users/views/test_user_preprints_list.py | listinc/osf.io | b9a0357f3e9b6e905b732e750a16e9452c459d78 | [
"Apache-2.0"
] | 1 | 2019-12-23T04:30:20.000Z | 2019-12-23T04:30:20.000Z | api_tests/users/views/test_user_preprints_list.py | listinc/osf.io | b9a0357f3e9b6e905b732e750a16e9452c459d78 | [
"Apache-2.0"
] | 20 | 2020-03-24T16:48:03.000Z | 2022-03-08T22:38:38.000Z | api_tests/users/views/test_user_preprints_list.py | listinc/osf.io | b9a0357f3e9b6e905b732e750a16e9452c459d78 | [
"Apache-2.0"
] | null | null | null | import pytest
from api.base.settings.defaults import API_BASE
from api_tests.preprints.filters.test_filters import PreprintsListFilteringMixin
from api_tests.preprints.views.test_preprint_list_mixin import PreprintIsPublishedListMixin, PreprintIsValidListMixin
from osf_tests.factories import (
ProjectFactory,
PreprintFactory,
AuthUserFactory,
PreprintProviderFactory,
)
from osf.utils import permissions
@pytest.mark.django_db
class TestUserPreprints:
    """Tests listing a user's preprints via ``/users/<id>/preprints/``."""
    @pytest.fixture()
    def user_one(self):
        # Creator of the preprint and both projects.
        return AuthUserFactory()
    @pytest.fixture()
    def user_two(self):
        # Unrelated user, used to check visibility for non-contributors.
        return AuthUserFactory()
    @pytest.fixture()
    def preprint(self, user_one):
        return PreprintFactory(title='Preprint User One', creator=user_one)
    @pytest.fixture()
    def project_public(self, user_one):
        # Plain (non-preprint) public project; must NOT show up in results.
        return ProjectFactory(
            title='Public Project User One',
            is_public=True,
            creator=user_one)
    @pytest.fixture()
    def project_private(self, user_one):
        # Plain private project; must NOT show up in results.
        return ProjectFactory(
            title='Private Project User One',
            is_public=False,
            creator=user_one)
    def test_gets(
            self, app, user_one, user_two, preprint,
            project_public, project_private):
        """The endpoint returns 200 and only preprints (never projects),
        regardless of who is asking."""
        # test_authorized_in_gets_200
        url = '/{}users/{}/preprints/'.format(API_BASE, user_one._id)
        res = app.get(url, auth=user_one.auth)
        assert res.status_code == 200
        assert res.content_type == 'application/vnd.api+json'
        # test_anonymous_gets_200
        url = '/{}users/{}/preprints/'.format(API_BASE, user_one._id)
        res = app.get(url)
        assert res.status_code == 200
        assert res.content_type == 'application/vnd.api+json'
        # test_get_preprints_logged_in
        url = '/{}users/{}/preprints/'.format(API_BASE, user_one._id)
        res = app.get(url, auth=user_one.auth)
        node_json = res.json['data']
        ids = [each['id'] for each in node_json]
        assert preprint._id in ids
        assert project_public._id not in ids
        assert project_private._id not in ids
        # test_get_projects_not_logged_in
        url = '/{}users/{}/preprints/'.format(API_BASE, user_one._id)
        res = app.get(url)
        node_json = res.json['data']
        ids = [each['id'] for each in node_json]
        assert preprint._id in ids
        assert project_public._id not in ids
        assert project_private._id not in ids
        # test_get_projects_logged_in_as_different_user
        url = '/{}users/{}/preprints/'.format(API_BASE, user_one._id)
        res = app.get(url, auth=user_two.auth)
        node_json = res.json['data']
        ids = [each['id'] for each in node_json]
        assert preprint._id in ids
        assert project_public._id not in ids
        assert project_private._id not in ids
class TestUserPreprintsListFiltering(PreprintsListFilteringMixin):
    """Filtering on the user preprints list (test cases come from the mixin)."""

    @pytest.fixture()
    def user(self):
        return AuthUserFactory()

    @pytest.fixture()
    def provider_one(self):
        return PreprintProviderFactory(name='Sockarxiv')

    @pytest.fixture()
    def provider_two(self):
        return PreprintProviderFactory(name='Piratearxiv')

    @pytest.fixture()
    def provider_three(self, provider_one):
        # Deliberately reuses provider_one so two preprints share a provider.
        return provider_one

    @pytest.fixture()
    def project_one(self, user):
        return ProjectFactory(creator=user)

    @pytest.fixture()
    def project_two(self, user):
        return ProjectFactory(creator=user)

    @pytest.fixture()
    def project_three(self, user):
        return ProjectFactory(creator=user)

    @pytest.fixture()
    def url(self, user):
        return f'/{API_BASE}users/{user._id}/preprints/?version=2.2&'

    def test_provider_filter_equals_returns_one(
            self, app, user, provider_two, preprint_two, provider_url):
        # Filtering by provider_two's id must return exactly its preprint.
        res = app.get(f'{provider_url}{provider_two._id}', auth=user.auth)
        found = [entry['id'] for entry in res.json['data']]
        assert found == [preprint_two._id]
class TestUserPreprintIsPublishedList(PreprintIsPublishedListMixin):
    """Visibility of unpublished preprints on the user preprints list."""

    @pytest.fixture()
    def user_admin_contrib(self):
        return AuthUserFactory()

    @pytest.fixture()
    def provider_one(self):
        return PreprintProviderFactory()

    @pytest.fixture()
    def provider_two(self, provider_one):
        # Both preprints share the same provider in this suite.
        return provider_one

    @pytest.fixture()
    def project_published(self, user_admin_contrib):
        return ProjectFactory(creator=user_admin_contrib, is_public=True)

    @pytest.fixture()
    def project_public(self, user_admin_contrib, user_write_contrib):
        node = ProjectFactory(
            creator=user_admin_contrib, is_public=True)
        node.add_contributor(
            user_write_contrib,
            permissions=permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS,
            save=True)
        return node

    @pytest.fixture()
    def url(self, user_admin_contrib):
        return f'/{API_BASE}users/{user_admin_contrib._id}/preprints/?version=2.2&'

    @pytest.fixture()
    def preprint_unpublished(
            self, user_admin_contrib, provider_one,
            project_public, subject):
        return PreprintFactory(
            creator=user_admin_contrib,
            filename='mgla.pdf',
            provider=provider_one,
            subjects=[[subject._id]],
            project=project_public,
            is_published=False)

    def test_unpublished_visible_to_admins(
            self, app, user_admin_contrib, preprint_unpublished,
            preprint_published, url):
        # Admin contributors see both the published and unpublished preprints.
        res = app.get(url, auth=user_admin_contrib.auth)
        listed = {entry['id'] for entry in res.json['data']}
        assert len(res.json['data']) == 2
        assert preprint_unpublished._id in listed

    def test_unpublished_invisible_to_write_contribs(
            self, app, user_write_contrib, preprint_unpublished,
            preprint_published, url):
        # Write contributors only see the published preprint.
        res = app.get(url, auth=user_write_contrib.auth)
        listed = {entry['id'] for entry in res.json['data']}
        assert len(res.json['data']) == 1
        assert preprint_unpublished._id not in listed

    def test_filter_published_false_write_contrib(
            self, app, user_write_contrib, preprint_unpublished, url):
        # Filtering for unpublished preprints yields nothing for a write contrib.
        res = app.get(
            f'{url}filter[is_published]=false',
            auth=user_write_contrib.auth)
        assert len(res.json['data']) == 0
class TestUserPreprintIsValidList(PreprintIsValidListMixin):
    """Validity checks for preprints on the user preprints list."""

    @pytest.fixture()
    def user_admin_contrib(self):
        return AuthUserFactory()

    @pytest.fixture()
    def project(self, user_admin_contrib, user_write_contrib):
        node = ProjectFactory(creator=user_admin_contrib, is_public=True)
        node.add_contributor(
            user_write_contrib,
            permissions=permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS,
            save=True)
        return node

    @pytest.fixture()
    def provider(self):
        return PreprintProviderFactory()

    @pytest.fixture()
    def url(self, user_admin_contrib):
        return f'/{API_BASE}users/{user_admin_contrib._id}/preprints/?version=2.2&'

    # Override of the mixin test: user nodes/preprints routes hide private
    # nodes from everyone but the user themselves, so a write contributor
    # loses visibility once the project goes private.
    def test_preprint_private_visible_write(
            self, app, user_write_contrib, project, preprint, url):
        res = app.get(url, auth=user_write_contrib.auth)
        assert len(res.json['data']) == 1

        project.is_public = False
        project.save()

        res = app.get(url, auth=user_write_contrib.auth)
        assert len(res.json['data']) == 0
| 32.615063 | 117 | 0.655805 | import pytest
from api.base.settings.defaults import API_BASE
from api_tests.preprints.filters.test_filters import PreprintsListFilteringMixin
from api_tests.preprints.views.test_preprint_list_mixin import PreprintIsPublishedListMixin, PreprintIsValidListMixin
from osf_tests.factories import (
ProjectFactory,
PreprintFactory,
AuthUserFactory,
PreprintProviderFactory,
)
from osf.utils import permissions
@pytest.mark.django_db
class TestUserPreprints:
@pytest.fixture()
def user_one(self):
return AuthUserFactory()
@pytest.fixture()
def user_two(self):
return AuthUserFactory()
@pytest.fixture()
def preprint(self, user_one):
return PreprintFactory(title='Preprint User One', creator=user_one)
@pytest.fixture()
def project_public(self, user_one):
return ProjectFactory(
title='Public Project User One',
is_public=True,
creator=user_one)
@pytest.fixture()
def project_private(self, user_one):
return ProjectFactory(
title='Private Project User One',
is_public=False,
creator=user_one)
def test_gets(
self, app, user_one, user_two, preprint,
project_public, project_private):
url = '/{}users/{}/preprints/'.format(API_BASE, user_one._id)
res = app.get(url, auth=user_one.auth)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
url = '/{}users/{}/preprints/'.format(API_BASE, user_one._id)
res = app.get(url)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
url = '/{}users/{}/preprints/'.format(API_BASE, user_one._id)
res = app.get(url, auth=user_one.auth)
node_json = res.json['data']
ids = [each['id'] for each in node_json]
assert preprint._id in ids
assert project_public._id not in ids
assert project_private._id not in ids
url = '/{}users/{}/preprints/'.format(API_BASE, user_one._id)
res = app.get(url)
node_json = res.json['data']
ids = [each['id'] for each in node_json]
assert preprint._id in ids
assert project_public._id not in ids
assert project_private._id not in ids
url = '/{}users/{}/preprints/'.format(API_BASE, user_one._id)
res = app.get(url, auth=user_two.auth)
node_json = res.json['data']
ids = [each['id'] for each in node_json]
assert preprint._id in ids
assert project_public._id not in ids
assert project_private._id not in ids
class TestUserPreprintsListFiltering(PreprintsListFilteringMixin):
@pytest.fixture()
def user(self):
return AuthUserFactory()
@pytest.fixture()
def provider_one(self):
return PreprintProviderFactory(name='Sockarxiv')
@pytest.fixture()
def provider_two(self):
return PreprintProviderFactory(name='Piratearxiv')
@pytest.fixture()
def provider_three(self, provider_one):
return provider_one
@pytest.fixture()
def project_one(self, user):
return ProjectFactory(creator=user)
@pytest.fixture()
def project_two(self, user):
return ProjectFactory(creator=user)
@pytest.fixture()
def project_three(self, user):
return ProjectFactory(creator=user)
@pytest.fixture()
def url(self, user):
return '/{}users/{}/preprints/?version=2.2&'.format(API_BASE, user._id)
def test_provider_filter_equals_returns_one(
self, app, user, provider_two, preprint_two, provider_url):
expected = [preprint_two._id]
res = app.get(
'{}{}'.format(
provider_url,
provider_two._id),
auth=user.auth)
actual = [preprint['id'] for preprint in res.json['data']]
assert expected == actual
class TestUserPreprintIsPublishedList(PreprintIsPublishedListMixin):
@pytest.fixture()
def user_admin_contrib(self):
return AuthUserFactory()
@pytest.fixture()
def provider_one(self):
return PreprintProviderFactory()
@pytest.fixture()
def provider_two(self, provider_one):
return provider_one
@pytest.fixture()
def project_published(self, user_admin_contrib):
return ProjectFactory(creator=user_admin_contrib, is_public=True)
@pytest.fixture()
def project_public(self, user_admin_contrib, user_write_contrib):
project_public = ProjectFactory(
creator=user_admin_contrib, is_public=True)
project_public.add_contributor(
user_write_contrib,
permissions=permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS,
save=True)
return project_public
@pytest.fixture()
def url(self, user_admin_contrib):
return '/{}users/{}/preprints/?version=2.2&'.format(
API_BASE, user_admin_contrib._id)
@pytest.fixture()
def preprint_unpublished(
self, user_admin_contrib, provider_one,
project_public, subject):
return PreprintFactory(
creator=user_admin_contrib,
filename='mgla.pdf',
provider=provider_one,
subjects=[[subject._id]],
project=project_public,
is_published=False)
def test_unpublished_visible_to_admins(
self, app, user_admin_contrib, preprint_unpublished,
preprint_published, url):
res = app.get(url, auth=user_admin_contrib.auth)
assert len(res.json['data']) == 2
assert preprint_unpublished._id in [d['id'] for d in res.json['data']]
def test_unpublished_invisible_to_write_contribs(
self, app, user_write_contrib, preprint_unpublished,
preprint_published, url):
res = app.get(url, auth=user_write_contrib.auth)
assert len(res.json['data']) == 1
assert preprint_unpublished._id not in [
d['id'] for d in res.json['data']]
def test_filter_published_false_write_contrib(
self, app, user_write_contrib, preprint_unpublished, url):
res = app.get(
'{}filter[is_published]=false'.format(url),
auth=user_write_contrib.auth)
assert len(res.json['data']) == 0
class TestUserPreprintIsValidList(PreprintIsValidListMixin):
@pytest.fixture()
def user_admin_contrib(self):
return AuthUserFactory()
@pytest.fixture()
def project(self, user_admin_contrib, user_write_contrib):
project = ProjectFactory(creator=user_admin_contrib, is_public=True)
project.add_contributor(
user_write_contrib,
permissions=permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS,
save=True)
return project
@pytest.fixture()
def provider(self):
return PreprintProviderFactory()
@pytest.fixture()
def url(self, user_admin_contrib):
return '/{}users/{}/preprints/?version=2.2&'.format(
API_BASE, user_admin_contrib._id)
def test_preprint_private_visible_write(
self, app, user_write_contrib, project, preprint, url):
res = app.get(url, auth=user_write_contrib.auth)
assert len(res.json['data']) == 1
project.is_public = False
project.save()
res = app.get(url, auth=user_write_contrib.auth)
assert len(res.json['data']) == 0
| true | true |
f731c668ffccf5b4cbde0be62847a563f4f61341 | 14,260 | py | Python | packages/syft/src/syft/grid/duet/webrtc_duet.py | pculliton/PySyft | 23a0d1442d3d901b1139aeabe079ccf4177ebc0d | [
"Apache-2.0"
] | null | null | null | packages/syft/src/syft/grid/duet/webrtc_duet.py | pculliton/PySyft | 23a0d1442d3d901b1139aeabe079ccf4177ebc0d | [
"Apache-2.0"
] | null | null | null | packages/syft/src/syft/grid/duet/webrtc_duet.py | pculliton/PySyft | 23a0d1442d3d901b1139aeabe079ccf4177ebc0d | [
"Apache-2.0"
] | null | null | null | """
PySyft Duet (WebRTC)
This class aims to implement the PySyft Duet concept by using WebRTC protocol as a
connection channel in order to allow two different users to establish a direct
connection with high-quality Real-time Communication using private addresses.
The most common example showing how it can be used is the notebook demo example:
Two different jupyter / collab notebooks in different machines using private addresses
behind routers, proxies and firewalls can connect using a full-duplex channel
to perform machine learning and data science tasks, working as a client
and server at the same time.
PS 1: You need a signaling server running somewhere.
If you don't know any public address running this service, or want to set up your own
signaling network you can use PyGrid's network app.
For local development you can run:
$ python src/syft/grid/example_nodes/network.py
PS 2: The PyGrid repo has a complimentary branch that matches the current PySyft release.
To use this feature you must use the correct PyGrid branch.
(https://github.com/OpenMined/PyGrid/)
You can get more details about all this process, in the syft/grid/connections/webrtc.py
source code.
"""
# stdlib
import asyncio
from typing import Optional
# third party
from nacl.signing import SigningKey
# relative
from ... import serialize
from ...core.io.route import SoloRoute
from ...core.node.common.metadata import Metadata
from ...core.node.domain.client import DomainClient
from ...core.node.domain.domain import Domain
from ...logger import error
from ...logger import traceback_and_raise
from ..connections.webrtc import WebRTCConnection
from ..services.signaling_service import AnswerPullRequestMessage
from ..services.signaling_service import InvalidLoopBackRequest
from ..services.signaling_service import OfferPullRequestMessage
from ..services.signaling_service import SignalingAnswerMessage
from ..services.signaling_service import SignalingOfferMessage
from .signaling_client import SignalingClient
class Duet(DomainClient):
    """A peer-to-peer DomainClient built on top of a WebRTC data channel.

    The constructor drives the full WebRTC signaling handshake (offer/answer
    exchange through the signaling server) using two cooperating asyncio
    tasks, then initializes the DomainClient base class with a SoloRoute over
    the established bidirectional connection.
    """

    def __init__(
        self,
        node: Domain,
        target_id: str,
        signaling_client: SignalingClient,
        offer: bool = True,
    ):
        # Generate a fresh signing key pair used to authenticate this peer.
        self.signing_key = SigningKey.generate()
        self.verify_key = self.signing_key.verify_key
        # Async Queues
        # These queues will be used in order to enqueue/dequeue
        # messages to be sent to the signaling server.
        self._push_msg_queue: asyncio.Queue = asyncio.Queue()
        self._pull_msg_queue: asyncio.Queue = asyncio.Queue()
        # As we need to inject a node instance inside of
        # a bidirectional connection in order to allow this
        # connection to work as a client and server using the
        # same channel. We need to be aware about forwarding
        # node instance references in order to avoid multiple
        # references to the same object (this makes garbage
        # collection difficult).
        # A good solution to avoid this problem is to forward just
        # weak references. These references work like a proxy,
        # not creating a strong reference to the object.
        # So, if we delete the real object instance, the
        # garbage collector will call the __del__ method without problems.
        self.node = node
        # WebRTCConnection instance ( Bidirectional Connection )
        self.connection = WebRTCConnection(node=self.node)
        # Client used to exchange signaling messages in order to establish a connection
        # NOTE: In the future it may be a good idea to modularize this client to make
        # it pluggable using different connection protocols.
        self.signaling_client = signaling_client
        # If this peer will not start the signaling process
        if not offer:
            # Start by adding an OfferPullRequest in order to verify whether
            # the desired address pushed an offer request to connect with you.
            # This will trigger the pull async task to check signaling notifications.
            self._pull_msg_queue.put_nowait(
                OfferPullRequestMessage(
                    address=self.signaling_client.address,
                    target_peer=target_id,
                    host_peer=self.signaling_client.duet_id,
                    reply_to=self.signaling_client.address,
                )
            )
        else:
            # Push a WebRTC offer request to the address.
            self.send_offer(target_id=target_id)
        # This flag is used in order to finish the signaling process gracefully.
        # While self._available is True, the pull/push tasks will be running.
        # This flag will be set to False when:
        # 1 - End of the signaling process (checked by _update_availability()).
        # 2 - Any Exception raised during these tasks.
        self._available = True
        # This attribute will be set during the signaling message exchange,
        # and used to create a SoloRoute for both sides.
        self._client_metadata: Optional[Metadata] = None
        # Start async tasks and wait until one of them finishes.
        # As mentioned before, these tasks can be finished for two reasons:
        # 1 - Signaling process ends
        # 2 - Unexpected Exception
        try:
            asyncio.run(self.notify())
            # If client_metadata != None, then the connection was created successfully.
            if self._client_metadata is not None:
                # Deserialize the remote client's metadata in order to obtain
                # PySyft's location structure
                ( # type: ignore
                    spec_location,
                    name,
                    _,
                ) = DomainClient.deserialize_client_metadata_from_node(
                    metadata=serialize(self._client_metadata)
                )
                # Create a SoloRoute
                route = SoloRoute(destination=spec_location, connection=self.connection)
                # Initialize the super class
                super().__init__(
                    domain=spec_location,
                    name=name,
                    routes=[route],
                    signing_key=self.signing_key,
                    verify_key=self.verify_key,
                )
                self.connection._client_address = self.address
            # If client_metadata is None, then an exception occurred during the process.
            # The exception has been caught and saved in self._exception.
            else:
                # NOTE: Maybe we should create a custom exception type.
                traceback_and_raise(
                    Exception(
                        f"Something went wrong during the Duet init process. {self._exception}"
                    )
                )
        except Exception as e:
            traceback_and_raise(e)

    async def notify(self) -> None:
        """Run the push/pull signaling tasks until either one completes."""
        try:
            # Enqueue Pull/Push async tasks
            push_task = asyncio.ensure_future(self.push())
            pull_task = asyncio.ensure_future(self.pull())
            # Wait until one of them finishes
            done, pending = await asyncio.wait(
                [pull_task, push_task], return_when=asyncio.FIRST_COMPLETED
            )
            # Cancel the pending one.
            for task in pending:
                task.cancel()
        except Exception as e:
            traceback_and_raise(e)

    def close(self) -> None:
        """Close the underlying WebRTC connection."""
        self.connection.close()

    async def push(self) -> None:
        """Forward queued offer/answer messages to the signaling server."""
        # This task is responsible for pushing offer/answer messages.
        try:
            while self._available:
                # If push_msg_queue is empty,
                # give up task queue priority, giving
                # computing time to the next task.
                msg = await self._push_msg_queue.get()
                # If self.push_msg_queue.get() returned a message
                # (SignalingOfferMessage, SignalingAnswerMessage)
                # send it to the signaling server.
                self.signaling_client.send_immediate_msg_without_reply(msg=msg)
        except Exception as e:
            log = f"Got an exception in Duet push. {e}"
            error(log)
            # If any exception raises, set the self._available flag to False
            # in order to finish gracefully all the async tasks and save the exception.
            self._available = False
            self._exception: Exception = e

    async def pull(self) -> None:
        """Poll the signaling server for offer/answer messages and react to them."""
        try:
            while self._available:
                # If pull_msg_queue is empty,
                # give up task queue priority, giving
                # computing time to the next task.
                msg = await self._pull_msg_queue.get()
                # If self.pull_msg_queue.get() returned a message
                # (OfferPullRequestMessage, AnswerPullRequestMessage)
                # send it to the signaling server.
                _response = self.signaling_client.send_immediate_msg_with_reply(msg=msg)
                # If a Signaling Offer Message was found
                if isinstance(_response, SignalingOfferMessage):
                    await self._send_answer(msg=_response)
                # If a Signaling Answer Message was found
                elif isinstance(_response, SignalingAnswerMessage):
                    await self._ack(msg=_response)
                # If an InvalidLoopBack Message, it was a loopback request
                elif isinstance(_response, InvalidLoopBackRequest):
                    traceback_and_raise(
                        Exception(
                            "You can't perform p2p connection using your current node address as a destination peer."
                        )
                    )
                # If no Signaling Message was found
                else:
                    # Just re-enqueue the request to be processed later.
                    self._pull_msg_queue.put_nowait(msg)
                # Check if the signaling process is over.
                self._available = self._update_availability()
                await asyncio.sleep(0.5)
        except Exception as e:
            log = f"Got an exception in Duet pull. {e}"
            error(log)
            # If any exception raises, set the self._available flag to False
            # in order to finish gracefully all the async tasks and save the exception.
            self._available = False
            self._exception = e

    def send_offer(self, target_id: str) -> None:
        """Start a new signaling process by creating a new
        offer message and pushing it to the Signaling Server."""
        try:
            # Generate an offer request payload containing
            # local network description data/metadata (IP, MAC, Mask, etc...)
            payload = asyncio.run(self.connection._set_offer())
            # Create a PySyft SignalingOfferMessage
            signaling_offer = SignalingOfferMessage(
                address=self.signaling_client.address, # Target's address
                payload=payload, # Offer Payload
                host_metadata=self.node.get_metadata_for_client(), # Own Node Metadata
                target_peer=target_id,
                host_peer=self.signaling_client.duet_id, # Own Node ID
            )
            # Enqueue it in the push msg queue to be sent to the signaling server.
            self._push_msg_queue.put_nowait(signaling_offer)
            # Create/enqueue a new AnswerPullRequest in order to wait for the signaling response.
            self._pull_msg_queue.put_nowait(
                AnswerPullRequestMessage(
                    address=self.signaling_client.address,
                    target_peer=target_id,
                    host_peer=self.signaling_client.duet_id,
                    reply_to=self.signaling_client.address,
                )
            )
        except Exception as e:
            traceback_and_raise(e)

    async def _send_answer(self, msg: SignalingOfferMessage) -> None:
        """Process a SignalingOfferMessage and create a new
        SignalingAnswerMessage as a response."""
        try:
            # Process the received offer message, updating the target's remote address.
            # Generate an answer request payload containing
            # local network description data/metadata (IP, MAC, Mask, etc...)
            payload = asyncio.run(self.connection._set_answer(payload=msg.payload))
            # Save the remote node's metadata in order to create a SoloRoute.
            self._client_metadata = msg.host_metadata
            # Create a new SignalingAnswerMessage
            signaling_answer = SignalingAnswerMessage(
                address=self.signaling_client.address,
                payload=payload, # Signaling answer payload
                host_metadata=self.node.get_metadata_for_client(), # Own Node Metadata
                target_peer=msg.host_peer, # Remote Node ID
                host_peer=self.signaling_client.duet_id,
            )
            # Enqueue it in the push msg queue to be sent to the signaling server.
            await self._push_msg_queue.put(signaling_answer)
        except Exception as e:
            traceback_and_raise(e)

    async def _ack(self, msg: SignalingAnswerMessage) -> None:
        """Handle the last signaling message: store the remote Node's
        metadata and update the target's remote address."""
        try:
            # Save the remote node's metadata in order to create a SoloRoute.
            self._client_metadata = msg.host_metadata
            # Process the received answer message, updating the target's remote address.
            await self.connection._process_answer(payload=msg.payload)
        except Exception as e:
            traceback_and_raise(e)

    def _update_availability(self) -> bool:
        """Check whether the signaling process is still in progress.

        :return: Boolean flag, True if it's NOT over, and False if it's over.
        :rtype: Boolean
        """
        available = False
        try:
            available = (
                not self._pull_msg_queue.empty()
                and self.connection.peer_connection is not None
            )
        except Exception as e:
            traceback_and_raise(e)
        return available
| 42.822823 | 117 | 0.629523 |
import asyncio
from typing import Optional
from nacl.signing import SigningKey
from ... import serialize
from ...core.io.route import SoloRoute
from ...core.node.common.metadata import Metadata
from ...core.node.domain.client import DomainClient
from ...core.node.domain.domain import Domain
from ...logger import error
from ...logger import traceback_and_raise
from ..connections.webrtc import WebRTCConnection
from ..services.signaling_service import AnswerPullRequestMessage
from ..services.signaling_service import InvalidLoopBackRequest
from ..services.signaling_service import OfferPullRequestMessage
from ..services.signaling_service import SignalingAnswerMessage
from ..services.signaling_service import SignalingOfferMessage
from .signaling_client import SignalingClient
class Duet(DomainClient):
def __init__(
self,
node: Domain,
target_id: str,
signaling_client: SignalingClient,
offer: bool = True,
):
self.signing_key = SigningKey.generate()
self.verify_key = self.signing_key.verify_key
self._push_msg_queue: asyncio.Queue = asyncio.Queue()
self._pull_msg_queue: asyncio.Queue = asyncio.Queue()
self.node = node
self.connection = WebRTCConnection(node=self.node)
self.signaling_client = signaling_client
if not offer:
self._pull_msg_queue.put_nowait(
OfferPullRequestMessage(
address=self.signaling_client.address,
target_peer=target_id,
host_peer=self.signaling_client.duet_id,
reply_to=self.signaling_client.address,
)
)
else:
self.send_offer(target_id=target_id)
self._available = True
self._client_metadata: Optional[Metadata] = None
try:
asyncio.run(self.notify())
if self._client_metadata is not None:
# PySyft's location structure
(
spec_location,
name,
_,
) = DomainClient.deserialize_client_metadata_from_node(
metadata=serialize(self._client_metadata)
)
route = SoloRoute(destination=spec_location, connection=self.connection)
super().__init__(
domain=spec_location,
name=name,
routes=[route],
signing_key=self.signing_key,
verify_key=self.verify_key,
)
self.connection._client_address = self.address
else:
traceback_and_raise(
Exception(
f"Something went wrong during the Duet init process. {self._exception}"
)
)
except Exception as e:
traceback_and_raise(e)
async def notify(self) -> None:
try:
push_task = asyncio.ensure_future(self.push())
pull_task = asyncio.ensure_future(self.pull())
done, pending = await asyncio.wait(
[pull_task, push_task], return_when=asyncio.FIRST_COMPLETED
)
for task in pending:
task.cancel()
except Exception as e:
traceback_and_raise(e)
def close(self) -> None:
self.connection.close()
async def push(self) -> None:
try:
while self._available:
msg = await self._push_msg_queue.get()
self.signaling_client.send_immediate_msg_without_reply(msg=msg)
except Exception as e:
log = f"Got an exception in Duet push. {e}"
error(log)
self._available = False
self._exception: Exception = e
async def pull(self) -> None:
try:
while self._available:
msg = await self._pull_msg_queue.get()
_response = self.signaling_client.send_immediate_msg_with_reply(msg=msg)
if isinstance(_response, SignalingOfferMessage):
await self._send_answer(msg=_response)
elif isinstance(_response, SignalingAnswerMessage):
await self._ack(msg=_response)
elif isinstance(_response, InvalidLoopBackRequest):
traceback_and_raise(
Exception(
"You can't perform p2p connection using your current node address as a destination peer."
)
)
# If Signaling Message weren't found
else:
self._pull_msg_queue.put_nowait(msg)
self._available = self._update_availability()
await asyncio.sleep(0.5)
except Exception as e:
log = f"Got an exception in Duet pull. {e}"
error(log)
self._available = False
self._exception = e
def send_offer(self, target_id: str) -> None:
try:
payload = asyncio.run(self.connection._set_offer())
signaling_offer = SignalingOfferMessage(
address=self.signaling_client.address, # Target's address
payload=payload,
host_metadata=self.node.get_metadata_for_client(),
target_peer=target_id,
host_peer=self.signaling_client.duet_id,
)
self._push_msg_queue.put_nowait(signaling_offer)
self._pull_msg_queue.put_nowait(
AnswerPullRequestMessage(
address=self.signaling_client.address,
target_peer=target_id,
host_peer=self.signaling_client.duet_id,
reply_to=self.signaling_client.address,
)
)
except Exception as e:
traceback_and_raise(e)
async def _send_answer(self, msg: SignalingOfferMessage) -> None:
try:
# Generates an answer request payload containing
# local network description data/metadata (IP, MAC, Mask, etc...)
payload = asyncio.run(self.connection._set_answer(payload=msg.payload))
# Save remote node's metadata in roder to create a SoloRoute.
self._client_metadata = msg.host_metadata
signaling_answer = SignalingAnswerMessage(
address=self.signaling_client.address,
payload=payload,
host_metadata=self.node.get_metadata_for_client(),
target_peer=msg.host_peer,
host_peer=self.signaling_client.duet_id,
)
await self._push_msg_queue.put(signaling_answer)
except Exception as e:
traceback_and_raise(e)
async def _ack(self, msg: SignalingAnswerMessage) -> None:
try:
self._client_metadata = msg.host_metadata
# Process received offer message updating target's remote address
await self.connection._process_answer(payload=msg.payload)
except Exception as e:
traceback_and_raise(e)
def _update_availability(self) -> bool:
available = False
try:
available = (
not self._pull_msg_queue.empty()
and self.connection.peer_connection is not None
)
except Exception as e:
traceback_and_raise(e)
return available
| true | true |
f731c670570a5ddaf363e2a308eba6f30d1c3ed7 | 4,328 | py | Python | model-optimizer/extensions/middle/GatherNdNormalizer.py | calvinfeng/openvino | 11f591c16852637506b1b40d083b450e56d0c8ac | [
"Apache-2.0"
] | null | null | null | model-optimizer/extensions/middle/GatherNdNormalizer.py | calvinfeng/openvino | 11f591c16852637506b1b40d083b450e56d0c8ac | [
"Apache-2.0"
] | 19 | 2021-03-26T08:11:00.000Z | 2022-02-21T13:06:26.000Z | model-optimizer/extensions/middle/GatherNdNormalizer.py | calvinfeng/openvino | 11f591c16852637506b1b40d083b450e56d0c8ac | [
"Apache-2.0"
] | 1 | 2021-07-28T17:30:46.000Z | 2021-07-28T17:30:46.000Z | """
Copyright (C) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging as log
import numpy as np
from extensions.ops.gather import Gather
from mo.front.common.partial_infer.utils import int64_array
from mo.front.tf.graph_utils import create_op_node_with_second_input, create_op_with_const_inputs
from mo.graph.graph import Graph, rename_node
from mo.middle.replacement import MiddleReplacementPattern
from mo.ops.reshape import Reshape
class GatherNDNormalize(MiddleReplacementPattern):
    """
    Hot fix for new speech-to-text model enabling while GatherND is not implemented in IE.
    We can replace GatherND with Reshape + Gather in the case when the GatherND indices
    have just one meaningful dimension.
    TODO: Investigate whether we must replace GatherND with Reshape + Gather always (due to performance benefits)
    for this particular case or only if the plugin does not support GatherND.
    And the best place for the transformation is nGraph so we need to move it.
    """
    enabled = True
    force_clean_up = True

    def run_before(self):
        from extensions.middle.BlockLSTMtoLSTMSequence import BlockLSTMtoLSTMSequence
        return [BlockLSTMtoLSTMSequence]

    def run_after(self):
        from extensions.middle.pass_separator import MiddleStart
        return [MiddleStart]

    def pattern(self):
        # Only GatherND nodes without batch dimensions are handled.
        return dict(
            nodes=[('GatherND', dict(kind='op', op='GatherND', batch_dims=0))],
            edges=[]
        )

    @staticmethod
    def indices_check(indices: np.ndarray, input_shape: tuple):
        """
        Check that the indices have just one meaningful dimension and that all
        other indexed dimensions of the input have size 1.

        Returns the position of the single non-zero indices axis, or None when
        the GatherND cannot be normalized to a plain Gather.
        """
        n_dims = indices.shape[-1]
        non_zero = None
        for i in range(n_dims):
            if not all(np.take(indices, indices=[i], axis=-1) == 0):
                if non_zero is None:
                    non_zero = i
                else:
                    # More than one varying axis: not expressible as 1D Gather.
                    return None
            else:
                if input_shape[i] != 1:
                    # A constant-zero index over a non-degenerate axis would
                    # drop data, so normalization is unsafe.
                    return None
        return non_zero

    def replace_pattern(self, graph: Graph, match: dict):
        """Replace a matched GatherND node with Reshape + Gather when possible."""
        gather = match['GatherND']
        gather_name = gather.soft_get('name', gather.id)
        input_shape = gather.in_node(0).shape
        indices = gather.in_node(1).value
        if indices is None:
            # We can't do such a special pass without the indices value.
            return

        # 0. All needed checks that we can replace GatherND by Gather
        gather_idx = self.indices_check(indices, input_shape)
        if gather_idx is None:
            log.warning('Node {} with op=GatherND can\'t be normalized to op=Gather.'.format(gather_name))
            return

        # 1. Add Reshape and connect: flatten all indexed (degenerate) axes
        new_shape = int64_array([-1] + list(input_shape[indices.shape[-1]:]))
        reshape = create_op_node_with_second_input(graph, Reshape, new_shape,
                                                   {'name': gather_name + '/Reshape_for_GatherND/'})
        gather.in_port(0).get_connection().set_destination(reshape.in_port(0))

        # 2. Change indices from Nd to 1d: keep only the meaningful axis
        new_indices = np.reshape(np.take(indices, indices=[gather_idx], axis=-1), [-1])

        rename_node(gather, gather_name + '/to_delete')

        # 3. Create new Gather operation and reconnect all inputs/outputs
        new_gather = create_op_with_const_inputs(graph, Gather, {1: new_indices, 2: int64_array(0)},
                                                 {'name': gather_name})
        rename_node(new_gather, gather_name)

        reshape.out_port(0).connect(new_gather.in_port(0))
        gather.out_port(0).get_connection().set_source(new_gather.out_port(0))

        # 4. Remove old Gather node
        graph.remove_node(gather.id)
| 39.706422 | 113 | 0.661738 | import logging as log
import numpy as np
from extensions.ops.gather import Gather
from mo.front.common.partial_infer.utils import int64_array
from mo.front.tf.graph_utils import create_op_node_with_second_input, create_op_with_const_inputs
from mo.graph.graph import Graph, rename_node
from mo.middle.replacement import MiddleReplacementPattern
from mo.ops.reshape import Reshape
class GatherNDNormalize(MiddleReplacementPattern):
    """Middle-stage pass that rewrites GatherND (batch_dims=0) as Reshape + Gather.

    The rewrite is only legal when the constant indices vary along exactly one
    axis and every other indexed axis of the input has size 1.
    """
    enabled = True
    force_clean_up = True
    def run_before(self):
        # Run before the BlockLSTM conversion so it sees already-normalized Gathers.
        from extensions.middle.BlockLSTMtoLSTMSequence import BlockLSTMtoLSTMSequence
        return [BlockLSTMtoLSTMSequence]
    def run_after(self):
        from extensions.middle.pass_separator import MiddleStart
        return [MiddleStart]
    def pattern(self):
        # Match only GatherND nodes without batch dimensions.
        return dict(
            nodes=[('GatherND', dict(kind='op', op='GatherND', batch_dims=0))],
            edges=[]
        )
    @staticmethod
    def indices_check(indices: np.array, input_shape: tuple):
        """Return the single axis along which ``indices`` varies, else ``None``.

        An axis is "meaningful" when its index column is not all zeros; every
        all-zero indexed axis must correspond to an input dimension of size 1.
        NOTE(review): built-in ``all()`` over the sliced array assumes 2-D
        indices (shape ``[N, n_dims]``) — confirm higher ranks never reach here.
        """
        n_dims = indices.shape[-1]
        non_zero = None
        for i in range(n_dims):
            if not all(np.take(indices, indices=[i], axis=-1) == 0):
                if non_zero is None:
                    non_zero = i
                else:
                    # More than one varying axis: cannot express as a 1-D Gather.
                    return None
            else:
                if input_shape[i] != 1:
                    return None
        return non_zero
    def replace_pattern(self, graph: Graph, match: dict):
        """Replace the matched GatherND with an equivalent Reshape + Gather pair."""
        gather = match['GatherND']
        gather_name = gather.soft_get('name', gather.id)
        input_shape = gather.in_node(0).shape
        indices = gather.in_node(1).value
        if indices is None:
            # Non-constant indices: the transformation cannot be validated.
            return
        # 0. All needed checks that we can replace GatherND by Gather
        gather_idx = self.indices_check(indices, input_shape)
        if gather_idx is None:
            log.warning('Node {} with op=GatherND can\'t be normalized to op=Gather.'.format(gather_name))
            return
        # Flatten the indexed axes so a plain Gather along axis 0 applies.
        new_shape = int64_array([-1] + list(input_shape[indices.shape[-1]:]))
        reshape = create_op_node_with_second_input(graph, Reshape, new_shape,
                                                   {'name': gather_name + '/Reshape_for_GatherND/'})
        gather.in_port(0).get_connection().set_destination(reshape.in_port(0))
        # Keep only the single meaningful column of the ND indices tensor.
        new_indices = np.reshape(np.take(indices, indices=[gather_idx], axis=-1), [-1])
        # Rename first so the replacement Gather can take over the original name.
        rename_node(gather, gather_name + '/to_delete')
        new_gather = create_op_with_const_inputs(graph, Gather, {1: new_indices, 2: int64_array(0)},
                                                 {'name': gather_name})
        rename_node(new_gather, gather_name)
        reshape.out_port(0).connect(new_gather.in_port(0))
        gather.out_port(0).get_connection().set_source(new_gather.out_port(0))
        graph.remove_node(gather.id)
| true | true |
f731c9a631559d95706fad6adef4295840c8b049 | 5,926 | py | Python | src/third_party/wiredtiger/test/suite/test_config02.py | benety/mongo | 203430ac9559f82ca01e3cbb3b0e09149fec0835 | [
"Apache-2.0"
] | null | null | null | src/third_party/wiredtiger/test/suite/test_config02.py | benety/mongo | 203430ac9559f82ca01e3cbb3b0e09149fec0835 | [
"Apache-2.0"
] | null | null | null | src/third_party/wiredtiger/test/suite/test_config02.py | benety/mongo | 203430ac9559f82ca01e3cbb3b0e09149fec0835 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Public Domain 2014-present MongoDB, Inc.
# Public Domain 2008-2014 WiredTiger, Inc.
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# [TEST_TAGS]
# connection_api:wiredtiger_open
# config_api
# [END_TAGS]
import os
import wiredtiger, wttest
# test_config02.py
# The home directory for wiredtiger_open
class test_config02(wttest.WiredTigerTestCase):
    """Tests for how wiredtiger_open resolves its home directory.

    Covers the explicit home argument, the WIREDTIGER_HOME environment
    variable, their precedence, and error cases (missing / unwritable dirs).
    """
    table_name1 = 'test_config02'
    nentries = 100
    # Each test needs to set up its connection in its own way,
    # so override these methods to do nothing
    def setUpConnectionOpen(self, dir):
        return None
    def setUpSessionOpen(self, conn):
        return None
    def populate_and_check(self):
        """
        Create entries, and read back in a cursor: key=string, value=string
        """
        create_args = 'key_format=S,value_format=S'
        self.session.create("table:" + self.table_name1, create_args)
        cursor = self.session.open_cursor('table:' + self.table_name1, None, None)
        for i in range(0, self.nentries):
            cursor[str(1000000 + i)] = 'value' + str(i)
        i = 0
        cursor.reset()
        for key, value in cursor:
            self.assertEqual(key, str(1000000 + i))
            self.assertEqual(value, ('value' + str(i)))
            i += 1
        self.assertEqual(i, self.nentries)
        cursor.close()
    def checkfiles(self, dirname):
        # The table's backing file must have been created under dirname.
        self.assertTrue(os.path.exists(dirname + os.sep + self.table_name1 + ".wt"))
    def checknofiles(self, dirname):
        self.assertEqual(len(os.listdir(dirname)), 0)
    def common_test(self, homearg, homeenv, configextra):
        """
        Call wiredtiger_open and run a simple test.
        homearg is the first arg to wiredtiger_open, it may be null.
        WIREDTIGER_HOME is set to homeenv, if it is not null.
        configextra are any extra configuration strings needed on the open.
        """
        # os.putenv/os.unsetenv bypass os.environ — presumably so the
        # WiredTiger C library sees the raw environment change; this probe
        # skips the test on platforms without those calls.
        try:
            os.putenv('SOMEVAR', 'somevalue')
            os.unsetenv('SOMEVAR')
        except:
            self.skipTest('putenv and/or unsetenv not support on this OS')
            return
        configarg = 'create'
        if configextra != None:
            configarg += ',' + configextra
        if homeenv == None:
            os.unsetenv('WIREDTIGER_HOME')
        else:
            os.putenv('WIREDTIGER_HOME', homeenv)
        try:
            self.conn = self.wiredtiger_open(homearg, configarg)
            self.session = self.conn.open_session(None)
            self.populate_and_check()
        finally:
            # Always scrub the env var so later tests start clean.
            os.unsetenv('WIREDTIGER_HOME')
    def test_home_nohome(self):
        # No home argument and no env var: files land in the current directory.
        self.common_test(None, None, None)
        self.checkfiles(".")
    def test_home_rel(self):
        dir = 'subdir'
        os.mkdir(dir)
        self.common_test(dir, None, None)
        self.checkfiles(dir)
    def test_home_abs(self):
        dir = os.path.realpath('.') + os.sep + 'subdir'
        os.mkdir(dir)
        self.common_test(dir, None, None)
        self.checkfiles(dir)
    def test_home_and_env(self):
        # The explicit home argument wins over WIREDTIGER_HOME.
        hdir = 'homedir'
        edir = 'envdir'
        os.mkdir(hdir)
        os.mkdir(edir)
        self.common_test(hdir, edir, None)
        self.checkfiles(hdir)
        self.checknofiles(edir)
    def test_home_and_env_conf(self):
        # If homedir is set, the environment is ignored
        hdir = 'homedir'
        edir = 'envdir'
        os.mkdir(hdir)
        os.mkdir(edir)
        self.common_test(hdir, edir, None)
        self.checkfiles(hdir)
        self.checknofiles(edir)
    def test_home_and_missing_env(self):
        # If homedir is set, it is used no matter what
        hdir = 'homedir'
        os.mkdir(hdir)
        self.common_test(hdir, None, None)
        self.checkfiles(hdir)
    def test_env_conf(self):
        edir = 'envdir'
        os.mkdir(edir)
        self.common_test(None, edir, None)
        self.checkfiles(edir)
    def test_env_conf_without_env_var(self):
        # no env var set, so should use current directory
        self.common_test(None, None, None)
        self.checkfiles(".")
    def test_home_does_not_exist(self):
        dir = 'nondir'
        self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
            lambda: self.wiredtiger_open(dir, 'create'),
            '/(No such file or directory|The system cannot find the path specified)/')
    def test_home_not_writeable(self):
        if os.name == "nt":
            self.skipTest('Unix specific test skipped on Windows')
        dir = 'subdir'
        os.mkdir(dir)
        os.chmod(dir, 0o555)
        self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
            lambda: self.wiredtiger_open(dir, 'create'),
            '/Permission denied/')
# Standard boilerplate: run this test module directly with Python.
if __name__ == '__main__':
    wttest.run()
| 34.254335 | 86 | 0.643773 |
import os
import wiredtiger, wttest
class test_config02(wttest.WiredTigerTestCase):
    """Tests for how wiredtiger_open resolves its home directory.

    Covers the explicit home argument, the WIREDTIGER_HOME environment
    variable, their precedence, and error cases (missing / unwritable dirs).
    """
    table_name1 = 'test_config02'
    nentries = 100
    # Each test opens its own connection, so disable the default setup.
    def setUpConnectionOpen(self, dir):
        return None
    def setUpSessionOpen(self, conn):
        return None
    def populate_and_check(self):
        """Create string key/value entries and read them back via a cursor."""
        create_args = 'key_format=S,value_format=S'
        self.session.create("table:" + self.table_name1, create_args)
        cursor = self.session.open_cursor('table:' + self.table_name1, None, None)
        for i in range(0, self.nentries):
            cursor[str(1000000 + i)] = 'value' + str(i)
        i = 0
        cursor.reset()
        for key, value in cursor:
            self.assertEqual(key, str(1000000 + i))
            self.assertEqual(value, ('value' + str(i)))
            i += 1
        self.assertEqual(i, self.nentries)
        cursor.close()
    def checkfiles(self, dirname):
        # The table's backing file must have been created under dirname.
        self.assertTrue(os.path.exists(dirname + os.sep + self.table_name1 + ".wt"))
    def checknofiles(self, dirname):
        self.assertEqual(len(os.listdir(dirname)), 0)
    def common_test(self, homearg, homeenv, configextra):
        """Open a connection and run a simple populate/read test.

        homearg is the first argument to wiredtiger_open; it may be None.
        WIREDTIGER_HOME is set to homeenv when homeenv is not None.
        configextra is appended to the 'create' configuration string.
        """
        # os.putenv/os.unsetenv bypass os.environ — presumably so the
        # WiredTiger C library sees the raw environment change; this probe
        # skips the test on platforms without those calls.
        try:
            os.putenv('SOMEVAR', 'somevalue')
            os.unsetenv('SOMEVAR')
        except:
            self.skipTest('putenv and/or unsetenv not support on this OS')
            return
        configarg = 'create'
        if configextra != None:
            configarg += ',' + configextra
        if homeenv == None:
            os.unsetenv('WIREDTIGER_HOME')
        else:
            os.putenv('WIREDTIGER_HOME', homeenv)
        try:
            self.conn = self.wiredtiger_open(homearg, configarg)
            self.session = self.conn.open_session(None)
            self.populate_and_check()
        finally:
            # Always scrub the env var so later tests start clean.
            os.unsetenv('WIREDTIGER_HOME')
    def test_home_nohome(self):
        # No home argument and no env var: files land in the current directory.
        self.common_test(None, None, None)
        self.checkfiles(".")
    def test_home_rel(self):
        dir = 'subdir'
        os.mkdir(dir)
        self.common_test(dir, None, None)
        self.checkfiles(dir)
    def test_home_abs(self):
        dir = os.path.realpath('.') + os.sep + 'subdir'
        os.mkdir(dir)
        self.common_test(dir, None, None)
        self.checkfiles(dir)
    def test_home_and_env(self):
        # The explicit home argument wins over WIREDTIGER_HOME.
        hdir = 'homedir'
        edir = 'envdir'
        os.mkdir(hdir)
        os.mkdir(edir)
        self.common_test(hdir, edir, None)
        self.checkfiles(hdir)
        self.checknofiles(edir)
    def test_home_and_env_conf(self):
        # If homedir is set, the environment is ignored.
        hdir = 'homedir'
        edir = 'envdir'
        os.mkdir(hdir)
        os.mkdir(edir)
        self.common_test(hdir, edir, None)
        self.checkfiles(hdir)
        self.checknofiles(edir)
    def test_home_and_missing_env(self):
        # If homedir is set, it is used no matter what.
        hdir = 'homedir'
        os.mkdir(hdir)
        self.common_test(hdir, None, None)
        self.checkfiles(hdir)
    def test_env_conf(self):
        edir = 'envdir'
        os.mkdir(edir)
        self.common_test(None, edir, None)
        self.checkfiles(edir)
    def test_env_conf_without_env_var(self):
        # No env var set, so the current directory should be used.
        self.common_test(None, None, None)
        self.checkfiles(".")
    def test_home_does_not_exist(self):
        dir = 'nondir'
        self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
            lambda: self.wiredtiger_open(dir, 'create'),
            '/(No such file or directory|The system cannot find the path specified)/')
    def test_home_not_writeable(self):
        if os.name == "nt":
            self.skipTest('Unix specific test skipped on Windows')
        dir = 'subdir'
        os.mkdir(dir)
        os.chmod(dir, 0o555)
        self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
            lambda: self.wiredtiger_open(dir, 'create'),
            '/Permission denied/')
wttest.run()
| true | true |
f731ca4684b44d80d310ce6e1649c542bdaf125d | 1,926 | py | Python | samples/vsphere/vcenter/certificatemanagement/trusted_root_chains_delete.py | mariolenz/vsphere-automation-sdk-python | 22d8a834f08216d36c8bfdbff98a132b38b8ef82 | [
"MIT"
] | null | null | null | samples/vsphere/vcenter/certificatemanagement/trusted_root_chains_delete.py | mariolenz/vsphere-automation-sdk-python | 22d8a834f08216d36c8bfdbff98a132b38b8ef82 | [
"MIT"
] | null | null | null | samples/vsphere/vcenter/certificatemanagement/trusted_root_chains_delete.py | mariolenz/vsphere-automation-sdk-python | 22d8a834f08216d36c8bfdbff98a132b38b8ef82 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
* *******************************************************
* Copyright (c) VMware, Inc. 2020. All Rights Reserved.
* SPDX-License-Identifier: MIT
* *******************************************************
*
* DISCLAIMER. THIS PROGRAM IS PROVIDED TO YOU "AS IS" WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, WHETHER ORAL OR WRITTEN,
* EXPRESS OR IMPLIED. THE AUTHOR SPECIFICALLY DISCLAIMS ANY IMPLIED
* WARRANTIES OR CONDITIONS OF MERCHANTABILITY, SATISFACTORY QUALITY,
* NON-INFRINGEMENT AND FITNESS FOR A PARTICULAR PURPOSE.
"""
__author__ = 'VMware, Inc.'
__vcenter_version__ = '7.0+'
import argparse
from vmware.vapi.vsphere.client import create_vsphere_client
import requests
from com.vmware.vcenter.certificate_management.vcenter_client import TrustedRootChains
from samples.vsphere.common import (sample_cli, sample_util)
"""
Description: Demonstrates the deletion of the TRUSTED ROOT CHAIN corresponding to the provided alias
Sample Prerequisites:
- The user invoking the API should have the CertificateManagement.Manage or the
CertificateManagement.Administer privilege
"""
parser = sample_cli.build_arg_parser()
parser.add_argument('--certalias',
required=True,
help='The alias for the certificate chain to be deleted from vCenter.')
args = sample_util.process_cli_args(parser.parse_args())
session = requests.session()
session.verify = False if args.skipverification else True
# Login to vCenter
vsphere_client = create_vsphere_client(server=args.server,
username=args.username,
password=args.password,
session=session)
cert_alias = args.certalias
print('Deleting the certificate chain corresponding to the alias ' + cert_alias)
vsphere_client.vcenter.certificate_management.vcenter.TrustedRootChains.delete(cert_alias)
| 35.018182 | 100 | 0.692627 |
__author__ = 'VMware, Inc.'
__vcenter_version__ = '7.0+'
import argparse
from vmware.vapi.vsphere.client import create_vsphere_client
import requests
from com.vmware.vcenter.certificate_management.vcenter_client import TrustedRootChains
from samples.vsphere.common import (sample_cli, sample_util)
# Build the standard sample CLI and add the sample-specific alias argument.
parser = sample_cli.build_arg_parser()
parser.add_argument('--certalias',
                    required=True,
                    help='The alias for the certificate chain to be deleted from vCenter.')
args = sample_util.process_cli_args(parser.parse_args())
# Honor --skipverification by disabling TLS certificate checks.
session = requests.session()
session.verify = False if args.skipverification else True
# Log in to vCenter with the parsed credentials.
vsphere_client = create_vsphere_client(server=args.server,
                                       username=args.username,
                                       password=args.password,
                                       session=session)
cert_alias = args.certalias
print('Deleting the certificate chain corresponding to the alias ' + cert_alias)
vsphere_client.vcenter.certificate_management.vcenter.TrustedRootChains.delete(cert_alias)
| true | true |
f731ca59ce1647a04a57816eab1d646df4920770 | 744 | py | Python | setup.py | nthparty/fe25519 | 4badb675c7118f8e116f20d21c055e7872df0593 | [
"MIT"
] | null | null | null | setup.py | nthparty/fe25519 | 4badb675c7118f8e116f20d21c055e7872df0593 | [
"MIT"
] | null | null | null | setup.py | nthparty/fe25519 | 4badb675c7118f8e116f20d21c055e7872df0593 | [
"MIT"
] | 1 | 2020-05-20T21:56:51.000Z | 2020-05-20T21:56:51.000Z | from setuptools import setup
# Read the long description from the README, stripping the Sphinx-only
# table-of-contents include directive that PyPI cannot render.  Open with an
# explicit encoding so the build does not depend on the platform's locale
# default (which can mis-decode non-ASCII characters in the README).
with open("README.rst", "r", encoding="utf-8") as fh:
    long_description = fh.read().replace(".. include:: toc.rst\n\n", "")
# The lines below are parsed by `docs/conf.py`.
name = "fe25519"
version = "1.2.0"
setup(
    name=name,
    version=version,
    packages=[name,],
    install_requires=[
        "parts~=1.3",
        "bitlist~=0.7",
        "fountains~=1.3"
    ],
    license="MIT",
    url="https://github.com/nthparty/fe25519",
    author="Andrei Lapets",
    author_email="a@lapets.io",
    description="Pure Python data structure for working with Ed25519 "+\
                "(and Ristretto) field elements and operations.",
    long_description=long_description,
    long_description_content_type="text/x-rst",
)
| 26.571429 | 72 | 0.633065 | from setuptools import setup
with open("README.rst", "r") as fh:
long_description = fh.read().replace(".. include:: toc.rst\n\n", "")
name = "fe25519"
version = "1.2.0"
setup(
name=name,
version=version,
packages=[name,],
install_requires=[
"parts~=1.3",
"bitlist~=0.7",
"fountains~=1.3"
],
license="MIT",
url="https://github.com/nthparty/fe25519",
author="Andrei Lapets",
author_email="a@lapets.io",
description="Pure Python data structure for working with Ed25519 "+\
"(and Ristretto) field elements and operations.",
long_description=long_description,
long_description_content_type="text/x-rst",
)
| true | true |
f731caeaa20126dfbac6f2f856ed43cf1d1b4fd9 | 3,511 | py | Python | utest/api/test_exposed_api.py | rdagum/robotframework | b7069d505374e9f09a140ed5a9727d2a40716446 | [
"ECL-2.0",
"Apache-2.0"
] | 7,073 | 2015-01-01T17:19:16.000Z | 2022-03-31T22:01:29.000Z | utest/api/test_exposed_api.py | imust6226/robotframework | 08c56fef2ebc64d682c7f99acd77c480d8d0e028 | [
"ECL-2.0",
"Apache-2.0"
] | 2,412 | 2015-01-02T09:29:05.000Z | 2022-03-31T13:10:46.000Z | utest/api/test_exposed_api.py | rticau/robotframework | 33ee46dfacd5173c0a38d89c1a60abf6a747c8c0 | [
"ECL-2.0",
"Apache-2.0"
] | 2,298 | 2015-01-03T02:47:15.000Z | 2022-03-31T02:00:16.000Z | import unittest
from os.path import join
from robot import api, model, parsing, reporting, result, running
from robot.api import parsing as api_parsing
from robot.utils.asserts import assert_equal, assert_true
class TestExposedApi(unittest.TestCase):
    """Verify that names exposed via ``robot.api`` are aliases of the real
    implementations in the internal packages, so the public API cannot
    silently drift from the internals.
    """
    def test_execution_result(self):
        assert_equal(api.ExecutionResult, result.ExecutionResult)
    def test_test_suite(self):
        assert_equal(api.TestSuite, running.TestSuite)
    def test_result_writer(self):
        assert_equal(api.ResultWriter, reporting.ResultWriter)
    def test_visitors(self):
        assert_equal(api.SuiteVisitor, model.SuiteVisitor)
        assert_equal(api.ResultVisitor, result.ResultVisitor)
    def test_deprecated_parsing(self):
        # Parsing entry points exposed directly via robot.api (the method name
        # marks them deprecated) must still alias the real implementations.
        assert_equal(api.get_model, parsing.get_model)
        assert_equal(api.get_resource_model, parsing.get_resource_model)
        assert_equal(api.get_tokens, parsing.get_tokens)
        assert_equal(api.get_resource_tokens, parsing.get_resource_tokens)
        assert_equal(api.Token, parsing.Token)
    def test_parsing_getters(self):
        assert_equal(api_parsing.get_model, parsing.get_model)
        assert_equal(api_parsing.get_resource_model, parsing.get_resource_model)
        assert_equal(api_parsing.get_tokens, parsing.get_tokens)
        assert_equal(api_parsing.get_resource_tokens, parsing.get_resource_tokens)
    def test_parsing_token(self):
        assert_equal(api_parsing.Token, parsing.Token)
    def test_parsing_model_statements(self):
        # Every concrete statement class must be re-exported; the abstract
        # Statement base class must not be.
        for cls in parsing.model.Statement._statement_handlers.values():
            assert_equal(getattr(api_parsing, cls.__name__), cls)
        assert_true(not hasattr(api_parsing, 'Statement'))
    def test_parsing_model_blocks(self):
        for name in ('File', 'SettingSection', 'VariableSection', 'TestCaseSection',
                     'KeywordSection', 'CommentSection', 'TestCase', 'Keyword', 'For',
                     'If'):
            assert_equal(getattr(api_parsing, name), getattr(parsing.model, name))
        assert_true(not hasattr(api_parsing, 'Block'))
    def test_parsing_visitors(self):
        assert_equal(api_parsing.ModelVisitor, parsing.ModelVisitor)
        assert_equal(api_parsing.ModelTransformer, parsing.ModelTransformer)
class TestModelObjects(unittest.TestCase):
    """Sanity checks for model objects that are part of the public API.

    These classes are only seldom needed directly and thus are not exposed
    via the ``robot.api`` package; the tests merely verify they have not
    been removed accidentally.
    """

    def test_running_objects(self):
        for attribute in ('TestSuite', 'TestCase', 'Keyword'):
            assert_true(getattr(running, attribute))

    def test_result_objects(self):
        for attribute in ('TestSuite', 'TestCase', 'Keyword'):
            assert_true(getattr(result, attribute))
class TestTestSuiteBuilder(unittest.TestCase):
    """Build executable suites from data files via the public builder API."""
    # This list has paths like `/path/file.py/../file.robot` on purpose.
    # They don't work unless normalized.
    sources = [join(__file__, '../../../atest/testdata/misc', name)
               for name in ('pass_and_fail.robot', 'normal.robot')]
    def test_create_with_datasources_as_list(self):
        suite = api.TestSuiteBuilder().build(*self.sources)
        assert_equal(suite.name, 'Pass And Fail & Normal')
    def test_create_with_datasource_as_string(self):
        suite = api.TestSuiteBuilder().build(self.sources[0])
        assert_equal(suite.name, 'Pass And Fail')
# Standard boilerplate: run this test module directly with Python.
if __name__ == '__main__':
    unittest.main()
| 37.351064 | 86 | 0.720308 | import unittest
from os.path import join
from robot import api, model, parsing, reporting, result, running
from robot.api import parsing as api_parsing
from robot.utils.asserts import assert_equal, assert_true
class TestExposedApi(unittest.TestCase):
    """Verify that names exposed via ``robot.api`` are aliases of the real
    implementations in the internal packages, so the public API cannot
    silently drift from the internals.
    """
    def test_execution_result(self):
        assert_equal(api.ExecutionResult, result.ExecutionResult)
    def test_test_suite(self):
        assert_equal(api.TestSuite, running.TestSuite)
    def test_result_writer(self):
        assert_equal(api.ResultWriter, reporting.ResultWriter)
    def test_visitors(self):
        assert_equal(api.SuiteVisitor, model.SuiteVisitor)
        assert_equal(api.ResultVisitor, result.ResultVisitor)
    def test_deprecated_parsing(self):
        # Parsing entry points exposed directly via robot.api (the method name
        # marks them deprecated) must still alias the real implementations.
        assert_equal(api.get_model, parsing.get_model)
        assert_equal(api.get_resource_model, parsing.get_resource_model)
        assert_equal(api.get_tokens, parsing.get_tokens)
        assert_equal(api.get_resource_tokens, parsing.get_resource_tokens)
        assert_equal(api.Token, parsing.Token)
    def test_parsing_getters(self):
        assert_equal(api_parsing.get_model, parsing.get_model)
        assert_equal(api_parsing.get_resource_model, parsing.get_resource_model)
        assert_equal(api_parsing.get_tokens, parsing.get_tokens)
        assert_equal(api_parsing.get_resource_tokens, parsing.get_resource_tokens)
    def test_parsing_token(self):
        assert_equal(api_parsing.Token, parsing.Token)
    def test_parsing_model_statements(self):
        # Every concrete statement class must be re-exported; the abstract
        # Statement base class must not be.
        for cls in parsing.model.Statement._statement_handlers.values():
            assert_equal(getattr(api_parsing, cls.__name__), cls)
        assert_true(not hasattr(api_parsing, 'Statement'))
    def test_parsing_model_blocks(self):
        for name in ('File', 'SettingSection', 'VariableSection', 'TestCaseSection',
                     'KeywordSection', 'CommentSection', 'TestCase', 'Keyword', 'For',
                     'If'):
            assert_equal(getattr(api_parsing, name), getattr(parsing.model, name))
        assert_true(not hasattr(api_parsing, 'Block'))
    def test_parsing_visitors(self):
        assert_equal(api_parsing.ModelVisitor, parsing.ModelVisitor)
        assert_equal(api_parsing.ModelTransformer, parsing.ModelTransformer)
class TestModelObjects(unittest.TestCase):
    """Validate that public-API model objects still exist.

    These objects are seldom needed directly and are not exposed via the
    ``robot.api`` package; the tests guard against accidental removal.
    """
    def test_running_objects(self):
        assert_true(running.TestSuite)
        assert_true(running.TestCase)
        assert_true(running.Keyword)
    def test_result_objects(self):
        assert_true(result.TestSuite)
        assert_true(result.TestCase)
        assert_true(result.Keyword)
class TestTestSuiteBuilder(unittest.TestCase):
    """Build executable suites from data files via the public builder API."""
    # Paths look like `/path/file.py/../file.robot` on purpose: they only
    # work if the builder normalizes them.
    sources = [join(__file__, '../../../atest/testdata/misc', name)
               for name in ('pass_and_fail.robot', 'normal.robot')]
    def test_create_with_datasources_as_list(self):
        suite = api.TestSuiteBuilder().build(*self.sources)
        assert_equal(suite.name, 'Pass And Fail & Normal')
    def test_create_with_datasource_as_string(self):
        suite = api.TestSuiteBuilder().build(self.sources[0])
        assert_equal(suite.name, 'Pass And Fail')
# Standard boilerplate: run this test module directly with Python.
if __name__ == '__main__':
    unittest.main()
| true | true |
f731caf89a847b286acea5dd16d4a275ed119bd1 | 764 | py | Python | app/db/database.py | Huangkai1008/market-admin | c149f425b640827526b65343b4bceecd528418bf | [
"MIT"
] | 6 | 2020-01-30T10:45:22.000Z | 2021-05-26T02:51:43.000Z | app/db/database.py | Huangkai1008/market-admin | c149f425b640827526b65343b4bceecd528418bf | [
"MIT"
] | null | null | null | app/db/database.py | Huangkai1008/market-admin | c149f425b640827526b65343b4bceecd528418bf | [
"MIT"
] | null | null | null | from tortoise import Tortoise
from loguru import logger
from app.core.config import DB_TYPE, DB_USER, DB_PASSWORD, DB_HOST, DB_PORT, DATABASE
# Assemble the connection URL in the form "scheme://user:password@host:port/db".
DB_URL = f'{DB_TYPE}://{DB_USER}:{DB_PASSWORD}@{DB_HOST}:{DB_PORT}/{DATABASE}'
async def init():
    """Initialize the Tortoise ORM connection and generate schemas.

    Connects to the database described by ``DB_URL``, registers the model
    modules under the ``db`` app label, and creates any missing tables.
    (Docstring translated from the original Chinese "初始化连接".)
    """
    # These messages have no placeholders, so the previous f-string
    # prefixes were extraneous (flake8 F541) and are dropped.
    logger.info('Connecting to database')
    await Tortoise.init(
        db_url=DB_URL,
        modules={
            'db': ['app.db.category', 'app.db.brand', 'app.db.store', 'app.db.product']
        },
    )
    logger.info('Connection established')
    await Tortoise.generate_schemas()
    logger.info('Schema generated')
async def disconnect():
    """Close all open database connections.

    (Docstring translated from the original Chinese "停止连接".)
    """
    logger.info('Closing connection to database')
    await Tortoise.close_connections()
    logger.info('Connection closed')
| 22.470588 | 87 | 0.660995 | from tortoise import Tortoise
from loguru import logger
from app.core.config import DB_TYPE, DB_USER, DB_PASSWORD, DB_HOST, DB_PORT, DATABASE
# Assemble the connection URL in the form "scheme://user:password@host:port/db".
DB_URL = f'{DB_TYPE}://{DB_USER}:{DB_PASSWORD}@{DB_HOST}:{DB_PORT}/{DATABASE}'
async def init():
    """Initialize the Tortoise ORM connection and generate schemas.

    Connects to the database described by ``DB_URL``, registers the model
    modules under the ``db`` app label, and creates any missing tables.
    """
    # These messages have no placeholders, so the previous f-string
    # prefixes were extraneous (flake8 F541) and are dropped.
    logger.info('Connecting to database')
    await Tortoise.init(
        db_url=DB_URL,
        modules={
            'db': ['app.db.category', 'app.db.brand', 'app.db.store', 'app.db.product']
        },
    )
    logger.info('Connection established')
    await Tortoise.generate_schemas()
    logger.info('Schema generated')
async def disconnect():
    """Close all open database connections."""
    logger.info('Closing connection to database')
    await Tortoise.close_connections()
    logger.info('Connection closed')
| true | true |
f731cb10fc8ac86aeacc7f0d065059f3fcf77ebf | 8,623 | py | Python | test/geometry/test_homography.py | shaunster0/kornia | 71acf455ee36f2050b7be5ea993b6db773f502eb | [
"ECL-2.0",
"Apache-2.0"
] | 51 | 2019-10-11T18:47:30.000Z | 2021-05-03T06:42:37.000Z | test/geometry/test_homography.py | shaunster0/kornia | 71acf455ee36f2050b7be5ea993b6db773f502eb | [
"ECL-2.0",
"Apache-2.0"
] | 9 | 2022-01-25T00:28:05.000Z | 2022-03-20T09:14:39.000Z | test/geometry/test_homography.py | shaunster0/kornia | 71acf455ee36f2050b7be5ea993b6db773f502eb | [
"ECL-2.0",
"Apache-2.0"
] | 7 | 2019-10-11T19:59:05.000Z | 2020-07-10T02:28:52.000Z | import random
import pytest
import torch
from torch.autograd import gradcheck
import kornia
from kornia.geometry.homography import find_homography_dlt, find_homography_dlt_iterated
from kornia.testing import assert_close
class TestFindHomographyDLT:
    """Tests for ``find_homography_dlt`` (weighted DLT homography fitting).

    NOTE(review): ``device`` and ``dtype`` are pytest fixtures, presumably
    supplied by the kornia test conftest — confirm if this class is moved.
    """
    def test_smoke(self, device, dtype):
        points1 = torch.rand(1, 4, 2, device=device, dtype=dtype)
        points2 = torch.rand(1, 4, 2, device=device, dtype=dtype)
        weights = torch.ones(1, 4, device=device, dtype=dtype)
        H = find_homography_dlt(points1, points2, weights)
        assert H.shape == (1, 3, 3)
    @pytest.mark.parametrize("batch_size, num_points", [(1, 4), (2, 5), (3, 6)])
    def test_shape(self, batch_size, num_points, device, dtype):
        B, N = batch_size, num_points
        points1 = torch.rand(B, N, 2, device=device, dtype=dtype)
        points2 = torch.rand(B, N, 2, device=device, dtype=dtype)
        weights = torch.ones(B, N, device=device, dtype=dtype)
        H = find_homography_dlt(points1, points2, weights)
        assert H.shape == (B, 3, 3)
    @pytest.mark.parametrize("batch_size, num_points", [(1, 4), (2, 5), (3, 6)])
    def test_shape_noweights(self, batch_size, num_points, device, dtype):
        B, N = batch_size, num_points
        points1 = torch.rand(B, N, 2, device=device, dtype=dtype)
        points2 = torch.rand(B, N, 2, device=device, dtype=dtype)
        H = find_homography_dlt(points1, points2, None)
        assert H.shape == (B, 3, 3)
    @pytest.mark.parametrize("batch_size, num_points", [(1, 4), (2, 5), (3, 6)])
    def test_points_noweights(self, batch_size, num_points, device, dtype):
        # Uniform weights and omitted weights must give (approximately) the same H.
        B, N = batch_size, num_points
        points1 = torch.rand(B, N, 2, device=device, dtype=dtype)
        points2 = torch.rand(B, N, 2, device=device, dtype=dtype)
        weights = torch.ones(B, N, device=device, dtype=dtype)
        H_noweights = find_homography_dlt(points1, points2, None)
        H_withweights = find_homography_dlt(points1, points2, weights)
        assert H_noweights.shape == (B, 3, 3) and H_withweights.shape == (B, 3, 3)
        assert_close(H_noweights, H_withweights, rtol=1e-3, atol=1e-4)
    @pytest.mark.parametrize("batch_size", [1, 2, 5])
    def test_clean_points(self, batch_size, device, dtype):
        # generate input data
        points_src = torch.rand(batch_size, 10, 2, device=device, dtype=dtype)
        # Random ground-truth homography, normalized so H[2, 2] == 1.
        H = kornia.eye_like(3, points_src)
        H = H * 0.3 * torch.rand_like(H)
        H = H / H[:, 2:3, 2:3]
        points_dst = kornia.transform_points(H, points_src)
        weights = torch.ones(batch_size, 10, device=device, dtype=dtype)
        # compute transform from source to target
        dst_homo_src = find_homography_dlt(points_src, points_dst, weights)
        assert_close(kornia.transform_points(dst_homo_src, points_src), points_dst, rtol=1e-3, atol=1e-4)
    @pytest.mark.grad
    @pytest.mark.skipif(torch.__version__ < '1.7', reason="pytorch bug of incopatible types: #33546 fixed in v1.7")
    def test_gradcheck(self, device):
        # Save initial seed
        initial_seed = torch.random.initial_seed()
        max_number_of_checks = 10
        # Test gradients for a max_number_of_checks times
        current_seed = initial_seed
        # Retry with fresh seeds: gradcheck on random points can be
        # ill-conditioned, so only a failure on the final attempt is fatal.
        for i in range(max_number_of_checks):
            torch.manual_seed(current_seed)
            points_src = torch.rand(1, 10, 2, device=device, dtype=torch.float64, requires_grad=True)
            points_dst = torch.rand_like(points_src)
            weights = torch.ones_like(points_src)[..., 0]
            try:
                gradcheck(
                    find_homography_dlt, (points_src, points_dst, weights), rtol=1e-6, atol=1e-6, raise_exception=True
                )
            # Gradcheck failed
            except RuntimeError:
                # All iterations failed
                if i == max_number_of_checks - 1:
                    assert gradcheck(
                        find_homography_dlt,
                        (points_src, points_dst, weights),
                        rtol=1e-6,
                        atol=1e-6,
                        raise_exception=True,
                    )
                # Next iteration
                else:
                    current_seed = random.randrange(0xFFFFFFFFFFFFFFFF)
                    continue
            # Gradcheck succeed
            torch.manual_seed(initial_seed)
            return
class TestFindHomographyDLTIter:
    """Tests for ``find_homography_dlt_iterated`` (iteratively reweighted DLT).

    NOTE(review): ``device`` and ``dtype`` are pytest fixtures, presumably
    supplied by the kornia test conftest — confirm if this class is moved.
    """
    def test_smoke(self, device, dtype):
        points1 = torch.rand(1, 4, 2, device=device, dtype=dtype)
        points2 = torch.rand(1, 4, 2, device=device, dtype=dtype)
        weights = torch.ones(1, 4, device=device, dtype=dtype)
        H = find_homography_dlt_iterated(points1, points2, weights, 5)
        assert H.shape == (1, 3, 3)
    @pytest.mark.parametrize("batch_size, num_points", [(1, 4), (2, 5), (3, 6)])
    def test_shape(self, batch_size, num_points, device, dtype):
        B, N = batch_size, num_points
        points1 = torch.rand(B, N, 2, device=device, dtype=dtype)
        points2 = torch.rand(B, N, 2, device=device, dtype=dtype)
        weights = torch.ones(B, N, device=device, dtype=dtype)
        H = find_homography_dlt_iterated(points1, points2, weights, 5)
        assert H.shape == (B, 3, 3)
    @pytest.mark.parametrize("batch_size", [1, 2])
    def test_clean_points(self, batch_size, device, dtype):
        # generate input data
        points_src = torch.rand(batch_size, 10, 2, device=device, dtype=dtype)
        # Random ground-truth homography, normalized so H[2, 2] == 1.
        H = kornia.eye_like(3, points_src)
        H = H * 0.3 * torch.rand_like(H)
        H = H / H[:, 2:3, 2:3]
        points_dst = kornia.transform_points(H, points_src)
        weights = torch.ones(batch_size, 10, device=device, dtype=dtype)
        # compute transform from source to target
        dst_homo_src = find_homography_dlt_iterated(points_src, points_dst, weights, 10)
        assert_close(kornia.transform_points(dst_homo_src, points_src), points_dst, rtol=1e-3, atol=1e-4)
    @pytest.mark.grad
    @pytest.mark.skipif(torch.__version__ < '1.7', reason="pytorch bug of incopatible types: #33546 fixed in v1.7")
    def test_gradcheck(self, device):
        # Save initial seed
        initial_seed = torch.random.initial_seed()
        max_number_of_checks = 10
        # Test gradients for a max_number_of_checks times
        current_seed = initial_seed
        # Retry with fresh seeds: gradcheck on random points can be
        # ill-conditioned, so only a failure on the final attempt is fatal.
        for i in range(max_number_of_checks):
            torch.manual_seed(current_seed)
            points_src = torch.rand(1, 10, 2, device=device, dtype=torch.float64, requires_grad=True)
            points_dst = torch.rand_like(points_src)
            weights = torch.ones_like(points_src)[..., 0]
            try:
                gradcheck(
                    find_homography_dlt_iterated,
                    (points_src, points_dst, weights),
                    rtol=1e-6,
                    atol=1e-6,
                    raise_exception=True,
                )
            # Gradcheck failed
            except RuntimeError:
                # All iterations failed
                if i == max_number_of_checks - 1:
                    assert gradcheck(
                        find_homography_dlt_iterated,
                        (points_src, points_dst, weights),
                        rtol=1e-6,
                        atol=1e-6,
                        raise_exception=True,
                    )
                # Next iteration
                else:
                    current_seed = random.randrange(0xFFFFFFFFFFFFFFFF)
                    continue
            # Gradcheck succeed
            torch.manual_seed(initial_seed)
            return
    @pytest.mark.grad
    @pytest.mark.parametrize("batch_size", [1, 2])
    def test_dirty_points_and_gradcheck(self, batch_size, device, dtype):
        # generate input data
        # NOTE(review): this first points_src is immediately overwritten
        # below — looks like a dead store; confirm before removing.
        points_src = torch.rand(batch_size, 10, 2, device=device, dtype=dtype)
        H = kornia.eye_like(3, points_src)
        H = H * 0.3 * torch.rand_like(H)
        H = H / H[:, 2:3, 2:3]
        points_src = 100.0 * torch.rand(batch_size, 20, 2, device=device, dtype=dtype)
        points_dst = kornia.transform_points(H, points_src)
        # making last point an outlier
        points_dst[:, -1, :] += 20
        weights = torch.ones(batch_size, 20, device=device, dtype=dtype)
        # compute transform from source to target; the outlier point is
        # excluded from the accuracy check below.
        dst_homo_src = find_homography_dlt_iterated(points_src, points_dst, weights, 0.5, 10)
        assert_close(
            kornia.transform_points(dst_homo_src, points_src[:, :-1]), points_dst[:, :-1], rtol=1e-3, atol=1e-3
        )
| 41.859223 | 118 | 0.605938 | import random
import pytest
import torch
from torch.autograd import gradcheck
import kornia
from kornia.geometry.homography import find_homography_dlt, find_homography_dlt_iterated
from kornia.testing import assert_close
class TestFindHomographyDLT:
    def test_smoke(self, device, dtype):
        """A single batch of four correspondences yields one 3x3 homography."""
        points1 = torch.rand(1, 4, 2, device=device, dtype=dtype)
        points2 = torch.rand(1, 4, 2, device=device, dtype=dtype)
        weights = torch.ones(1, 4, device=device, dtype=dtype)
        H = find_homography_dlt(points1, points2, weights)
        assert H.shape == (1, 3, 3)
@pytest.mark.parametrize("batch_size, num_points", [(1, 4), (2, 5), (3, 6)])
def test_shape(self, batch_size, num_points, device, dtype):
B, N = batch_size, num_points
points1 = torch.rand(B, N, 2, device=device, dtype=dtype)
points2 = torch.rand(B, N, 2, device=device, dtype=dtype)
weights = torch.ones(B, N, device=device, dtype=dtype)
H = find_homography_dlt(points1, points2, weights)
assert H.shape == (B, 3, 3)
@pytest.mark.parametrize("batch_size, num_points", [(1, 4), (2, 5), (3, 6)])
def test_shape_noweights(self, batch_size, num_points, device, dtype):
B, N = batch_size, num_points
points1 = torch.rand(B, N, 2, device=device, dtype=dtype)
points2 = torch.rand(B, N, 2, device=device, dtype=dtype)
H = find_homography_dlt(points1, points2, None)
assert H.shape == (B, 3, 3)
@pytest.mark.parametrize("batch_size, num_points", [(1, 4), (2, 5), (3, 6)])
def test_points_noweights(self, batch_size, num_points, device, dtype):
B, N = batch_size, num_points
points1 = torch.rand(B, N, 2, device=device, dtype=dtype)
points2 = torch.rand(B, N, 2, device=device, dtype=dtype)
weights = torch.ones(B, N, device=device, dtype=dtype)
H_noweights = find_homography_dlt(points1, points2, None)
H_withweights = find_homography_dlt(points1, points2, weights)
assert H_noweights.shape == (B, 3, 3) and H_withweights.shape == (B, 3, 3)
assert_close(H_noweights, H_withweights, rtol=1e-3, atol=1e-4)
@pytest.mark.parametrize("batch_size", [1, 2, 5])
def test_clean_points(self, batch_size, device, dtype):
points_src = torch.rand(batch_size, 10, 2, device=device, dtype=dtype)
H = kornia.eye_like(3, points_src)
H = H * 0.3 * torch.rand_like(H)
H = H / H[:, 2:3, 2:3]
points_dst = kornia.transform_points(H, points_src)
weights = torch.ones(batch_size, 10, device=device, dtype=dtype)
dst_homo_src = find_homography_dlt(points_src, points_dst, weights)
assert_close(kornia.transform_points(dst_homo_src, points_src), points_dst, rtol=1e-3, atol=1e-4)
@pytest.mark.grad
@pytest.mark.skipif(torch.__version__ < '1.7', reason="pytorch bug of incopatible types: #33546 fixed in v1.7")
def test_gradcheck(self, device):
initial_seed = torch.random.initial_seed()
max_number_of_checks = 10
current_seed = initial_seed
for i in range(max_number_of_checks):
torch.manual_seed(current_seed)
points_src = torch.rand(1, 10, 2, device=device, dtype=torch.float64, requires_grad=True)
points_dst = torch.rand_like(points_src)
weights = torch.ones_like(points_src)[..., 0]
try:
gradcheck(
find_homography_dlt, (points_src, points_dst, weights), rtol=1e-6, atol=1e-6, raise_exception=True
)
except RuntimeError:
if i == max_number_of_checks - 1:
assert gradcheck(
find_homography_dlt,
(points_src, points_dst, weights),
rtol=1e-6,
atol=1e-6,
raise_exception=True,
)
else:
current_seed = random.randrange(0xFFFFFFFFFFFFFFFF)
continue
torch.manual_seed(initial_seed)
return
class TestFindHomographyDLTIter:
def test_smoke(self, device, dtype):
points1 = torch.rand(1, 4, 2, device=device, dtype=dtype)
points2 = torch.rand(1, 4, 2, device=device, dtype=dtype)
weights = torch.ones(1, 4, device=device, dtype=dtype)
H = find_homography_dlt_iterated(points1, points2, weights, 5)
assert H.shape == (1, 3, 3)
@pytest.mark.parametrize("batch_size, num_points", [(1, 4), (2, 5), (3, 6)])
def test_shape(self, batch_size, num_points, device, dtype):
B, N = batch_size, num_points
points1 = torch.rand(B, N, 2, device=device, dtype=dtype)
points2 = torch.rand(B, N, 2, device=device, dtype=dtype)
weights = torch.ones(B, N, device=device, dtype=dtype)
H = find_homography_dlt_iterated(points1, points2, weights, 5)
assert H.shape == (B, 3, 3)
@pytest.mark.parametrize("batch_size", [1, 2])
def test_clean_points(self, batch_size, device, dtype):
points_src = torch.rand(batch_size, 10, 2, device=device, dtype=dtype)
H = kornia.eye_like(3, points_src)
H = H * 0.3 * torch.rand_like(H)
H = H / H[:, 2:3, 2:3]
points_dst = kornia.transform_points(H, points_src)
weights = torch.ones(batch_size, 10, device=device, dtype=dtype)
dst_homo_src = find_homography_dlt_iterated(points_src, points_dst, weights, 10)
assert_close(kornia.transform_points(dst_homo_src, points_src), points_dst, rtol=1e-3, atol=1e-4)
@pytest.mark.grad
@pytest.mark.skipif(torch.__version__ < '1.7', reason="pytorch bug of incopatible types: #33546 fixed in v1.7")
def test_gradcheck(self, device):
initial_seed = torch.random.initial_seed()
max_number_of_checks = 10
current_seed = initial_seed
for i in range(max_number_of_checks):
torch.manual_seed(current_seed)
points_src = torch.rand(1, 10, 2, device=device, dtype=torch.float64, requires_grad=True)
points_dst = torch.rand_like(points_src)
weights = torch.ones_like(points_src)[..., 0]
try:
gradcheck(
find_homography_dlt_iterated,
(points_src, points_dst, weights),
rtol=1e-6,
atol=1e-6,
raise_exception=True,
)
except RuntimeError:
if i == max_number_of_checks - 1:
assert gradcheck(
find_homography_dlt_iterated,
(points_src, points_dst, weights),
rtol=1e-6,
atol=1e-6,
raise_exception=True,
)
else:
current_seed = random.randrange(0xFFFFFFFFFFFFFFFF)
continue
torch.manual_seed(initial_seed)
return
@pytest.mark.grad
@pytest.mark.parametrize("batch_size", [1, 2])
def test_dirty_points_and_gradcheck(self, batch_size, device, dtype):
points_src = torch.rand(batch_size, 10, 2, device=device, dtype=dtype)
H = kornia.eye_like(3, points_src)
H = H * 0.3 * torch.rand_like(H)
H = H / H[:, 2:3, 2:3]
points_src = 100.0 * torch.rand(batch_size, 20, 2, device=device, dtype=dtype)
points_dst = kornia.transform_points(H, points_src)
points_dst[:, -1, :] += 20
weights = torch.ones(batch_size, 20, device=device, dtype=dtype)
dst_homo_src = find_homography_dlt_iterated(points_src, points_dst, weights, 0.5, 10)
assert_close(
kornia.transform_points(dst_homo_src, points_src[:, :-1]), points_dst[:, :-1], rtol=1e-3, atol=1e-3
)
| true | true |
f731cba9a9650e31323b7b28e22fc1895735634d | 9,219 | py | Python | code/combine_sectors.py | jradavenport/helloTESS | 1bd4680640e7b92ae3b2eeba19cc63e8b834eead | [
"MIT"
] | null | null | null | code/combine_sectors.py | jradavenport/helloTESS | 1bd4680640e7b92ae3b2eeba19cc63e8b834eead | [
"MIT"
] | null | null | null | code/combine_sectors.py | jradavenport/helloTESS | 1bd4680640e7b92ae3b2eeba19cc63e8b834eead | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib
import os
from glob import glob
from matplotlib.colors import LogNorm
from scipy.optimize import curve_fit
from astropy.table import Table
import astropy.io.fits as fits
from astropy.stats import LombScargle, BoxLeastSquares
import exoplanet as xo
from stuff import FINDflare, EasyE
matplotlib.rcParams.update({'font.size':18})
matplotlib.rcParams.update({'font.family':'serif'})
ftype = '.pdf'
def RunSectors(tess_dir = '/Users/james/Desktop/tess/', run_dir = '/Users/james/Desktop/helloTESS/', clobber=True, Nsector=3):
'''
Do some simplier things on stars that are observed in mulitple sectors
should probably be combined with run_sector.py.... but oh well for now!
'''
sectors = ['sector001', 'sector002', 'sector003', 'sector004', 'sector005', 'sector006', 'sector007', 'sector008']
# just in case glob wants to re-order things, be sure grab them in Sector order
files = []
for k in range(len(sectors)):
files = files + glob(tess_dir + sectors[k] + '/*.fits', recursive=True)
# get the unique object IDs (NOT the simplest way, but matches the next step)
obj = pd.Series(files).str.split('-', expand=True).groupby(by=2).count().index
# get the count of unique object IDs
Nobj = pd.Series(files).str.split('-', expand=True).groupby(by=2).count()[0]
# for k in range(max(Nobj)):
# print(k+1, sum(Nobj > k))
# obj[0] # example Object ID (TIC #)
o5 = np.where((Nobj > Nsector))[0] # was named "o5" because originally wanted Over 5 observations. Now pick other N
print(str(len(o5)) + ' objects with Nobs > 3 Sectors')
for k in range(0, len(o5)):
print(k, obj[o5][k])
files_k = pd.Series(files)[np.where((pd.Series(files).str.split('-', expand=True)[2] == obj[o5][k]))[0]].values
rot_out_k = MultiSector(files_k, clobber=clobber)
if k==0:
rot_out = rot_out_k
else:
rot_out = pd.concat([rot_out, rot_out_k], ignore_index=True, sort=False)
rot_out.to_csv(run_dir + '/outputs/longerP_rot_out.csv')
return
def MultiSector(TICs, tess_dir = '/Users/james/Desktop/tess/',
run_dir = '/Users/james/Desktop/helloTESS/',
clobber=False):
'''
Run the basic set of tools on every light curve -> NOW FOR MULTI-SECTOR DATA
Produce a diagnostic plot for each light curve
'''
if not os.path.isdir(run_dir + 'figures/longerP'):
os.makedirs(run_dir + 'figures/longerP')
tbit = False
for k in range(len(TICs)):
tbl = -1
try:
tbl = Table.read(TICs[k], format='fits')
tbl['PDCSAP_FLUX'] = tbl['PDCSAP_FLUX'] - np.nanmedian(tbl['PDCSAP_FLUX'])
if tbit == False:
df_tbl = tbl.to_pandas()
tbit = True
else:
df_tbl = pd.concat([df_tbl, tbl.to_pandas()], ignore_index=True, sort=False)
except:
tbl = -1
print('bad file: ' + TICs[k])
df_tbl['PDCSAP_FLUX'] = df_tbl['PDCSAP_FLUX'] + np.nanmedian(df_tbl['SAP_FLUX'])
# make harsh quality cuts, and chop out a known bad window of time (might add more later)
AOK = (df_tbl['QUALITY'] == 0) & ((df_tbl['TIME'] < 1347) | (df_tbl['TIME'] > 1350))
# do a running median for a basic smooth
smo = df_tbl['PDCSAP_FLUX'][AOK].rolling(128, center=True).median().values
med = np.nanmedian(smo)
# make an output plot for every file
figname = run_dir + 'figures/longerP/' + TICs[0].split('-')[2] + '.jpeg'
makefig = ((not os.path.exists(figname)) | clobber)
if makefig:
plt.figure(figsize=(14,6))
plt.errorbar(df_tbl['TIME'][AOK], df_tbl['PDCSAP_FLUX'][AOK]/med, yerr=df_tbl['PDCSAP_FLUX_ERR'][AOK]/med,
linestyle=None, alpha=0.25, label='PDC_FLUX')
plt.plot(df_tbl['TIME'][AOK], smo/med, label='128pt MED', c='orange')
# Smed = np.nanmedian(df_tbl['SAP_FLUX'][AOK])
# plt.errorbar(df_tbl['TIME'][AOK], df_tbl['SAP_FLUX'][AOK]/Smed, yerr=df_tbl['SAP_FLUX_ERR'][AOK]/Smed,
# linestyle=None, alpha=0.25, label='SAP_FLUX')
# require at least 1000 good datapoints for analysis
if sum(AOK) > 1000:
# find OK points in the smoothed LC
SOK = np.isfinite(smo)
# Lomb Scargle
LS = LombScargle(df_tbl['TIME'][AOK][SOK], smo[SOK]/med, dy=df_tbl['PDCSAP_FLUX_ERR'][AOK][SOK]/med)
frequency, power = LS.autopower(minimum_frequency=1./40.,
maximum_frequency=1./0.1,
samples_per_peak=7)
best_frequency = frequency[np.argmax(power)]
per_out = 1./best_frequency
per_amp = np.nanmax(power)
per_med = np.nanmedian(power)
per_std = np.nanstd(smo[SOK]/med)
if np.nanmax(power) > 0.2:
LSmodel = LS.model(df_tbl['TIME'][AOK][SOK], best_frequency)
if makefig:
plt.plot(df_tbl['TIME'][AOK][SOK], LSmodel,
label='L-S P='+format(1./best_frequency, '6.3f')+'d, pk='+format(np.nanmax(power), '6.3f'),
c='green')
# ACF w/ Exoplanet package
acf = xo.autocorr_estimator(df_tbl['TIME'][AOK][SOK].values, smo[SOK]/med,
yerr=df_tbl['PDCSAP_FLUX_ERR'][AOK][SOK].values/med,
min_period=0.1, max_period=40, max_peaks=2)
ACF_1pk = -1
ACF_1dt = -1
if len(acf['peaks']) > 0:
ACF_1dt = acf['peaks'][0]['period']
ACF_1pk = acf['autocorr'][1][np.where((acf['autocorr'][0] == acf['peaks'][0]['period']))[0]][0]
if makefig:
plt.plot(df_tbl['TIME'][AOK][SOK],
np.nanstd(smo[SOK]/med) * ACF_1pk * np.sin(df_tbl['TIME'][AOK][SOK] / ACF_1dt * 2 * np.pi) + 1,
label = 'ACF=' + format(ACF_1dt, '6.3f') + 'd, pk=' + format(ACF_1pk, '6.3f'), lw=2,
alpha=0.7, c='FireBrick')
# here is where a simple Eclipse (EB) finder goes
EE = EasyE(smo[SOK]/med, df_tbl['PDCSAP_FLUX_ERR'][AOK][SOK]/med, N1=5, N2=3, N3=2)
EclFlg = 0
if np.size(EE) > 0:
EclFlg = 1
if makefig:
for j in range(len(EE[0])):
plt.scatter(df_tbl['TIME'][AOK][SOK][(EE[0][j]):(EE[1][j]+1)],
smo[SOK] [(EE[0][j]):(EE[1][j]+1)] / med,
color='k', marker='s', s=5, alpha=0.75, label='_nolegend_')
plt.scatter([],[], color='k', marker='s', s=5, alpha=0.75, label='Ecl?')
# add BLS
# bls = BoxLeastSquares(df_tbl['TIME'][AOK][SOK], smo[SOK]/med, dy=df_tbl['PDCSAP_FLUX_ERR'][AOK][SOK]/med)
# blsP = bls.autopower(0.1, method='fast', objective='snr')
# blsPer = blsP['period'][np.argmax(blsP['power'])]
# if ((4*np.nanstd(blsP['power']) + np.nanmedian(blsP['power']) < np.nanmax(blsP['power'])) &
# (np.nanmax(blsP['power']) > 50.) &
# (blsPer < 0.95 * np.nanmax(blsP['period']))
# ):
# blsPeriod = blsPer
# blsAmpl = np.nanmax(blsP['power'])
# plt.plot([],[], ' ', label='BLS='+format(blsPer, '6.3f')+'d')
if makefig:
plt.title(TICs[0].split('-')[2], fontsize=12)
plt.ylabel('Flux')
plt.xlabel('BJD - 2457000 (days)')
plt.legend(fontsize=10)
plt.savefig(figname, bbox_inches='tight', pad_inches=0.25, dpi=100)
plt.close()
# # write per-sector output files
# ALL_TIC = pd.Series(files_i).str.split('-', expand=True).iloc[:,-3].astype('int')
# flare_out = pd.DataFrame(data={'TIC':ALL_TIC[FL_id], 'i0':FL_t0, 'i1':FL_t1, 'med':FL_f0, 'peak':FL_f1})
# flare_out.to_csv(run_dir + sector + '_flare_out.csv')
rot_out = pd.DataFrame(data={'TIC':TICs[0].split('-')[2],
'per':per_out, 'Pamp':per_amp, 'Pmed':per_med, 'StdLC':per_std,
'acf_pk':ACF_1pk, 'acf_per':ACF_1dt, 'ecl_flg':EclFlg}, index=[0])
# 'bls_period':blsPeriod, 'bls_ampl':blsAmpl, )
# rot_out.to_csv(run_dir + sector + '_rot_out.csv')
return rot_out
if __name__ == "__main__":
'''
let this file be called from the terminal directly. e.g.:
$ python analysis.py
'''
RunSectors()
### junk code i probably dont need
# sect1 = glob(tess_dir + sectors[0] + '/*.fits', recursive=True)
# sect2 = glob(tess_dir + sectors[1] + '/*.fits', recursive=True)
# sect3 = glob(tess_dir + sectors[2] + '/*.fits', recursive=True)
# sect4 = glob(tess_dir + sectors[3] + '/*.fits', recursive=True)
# sect5 = glob(tess_dir + sectors[4] + '/*.fits', recursive=True)
# sect6 = glob(tess_dir + sectors[5] + '/*.fits', recursive=True)
#
# files = sect1 + sect2 + sect3 + sect4 + sect5 + sect6
# # make into an array for looping later!
# s_lens = [len(sect1), len(sect2), len(sect3), len(sect4), len(sect5), len(sect6)]
# print(s_lens, len(files))
| 38.735294 | 126 | 0.574574 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib
import os
from glob import glob
from matplotlib.colors import LogNorm
from scipy.optimize import curve_fit
from astropy.table import Table
import astropy.io.fits as fits
from astropy.stats import LombScargle, BoxLeastSquares
import exoplanet as xo
from stuff import FINDflare, EasyE
matplotlib.rcParams.update({'font.size':18})
matplotlib.rcParams.update({'font.family':'serif'})
ftype = '.pdf'
def RunSectors(tess_dir = '/Users/james/Desktop/tess/', run_dir = '/Users/james/Desktop/helloTESS/', clobber=True, Nsector=3):
sectors = ['sector001', 'sector002', 'sector003', 'sector004', 'sector005', 'sector006', 'sector007', 'sector008']
files = []
for k in range(len(sectors)):
files = files + glob(tess_dir + sectors[k] + '/*.fits', recursive=True)
obj = pd.Series(files).str.split('-', expand=True).groupby(by=2).count().index
Nobj = pd.Series(files).str.split('-', expand=True).groupby(by=2).count()[0]
sector))[0]
print(str(len(o5)) + ' objects with Nobs > 3 Sectors')
for k in range(0, len(o5)):
print(k, obj[o5][k])
files_k = pd.Series(files)[np.where((pd.Series(files).str.split('-', expand=True)[2] == obj[o5][k]))[0]].values
rot_out_k = MultiSector(files_k, clobber=clobber)
if k==0:
rot_out = rot_out_k
else:
rot_out = pd.concat([rot_out, rot_out_k], ignore_index=True, sort=False)
rot_out.to_csv(run_dir + '/outputs/longerP_rot_out.csv')
return
def MultiSector(TICs, tess_dir = '/Users/james/Desktop/tess/',
run_dir = '/Users/james/Desktop/helloTESS/',
clobber=False):
if not os.path.isdir(run_dir + 'figures/longerP'):
os.makedirs(run_dir + 'figures/longerP')
tbit = False
for k in range(len(TICs)):
tbl = -1
try:
tbl = Table.read(TICs[k], format='fits')
tbl['PDCSAP_FLUX'] = tbl['PDCSAP_FLUX'] - np.nanmedian(tbl['PDCSAP_FLUX'])
if tbit == False:
df_tbl = tbl.to_pandas()
tbit = True
else:
df_tbl = pd.concat([df_tbl, tbl.to_pandas()], ignore_index=True, sort=False)
except:
tbl = -1
print('bad file: ' + TICs[k])
df_tbl['PDCSAP_FLUX'] = df_tbl['PDCSAP_FLUX'] + np.nanmedian(df_tbl['SAP_FLUX'])
AOK = (df_tbl['QUALITY'] == 0) & ((df_tbl['TIME'] < 1347) | (df_tbl['TIME'] > 1350))
smo = df_tbl['PDCSAP_FLUX'][AOK].rolling(128, center=True).median().values
med = np.nanmedian(smo)
figname = run_dir + 'figures/longerP/' + TICs[0].split('-')[2] + '.jpeg'
makefig = ((not os.path.exists(figname)) | clobber)
if makefig:
plt.figure(figsize=(14,6))
plt.errorbar(df_tbl['TIME'][AOK], df_tbl['PDCSAP_FLUX'][AOK]/med, yerr=df_tbl['PDCSAP_FLUX_ERR'][AOK]/med,
linestyle=None, alpha=0.25, label='PDC_FLUX')
plt.plot(df_tbl['TIME'][AOK], smo/med, label='128pt MED', c='orange')
if sum(AOK) > 1000:
SOK = np.isfinite(smo)
LS = LombScargle(df_tbl['TIME'][AOK][SOK], smo[SOK]/med, dy=df_tbl['PDCSAP_FLUX_ERR'][AOK][SOK]/med)
frequency, power = LS.autopower(minimum_frequency=1./40.,
maximum_frequency=1./0.1,
samples_per_peak=7)
best_frequency = frequency[np.argmax(power)]
per_out = 1./best_frequency
per_amp = np.nanmax(power)
per_med = np.nanmedian(power)
per_std = np.nanstd(smo[SOK]/med)
if np.nanmax(power) > 0.2:
LSmodel = LS.model(df_tbl['TIME'][AOK][SOK], best_frequency)
if makefig:
plt.plot(df_tbl['TIME'][AOK][SOK], LSmodel,
label='L-S P='+format(1./best_frequency, '6.3f')+'d, pk='+format(np.nanmax(power), '6.3f'),
c='green')
acf = xo.autocorr_estimator(df_tbl['TIME'][AOK][SOK].values, smo[SOK]/med,
yerr=df_tbl['PDCSAP_FLUX_ERR'][AOK][SOK].values/med,
min_period=0.1, max_period=40, max_peaks=2)
ACF_1pk = -1
ACF_1dt = -1
if len(acf['peaks']) > 0:
ACF_1dt = acf['peaks'][0]['period']
ACF_1pk = acf['autocorr'][1][np.where((acf['autocorr'][0] == acf['peaks'][0]['period']))[0]][0]
if makefig:
plt.plot(df_tbl['TIME'][AOK][SOK],
np.nanstd(smo[SOK]/med) * ACF_1pk * np.sin(df_tbl['TIME'][AOK][SOK] / ACF_1dt * 2 * np.pi) + 1,
label = 'ACF=' + format(ACF_1dt, '6.3f') + 'd, pk=' + format(ACF_1pk, '6.3f'), lw=2,
alpha=0.7, c='FireBrick')
EE = EasyE(smo[SOK]/med, df_tbl['PDCSAP_FLUX_ERR'][AOK][SOK]/med, N1=5, N2=3, N3=2)
EclFlg = 0
if np.size(EE) > 0:
EclFlg = 1
if makefig:
for j in range(len(EE[0])):
plt.scatter(df_tbl['TIME'][AOK][SOK][(EE[0][j]):(EE[1][j]+1)],
smo[SOK] [(EE[0][j]):(EE[1][j]+1)] / med,
color='k', marker='s', s=5, alpha=0.75, label='_nolegend_')
plt.scatter([],[], color='k', marker='s', s=5, alpha=0.75, label='Ecl?')
if makefig:
plt.title(TICs[0].split('-')[2], fontsize=12)
plt.ylabel('Flux')
plt.xlabel('BJD - 2457000 (days)')
plt.legend(fontsize=10)
plt.savefig(figname, bbox_inches='tight', pad_inches=0.25, dpi=100)
plt.close()
e(data={'TIC':TICs[0].split('-')[2],
'per':per_out, 'Pamp':per_amp, 'Pmed':per_med, 'StdLC':per_std,
'acf_pk':ACF_1pk, 'acf_per':ACF_1dt, 'ecl_flg':EclFlg}, index=[0])
return rot_out
if __name__ == "__main__":
RunSectors()
| true | true |
f731cbc6feb59399bb774f79ba1a3cf4105984b2 | 1,268 | py | Python | python/pw_multiScriptEditor/widgets/findWidget.py | ZBYVFX/NukeToolSet | a077aa9d8ac941f534d33dfebd27994fc9cefba7 | [
"MIT"
] | 1 | 2021-01-05T12:28:35.000Z | 2021-01-05T12:28:35.000Z | python/pw_multiScriptEditor/widgets/findWidget.py | ensii75/NukeToolSet | 0c47efc3bc7ca513f902e00a3e2b71404636aae9 | [
"MIT"
] | null | null | null | python/pw_multiScriptEditor/widgets/findWidget.py | ensii75/NukeToolSet | 0c47efc3bc7ca513f902e00a3e2b71404636aae9 | [
"MIT"
] | null | null | null | from Qt.QtCore import *
from Qt.QtGui import *
from Qt.QtWidgets import *
import findWidget_UIs as ui
class findWidgetClass(QWidget, ui.Ui_findReplace):
searchSignal = Signal(str)
replaceSignal = Signal(list)
replaceAllSignal = Signal(list)
def __init__(self, parent):
super(findWidgetClass, self).__init__(parent)
self.setupUi(self)
self.setWindowFlags(Qt.Tool)
center = parent.parent().mapToGlobal(parent.geometry().center())
myGeo = self.geometry()
myGeo.moveCenter(center)
self.setGeometry(myGeo)
self.find_le.setFocus()
#connect
self.find_btn.clicked.connect(self.search)
self.find_le.returnPressed.connect(self.search)
self.replace_btn.clicked.connect(self.replace)
self.replace_le.returnPressed.connect(self.replace)
self.replaceAll_btn.clicked.connect(self.replaceAll)
def search(self):
self.searchSignal.emit(self.find_le.text())
def replace(self):
find = self.find_le.text()
rep = self.replace_le.text()
self.replaceSignal.emit([find, rep])
def replaceAll(self):
find = self.find_le.text()
rep = self.replace_le.text()
self.replaceAllSignal.emit([find, rep]) | 34.27027 | 72 | 0.670347 | from Qt.QtCore import *
from Qt.QtGui import *
from Qt.QtWidgets import *
import findWidget_UIs as ui
class findWidgetClass(QWidget, ui.Ui_findReplace):
searchSignal = Signal(str)
replaceSignal = Signal(list)
replaceAllSignal = Signal(list)
def __init__(self, parent):
super(findWidgetClass, self).__init__(parent)
self.setupUi(self)
self.setWindowFlags(Qt.Tool)
center = parent.parent().mapToGlobal(parent.geometry().center())
myGeo = self.geometry()
myGeo.moveCenter(center)
self.setGeometry(myGeo)
self.find_le.setFocus()
self.find_btn.clicked.connect(self.search)
self.find_le.returnPressed.connect(self.search)
self.replace_btn.clicked.connect(self.replace)
self.replace_le.returnPressed.connect(self.replace)
self.replaceAll_btn.clicked.connect(self.replaceAll)
def search(self):
self.searchSignal.emit(self.find_le.text())
def replace(self):
find = self.find_le.text()
rep = self.replace_le.text()
self.replaceSignal.emit([find, rep])
def replaceAll(self):
find = self.find_le.text()
rep = self.replace_le.text()
self.replaceAllSignal.emit([find, rep]) | true | true |
f731cd74c0ad4505283364c86d8ef5503e3e88c9 | 2,763 | py | Python | dmutils/user.py | robot2051/dto-digitalmarketplace-utils | e581be6396c12473697398b0ec9d253c564a324b | [
"MIT"
] | null | null | null | dmutils/user.py | robot2051/dto-digitalmarketplace-utils | e581be6396c12473697398b0ec9d253c564a324b | [
"MIT"
] | null | null | null | dmutils/user.py | robot2051/dto-digitalmarketplace-utils | e581be6396c12473697398b0ec9d253c564a324b | [
"MIT"
] | null | null | null | import hashlib
import base64
def hash_email(email):
m = hashlib.sha256()
m.update(email.encode('utf-8'))
return base64.urlsafe_b64encode(m.digest())
def user_logging_string(user):
if user.is_anonymous:
return 'User(anonymous)'
return 'User(id={}, role={}, hashed_email={})'.format(user.id, user.role, hash_email(user.email_address))
def user_has_role(user, role):
try:
return user['users']['role'] == role
except (KeyError, TypeError):
return False
class User():
def __init__(self, user_id, email_address, supplier_code, supplier_name,
locked, active, name, role):
self.id = user_id
self.email_address = email_address
self.name = name
self.role = role
self.supplier_code = supplier_code
self.supplier_name = supplier_name
self.locked = locked
self.active = active
@property
def is_authenticated(self):
return self.is_active
@property
def is_active(self):
return self.active and not self.locked
@property
def is_locked(self):
return self.locked
@property
def is_anonymous(self):
return False
def has_role(self, role):
return self.role == role
def has_any_role(self, *roles):
return any(self.has_role(role) for role in roles)
def get_id(self):
try:
return unicode(self.id) # python 2
except NameError:
return str(self.id) # python 3
def serialize(self):
return {
'id': self.id,
'name': self.name,
'emailAddress': self.email_address,
'supplierCode': self.supplier_code,
'supplierName': self.supplier_name,
'locked': self.locked,
}
@staticmethod
def from_json(user_json):
user = user_json["users"]
supplier_code = None
supplier_name = None
if "supplier" in user:
supplier_code = user["supplier"]["supplierCode"]
supplier_name = user["supplier"]["name"]
return User(
user_id=user["id"],
email_address=user['emailAddress'],
supplier_code=supplier_code,
supplier_name=supplier_name,
locked=user.get('locked', False),
active=user.get('active', True),
name=user['name'],
role=user['role']
)
@staticmethod
def load_user(data_api_client, user_id):
"""Load a user from the API and hydrate the User model"""
user_json = data_api_client.get_user(user_id=int(user_id))
if user_json:
user = User.from_json(user_json)
if user.is_active:
return user
| 26.825243 | 109 | 0.591386 | import hashlib
import base64
def hash_email(email):
m = hashlib.sha256()
m.update(email.encode('utf-8'))
return base64.urlsafe_b64encode(m.digest())
def user_logging_string(user):
if user.is_anonymous:
return 'User(anonymous)'
return 'User(id={}, role={}, hashed_email={})'.format(user.id, user.role, hash_email(user.email_address))
def user_has_role(user, role):
try:
return user['users']['role'] == role
except (KeyError, TypeError):
return False
class User():
def __init__(self, user_id, email_address, supplier_code, supplier_name,
locked, active, name, role):
self.id = user_id
self.email_address = email_address
self.name = name
self.role = role
self.supplier_code = supplier_code
self.supplier_name = supplier_name
self.locked = locked
self.active = active
@property
def is_authenticated(self):
return self.is_active
@property
def is_active(self):
return self.active and not self.locked
@property
def is_locked(self):
return self.locked
@property
def is_anonymous(self):
return False
def has_role(self, role):
return self.role == role
def has_any_role(self, *roles):
return any(self.has_role(role) for role in roles)
def get_id(self):
try:
return unicode(self.id)
except NameError:
return str(self.id)
def serialize(self):
return {
'id': self.id,
'name': self.name,
'emailAddress': self.email_address,
'supplierCode': self.supplier_code,
'supplierName': self.supplier_name,
'locked': self.locked,
}
@staticmethod
def from_json(user_json):
user = user_json["users"]
supplier_code = None
supplier_name = None
if "supplier" in user:
supplier_code = user["supplier"]["supplierCode"]
supplier_name = user["supplier"]["name"]
return User(
user_id=user["id"],
email_address=user['emailAddress'],
supplier_code=supplier_code,
supplier_name=supplier_name,
locked=user.get('locked', False),
active=user.get('active', True),
name=user['name'],
role=user['role']
)
@staticmethod
def load_user(data_api_client, user_id):
user_json = data_api_client.get_user(user_id=int(user_id))
if user_json:
user = User.from_json(user_json)
if user.is_active:
return user
| true | true |
f731cede9482145859d440ea399c33fb16b5b9b9 | 291 | py | Python | Python/Numpy/Shape and Reshape.py | pavstar619/HackerRank | 697ee46b6e621ad884a064047461d7707b1413cd | [
"MIT"
] | 61 | 2017-04-27T13:45:12.000Z | 2022-01-27T11:40:15.000Z | Python/Numpy/Shape and Reshape.py | fahad0193/HackerRank | eb6c95e16688c02921c1df6b6ea613667a251457 | [
"MIT"
] | 1 | 2017-06-24T14:16:06.000Z | 2017-06-24T14:16:28.000Z | Python/Numpy/Shape and Reshape.py | fahad0193/HackerRank | eb6c95e16688c02921c1df6b6ea613667a251457 | [
"MIT"
] | 78 | 2017-07-05T11:48:20.000Z | 2022-02-08T08:04:22.000Z | import numpy as np
class Main:
def __init__(self):
self.li = list(map(int, input().split()))
self.np_li = np.array(self.li)
def output(self):
print(np.reshape(self.np_li, (3,3)))
if __name__ == '__main__':
obj = Main()
obj.output()
| 20.785714 | 49 | 0.546392 | import numpy as np
class Main:
def __init__(self):
self.li = list(map(int, input().split()))
self.np_li = np.array(self.li)
def output(self):
print(np.reshape(self.np_li, (3,3)))
if __name__ == '__main__':
obj = Main()
obj.output()
| true | true |
f731cf1d9fcb728621d15653a869ac0796be8492 | 127,303 | py | Python | pylearn2/models/dbm/layer.py | kundajelab/pylearn2 | 17ebf0c37b35637e337b3ae884806d2c99beb31c | [
"BSD-3-Clause"
] | null | null | null | pylearn2/models/dbm/layer.py | kundajelab/pylearn2 | 17ebf0c37b35637e337b3ae884806d2c99beb31c | [
"BSD-3-Clause"
] | null | null | null | pylearn2/models/dbm/layer.py | kundajelab/pylearn2 | 17ebf0c37b35637e337b3ae884806d2c99beb31c | [
"BSD-3-Clause"
] | null | null | null | """
Common DBM Layer classes
"""
from __future__ import print_function
__authors__ = ["Ian Goodfellow", "Vincent Dumoulin"]
__copyright__ = "Copyright 2012-2013, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
import functools
import logging
import numpy as np
import operator
from theano.compat.six.moves import input, reduce, xrange
import time
import warnings
from theano import tensor as T, function, config
import theano
from theano.gof.op import get_debug_values
from theano.printing import Print
from pylearn2.compat import OrderedDict
from pylearn2.expr.nnet import sigmoid_numpy
from pylearn2.expr.probabilistic_max_pooling import max_pool_channels, max_pool_b01c, max_pool, max_pool_c01b
from pylearn2.linear.conv2d import make_random_conv2D, make_sparse_random_conv2D
from pylearn2.linear.conv2d_c01b import setup_detector_layer_c01b
from pylearn2.linear.matrixmul import MatrixMul
from pylearn2.models import Model
from pylearn2.models.dbm import init_sigmoid_bias_from_marginals
from pylearn2.space import VectorSpace, CompositeSpace, Conv2DSpace, Space
from pylearn2.utils import is_block_gradient
from pylearn2.utils import sharedX, safe_zip, py_integer_types, block_gradient
from pylearn2.utils.exc import reraise_as
from pylearn2.utils.rng import make_theano_rng
from pylearn2.utils import safe_union
logger = logging.getLogger(__name__)
class Layer(Model):
"""
Abstract class.
A layer of a DBM.
May only belong to one DBM.
Each layer has a state ("total state") that can be split into
the piece that is visible to the layer above ("upward state")
and the piece that is visible to the layer below ("downward state").
(Since visible layers don't have a downward state, the downward_state
method only appears in the DBM_HiddenLayer subclass)
For simple layers, all three of these are the same thing.
"""
def get_dbm(self):
"""
Returns the DBM that this layer belongs to, or None
if it has not been assigned to a DBM yet.
"""
if hasattr(self, 'dbm'):
return self.dbm
return None
def set_dbm(self, dbm):
"""
Assigns this layer to a DBM.
Parameters
----------
dbm : WRITEME
"""
assert self.get_dbm() is None
self.dbm = dbm
def get_total_state_space(self):
"""
Returns the Space that the layer's total state lives in.
"""
raise NotImplementedError(str(type(self))+" does not implement " +\
"get_total_state_space()")
def get_monitoring_channels(self):
"""
.. todo::
WRITEME
"""
return OrderedDict()
def get_monitoring_channels_from_state(self, state):
"""
.. todo::
WRITEME
"""
return OrderedDict()
def upward_state(self, total_state):
"""
Takes total_state and turns it into the state that layer_above should
see when computing P( layer_above | this_layer).
So far this has two uses:
* If this layer consists of a detector sub-layer h that is pooled
into a pooling layer p, then total_state = (p,h) but layer_above
should only see p.
* If the conditional P( layer_above | this_layer) depends on
parameters of this_layer, sometimes you can play games with
the state to avoid needing the layers to communicate. So far
the only instance of this usage is when the visible layer
is N( Wh, beta). This makes the hidden layer be
sigmoid( v beta W + b). Rather than having the hidden layer
explicitly know about beta, we can just pass v beta as
the upward state.
Parameters
----------
total_state : WRITEME
Notes
-----
This method should work both for computing sampling updates
and for computing mean field updates. So far I haven't encountered
a case where it needs to do different things for those two
contexts.
"""
return total_state
def make_state(self, num_examples, numpy_rng):
"""
Returns a shared variable containing an actual state (not a mean field
state) for this variable.
Parameters
----------
num_examples : WRITEME
numpy_rng : WRITEME
Returns
-------
WRITEME
"""
raise NotImplementedError("%s doesn't implement make_state" %
type(self))
def make_symbolic_state(self, num_examples, theano_rng):
"""
Returns a theano symbolic variable containing an actual state (not a
mean field state) for this variable.
Parameters
----------
num_examples : WRITEME
numpy_rng : WRITEME
Returns
-------
WRITEME
"""
raise NotImplementedError("%s doesn't implement make_symbolic_state" %
type(self))
def sample(self, state_below=None, state_above=None,
           layer_above=None,
           theano_rng=None):
    """
    Build an expression for samples of this layer's state conditioned
    on the layers above and below. The result must be valid as an
    update to the shared variable returned by `self.make_state`.
    Abstract in this class.

    Parameters
    ----------
    state_below : object
        `layer_below.upward_state(full_state_below)`, where
        `full_state_below` is what `layer_below.make_state` returns.
    state_above : object
        `layer_above.downward_state(full_state_above)`.
    layer_above : Layer
        The layer above, used to compute the downward message.
    theano_rng : MRG_RandomStreams
        Symbolic RNG instance.

    Notes
    -----
    May return several expressions when this layer's total state
    consists of more than one shared variable.
    """
    # Guard against subclasses written for the pre-rename API.
    if hasattr(self, 'get_sampling_updates'):
        raise AssertionError("Looks like "+str(type(self))+" needs to rename get_sampling_updates to sample.")
    raise NotImplementedError("%s doesn't implement sample" %
                              type(self))
def expected_energy_term(self, state,
                         average,
                         state_below,
                         average_below):
    """
    Return one term of the expected energy of the whole model: the
    expectation of the energy-function terms that involve only this
    layer, plus (if a layer below exists) the terms coupling this
    layer with the layer below. Terms involving only the layer below,
    or the layer above in any way, must be excluded. Abstract in this
    class.

    Parameters
    ----------
    state : object
        Total state of this layer.
    average : bool
        If True, `state` holds variational parameters to integrate
        over; if False, it is a fixed assignment.
    state_below : object
        Upward state of the layer below.
    average_below : bool
        Same convention as `average`, for `state_below`.

    Returns
    -------
    tensor_like
        1D theano tensor: the expected energy term per example.
    """
    raise NotImplementedError(str(type(self))+" does not implement expected_energy_term.")
def finalize_initialization(self):
    """
    Hook called after `set_input_space`, for layers whose
    initialization depends on the layer above already being
    initialized. The default does nothing.
    """
class VisibleLayer(Layer):
    """
    Abstract class for DBM layers usable as visible layers.

    At present every concrete layer class is either visible or hidden
    but never both; allowing dual-role classes would make the
    BinaryVector class redundant.
    """

    def get_total_state_space(self):
        """
        Return the Space that this layer's total state lives in.

        Returns
        -------
        total_state : Space
            For a visible layer this is simply its input space.
        """
        space = self.get_input_space()
        return space
class HiddenLayer(Layer):
    """
    Abstract class for DBM layers usable as hidden layers.
    """

    def downward_state(self, total_state):
        """
        Return the piece of `total_state` that the layer below should
        see. The default passes the full state down unchanged.
        """
        return total_state

    def get_stdev_rewards(self, state, coeffs):
        """
        Regularization reward on unit standard deviations; not
        implemented in this abstract class.
        """
        raise NotImplementedError(str(type(self))+" does not implement get_stdev_rewards")

    def get_range_rewards(self, state, coeffs):
        """
        Regularization reward on unit activation ranges; not
        implemented in this abstract class.
        """
        raise NotImplementedError(str(type(self))+" does not implement get_range_rewards")

    def get_l1_act_cost(self, state, target, coeff, eps):
        """
        L1 activation penalty toward `target`; not implemented in this
        abstract class.
        """
        raise NotImplementedError(str(type(self))+" does not implement get_l1_act_cost")

    def get_l2_act_cost(self, state, target, coeff):
        """
        L2 activation penalty toward `target`; not implemented in this
        abstract class.
        """
        raise NotImplementedError(str(type(self))+" does not implement get_l2_act_cost")
class BinaryVector(VisibleLayer):
    """
    A DBM visible layer consisting of binary random variables living
    in a VectorSpace.

    Parameters
    ----------
    nvis : int
        Dimension of the space
    bias_from_marginals : pylearn2.datasets.dataset.Dataset
        Dataset, whose marginals are used to initialize the visible biases
    center : bool
        If True, use Gregoire Montavon's centering trick
    copies : int
        Use this number of virtual copies of the state. All the copies
        still share parameters. This can be useful for balancing the
        amount of influence two neighboring layers have on each other
        if the layers have different numbers or different types of units.
        Without this replication, layers with more units or units with
        a greater dynamic range would dominate the interaction due to
        the symmetric nature of undirected interactions.
    learn_init_inpainting_state : bool
        If False (the default), the initial inpainting state is wrapped
        in block_gradient so it is not learned.
    """

    def __init__(self,
                 nvis,
                 bias_from_marginals=None,
                 center=False,
                 copies=1, learn_init_inpainting_state=False):
        super(BinaryVector, self).__init__()
        self.__dict__.update(locals())
        del self.self
        # Don't serialize the dataset
        del self.bias_from_marginals

        self.space = VectorSpace(nvis)
        self.input_space = self.space

        origin = self.space.get_origin()

        if bias_from_marginals is None:
            init_bias = np.zeros((nvis,))
        else:
            init_bias = init_sigmoid_bias_from_marginals(bias_from_marginals)

        self.bias = sharedX(init_bias, 'visible_bias')

        if center:
            # Centering offset: the mean activation implied by the bias.
            self.offset = sharedX(sigmoid_numpy(init_bias))

    def get_biases(self):
        """
        Returns
        -------
        biases : ndarray
            The numpy value of the biases
        """
        return self.bias.get_value()

    def set_biases(self, biases, recenter=False):
        """
        Set the visible biases to `biases`. If `recenter` (requires the
        centering trick to be enabled), also update the centering offset
        to sigmoid of the new biases.
        """
        self.bias.set_value(biases)
        if recenter:
            assert self.center
            self.offset.set_value(sigmoid_numpy(self.bias.get_value()))

    def upward_state(self, total_state):
        """
        State passed to the layer above: the (optionally centered)
        visible state, scaled by the number of virtual copies.
        """
        # Patch old pickle files that predate these options.
        if not hasattr(self, 'center'):
            self.center = False

        if self.center:
            rval = total_state - self.offset
        else:
            rval = total_state

        if not hasattr(self, 'copies'):
            self.copies = 1

        return rval * self.copies

    def get_params(self):
        """
        Returns the layer's learnable parameters: just the visible bias.
        """
        return [self.bias]

    def sample(self, state_below=None, state_above=None,
               layer_above=None,
               theano_rng=None):
        """
        Gibbs sample the visible units given the layer above:
        v ~ Bernoulli(sigmoid(downward_message + bias)).
        """
        assert state_below is None
        if self.copies != 1:
            raise NotImplementedError()

        msg = layer_above.downward_message(state_above)

        bias = self.bias

        z = msg + bias

        phi = T.nnet.sigmoid(z)

        rval = theano_rng.binomial(size=phi.shape, p=phi, dtype=phi.dtype,
                                   n=1)

        return rval

    def mf_update(self, state_above, layer_above):
        """
        Mean field update for the visible units:
        sigmoid(downward_message + bias).
        """
        msg = layer_above.downward_message(state_above)
        mu = self.bias

        z = msg + mu

        rval = T.nnet.sigmoid(z)

        return rval

    def make_state(self, num_examples, numpy_rng):
        """
        Returns a shared variable of shape (num_examples, nvis) holding
        binary samples drawn from sigmoid(bias).
        """
        if not hasattr(self, 'copies'):
            self.copies = 1

        if self.copies != 1:
            raise NotImplementedError()

        driver = numpy_rng.uniform(0., 1., (num_examples, self.nvis))
        mean = sigmoid_numpy(self.bias.get_value())
        sample = driver < mean

        rval = sharedX(sample, name='v_sample_shared')

        return rval

    def make_symbolic_state(self, num_examples, theano_rng):
        """
        Returns a symbolic (num_examples, nvis) binary sample drawn from
        sigmoid(bias).
        """
        if not hasattr(self, 'copies'):
            self.copies = 1

        if self.copies != 1:
            raise NotImplementedError()

        mean = T.nnet.sigmoid(self.bias)
        rval = theano_rng.binomial(size=(num_examples, self.nvis), p=mean,
                                   dtype=theano.config.floatX)

        return rval

    def expected_energy_term(self, state, average, state_below=None, average_below=None):
        """
        Energy term contributed by this layer: -state . bias, scaled by
        the number of copies. The energy is linear in the state, so the
        same expression is valid whether or not we are averaging.
        """
        if self.center:
            state = state - self.offset

        assert state_below is None
        assert average_below is None
        assert average in [True, False]
        self.space.validate(state)

        # Energy function is linear so it doesn't matter if we're averaging or not
        rval = -T.dot(state, self.bias)

        assert rval.ndim == 1

        return rval * self.copies

    def init_inpainting_state(self, V, drop_mask, noise=False, return_unmasked=False):
        """
        Initial mean-field state for inpainting: the bias-implied mean
        (or random means when `noise`) for dropped units, combined with
        the observed values of V for kept units.
        """
        assert drop_mask is None or drop_mask.ndim > 1

        unmasked = T.nnet.sigmoid(self.bias.dimshuffle('x', 0))
        # this condition is needed later if unmasked is used as V_hat
        assert unmasked.ndim == 2
        # this condition is also needed later if unmasked is used as V_hat
        assert hasattr(unmasked.owner.op, 'scalar_op')
        if drop_mask is not None:
            masked_mean = unmasked * drop_mask
        else:
            masked_mean = unmasked
        if not hasattr(self, 'learn_init_inpainting_state'):
            self.learn_init_inpainting_state = 0
        if not self.learn_init_inpainting_state:
            masked_mean = block_gradient(masked_mean)
        masked_mean.name = 'masked_mean'

        if noise:
            theano_rng = theano.sandbox.rng_mrg.MRG_RandomStreams(42)
            # we want a set of random mean field parameters, not binary samples
            unmasked = T.nnet.sigmoid(theano_rng.normal(avg=0.,
                                                        std=1., size=masked_mean.shape,
                                                        dtype=masked_mean.dtype))
            # Bug fix: the original multiplied by drop_mask unconditionally
            # here, crashing when drop_mask is None (the non-noise path above
            # already guards against that case).
            if drop_mask is not None:
                masked_mean = unmasked * drop_mask
            else:
                masked_mean = unmasked
            masked_mean.name = 'masked_noise'

        if drop_mask is None:
            rval = masked_mean
        else:
            masked_V = V * (1 - drop_mask)
            rval = masked_mean + masked_V
        rval.name = 'init_inpainting_state'

        if return_unmasked:
            assert unmasked.ndim > 1
            return rval, unmasked

        return rval

    def inpaint_update(self, state_above, layer_above, drop_mask=None, V=None, return_unmasked=False):
        """
        One inpainting mean-field update of the visible units: dropped
        units take sigmoid(downward_message + bias), kept units take V.
        """
        msg = layer_above.downward_message(state_above)
        mu = self.bias

        z = msg + mu
        z.name = 'inpainting_z_[unknown_iter]'

        unmasked = T.nnet.sigmoid(z)

        if drop_mask is not None:
            rval = drop_mask * unmasked + (1 - drop_mask) * V
        else:
            rval = unmasked

        rval.name = 'inpainted_V[unknown_iter]'

        if return_unmasked:
            owner = unmasked.owner
            assert owner is not None
            op = owner.op
            assert hasattr(op, 'scalar_op')
            assert isinstance(op.scalar_op, T.nnet.sigm.ScalarSigmoid)
            return rval, unmasked

        return rval

    def recons_cost(self, V, V_hat_unmasked, drop_mask=None, use_sum=False):
        """
        Mean masked cross-entropy reconstruction cost. V_hat_unmasked
        must be the direct output of a sigmoid (possibly wrapped in
        block_gradient): the pre-sigmoid input z is recovered from the
        graph so the cost can be written numerically stably via
        softplus.
        """
        if use_sum:
            raise NotImplementedError()

        V_hat = V_hat_unmasked

        assert hasattr(V_hat, 'owner')
        owner = V_hat.owner
        assert owner is not None
        op = owner.op
        block_grad = False
        if is_block_gradient(op):
            assert isinstance(op.scalar_op, theano.scalar.Identity)
            block_grad = True
            real, = owner.inputs
            owner = real.owner
            op = owner.op

        if not hasattr(op, 'scalar_op'):
            raise ValueError("Expected V_hat_unmasked to be generated by an Elemwise op, got "+str(op)+" of type "+str(type(op)))
        assert isinstance(op.scalar_op, T.nnet.sigm.ScalarSigmoid)
        z, = owner.inputs
        if block_grad:
            z = block_gradient(z)

        if V.ndim != V_hat.ndim:
            raise ValueError("V and V_hat_unmasked should have same ndim, but are %d and %d." % (V.ndim, V_hat.ndim))
        # Numerically stable cross entropy in terms of the pre-sigmoid z.
        unmasked_cost = V * T.nnet.softplus(-z) + (1 - V) * T.nnet.softplus(z)
        assert unmasked_cost.ndim == V_hat.ndim

        if drop_mask is None:
            masked_cost = unmasked_cost
        else:
            masked_cost = drop_mask * unmasked_cost

        return masked_cost.mean()
class BinaryVectorMaxPool(HiddenLayer):
    """
    A hidden layer that does max-pooling on binary vectors.
    It has two sublayers, the detector layer and the pooling
    layer. The detector layer is its downward state and the pooling
    layer is its upward state.

    Parameters
    ----------
    detector_layer_dim : int
        Number of units in the detector layer
    pool_size : int
        Number of detector units per pooling unit
        (Pools are disjoint)
    layer_name : str
        Name of the layer
    irange : float
        If specified, initialize the weights in U(-irange, irange)
    include_prob : float, optional
        Probability of including a weight element in the set of weights
        initialized to U(-irange, irange). If not included it is
        initialized to 0.
    sparse_init : int
        If specified, initialize this many weights in each column
        to be nonzero.
    sparse_stdev : float
        When using sparse_init, the non-zero weights are drawn from
        a Gaussian distribution with mean 0 and standard deviation
        `sparse_stdev`
    init_bias : float or ndarray
        Initialize the biases to this value
    W_lr_scale : float
        Multiply the learning rate on the weights by this number
    b_lr_scale : float
        Multiply the learning rate on the biases by this number
    center : bool
        If True, use Gregoire Montavon's centering trick
    mask_weights : ndarray
        If not None, weights are multiplied by this mask after each
        update (weights where the mask is 0 stay 0).
    max_col_norm : float
        Constrain the columns of the weight matrix to have at most
        this norm
    copies : int
        See BinaryVector docstring for explanation
    """
    # TODO: this layer uses (pooled, detector) as its total state,
    #       which can be confusing when listing all the states in
    #       the network left to right. Change this and
    #       pylearn2.expr.probabilistic_max_pooling to use
    #       (detector, pooled)

    def __init__(self,
                 detector_layer_dim,
                 pool_size,
                 layer_name,
                 irange=None,
                 sparse_init=None,
                 sparse_stdev=1.,
                 include_prob=1.0,
                 init_bias=0.,
                 W_lr_scale=None,
                 b_lr_scale=None,
                 center=False,
                 mask_weights=None,
                 max_col_norm=None,
                 copies=1):
        super(BinaryVectorMaxPool, self).__init__()
        self.__dict__.update(locals())
        del self.self

        self.b = sharedX(np.zeros((self.detector_layer_dim,)) + init_bias, name=layer_name + '_b')

        if self.center:
            if self.pool_size != 1:
                raise NotImplementedError()
            self.offset = sharedX(sigmoid_numpy(self.b.get_value()))

    def get_lr_scalers(self):
        """
        Per-parameter learning rate multipliers (W_lr_scale for the
        weights, b_lr_scale for the biases).
        """
        # Patch old pickle files
        if not hasattr(self, 'W_lr_scale'):
            self.W_lr_scale = None
        if not hasattr(self, 'b_lr_scale'):
            self.b_lr_scale = None

        rval = OrderedDict()

        if self.W_lr_scale is not None:
            W, = self.transformer.get_params()
            rval[W] = self.W_lr_scale

        if self.b_lr_scale is not None:
            rval[self.b] = self.b_lr_scale

        return rval

    def set_input_space(self, space):
        """
        Configure the layer for `space` and (re)initialize the weights.

        Notes
        -----
        This resets parameters!
        """
        self.input_space = space

        if isinstance(space, VectorSpace):
            self.requires_reformat = False
            self.input_dim = space.dim
        else:
            self.requires_reformat = True
            self.input_dim = space.get_total_dimension()
            self.desired_space = VectorSpace(self.input_dim)

        if not (self.detector_layer_dim % self.pool_size == 0):
            raise ValueError("detector_layer_dim = %d, pool_size = %d. Should be divisible but remainder is %d" %
                             (self.detector_layer_dim, self.pool_size, self.detector_layer_dim % self.pool_size))

        self.h_space = VectorSpace(self.detector_layer_dim)
        # NOTE(review): "/" is integer division here under Python 2 (file
        # uses xrange); under Python 3 this would produce a float.
        self.pool_layer_dim = self.detector_layer_dim / self.pool_size
        self.output_space = VectorSpace(self.pool_layer_dim)

        rng = self.dbm.rng
        if self.irange is not None:
            assert self.sparse_init is None
            # Dense init: U(-irange, irange), each weight kept with
            # probability include_prob (otherwise zero).
            W = rng.uniform(-self.irange,
                            self.irange,
                            (self.input_dim, self.detector_layer_dim)) * \
                (rng.uniform(0., 1., (self.input_dim, self.detector_layer_dim))
                 < self.include_prob)
        else:
            assert self.sparse_init is not None
            W = np.zeros((self.input_dim, self.detector_layer_dim))

            def mask_rejects(idx, i):
                if self.mask_weights is None:
                    return False
                return self.mask_weights[idx, i] == 0.

            # Sparse init: sparse_init distinct nonzero entries per column,
            # avoiding positions killed by the weight mask.
            for i in xrange(self.detector_layer_dim):
                assert self.sparse_init <= self.input_dim
                for j in xrange(self.sparse_init):
                    idx = rng.randint(0, self.input_dim)
                    while W[idx, i] != 0 or mask_rejects(idx, i):
                        idx = rng.randint(0, self.input_dim)
                    W[idx, i] = rng.randn()
            W *= self.sparse_stdev

        W = sharedX(W)
        W.name = self.layer_name + '_W'

        self.transformer = MatrixMul(W)

        W, = self.transformer.get_params()
        assert W.name is not None

        if self.mask_weights is not None:
            expected_shape = (self.input_dim, self.detector_layer_dim)
            if expected_shape != self.mask_weights.shape:
                raise ValueError("Expected mask with shape "+str(expected_shape)+" but got "+str(self.mask_weights.shape))
            self.mask = sharedX(self.mask_weights)

    @functools.wraps(Model._modify_updates)
    def _modify_updates(self, updates):
        # Patch old pickle files
        if not hasattr(self, 'mask_weights'):
            self.mask_weights = None
        if not hasattr(self, 'max_col_norm'):
            self.max_col_norm = None

        if self.mask_weights is not None:
            W, = self.transformer.get_params()
            if W in updates:
                # Keep masked-out weights at zero.
                updates[W] = updates[W] * self.mask

        if self.max_col_norm is not None:
            W, = self.transformer.get_params()
            if W in updates:
                # Project columns back onto the max_col_norm ball.
                updated_W = updates[W]
                col_norms = T.sqrt(T.sum(T.sqr(updated_W), axis=0))
                desired_norms = T.clip(col_norms, 0, self.max_col_norm)
                updates[W] = updated_W * (desired_norms / (1e-7 + col_norms))

    def get_total_state_space(self):
        """
        Total state is (pooled, detector): a CompositeSpace of the
        output space and the detector space.
        """
        return CompositeSpace((self.output_space, self.h_space))

    def get_params(self):
        """
        Returns the learnable parameters: the weight matrix followed by
        the detector biases.
        """
        assert self.b.name is not None
        W, = self.transformer.get_params()
        assert W.name is not None
        rval = self.transformer.get_params()
        assert not isinstance(rval, set)
        rval = list(rval)
        assert self.b not in rval
        rval.append(self.b)
        return rval

    def get_weight_decay(self, coeff):
        """
        L2 weight decay term: coeff * sum(W ** 2).
        """
        if isinstance(coeff, str):
            coeff = float(coeff)
        assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
        W, = self.transformer.get_params()
        return coeff * T.sqr(W).sum()

    def get_weights(self):
        """
        Returns the numpy value of the weight matrix (VectorSpace input
        only).
        """
        if self.requires_reformat:
            # This is not really an unimplemented case.
            # We actually don't know how to format the weights
            # in design space. We got the data in topo space
            # and we don't have access to the dataset
            raise NotImplementedError()
        W, = self.transformer.get_params()
        return W.get_value()

    def set_weights(self, weights):
        """
        Load a numpy array into the weight matrix.
        """
        W, = self.transformer.get_params()
        W.set_value(weights)

    def set_biases(self, biases, recenter=False):
        """
        Set the detector biases. If `recenter` (requires centering and
        pool_size == 1), also update the centering offset.
        """
        self.b.set_value(biases)
        if recenter:
            assert self.center
            if self.pool_size != 1:
                raise NotImplementedError()
            self.offset.set_value(sigmoid_numpy(self.b.get_value()))

    def get_biases(self):
        """
        Returns the numpy value of the detector biases.
        """
        return self.b.get_value()

    def get_weights_format(self):
        """
        Weight matrix axes are (visible, hidden).
        """
        return ('v', 'h')

    def get_weights_view_shape(self):
        """
        Arrangement of weight vectors for visualization: one row per
        pool, one column per detector unit in the pool.
        """
        total = self.detector_layer_dim
        cols = self.pool_size
        if cols == 1:
            # Let the PatchViewer decide how to arrange the units
            # when they're not pooled
            raise NotImplementedError()
        # When they are pooled, make each pooling unit have one row
        # NOTE(review): integer division under Python 2.
        rows = total / cols
        return rows, cols

    def get_weights_topo(self):
        """
        Returns the weights as a topological (image-shaped) batch
        (Conv2DSpace input only).
        """
        if not isinstance(self.input_space, Conv2DSpace):
            raise NotImplementedError()
        W, = self.transformer.get_params()
        W = W.T
        W = W.reshape((self.detector_layer_dim, self.input_space.shape[0],
                       self.input_space.shape[1], self.input_space.num_channels))
        W = Conv2DSpace.convert(W, self.input_space.axes, ('b', 0, 1, 'c'))
        return function([], W)()

    def upward_state(self, total_state):
        """
        The layer above sees the pooled state (optionally centered),
        scaled by the number of copies.
        """
        p, h = total_state
        self.h_space.validate(h)
        self.output_space.validate(p)

        if not hasattr(self, 'center'):
            self.center = False

        if self.center:
            return p - self.offset

        if not hasattr(self, 'copies'):
            self.copies = 1

        return p * self.copies

    def downward_state(self, total_state):
        """
        The layer below sees the detector state (optionally centered),
        scaled by the number of copies.
        """
        p, h = total_state

        if not hasattr(self, 'center'):
            self.center = False

        if self.center:
            return h - self.offset

        return h * self.copies

    def get_monitoring_channels(self):
        """
        Row and column norm statistics of the weight matrix.
        """
        W, = self.transformer.get_params()

        assert W.ndim == 2

        sq_W = T.sqr(W)

        row_norms = T.sqrt(sq_W.sum(axis=1))
        col_norms = T.sqrt(sq_W.sum(axis=0))

        return OrderedDict([
            ('row_norms_min', row_norms.min()),
            ('row_norms_mean', row_norms.mean()),
            ('row_norms_max', row_norms.max()),
            ('col_norms_min', col_norms.min()),
            ('col_norms_mean', col_norms.mean()),
            ('col_norms_max', col_norms.max()),
        ])

    def get_monitoring_channels_from_state(self, state):
        """
        Max/min/mean/range statistics over examples and units for the
        pooled (and, when pool_size > 1, detector) states.
        """
        P, H = state

        rval = OrderedDict()

        if self.pool_size == 1:
            vars_and_prefixes = [(P, '')]
        else:
            vars_and_prefixes = [(P, 'p_'), (H, 'h_')]

        for var, prefix in vars_and_prefixes:
            v_max = var.max(axis=0)
            v_min = var.min(axis=0)
            v_mean = var.mean(axis=0)
            v_range = v_max - v_min

            # max_x.mean_u is "the mean over *u*nits of the max over e*x*amples"
            # The x and u are included in the name because otherwise its hard
            # to remember which axis is which when reading the monitor
            # I use inner.outer rather than outer_of_inner or something like that
            # because I want mean_x.* to appear next to each other in the alphabetical
            # list, as these are commonly plotted together
            for key, val in [
                    ('max_x.max_u', v_max.max()),
                    ('max_x.mean_u', v_max.mean()),
                    ('max_x.min_u', v_max.min()),
                    ('min_x.max_u', v_min.max()),
                    ('min_x.mean_u', v_min.mean()),
                    ('min_x.min_u', v_min.min()),
                    ('range_x.max_u', v_range.max()),
                    ('range_x.mean_u', v_range.mean()),
                    ('range_x.min_u', v_range.min()),
                    ('mean_x.max_u', v_mean.max()),
                    ('mean_x.mean_u', v_mean.mean()),
                    ('mean_x.min_u', v_mean.min())
            ]:
                rval[prefix+key] = val

        return rval

    def get_stdev_rewards(self, state, coeffs):
        """
        Reward term encouraging per-unit standard deviations (over the
        batch) to approach 0.5.
        """
        rval = 0.

        P, H = state
        self.output_space.validate(P)
        self.h_space.validate(H)

        if self.pool_size == 1:
            # If the pool size is 1 then pools = detectors
            # and we should not penalize pools and detectors separately
            assert len(state) == 2
            if isinstance(coeffs, str):
                coeffs = float(coeffs)
            assert isinstance(coeffs, float)
            _, state = state
            state = [state]
            coeffs = [coeffs]
        else:
            assert all([len(elem) == 2 for elem in [state, coeffs]])

        for s, c in safe_zip(state, coeffs):
            assert all([isinstance(elem, float) for elem in [c]])
            if c == 0.:
                continue
            mn = s.mean(axis=0)
            dev = s - mn
            stdev = T.sqrt(T.sqr(dev).mean(axis=0))
            rval += (0.5 - stdev).mean()*c

        return rval

    def get_range_rewards(self, state, coeffs):
        """
        Reward term encouraging each unit's activation range (max minus
        min over the batch) to approach 1.
        """
        rval = 0.

        P, H = state
        self.output_space.validate(P)
        self.h_space.validate(H)

        if self.pool_size == 1:
            # If the pool size is 1 then pools = detectors
            # and we should not penalize pools and detectors separately
            assert len(state) == 2
            if isinstance(coeffs, str):
                coeffs = float(coeffs)
            assert isinstance(coeffs, float)
            _, state = state
            state = [state]
            coeffs = [coeffs]
        else:
            assert all([len(elem) == 2 for elem in [state, coeffs]])

        for s, c in safe_zip(state, coeffs):
            assert all([isinstance(elem, float) for elem in [c]])
            if c == 0.:
                continue
            mx = s.max(axis=0)
            assert hasattr(mx.owner.op, 'grad')
            assert mx.ndim == 1
            mn = s.min(axis=0)
            assert hasattr(mn.owner.op, 'grad')
            assert mn.ndim == 1
            r = mx - mn
            rval += (1 - r).mean()*c

        return rval

    def get_l1_act_cost(self, state, target, coeff, eps=None):
        """
        L1 penalty pushing mean unit activations toward `target`, with
        a dead zone of width `eps`.
        """
        rval = 0.

        P, H = state
        self.output_space.validate(P)
        self.h_space.validate(H)

        if self.pool_size == 1:
            # If the pool size is 1 then pools = detectors
            # and we should not penalize pools and detectors separately
            assert len(state) == 2
            if not isinstance(target, float):
                raise TypeError("BinaryVectorMaxPool.get_l1_act_cost expected target of type float " + \
                                " but an instance named "+self.layer_name + " got target "+str(target) + " of type "+str(type(target)))
            assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
            _, state = state
            state = [state]
            target = [target]
            coeff = [coeff]
            if eps is None:
                eps = [0.]
            else:
                eps = [eps]
        else:
            assert all([len(elem) == 2 for elem in [state, target, coeff]])
            if eps is None:
                eps = [0., 0.]
            if target[1] > target[0]:
                warnings.warn("Do you really want to regularize the detector units to be more active than the pooling units?")

        for s, t, c, e in safe_zip(state, target, coeff, eps):
            assert all([isinstance(elem, float) or hasattr(elem, 'dtype') for elem in [t, c, e]])
            if c == 0.:
                continue
            m = s.mean(axis=0)
            assert m.ndim == 1
            rval += T.maximum(abs(m-t)-e, 0.).mean()*c

        return rval

    def get_l2_act_cost(self, state, target, coeff):
        """
        L2 penalty pushing mean unit activations toward `target`.
        """
        rval = 0.

        P, H = state
        self.output_space.validate(P)
        self.h_space.validate(H)

        if self.pool_size == 1:
            # If the pool size is 1 then pools = detectors
            # and we should not penalize pools and detectors separately
            assert len(state) == 2
            if not isinstance(target, float):
                # Bug fix: the message previously named get_l1_act_cost.
                raise TypeError("BinaryVectorMaxPool.get_l2_act_cost expected target of type float " + \
                                " but an instance named "+self.layer_name + " got target "+str(target) + " of type "+str(type(target)))
            assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
            _, state = state
            state = [state]
            target = [target]
            coeff = [coeff]
        else:
            assert all([len(elem) == 2 for elem in [state, target, coeff]])
            if target[1] > target[0]:
                warnings.warn("Do you really want to regularize the detector units to be more active than the pooling units?")

        for s, t, c in safe_zip(state, target, coeff):
            assert all([isinstance(elem, float) or hasattr(elem, 'dtype') for elem in [t, c]])
            if c == 0.:
                continue
            m = s.mean(axis=0)
            assert m.ndim == 1
            rval += T.square(m-t).mean()*c

        return rval

    def sample(self, state_below=None, state_above=None,
               layer_above=None,
               theano_rng=None):
        """
        Sample (pooled, detector) states via probabilistic max pooling,
        conditioned on the layers above and below.
        """
        if self.copies != 1:
            raise NotImplementedError()

        if theano_rng is None:
            raise ValueError("theano_rng is required; it just defaults to None so that it may appear after layer_above / state_above in the list.")

        if state_above is not None:
            msg = layer_above.downward_message(state_above)
        else:
            msg = None

        if self.requires_reformat:
            state_below = self.input_space.format_as(state_below, self.desired_space)

        z = self.transformer.lmul(state_below) + self.b
        p, h, p_sample, h_sample = max_pool_channels(z,
                                                     self.pool_size, msg, theano_rng)

        return p_sample, h_sample

    def downward_message(self, downward_state):
        """
        Message to the layer below: W^T times the detector state,
        scaled by the number of copies.
        """
        self.h_space.validate(downward_state)
        rval = self.transformer.lmul_T(downward_state)

        if self.requires_reformat:
            rval = self.desired_space.format_as(rval, self.input_space)

        return rval * self.copies

    def init_mf_state(self):
        """
        Initial mean field state: max-pooled activations of the bias
        alone.
        """
        # work around theano bug with broadcasted vectors
        z = T.alloc(0., self.dbm.batch_size, self.detector_layer_dim).astype(self.b.dtype) + \
            self.b.dimshuffle('x', 0)
        rval = max_pool_channels(z=z,
                                 pool_size=self.pool_size)
        return rval

    def make_state(self, num_examples, numpy_rng):
        """
        Returns shared variables (p_state, h_state) containing an actual
        sampled state (not a mean field state) for this layer, drawn
        from the bias-only distribution.
        """
        if not hasattr(self, 'copies'):
            self.copies = 1

        if self.copies != 1:
            raise NotImplementedError()

        empty_input = self.h_space.get_origin_batch(num_examples)
        empty_output = self.output_space.get_origin_batch(num_examples)

        h_state = sharedX(empty_input)
        p_state = sharedX(empty_output)

        theano_rng = make_theano_rng(None, numpy_rng.randint(2 ** 16), which_method="binomial")

        default_z = T.zeros_like(h_state) + self.b

        p_exp, h_exp, p_sample, h_sample = max_pool_channels(
            z=default_z,
            pool_size=self.pool_size,
            theano_rng=theano_rng)

        assert h_sample.dtype == default_z.dtype

        f = function([], updates=[
            (p_state, p_sample),
            (h_state, h_sample)
        ])

        f()

        p_state.name = 'p_sample_shared'
        h_state.name = 'h_sample_shared'

        return p_state, h_state

    def make_symbolic_state(self, num_examples, theano_rng):
        """
        Returns a symbolic (p_sample, h_sample) pair containing an
        actual state (not a mean field state) drawn from the bias-only
        distribution.
        """
        if not hasattr(self, 'copies'):
            self.copies = 1

        if self.copies != 1:
            raise NotImplementedError()

        default_z = T.alloc(self.b, num_examples, self.detector_layer_dim)

        p_exp, h_exp, p_sample, h_sample = max_pool_channels(z=default_z,
                                                             pool_size=self.pool_size,
                                                             theano_rng=theano_rng)

        assert h_sample.dtype == default_z.dtype

        return p_sample, h_sample

    def expected_energy_term(self, state, average, state_below, average_below):
        """
        Expected energy: -(upward_state_below W + b) . downward_state,
        per example, scaled by the number of copies.
        """
        # Don't need to do anything special for centering, upward_state / downward state
        # make it all just work

        self.input_space.validate(state_below)

        if self.requires_reformat:
            if not isinstance(state_below, tuple):
                for sb in get_debug_values(state_below):
                    if sb.shape[0] != self.dbm.batch_size:
                        raise ValueError("self.dbm.batch_size is %d but got shape of %d" % (self.dbm.batch_size, sb.shape[0]))
                    assert reduce(operator.mul, sb.shape[1:]) == self.input_dim

            state_below = self.input_space.format_as(state_below, self.desired_space)

        downward_state = self.downward_state(state)
        self.h_space.validate(downward_state)

        # Energy function is linear so it doesn't matter if we're averaging or not
        # Specifically, our terms are -u^T W d - b^T d where u is the upward state of layer below
        # and d is the downward state of this layer

        bias_term = T.dot(downward_state, self.b)
        weights_term = (self.transformer.lmul(state_below) * downward_state).sum(axis=1)

        rval = -bias_term - weights_term

        assert rval.ndim == 1

        return rval * self.copies

    def linear_feed_forward_approximation(self, state_below):
        """
        Used to implement TorontoSparsity. Unclear exactly what properties of
        it are important or how to implement it for other layers.

        Properties it must have: output is same kind of data structure (ie,
        tuple of theano 2-tensors) as mf_update.

        Properties it probably should have for other layer types: an
        infinitesimal change in state_below or the parameters should cause the
        same sign of change in the output of linear_feed_forward_approximation
        and in mf_update

        Should not have any non-linearities that cause the gradient to shrink

        Should disregard top-down feedback

        Parameters
        ----------
        state_below : member of the input space
            Upward state of the layer below.
        """
        z = self.transformer.lmul(state_below) + self.b

        if self.pool_size != 1:
            # Should probably implement sum pooling for the non-pooled version,
            # but in reality it's not totally clear what the right answer is
            raise NotImplementedError()

        return z, z

    def mf_update(self, state_below, state_above, layer_above=None, double_weights=False, iter_name=None):
        """
        One mean field update of (pooled, detector) via probabilistic
        max pooling, given the layers above and below.
        """
        self.input_space.validate(state_below)

        if self.requires_reformat:
            if not isinstance(state_below, tuple):
                for sb in get_debug_values(state_below):
                    if sb.shape[0] != self.dbm.batch_size:
                        raise ValueError("self.dbm.batch_size is %d but got shape of %d" % (self.dbm.batch_size, sb.shape[0]))
                    assert reduce(operator.mul, sb.shape[1:]) == self.input_dim

            state_below = self.input_space.format_as(state_below, self.desired_space)

        if iter_name is None:
            iter_name = 'anon'

        if state_above is not None:
            assert layer_above is not None
            msg = layer_above.downward_message(state_above)
            msg.name = 'msg_from_'+layer_above.layer_name+'_to_'+self.layer_name+'['+iter_name+']'
        else:
            msg = None

        if double_weights:
            state_below = 2. * state_below
            state_below.name = self.layer_name + '_'+iter_name + '_2state'
        z = self.transformer.lmul(state_below) + self.b
        if self.layer_name is not None and iter_name is not None:
            z.name = self.layer_name + '_' + iter_name + '_z'
        p, h = max_pool_channels(z, self.pool_size, msg)

        p.name = self.layer_name + '_p_' + iter_name
        h.name = self.layer_name + '_h_' + iter_name

        return p, h
class Softmax(HiddenLayer):
"""
A layer representing a single softmax distribution of a
set of discrete categories.
Parameters
----------
n_classes : int
The number of discrete categories.
layer_name : str
The name of the layer.
irange : float
If not None, initialize the weights in U(-irange, irange)
sparse_init : int
If not None, initialize `sparse_init` weights per column
to N(0, sparse_istdev^2)
sparse_istdev : float
see above
W_lr_scale : float
Scale the learning rate on the weights by this amount
b_lr_scale : float
Scale the learning rate on the biases by this amount
max_col_norm : float
If not None, constrain the columns of the weight matrix
to have at most this L2 norm
copies : int
Make this many copies of the random variables, all sharing
the same weights. This allows the undirected model to
behave as if it has asymmetric connections.
center : bool
If True, use Gregoire Montavon's centering trick.
learn_init_inpainting_state : bool
If True, and using inpainting-based methods (MP-DBM), learn
a parameter controlling the initial value of the mean field
state for this layer.
"""
presynaptic_name = "presynaptic_Y_hat"
def __init__(self, n_classes, layer_name, irange = None,
             sparse_init = None, sparse_istdev = 1., W_lr_scale = None,
             b_lr_scale = None,
             max_col_norm = None,
             copies = 1, center = False,
             learn_init_inpainting_state = True):
    """
    Store configuration, allocate the bias vector, and (when using the
    centering trick) the softmax-of-bias offset. Weights are created
    later, in `set_input_space`.
    """
    super(Softmax, self).__init__()

    # Allow string-valued (e.g. YAML) learning rate scales. This must
    # happen before the locals() capture so the stored attribute is a
    # float.
    if isinstance(W_lr_scale, str):
        W_lr_scale = float(W_lr_scale)

    # Stash all constructor arguments as attributes of the same name.
    self.__dict__.update(locals())
    del self.self

    assert isinstance(n_classes, py_integer_types)

    self.output_space = VectorSpace(n_classes)
    self.b = sharedX( np.zeros((n_classes,)), name = 'softmax_b')

    if self.center:
        # Centering offset: the softmax of the (zero) initial biases.
        b = self.b.get_value()
        self.offset = sharedX(np.exp(b) / np.exp(b).sum())
@functools.wraps(Model._modify_updates)
def _modify_updates(self, updates):
    # Patch old pickle files that predate the max_col_norm option.
    if not hasattr(self, 'max_col_norm'):
        self.max_col_norm = None

    if self.max_col_norm is None:
        return
    weights = self.W
    if weights not in updates:
        return

    # Project the updated weight columns back onto the norm ball.
    new_W = updates[weights]
    norms = T.sqrt(T.sum(T.sqr(new_W), axis=0))
    clipped = T.clip(norms, 0, self.max_col_norm)
    updates[weights] = new_W * (clipped / (1e-7 + norms))
@functools.wraps(Model.get_lr_scalers)
def get_lr_scalers(self):
    scalers = OrderedDict()

    # Patch old pickle files that predate these options.
    if not hasattr(self, 'W_lr_scale'):
        self.W_lr_scale = None
    if not hasattr(self, 'b_lr_scale'):
        self.b_lr_scale = None

    if self.W_lr_scale is not None:
        assert isinstance(self.W_lr_scale, float)
        scalers[self.W] = self.W_lr_scale

    if self.b_lr_scale is not None:
        assert isinstance(self.b_lr_scale, float)
        scalers[self.b] = self.b_lr_scale

    return scalers
def get_total_state_space(self):
"""
.. todo::
WRITEME
"""
return self.output_space
def get_monitoring_channels_from_state(self, state):
"""
.. todo::
WRITEME
"""
mx = state.max(axis=1)
return OrderedDict([
('mean_max_class' , mx.mean()),
('max_max_class' , mx.max()),
('min_max_class' , mx.min())
])
    def set_input_space(self, space):
        """
        Configure the layer for the given input space and initialize the
        weight matrix W, either uniformly in [-irange, irange] or with
        sparse_init nonzero entries per class column.
        Note: this resets the parameters.
        """
        self.input_space = space
        if not isinstance(space, Space):
            raise TypeError("Expected Space, got "+
                    str(space)+" of type "+str(type(space)))
        self.input_dim = space.get_total_dimension()
        # Non-vector input spaces must be flattened before the dot product.
        self.needs_reformat = not isinstance(space, VectorSpace)
        self.desired_space = VectorSpace(self.input_dim)
        if not self.needs_reformat:
            assert self.desired_space == self.input_space
        rng = self.dbm.rng
        if self.irange is not None:
            assert self.sparse_init is None
            W = rng.uniform(-self.irange,self.irange, (self.input_dim,self.n_classes))
        else:
            assert self.sparse_init is not None
            W = np.zeros((self.input_dim, self.n_classes))
            for i in xrange(self.n_classes):
                # Place sparse_init nonzero gaussian weights per column,
                # resampling any row index that is already occupied.
                for j in xrange(self.sparse_init):
                    idx = rng.randint(0, self.input_dim)
                    while W[idx, i] != 0.:
                        idx = rng.randint(0, self.input_dim)
                    W[idx, i] = rng.randn() * self.sparse_istdev
        self.W = sharedX(W,  'softmax_W' )
        self._params = [ self.b, self.W ]
def get_weights_topo(self):
"""
.. todo::
WRITEME
"""
if not isinstance(self.input_space, Conv2DSpace):
raise NotImplementedError()
desired = self.W.get_value().T
ipt = self.desired_space.format_as(desired, self.input_space)
rval = Conv2DSpace.convert_numpy(ipt, self.input_space.axes, ('b', 0, 1, 'c'))
return rval
def get_weights(self):
"""
.. todo::
WRITEME
"""
if not isinstance(self.input_space, VectorSpace):
raise NotImplementedError()
return self.W.get_value()
def set_weights(self, weights):
"""
.. todo::
WRITEME
"""
self.W.set_value(weights)
def set_biases(self, biases, recenter=False):
"""
.. todo::
WRITEME
"""
self.b.set_value(biases)
if recenter:
assert self.center
self.offset.set_value( (np.exp(biases) / np.exp(biases).sum()).astype(self.offset.dtype))
def get_biases(self):
"""
.. todo::
WRITEME
"""
return self.b.get_value()
def get_weights_format(self):
"""
.. todo::
WRITEME
"""
return ('v', 'h')
    def sample(self, state_below = None, state_above = None,
                    layer_above = None,
                    theano_rng = None):
        """
        Draw a one-hot sample of the class variable given the state of
        the layer below. Top-down input and copies != 1 are unsupported.
        """
        if self.copies != 1:
            raise NotImplementedError("need to draw self.copies samples and average them together.")
        if state_above is not None:
            # If you implement this case, also add a unit test for it.
            # Or at least add a warning that it is not tested.
            raise NotImplementedError()
        if theano_rng is None:
            raise ValueError("theano_rng is required; it just defaults to None so that it may appear after layer_above / state_above in the list.")
        self.input_space.validate(state_below)
        # patch old pickle files
        if not hasattr(self, 'needs_reformat'):
            self.needs_reformat = self.needs_reshape
            del self.needs_reshape
        if self.needs_reformat:
            state_below = self.input_space.format_as(state_below, self.desired_space)
        self.desired_space.validate(state_below)
        z = T.dot(state_below, self.W) + self.b
        h_exp = T.nnet.softmax(z)
        # One multinomial draw per example yields a one-hot class sample.
        h_sample = theano_rng.multinomial(pvals = h_exp, dtype = h_exp.dtype)
        return h_sample
    def mf_update(self, state_below, state_above = None, layer_above = None, double_weights = False, iter_name = None):
        """
        Mean field update: the softmax of the affine transform of the
        (reformatted) state below. Top-down input and double weights
        are not supported for this layer.
        """
        if state_above is not None:
            raise NotImplementedError()
        if double_weights:
            raise NotImplementedError()
        self.input_space.validate(state_below)
        # patch old pickle files
        if not hasattr(self, 'needs_reformat'):
            self.needs_reformat = self.needs_reshape
            del self.needs_reshape
        if self.needs_reformat:
            state_below = self.input_space.format_as(state_below, self.desired_space)
        for value in get_debug_values(state_below):
            if value.shape[0] != self.dbm.batch_size:
                raise ValueError("state_below should have batch size "+str(self.dbm.batch_size)+" but has "+str(value.shape[0]))
        self.desired_space.validate(state_below)
        assert self.W.ndim == 2
        assert state_below.ndim == 2
        b = self.b
        Z = T.dot(state_below, self.W) + b
        rval = T.nnet.softmax(Z)
        for value in get_debug_values(rval):
            assert value.shape[0] == self.dbm.batch_size
        return rval
def downward_message(self, downward_state):
"""
.. todo::
WRITEME
"""
if not hasattr(self, 'copies'):
self.copies = 1
rval = T.dot(downward_state, self.W.T) * self.copies
rval = self.desired_space.format_as(rval, self.input_space)
return rval
    def recons_cost(self, Y, Y_hat_unmasked, drop_mask_Y, scale):
        """
        The cost of reconstructing `Y` as `Y_hat`. Specifically,
        the negative log probability.
        This cost is for use with multi-prediction training.
        Parameters
        ----------
        Y : target space batch
            The data labels
        Y_hat_unmasked : target space batch
            The output of this layer's `mf_update`; the predicted
            values of `Y`. Even though the model is only predicting
            the dropped values, we take predictions for all the
            values here.
        drop_mask_Y : 1-D theano tensor
            A batch of 0s/1s, with 1s indicating that variables
            have been dropped, and should be included in the
            reconstruction cost. One indicator per example in the
            batch, since each example in this layer only has one
            random variable in it.
        scale : float
            Multiply the cost by this amount.
            We need to do this because the visible layer also goes into
            the cost. We use the mean over units and examples, so that
            the scale of the cost doesn't change too much with batch
            size or example size.
            We need to multiply this cost by scale to make sure that
            it is put on the same scale as the reconstruction cost
            for the visible units. ie, scale should be 1/nvis
        """
        # Unwrap the softmax graph to recover the pre-softmax activations z
        # so the log probability can be computed in a numerically stable way.
        Y_hat = Y_hat_unmasked
        assert hasattr(Y_hat, 'owner')
        owner = Y_hat.owner
        assert owner is not None
        op = owner.op
        if isinstance(op, Print):
            # Look through a debugging Print op if one was inserted.
            assert len(owner.inputs) == 1
            Y_hat, = owner.inputs
            owner = Y_hat.owner
            op = owner.op
        assert isinstance(op, T.nnet.Softmax)
        z ,= owner.inputs
        assert z.ndim == 2
        # Stabilized log-softmax: subtract the per-row max before exp.
        z = z - z.max(axis=1).dimshuffle(0, 'x')
        log_prob = z - T.log(T.exp(z).sum(axis=1).dimshuffle(0, 'x'))
        # we use sum and not mean because this is really one variable per row
        log_prob_of = (Y * log_prob).sum(axis=1)
        masked = log_prob_of * drop_mask_Y
        assert masked.ndim == 1
        rval = masked.mean() * scale * self.copies
        return - rval
def init_mf_state(self):
"""
.. todo::
WRITEME
"""
rval = T.nnet.softmax(self.b.dimshuffle('x', 0)) + T.alloc(0., self.dbm.batch_size, self.n_classes).astype(config.floatX)
return rval
    def make_state(self, num_examples, numpy_rng):
        """
        Returns a shared variable containing an actual state
        (not a mean field state) for this variable: one multinomial
        sample per example drawn from softmax(b).
        """
        if self.copies != 1:
            raise NotImplementedError("need to make self.copies samples and average them together.")
        t1 = time.time()
        empty_input = self.output_space.get_origin_batch(num_examples)
        h_state = sharedX(empty_input)
        # Broadcast the bias vector up to the full batch shape.
        default_z = T.zeros_like(h_state) + self.b
        theano_rng = make_theano_rng(None, numpy_rng.randint(2 ** 16),
                which_method="binomial")
        h_exp = T.nnet.softmax(default_z)
        h_sample = theano_rng.multinomial(pvals = h_exp, dtype = h_exp.dtype)
        h_state = sharedX( self.output_space.get_origin_batch(
            num_examples))
        t2 = time.time()
        # Compile and run a function that fills the shared state in place.
        f = function([], updates = [(
            h_state , h_sample
            )])
        t3 = time.time()
        f()
        t4 = time.time()
        logger.info('{0}.make_state took {1}'.format(self, t4-t1))
        logger.info('\tcompose time: {0}'.format(t2-t1))
        logger.info('\tcompile time: {0}'.format(t3-t2))
        logger.info('\texecute time: {0}'.format(t4-t3))
        h_state.name = 'softmax_sample_shared'
        return h_state
def make_symbolic_state(self, num_examples, theano_rng):
"""
.. todo::
WRITEME
"""
"""
Returns a symbolic variable containing an actual state
(not a mean field state) for this variable.
"""
if self.copies != 1:
raise NotImplementedError("need to make self.copies samples and average them together.")
default_z = T.alloc(self.b, num_examples, self.n_classes)
h_exp = T.nnet.softmax(default_z)
h_sample = theano_rng.multinomial(pvals=h_exp, dtype=h_exp.dtype)
return h_sample
def get_weight_decay(self, coeff):
"""
.. todo::
WRITEME
"""
if isinstance(coeff, str):
coeff = float(coeff)
assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
return coeff * T.sqr(self.W).sum()
def upward_state(self, state):
"""
.. todo::
WRITEME
"""
if self.center:
return state - self.offset
return state
def downward_state(self, state):
"""
.. todo::
WRITEME
"""
if not hasattr(self, 'center'):
self.center = False
if self.center:
"""TODO: write a unit test verifying that inference or sampling
below a centered Softmax layer works"""
return state - self.offset
return state
    def expected_energy_term(self, state, average, state_below, average_below):
        """
        This layer's contribution to the expected energy, one scalar
        per example: -(state . b) - sum((state_below W) * state).
        """
        if self.center:
            state = state - self.offset
        self.input_space.validate(state_below)
        if self.needs_reformat:
            state_below = self.input_space.format_as(state_below, self.desired_space)
        self.desired_space.validate(state_below)
        # Energy function is linear so it doesn't matter if we're averaging or not
        # Specifically, our terms are -u^T W d - b^T d where u is the upward state of layer below
        # and d is the downward state of this layer
        bias_term = T.dot(state, self.b)
        weights_term = (T.dot(state_below, self.W) * state).sum(axis=1)
        rval = -bias_term - weights_term
        rval *= self.copies
        assert rval.ndim == 1
        return rval
def init_inpainting_state(self, Y, noise):
"""
.. todo::
WRITEME
"""
if noise:
theano_rng = make_theano_rng(None, 2012+10+30, which_method="binomial")
return T.nnet.softmax(theano_rng.normal(avg=0., size=Y.shape, std=1., dtype='float32'))
rval = T.nnet.softmax(self.b)
if not hasattr(self, 'learn_init_inpainting_state'):
self.learn_init_inpainting_state = 1
if not self.learn_init_inpainting_state:
rval = block_gradient(rval)
return rval
def install_presynaptic_outputs(self, outputs_dict, batch_size):
"""
.. todo::
WRITEME
"""
assert self.presynaptic_name not in outputs_dict
outputs_dict[self.presynaptic_name] = self.output_space.make_shared_batch(batch_size, self.presynaptic_name)
class GaussianVisLayer(VisibleLayer):
    """
    Implements a visible layer that is conditionally gaussian with
    diagonal variance. The layer lives in a Conv2DSpace.

    Parameters
    ----------
    rows, cols, channels : int
        the shape of the space (conv mode; mutually exclusive with nvis)
    learn_init_inpainting_state : bool, optional
        If True, gradients flow through the initial inpainting state.
    nvis : int
        flat vector-space size (mutually exclusive with rows/cols/channels)
    init_beta : float
        the initial value of the precision parameter
    min_beta : float
        clip beta so it is at least this big (default 1)
    init_mu : float
        the initial value of the mean parameter
    tie_beta : str or None
        None or a string specifying how to tie beta 'locations' = tie beta
        across locations, ie beta should be a vector with one elem per channel
    tie_mu : str or None
        None or a string specifying how to tie mu 'locations' = tie mu across
        locations, ie mu should be a vector with one elem per channel
    bias_from_marginals : dataset or None
        if given, init_mu is taken from the dataset's per-feature mean
    beta_lr_scale : str, float or None
        'by_sharing', None, or an explicit scale for beta's learning rate
    axes : tuple
        axis ordering for the Conv2DSpace (must contain 'b')
    """
    def __init__(self,
            rows = None,
            cols = None,
            learn_init_inpainting_state=True,
            channels = None,
            nvis = None,
            init_beta = 1.,
            min_beta = 1.,
            init_mu = None,
            tie_beta = None,
            tie_mu = None,
            bias_from_marginals = None,
            beta_lr_scale = 'by_sharing',
            axes = ('b', 0, 1, 'c')):

        warnings.warn("GaussianVisLayer math very faith based, need to finish working through gaussian.lyx")

        self.__dict__.update(locals())
        del self.self

        if bias_from_marginals is not None:
            del self.bias_from_marginals
            if self.nvis is None:
                raise NotImplementedError()
            assert init_mu is None
            # Initialize the mean from the dataset marginals.
            init_mu = bias_from_marginals.X.mean(axis=0)

        if init_mu is None:
            init_mu = 0.
        if nvis is None:
            assert rows is not None
            assert cols is not None
            assert channels is not None
            self.space = Conv2DSpace(shape=[rows,cols], num_channels=channels, axes=axes)
            # To make GaussianVisLayer compatible with any axis ordering
            self.batch_axis=list(axes).index('b')
            self.axes_to_sum = list(range(len(axes)))
            self.axes_to_sum.remove(self.batch_axis)
        else:
            assert rows is None
            assert cols is None
            assert channels is None
            self.space = VectorSpace(nvis)
            self.axes_to_sum = 1
            self.batch_axis = None
        self.input_space = self.space

        origin = self.space.get_origin()

        beta_origin = origin.copy()
        assert tie_beta in [ None, 'locations']
        if tie_beta == 'locations':
            assert nvis is None
            # One beta per channel, shared across spatial locations.
            beta_origin = np.zeros((self.space.num_channels,))
        self.beta = sharedX(beta_origin + init_beta, name = 'beta')
        assert self.beta.ndim == beta_origin.ndim

        mu_origin = origin.copy()
        assert tie_mu in [None, 'locations']
        if tie_mu == 'locations':
            assert nvis is None
            # One mu per channel, shared across spatial locations.
            mu_origin = np.zeros((self.space.num_channels,))
        self.mu = sharedX( mu_origin + init_mu, name = 'mu')
        assert self.mu.ndim == mu_origin.ndim

    def get_monitoring_channels(self):
        """
        Monitor summary statistics of the precision parameter beta.
        """
        rval = OrderedDict()

        rval['beta_min'] = self.beta.min()
        rval['beta_mean'] = self.beta.mean()
        rval['beta_max'] = self.beta.max()

        return rval

    def get_params(self):
        """
        Return the model parameters: beta, and mu when present.
        """
        if self.mu is None:
            return [self.beta]
        return [self.beta, self.mu]

    def get_lr_scalers(self):
        """
        Return learning rate scalers for beta and mu. Parameters that
        are tied across locations have their learning rate divided by
        the number of locations they are shared over.
        """
        rval = OrderedDict()

        if self.nvis is None:
            rows, cols = self.space.shape
            num_loc = float(rows * cols)

        assert self.tie_beta in [None, 'locations']
        if self.beta_lr_scale == 'by_sharing':
            if self.tie_beta == 'locations':
                assert self.nvis is None
                rval[self.beta] = 1. / num_loc
        elif self.beta_lr_scale == None:
            pass
        else:
            rval[self.beta] = self.beta_lr_scale

        assert self.tie_mu in [None, 'locations']
        if self.tie_mu == 'locations':
            assert self.nvis is None
            rval[self.mu] = 1./num_loc
            logger.warning("mu lr_scaler hardcoded to 1/sharing")

        return rval

    @functools.wraps(Model._modify_updates)
    def _modify_updates(self, updates):
        if self.beta in updates:
            updated_beta = updates[self.beta]
            # Keep the precision at least min_beta (and bounded above).
            updates[self.beta] = T.clip(updated_beta,
                    self.min_beta, 1e6)

    def set_biases(self, bias):
        """
        Set mean parameter

        Parameters
        ----------
        bias: ndarray
            Vector of size nvis
        """
        self.mu = sharedX(bias, name = 'mu')

    def broadcasted_mu(self):
        """
        Returns mu, broadcasted to have the same shape as a batch of data
        """
        if self.tie_mu == 'locations':
            def f(x):
                if x == 'c':
                    return 0
                return 'x'
            axes = [f(ax) for ax in self.axes]
            rval = self.mu.dimshuffle(*axes)
        else:
            assert self.tie_mu is None
            if self.nvis is None:
                axes = [0, 1, 2]
                # Insert a broadcastable axis at the batch position.
                axes.insert(self.axes.index('b'), 'x')
                rval = self.mu.dimshuffle(*axes)
            else:
                rval = self.mu.dimshuffle('x', 0)

        self.input_space.validate(rval)

        return rval

    def broadcasted_beta(self):
        """
        Returns beta, broadcasted to have the same shape as a batch of data
        """
        return self.broadcast_beta(self.beta)

    def broadcast_beta(self, beta):
        """
        Returns the given beta, broadcasted to have the same shape as a
        batch of data.
        """
        if self.tie_beta == 'locations':
            def f(x):
                if x == 'c':
                    return 0
                return 'x'
            axes = [f(ax) for ax in self.axes]
            rval = beta.dimshuffle(*axes)
        else:
            assert self.tie_beta is None
            if self.nvis is None:
                axes = [0, 1, 2]
                # Insert a broadcastable axis at the batch position.
                axes.insert(self.axes.index('b'), 'x')
                rval = beta.dimshuffle(*axes)
            else:
                rval = beta.dimshuffle('x', 0)

        self.input_space.validate(rval)

        return rval

    def init_inpainting_state(self, V, drop_mask, noise = False, return_unmasked = False):
        """
        Initial inpainting state: dropped entries are filled with mu
        (or gaussian noise when `noise` is set), kept entries come from V.
        """
        unmasked = self.broadcasted_mu()

        if drop_mask is None:
            assert not noise
            assert not return_unmasked
            return unmasked
        masked_mu = unmasked * drop_mask

        if not hasattr(self, 'learn_init_inpainting_state'):
            self.learn_init_inpainting_state = True   # patch old pickle files
        if not self.learn_init_inpainting_state:
            masked_mu = block_gradient(masked_mu)
        masked_mu.name = 'masked_mu'

        if noise:
            theano_rng = make_theano_rng(None, 42, which_method="binomial")
            unmasked = theano_rng.normal(avg = 0.,
                    std = 1., size = masked_mu.shape,
                    dtype = masked_mu.dtype)
            masked_mu = unmasked * drop_mask
            masked_mu.name = 'masked_noise'

        masked_V  = V  * (1-drop_mask)
        rval = masked_mu + masked_V
        rval.name = 'init_inpainting_state'

        if return_unmasked:
            return rval, unmasked
        return rval

    def expected_energy_term(self, state, average, state_below = None, average_below = None):
        """
        This layer's contribution to the expected energy, one scalar per
        example: 0.5 * sum(beta * (state - mu)^2) over non-batch axes.
        """
        assert state_below is None
        assert average_below is None
        self.space.validate(state)
        if average:
            raise NotImplementedError(str(type(self))+" doesn't support integrating out variational parameters yet.")
        else:
            rval =  0.5 * (self.beta * T.sqr(state - self.mu)).sum(axis=self.axes_to_sum)
        assert rval.ndim == 1
        return rval

    def inpaint_update(self, state_above, layer_above, drop_mask = None, V = None,
                        return_unmasked = False):
        """
        Inpainting update: the conditional mean mu + downward message,
        substituted only at the dropped entries of V.
        """
        msg = layer_above.downward_message(state_above)
        mu = self.broadcasted_mu()

        z = msg + mu
        z.name = 'inpainting_z_[unknown_iter]'

        if drop_mask is not None:
            rval = drop_mask * z + (1-drop_mask) * V
        else:
            rval = z

        rval.name = 'inpainted_V[unknown_iter]'

        if return_unmasked:
            return rval, z

        return rval

    def sample(self, state_below = None, state_above = None,
                    layer_above = None,
                    theano_rng = None):
        """
        Draw a gaussian sample with mean mu + downward message and
        standard deviation 1/sqrt(beta).
        """
        assert state_below is None
        msg = layer_above.downward_message(state_above)
        mu = self.mu

        z = msg + mu

        rval = theano_rng.normal(size = z.shape, avg = z, dtype = z.dtype,
                       std = 1. / T.sqrt(self.beta))

        return rval

    def recons_cost(self, V, V_hat_unmasked, drop_mask = None, use_sum=False):
        """
        Gaussian reconstruction cost of V given the prediction
        V_hat_unmasked; see _recons_cost.
        """
        return self._recons_cost(V=V, V_hat_unmasked=V_hat_unmasked, drop_mask=drop_mask, use_sum=use_sum, beta=self.beta)

    def _recons_cost(self, V, V_hat_unmasked, beta, drop_mask=None, use_sum=False):
        """
        Gaussian negative log likelihood of V under a diagonal gaussian
        with mean V_hat_unmasked, optionally restricted by drop_mask.
        """
        V_hat = V_hat_unmasked

        assert V.ndim == V_hat.ndim
        # NOTE(review): the `beta` argument is shadowed here, so the value
        # passed in (e.g. by ensemble_recons_cost) is ignored -- confirm
        # whether that is intended before relying on the ensemble path.
        beta = self.broadcasted_beta()
        unmasked_cost = 0.5 * beta * T.sqr(V-V_hat) - 0.5*T.log(beta / (2*np.pi))
        assert unmasked_cost.ndim == V_hat.ndim

        if drop_mask is None:
            masked_cost = unmasked_cost
        else:
            masked_cost = drop_mask * unmasked_cost

        if use_sum:
            return masked_cost.mean(axis=0).sum()

        # (a dead duplicate `return masked_cost.mean()` used to follow here)
        return masked_cost.mean()

    def upward_state(self, total_state):
        """
        The state presented to the layer above: the standardized
        visible state, (V - mu) * beta.
        """
        if self.nvis is None and total_state.ndim != 4:
            raise ValueError("total_state should have 4 dimensions, has "+str(total_state.ndim))
        assert total_state is not None
        V = total_state
        self.input_space.validate(V)
        upward_state = (V - self.broadcasted_mu()) * self.broadcasted_beta()
        return upward_state

    def make_state(self, num_examples, numpy_rng):
        """
        Return a shared variable holding a sample of the visible units:
        mu plus gaussian noise with standard deviation 1/sqrt(beta).
        """
        shape = [num_examples]

        if self.nvis is None:
            rows, cols = self.space.shape
            channels = self.space.num_channels
            shape.append(rows)
            shape.append(cols)
            shape.append(channels)
        else:
            shape.append(self.nvis)

        sample = numpy_rng.randn(*shape)

        sample *= 1./np.sqrt(self.beta.get_value())
        sample += self.mu.get_value()
        rval = sharedX(sample, name = 'v_sample_shared')

        return rval

    def install_presynaptic_outputs(self, outputs_dict, batch_size):
        """
        Register the shared batch used to accumulate the ensemble's
        beta-weighted prediction sum.
        """
        outputs_dict['output_V_weighted_pred_sum'] = self.space.make_shared_batch(batch_size)

    def ensemble_prediction(self, symbolic, outputs_dict, ensemble):
        """
        Output a symbolic expression for V_hat_unmasked based on taking the
        geometric mean over the ensemble and renormalizing.
        n - 1 members of the ensemble have modified outputs_dict and the nth
        gives its prediction in "symbolic". The parameters for the nth one
        are currently loaded in the model.
        """
        weighted_pred_sum = outputs_dict['output_V_weighted_pred_sum'] \
                + self.broadcasted_beta() * symbolic

        beta_sum = sum(ensemble.get_ensemble_variants(self.beta))

        unmasked_V_hat = weighted_pred_sum / self.broadcast_beta(beta_sum)

        return unmasked_V_hat

    def ensemble_recons_cost(self, V, V_hat_unmasked, drop_mask=None,
            use_sum=False, ensemble=None):
        """
        Reconstruction cost for an ensemble, using the average beta
        across ensemble members.
        """
        beta = sum(ensemble.get_ensemble_variants(self.beta)) / ensemble.num_copies

        return self._recons_cost(V=V, V_hat_unmasked=V_hat_unmasked, beta=beta, drop_mask=drop_mask,
                use_sum=use_sum)
class ConvMaxPool(HiddenLayer):
    """
    A convolutional hidden layer whose detector units are produced by a
    2D convolution and grouped into non-overlapping
    pool_rows x pool_cols max pools.
    """
    def __init__(self,
            output_channels,
            kernel_rows,
            kernel_cols,
            pool_rows,
            pool_cols,
            layer_name,
            center = False,
            irange = None,
            sparse_init = None,
            scale_by_sharing = True,
            init_bias = 0.,
            border_mode = 'valid',
            output_axes = ('b', 'c', 0, 1)):
        """
        Record the hyperparameters and create the (per-channel) bias.
        Exactly one of irange / sparse_init must be given.
        """
        self.__dict__.update(locals())
        del self.self

        assert (irange is None) != (sparse_init is None)

        self.b = sharedX( np.zeros((output_channels,)) + init_bias, name = layer_name + '_b')
        assert border_mode in ['full','valid']

    def broadcasted_bias(self):
        """
        Return the bias reshaped to broadcast over a 4-tensor batch:
        one entry per channel, 'x' on the remaining axes.
        """
        assert self.b.ndim == 1

        shuffle = [ 'x' ] * 4
        shuffle[self.output_axes.index('c')] = 0

        return self.b.dimshuffle(*shuffle)

    def get_total_state_space(self):
        """
        Total state: the (detector, pool) pair of spaces.
        """
        return CompositeSpace((self.h_space, self.output_space))

    def set_input_space(self, space):
        """
        Configure the layer for the given Conv2DSpace input and build
        the convolutional transformer. Note: this resets parameters!
        """
        if not isinstance(space, Conv2DSpace):
            raise TypeError("ConvMaxPool can only act on a Conv2DSpace, but received " +
                    str(type(space))+" as input.")
        self.input_space = space
        self.input_rows, self.input_cols = space.shape
        self.input_channels = space.num_channels

        if self.border_mode == 'valid':
            self.h_rows = self.input_rows - self.kernel_rows + 1
            self.h_cols = self.input_cols - self.kernel_cols + 1
        else:
            assert self.border_mode == 'full'
            self.h_rows = self.input_rows + self.kernel_rows - 1
            self.h_cols = self.input_cols + self.kernel_cols - 1

        # Detector shape must tile exactly into pools.
        if not( self.h_rows % self.pool_rows == 0):
            raise ValueError("h_rows = %d, pool_rows = %d. Should be divisible but remainder is %d" %
                    (self.h_rows, self.pool_rows, self.h_rows % self.pool_rows))
        assert self.h_cols % self.pool_cols == 0

        # NOTE: Python 2 integer division is relied on here.
        self.h_space = Conv2DSpace(shape = (self.h_rows, self.h_cols), num_channels = self.output_channels,
                axes = self.output_axes)
        self.output_space = Conv2DSpace(shape = (self.h_rows / self.pool_rows,
                                                self.h_cols / self.pool_cols),
                                                num_channels = self.output_channels,
                axes = self.output_axes)

        logger.info('{0}: detector shape: {1} '
                    'pool shape: {2}'.format(self.layer_name,
                                             self.h_space.shape,
                                             self.output_space.shape))

        if tuple(self.output_axes) == ('b', 0, 1, 'c'):
            self.max_pool = max_pool_b01c
        elif tuple(self.output_axes) == ('b', 'c', 0, 1):
            self.max_pool = max_pool
        else:
            raise NotImplementedError()

        if self.irange is not None:
            self.transformer = make_random_conv2D(self.irange, input_space = space,
                    output_space = self.h_space, kernel_shape = (self.kernel_rows, self.kernel_cols),
                    batch_size = self.dbm.batch_size, border_mode = self.border_mode, rng = self.dbm.rng)
        else:
            self.transformer = make_sparse_random_conv2D(self.sparse_init, input_space = space,
                    output_space = self.h_space, kernel_shape = (self.kernel_rows, self.kernel_cols),
                    batch_size = self.dbm.batch_size, border_mode = self.border_mode, rng = self.dbm.rng)
        self.transformer._filters.name = self.layer_name + '_W'

        W ,= self.transformer.get_params()
        assert W.name is not None

        if self.center:
            # Centering offsets are the initial mean field state.
            p_ofs, h_ofs = self.init_mf_state()
            self.p_offset = sharedX(self.output_space.get_origin(), 'p_offset')
            self.h_offset = sharedX(self.h_space.get_origin(), 'h_offset')
            f = function([], updates={self.p_offset: p_ofs[0,:,:,:], self.h_offset: h_ofs[0,:,:,:]})
            f()

    def get_params(self):
        """
        Return the parameters: the convolution filters and the bias.
        """
        assert self.b.name is not None
        W ,= self.transformer.get_params()
        assert W.name is not None

        return [ W, self.b]

    def state_to_b01c(self, state):
        """
        Convert each element of the state to ('b', 0, 1, 'c') axis
        order; a no-op when the layer already uses that order.
        """
        if tuple(self.output_axes) == ('b',0,1,'c'):
            return state
        return [ Conv2DSpace.convert(elem, self.output_axes, ('b', 0, 1, 'c'))
                for elem in state ]

    def get_range_rewards(self, state, coeffs):
        """
        Reward (1 - range) of unit activations, averaged per channel.
        """
        rval = 0.

        if self.pool_rows == 1 and self.pool_cols == 1:
            # If the pool size is 1 then pools = detectors
            # and we should not penalize pools and detectors separately
            assert len(state) == 2
            assert isinstance(coeffs, float)
            _, state = state
            state = [state]
            coeffs = [coeffs]
        else:
            assert all([len(elem) == 2 for elem in [state, coeffs]])

        for s, c in safe_zip(state, coeffs):
            if c == 0.:
                continue
            # Range over everything but the channel index
            # theano can only take gradient through max if the max is over 1 axis or all axes
            # so I manually unroll the max for the case I use here
            assert self.h_space.axes == ('b', 'c', 0, 1)
            assert self.output_space.axes == ('b', 'c', 0, 1)
            mx = s.max(axis=3).max(axis=2).max(axis=0)
            assert hasattr(mx.owner.op, 'grad')
            mn = s.min(axis=3).max(axis=2).max(axis=0)
            assert hasattr(mn.owner.op, 'grad')
            assert mx.ndim == 1
            assert mn.ndim == 1
            r = mx - mn
            rval += (1. - r).mean() * c

        return rval

    def get_l1_act_cost(self, state, target, coeff, eps):
        """
        L1 activation cost penalizing the deviation of mean activations
        from `target`, with slack `eps`.

        target: if pools contain more than one element, should be a list
                with two elements. the first element is for the pooling
                units and the second for the detector units.
        """
        rval = 0.

        if self.pool_rows == 1 and self.pool_cols == 1:
            # If the pool size is 1 then pools = detectors
            # and we should not penalize pools and detectors separately
            assert len(state) == 2
            assert isinstance(target, float)
            assert isinstance(coeff, float)
            _, state = state
            state = [state]
            target = [target]
            coeff = [coeff]
            if eps is None:
                eps = 0.
            eps = [eps]
        else:
            if eps is None:
                eps = [0., 0.]
            assert all([len(elem) == 2 for elem in [state, target, coeff]])
            p_target, h_target = target
            if h_target > p_target and (coeff[0] != 0. and coeff[1] != 0.):
                # note that, within each group, E[p] is the sum of E[h]
                warnings.warn("Do you really want to regularize the detector units to be more active than the pooling units?")

        for s, t, c, e in safe_zip(state, target, coeff, eps):
            if c == 0.:
                continue
            # Average over everything but the channel index
            m = s.mean(axis= [ ax for ax in range(4) if self.output_axes[ax] != 'c' ])
            assert m.ndim == 1
            rval += T.maximum(abs(m-t)-e,0.).mean()*c

        return rval

    def get_lr_scalers(self):
        """
        When scale_by_sharing is set, scale each learning rate by one
        over the number of times the parameter is reused.
        """
        if self.scale_by_sharing:
            # scale each learning rate by 1 / # times param is reused
            h_rows, h_cols = self.h_space.shape
            num_h = float(h_rows * h_cols)
            return OrderedDict([(self.transformer._filters, 1./num_h),
                             (self.b, 1. / num_h)])
        else:
            return OrderedDict()

    def upward_state(self, total_state):
        """
        The pooled state (optionally centered) passed to the layer above.
        """
        p,h = total_state

        if not hasattr(self, 'center'):
            self.center = False

        if self.center:
            p -= self.p_offset
            h -= self.h_offset

        return p

    def downward_state(self, total_state):
        """
        The detector state (optionally centered) passed to the layer below.
        """
        p,h = total_state

        if not hasattr(self, 'center'):
            self.center = False

        if self.center:
            p -= self.p_offset
            h -= self.h_offset

        return h

    def get_monitoring_channels_from_state(self, state):
        """
        Monitor per-channel statistics of the pooled unit activations.
        """
        P, H = state

        if tuple(self.output_axes) == ('b',0,1,'c'):
            p_max = P.max(axis=(0,1,2))
            p_min = P.min(axis=(0,1,2))
            p_mean = P.mean(axis=(0,1,2))
        else:
            assert tuple(self.output_axes) == ('b','c',0,1)
            p_max = P.max(axis=(0,2,3))
            p_min = P.min(axis=(0,2,3))
            p_mean = P.mean(axis=(0,2,3))
        p_range = p_max - p_min

        rval = {
                'p_max_max' : p_max.max(),
                'p_max_mean' : p_max.mean(),
                'p_max_min' : p_max.min(),
                'p_min_max' : p_min.max(),
                'p_min_mean' : p_min.mean(),
                # Fixed: this entry was a duplicate 'p_min_max' key;
                # 'p_min_min' was clearly intended.
                'p_min_min' : p_min.min(),
                'p_range_max' : p_range.max(),
                'p_range_mean' : p_range.mean(),
                'p_range_min' : p_range.min(),
                'p_mean_max' : p_mean.max(),
                'p_mean_mean' : p_mean.mean(),
                'p_mean_min' : p_mean.min()
                }

        return rval

    def get_weight_decay(self, coeffs):
        """
        Return coeffs times the sum of squared filter weights; a string
        coefficient is converted to float (consistent with Softmax).
        """
        if isinstance(coeffs, str):
            coeffs = float(coeffs)
        W , = self.transformer.get_params()
        return coeffs * T.sqr(W).sum()

    def mf_update(self, state_below, state_above, layer_above = None, double_weights = False, iter_name = None):
        """
        Mean field update: convolve the state below, add the bias and
        any top-down message, then max pool.
        """
        self.input_space.validate(state_below)

        if iter_name is None:
            iter_name = 'anon'

        if state_above is not None:
            assert layer_above is not None
            msg = layer_above.downward_message(state_above)
            msg.name = 'msg_from_'+layer_above.layer_name+'_to_'+self.layer_name+'['+iter_name+']'
        else:
            msg = None

        if not hasattr(state_below, 'ndim'):
            raise TypeError("state_below should be a TensorType, got " +
                    str(state_below) + " of type " + str(type(state_below)))
        if state_below.ndim != 4:
            raise ValueError("state_below should have ndim 4, has "+str(state_below.ndim))

        if double_weights:
            state_below = 2. * state_below
            state_below.name = self.layer_name + '_'+iter_name + '_2state'
        z = self.transformer.lmul(state_below) + self.broadcasted_bias()
        if self.layer_name is not None and iter_name is not None:
            z.name = self.layer_name + '_' + iter_name + '_z'
        p,h = self.max_pool(z, (self.pool_rows, self.pool_cols), msg)

        p.name = self.layer_name + '_p_' + iter_name
        h.name = self.layer_name + '_h_' + iter_name

        return p, h

    def sample(self, state_below = None, state_above = None,
                    layer_above = None,
                    theano_rng = None):
        """
        Draw samples of the pooled and detector units given the state
        below and (optionally) the message from the layer above.
        """
        if state_above is not None:
            msg = layer_above.downward_message(state_above)
            try:
                self.output_space.validate(msg)
            except TypeError as e:
                reraise_as(TypeError(str(type(layer_above))+".downward_message gave something that was not the right type: "+str(e)))
        else:
            msg = None

        z = self.transformer.lmul(state_below) + self.broadcasted_bias()
        p, h, p_sample, h_sample = self.max_pool(z,
                (self.pool_rows, self.pool_cols), msg, theano_rng)

        return p_sample, h_sample

    def downward_message(self, downward_state):
        """
        Message to the layer below: the transpose convolution of the
        detector state.
        """
        self.h_space.validate(downward_state)
        return self.transformer.lmul_T(downward_state)

    def set_batch_size(self, batch_size):
        """
        Propagate a new batch size to the convolutional transformer.
        """
        self.transformer.set_batch_size(batch_size)

    def get_weights_topo(self):
        """
        Return the filters in (output, rows, cols, input) order.
        """
        outp, inp, rows, cols = range(4)
        raw = self.transformer._filters.get_value()

        return np.transpose(raw,(outp,rows,cols,inp))

    def init_mf_state(self):
        """
        Initial mean field state: the max-pooled broadcasted bias.
        """
        default_z = self.broadcasted_bias()
        shape = {
                'b': self.dbm.batch_size,
                0: self.h_space.shape[0],
                1: self.h_space.shape[1],
                'c': self.h_space.num_channels
                }
        # work around theano bug with broadcasted stuff
        default_z += T.alloc(*([0.]+[shape[elem] for elem in self.h_space.axes])).astype(default_z.dtype)
        assert default_z.ndim == 4

        p, h = self.max_pool(
                z = default_z,
                pool_shape = (self.pool_rows, self.pool_cols))

        return p, h

    def make_state(self, num_examples, numpy_rng):
        """
        Returns a shared variable containing an actual state
        (not a mean field state) for this variable: a sample of the
        pooled and detector units under the bias-only distribution.
        """
        t1 = time.time()

        empty_input = self.h_space.get_origin_batch(self.dbm.batch_size)
        h_state = sharedX(empty_input)

        default_z = T.zeros_like(h_state) + self.broadcasted_bias()

        theano_rng = make_theano_rng(None, numpy_rng.randint(2 ** 16),
                                     which_method="binomial")

        p_exp, h_exp, p_sample, h_sample = self.max_pool(
                z = default_z,
                pool_shape = (self.pool_rows, self.pool_cols),
                theano_rng = theano_rng)

        p_state = sharedX( self.output_space.get_origin_batch(
            self.dbm.batch_size))

        t2 = time.time()

        f = function([], updates = [
            (p_state, p_sample),
            (h_state, h_sample)
            ])

        t3 = time.time()

        f()

        t4 = time.time()

        # Fixed: the format string was missing the {1} placeholder, so
        # the elapsed time was never printed (compare Softmax.make_state).
        logger.info('{0}.make_state took {1}'.format(self, t4-t1))
        logger.info('\tcompose time: {0}'.format(t2-t1))
        logger.info('\tcompile time: {0}'.format(t3-t2))
        logger.info('\texecute time: {0}'.format(t4-t3))

        p_state.name = 'p_sample_shared'
        h_state.name = 'h_sample_shared'

        return p_state, h_state

    def expected_energy_term(self, state, average, state_below, average_below):
        """
        This layer's contribution to the expected energy, one scalar per
        example: -(h . b) - sum(conv(state_below) * h).
        """
        self.input_space.validate(state_below)

        downward_state = self.downward_state(state)
        self.h_space.validate(downward_state)

        # Energy function is linear so it doesn't matter if we're averaging or not
        # Specifically, our terms are -u^T W d - b^T d where u is the upward state of layer below
        # and d is the downward state of this layer

        bias_term = (downward_state * self.broadcasted_bias()).sum(axis=(1,2,3))
        weights_term = (self.transformer.lmul(state_below) * downward_state).sum(axis=(1,2,3))

        rval = -bias_term - weights_term

        assert rval.ndim == 1

        return rval
class ConvC01B_MaxPool(HiddenLayer):
"""
.. todo::
WRITEME
"""
    def __init__(self,
            output_channels,
            kernel_shape,
            pool_rows,
            pool_cols,
            layer_name,
            center = False,
            irange = None,
            sparse_init = None,
            scale_by_sharing = True,
            init_bias = 0.,
            pad = 0,
            partial_sum = 1):
        """
        Record the hyperparameters. Exactly one of irange / sparse_init
        must be given. This layer always uses ('c', 0, 1, 'b') axes and
        ties the bias across locations.
        """
        self.__dict__.update(locals())
        del self.self
        # Exactly one initialization scheme must be specified.
        assert (irange is None) != (sparse_init is None)
        self.output_axes = ('c', 0, 1, 'b')
        self.detector_channels = output_channels
        self.tied_b = 1
def broadcasted_bias(self):
"""
.. todo::
WRITEME
"""
if self.b.ndim != 1:
raise NotImplementedError()
shuffle = [ 'x' ] * 4
shuffle[self.output_axes.index('c')] = 0
return self.b.dimshuffle(*shuffle)
def get_total_state_space(self):
"""
.. todo::
WRITEME
"""
return CompositeSpace((self.h_space, self.output_space))
def set_input_space(self, space):
"""
.. todo::
WRITEME
"""
""" Note: this resets parameters!"""
setup_detector_layer_c01b(layer=self,
input_space=space,
rng=self.dbm.rng,)
if not tuple(space.axes) == ('c', 0, 1, 'b'):
raise AssertionError("You're not using c01b inputs. Ian is enforcing c01b inputs while developing his pipeline to make sure it runs at maximal speed. If you really don't want to use c01b inputs, you can remove this check and things should work. If they don't work it's only because they're not tested.")
if self.dummy_channels != 0:
raise NotImplementedError(str(type(self))+" does not support adding dummy channels for cuda-convnet compatibility yet, you must implement that feature or use inputs with <=3 channels or a multiple of 4 channels")
self.input_rows = self.input_space.shape[0]
self.input_cols = self.input_space.shape[1]
self.h_rows = self.detector_space.shape[0]
self.h_cols = self.detector_space.shape[1]
if not(self.h_rows % self.pool_rows == 0):
raise ValueError(self.layer_name + ": h_rows = %d, pool_rows = %d. Should be divisible but remainder is %d" %
(self.h_rows, self.pool_rows, self.h_rows % self.pool_rows))
assert self.h_cols % self.pool_cols == 0
self.h_space = Conv2DSpace(shape = (self.h_rows, self.h_cols), num_channels = self.output_channels,
axes = self.output_axes)
self.output_space = Conv2DSpace(shape = (self.h_rows / self.pool_rows,
self.h_cols / self.pool_cols),
num_channels = self.output_channels,
axes = self.output_axes)
logger.info('{0} : detector shape: {1} '
'pool shape: {2}'.format(self.layer_name,
self.h_space.shape,
self.output_space.shape))
assert tuple(self.output_axes) == ('c', 0, 1, 'b')
self.max_pool = max_pool_c01b
if self.center:
p_ofs, h_ofs = self.init_mf_state()
self.p_offset = sharedX(self.output_space.get_origin(), 'p_offset')
self.h_offset = sharedX(self.h_space.get_origin(), 'h_offset')
f = function([], updates={self.p_offset: p_ofs[:,:,:,0], self.h_offset: h_ofs[:,:,:,0]})
f()
def get_params(self):
"""
.. todo::
WRITEME
"""
assert self.b.name is not None
W ,= self.transformer.get_params()
assert W.name is not None
return [ W, self.b]
def state_to_b01c(self, state):
"""
.. todo::
WRITEME
"""
if tuple(self.output_axes) == ('b',0,1,'c'):
return state
return [ Conv2DSpace.convert(elem, self.output_axes, ('b', 0, 1, 'c'))
for elem in state ]
def get_range_rewards(self, state, coeffs):
"""
.. todo::
WRITEME
"""
rval = 0.
if self.pool_rows == 1 and self.pool_cols == 1:
# If the pool size is 1 then pools = detectors
# and we should not penalize pools and detectors separately
assert len(state) == 2
assert isinstance(coeffs, float)
_, state = state
state = [state]
coeffs = [coeffs]
else:
assert all([len(elem) == 2 for elem in [state, coeffs]])
for s, c in safe_zip(state, coeffs):
if c == 0.:
continue
# Range over everything but the channel index
# theano can only take gradient through max if the max is over 1 axis or all axes
# so I manually unroll the max for the case I use here
assert self.h_space.axes == ('b', 'c', 0, 1)
assert self.output_space.axes == ('b', 'c', 0, 1)
mx = s.max(axis=3).max(axis=2).max(axis=0)
assert hasattr(mx.owner.op, 'grad')
mn = s.min(axis=3).max(axis=2).max(axis=0)
assert hasattr(mn.owner.op, 'grad')
assert mx.ndim == 1
assert mn.ndim == 1
r = mx - mn
rval += (1. - r).mean() * c
return rval
def get_l1_act_cost(self, state, target, coeff, eps):
"""
.. todo::
WRITEME properly
Parameters
----------
state : WRITEME
target : WRITEME
if pools contain more than one element, should be a list
with two elements. the first element is for the pooling
units and the second for the detector units.
coeff : WRITEME
eps : WRITEME
"""
rval = 0.
if self.pool_rows == 1 and self.pool_cols == 1:
# If the pool size is 1 then pools = detectors
# and we should not penalize pools and detectors separately
assert len(state) == 2
assert isinstance(target, float)
assert isinstance(coeff, float)
_, state = state
state = [state]
target = [target]
coeff = [coeff]
if eps is None:
eps = 0.
eps = [eps]
else:
if eps is None:
eps = [0., 0.]
assert all([len(elem) == 2 for elem in [state, target, coeff]])
p_target, h_target = target
if h_target > p_target and (coeff[0] != 0. and coeff[1] != 0.):
# note that, within each group, E[p] is the sum of E[h]
warnings.warn("Do you really want to regularize the detector units to be more active than the pooling units?")
for s, t, c, e in safe_zip(state, target, coeff, eps):
if c == 0.:
continue
# Average over everything but the channel index
m = s.mean(axis= [ ax for ax in range(4) if self.output_axes[ax] != 'c' ])
assert m.ndim == 1
rval += T.maximum(abs(m-t)-e,0.).mean()*c
return rval
def get_lr_scalers(self):
"""
.. todo::
WRITEME
"""
rval = OrderedDict()
if self.scale_by_sharing:
# scale each learning rate by 1 / # times param is reused
h_rows, h_cols = self.h_space.shape
num_h = float(h_rows * h_cols)
rval[self.transformer._filters] = 1. /num_h
rval[self.b] = 1. / num_h
return rval
def upward_state(self, total_state):
"""
.. todo::
WRITEME
"""
p,h = total_state
if not hasattr(self, 'center'):
self.center = False
if self.center:
p -= self.p_offset
h -= self.h_offset
return p
def downward_state(self, total_state):
"""
.. todo::
WRITEME
"""
p,h = total_state
if not hasattr(self, 'center'):
self.center = False
if self.center:
p -= self.p_offset
h -= self.h_offset
return h
def get_monitoring_channels_from_state(self, state):
"""
.. todo::
WRITEME
"""
P, H = state
axes = tuple([i for i, ax in enumerate(self.output_axes) if ax != 'c'])
p_max = P.max(axis=(0,1,2))
p_min = P.min(axis=(0,1,2))
p_mean = P.mean(axis=(0,1,2))
p_range = p_max - p_min
rval = {
'p_max_max' : p_max.max(),
'p_max_mean' : p_max.mean(),
'p_max_min' : p_max.min(),
'p_min_max' : p_min.max(),
'p_min_mean' : p_min.mean(),
'p_min_max' : p_min.max(),
'p_range_max' : p_range.max(),
'p_range_mean' : p_range.mean(),
'p_range_min' : p_range.min(),
'p_mean_max' : p_mean.max(),
'p_mean_mean' : p_mean.mean(),
'p_mean_min' : p_mean.min()
}
return rval
def get_weight_decay(self, coeffs):
"""
.. todo::
WRITEME
"""
W , = self.transformer.get_params()
return coeffs * T.sqr(W).sum()
def mf_update(self, state_below, state_above, layer_above = None, double_weights = False, iter_name = None):
"""
.. todo::
WRITEME
"""
self.input_space.validate(state_below)
if iter_name is None:
iter_name = 'anon'
if state_above is not None:
assert layer_above is not None
msg = layer_above.downward_message(state_above)
msg.name = 'msg_from_'+layer_above.layer_name+'_to_'+self.layer_name+'['+iter_name+']'
else:
msg = None
if not hasattr(state_below, 'ndim'):
raise TypeError("state_below should be a TensorType, got " +
str(state_below) + " of type " + str(type(state_below)))
if state_below.ndim != 4:
raise ValueError("state_below should have ndim 4, has "+str(state_below.ndim))
if double_weights:
state_below = 2. * state_below
state_below.name = self.layer_name + '_'+iter_name + '_2state'
z = self.transformer.lmul(state_below) + self.broadcasted_bias()
if self.layer_name is not None and iter_name is not None:
z.name = self.layer_name + '_' + iter_name + '_z'
p,h = self.max_pool(z, (self.pool_rows, self.pool_cols), msg)
p.name = self.layer_name + '_p_' + iter_name
h.name = self.layer_name + '_h_' + iter_name
return p, h
def sample(self, state_below = None, state_above = None,
layer_above = None,
theano_rng = None):
"""
.. todo::
WRITEME
"""
raise NotImplementedError("Need to update for C01B")
if state_above is not None:
msg = layer_above.downward_message(state_above)
try:
self.output_space.validate(msg)
except TypeError as e:
reraise_as(TypeError(str(type(layer_above))+".downward_message gave something that was not the right type: "+str(e)))
else:
msg = None
z = self.transformer.lmul(state_below) + self.broadcasted_bias()
p, h, p_sample, h_sample = self.max_pool(z,
(self.pool_rows, self.pool_cols), msg, theano_rng)
return p_sample, h_sample
def downward_message(self, downward_state):
"""
.. todo::
WRITEME
"""
self.h_space.validate(downward_state)
return self.transformer.lmul_T(downward_state)
def set_batch_size(self, batch_size):
"""
.. todo::
WRITEME
"""
self.transformer.set_batch_size(batch_size)
def get_weights_topo(self):
"""
.. todo::
WRITEME
"""
return self.transformer.get_weights_topo()
def init_mf_state(self):
"""
.. todo::
WRITEME
"""
default_z = self.broadcasted_bias()
shape = {
'b': self.dbm.batch_size,
0: self.h_space.shape[0],
1: self.h_space.shape[1],
'c': self.h_space.num_channels
}
# work around theano bug with broadcasted stuff
default_z += T.alloc(*([0.]+[shape[elem] for elem in self.h_space.axes])).astype(default_z.dtype)
assert default_z.ndim == 4
p, h = self.max_pool(
z = default_z,
pool_shape = (self.pool_rows, self.pool_cols))
return p, h
def make_state(self, num_examples, numpy_rng):
"""
.. todo::
WRITEME properly
Returns a shared variable containing an actual state
(not a mean field state) for this variable.
"""
raise NotImplementedError("Need to update for C01B")
t1 = time.time()
empty_input = self.h_space.get_origin_batch(self.dbm.batch_size)
h_state = sharedX(empty_input)
default_z = T.zeros_like(h_state) + self.broadcasted_bias()
theano_rng = make_theano_rng(None, numpy_rng.randint(2 ** 16),
which_method="binomial")
p_exp, h_exp, p_sample, h_sample = self.max_pool(
z = default_z,
pool_shape = (self.pool_rows, self.pool_cols),
theano_rng = theano_rng)
p_state = sharedX( self.output_space.get_origin_batch(
self.dbm.batch_size))
t2 = time.time()
f = function([], updates = [
(p_state, p_sample),
(h_state, h_sample)
])
t3 = time.time()
f()
t4 = time.time()
logger.info('{0}.make_state took {1}'.format(self, t4-t1))
logger.info('\tcompose time: {0}'.format(t2-t1))
logger.info('\tcompile time: {0}'.format(t3-t2))
logger.info('\texecute time: {0}'.format(t4-t3))
p_state.name = 'p_sample_shared'
h_state.name = 'h_sample_shared'
return p_state, h_state
def expected_energy_term(self, state, average, state_below, average_below):
"""
.. todo::
WRITEME
"""
raise NotImplementedError("Need to update for C01B")
self.input_space.validate(state_below)
downward_state = self.downward_state(state)
self.h_space.validate(downward_state)
# Energy function is linear so it doesn't matter if we're averaging or not
# Specifically, our terms are -u^T W d - b^T d where u is the upward state of layer below
# and d is the downward state of this layer
bias_term = (downward_state * self.broadcasted_bias()).sum(axis=(1,2,3))
weights_term = (self.transformer.lmul(state_below) * downward_state).sum(axis=(1,2,3))
rval = -bias_term - weights_term
assert rval.ndim == 1
return rval
class BVMP_Gaussian(BinaryVectorMaxPool):
    """
    Like BinaryVectorMaxPool, but must have GaussianVisLayer
    as its input. Uses its beta to bias the hidden units appropriately.
    See gaussian.lyx

    beta is *not* considered a parameter of this layer, it's just an
    external factor influencing how this layer behaves.
    Gradient can still flow to beta, but it will only be included in
    the parameters list if some class other than this layer includes it.

    Several methods (`sample`, `make_state`, `expected_energy_term`,
    `linear_feed_forward_approximation`) raise NotImplementedError because
    they do not yet account for beta; their pre-beta bodies are kept below
    the raise for reference.

    .. todo::
        WRITEME : parameter list
    """

    def __init__(self,
            input_layer,
            detector_layer_dim,
            pool_size,
            layer_name,
            irange = None,
            sparse_init = None,
            sparse_stdev = 1.,
            include_prob = 1.0,
            init_bias = 0.,
            W_lr_scale = None,
            b_lr_scale = None,
            center = False,
            mask_weights = None,
            max_col_norm = None,
            copies = 1):
        warnings.warn("BVMP_Gaussian math is very faith-based, need to complete gaussian.lyx")

        args = locals()

        # Forward every argument except `input_layer` (specific to this
        # subclass) to the BinaryVectorMaxPool constructor.
        del args['input_layer']
        del args['self']
        super(BVMP_Gaussian, self).__init__(**args)
        self.input_layer = input_layer

    def get_weights(self):
        """
        Return the weight matrix as a numpy array; interactively asks
        whether to scale each visible unit's weights by the input layer's
        beta (precision) before returning.
        """
        if self.requires_reformat:
            # This is not really an unimplemented case.
            # We actually don't know how to format the weights
            # in design space. We got the data in topo space
            # and we don't have access to the dataset
            raise NotImplementedError()
        W ,= self.transformer.get_params()
        W = W.get_value()

        x = input("multiply by beta?")
        if x == 'y':
            beta = self.input_layer.beta.get_value()
            return (W.T * beta).T
        assert x == 'n'
        return W

    def set_weights(self, weights):
        """
        Deliberately disabled: setting raw weights would make
        `get_weights` (which may rescale by beta) inconsistent.
        """
        raise NotImplementedError("beta would make get_weights for visualization not correspond to set_weights")

        W, = self.transformer.get_params()
        W.set_value(weights)

    def set_biases(self, biases, recenter = False):
        """
        Set the hidden biases; if `recenter`, also reset the centering
        offset to the sigmoid of the new biases (only valid when
        pool_size == 1).
        """
        self.b.set_value(biases)
        if recenter:
            assert self.center
            if self.pool_size != 1:
                raise NotImplementedError()
            self.offset.set_value(sigmoid_numpy(self.b.get_value()))

    def get_biases(self):
        """
        Return the effective biases with the beta-dependent correction
        removed (see `beta_bias`).
        """
        return self.b.get_value() - self.beta_bias().eval()

    def sample(self, state_below = None, state_above = None,
            layer_above = None,
            theano_rng = None):
        """
        Not implemented: sampling does not yet account for beta.
        The pre-beta implementation is kept below for reference.
        """
        raise NotImplementedError("need to account for beta")
        if self.copies != 1:
            raise NotImplementedError()

        if theano_rng is None:
            raise ValueError("theano_rng is required; it just defaults to None so that it may appear after layer_above / state_above in the list.")

        if state_above is not None:
            msg = layer_above.downward_message(state_above)
        else:
            msg = None

        if self.requires_reformat:
            state_below = self.input_space.format_as(state_below, self.desired_space)

        z = self.transformer.lmul(state_below) + self.b
        p, h, p_sample, h_sample = max_pool_channels(z,
                self.pool_size, msg, theano_rng)

        return p_sample, h_sample

    def downward_message(self, downward_state):
        """
        Message to the layer below: transpose transform of the detector
        state, reformatted to the input space if needed and scaled by the
        number of copies.
        """
        rval = self.transformer.lmul_T(downward_state)

        if self.requires_reformat:
            rval = self.desired_space.format_as(rval, self.input_space)

        return rval * self.copies

    def init_mf_state(self):
        """
        Mean field state implied by the biases (including the beta bias)
        with no input, as a (p, h) pair.
        """
        # work around theano bug with broadcasted vectors
        z = T.alloc(0., self.dbm.batch_size, self.detector_layer_dim).astype(self.b.dtype) + \
                self.b.dimshuffle('x', 0) + self.beta_bias()
        rval = max_pool_channels(z = z,
                pool_size = self.pool_size)
        return rval

    def make_state(self, num_examples, numpy_rng):
        """
        Returns a shared variable containing an actual state
        (not a mean field state) for this variable.

        Not implemented: does not yet account for beta.  The pre-beta
        implementation is kept below for reference.
        """
        raise NotImplementedError("need to account for beta")
        if not hasattr(self, 'copies'):
            self.copies = 1

        if self.copies != 1:
            raise NotImplementedError()


        empty_input = self.h_space.get_origin_batch(num_examples)
        empty_output = self.output_space.get_origin_batch(num_examples)

        h_state = sharedX(empty_input)
        p_state = sharedX(empty_output)

        theano_rng = make_theano_rng(None, numpy_rng.randint(2 ** 16),
                                     which_method="binomial")

        default_z = T.zeros_like(h_state) + self.b

        p_exp, h_exp, p_sample, h_sample = max_pool_channels(
                z = default_z,
                pool_size = self.pool_size,
                theano_rng = theano_rng)

        assert h_sample.dtype == default_z.dtype

        f = function([], updates = [
            (p_state , p_sample),
            (h_state , h_sample)
            ])

        f()

        p_state.name = 'p_sample_shared'
        h_state.name = 'h_sample_shared'

        return p_state, h_state

    def expected_energy_term(self, state, average, state_below, average_below):
        """
        Not implemented: does not yet account for beta.  The pre-beta
        implementation is kept below for reference.
        """
        raise NotImplementedError("need to account for beta, and maybe some oether stuff")

        # Don't need to do anything special for centering, upward_state / downward state
        # make it all just work

        self.input_space.validate(state_below)

        if self.requires_reformat:
            if not isinstance(state_below, tuple):
                for sb in get_debug_values(state_below):
                    if sb.shape[0] != self.dbm.batch_size:
                        raise ValueError("self.dbm.batch_size is %d but got shape of %d" % (self.dbm.batch_size, sb.shape[0]))
                    assert reduce(operator.mul, sb.shape[1:]) == self.input_dim

            state_below = self.input_space.format_as(state_below, self.desired_space)

        downward_state = self.downward_state(state)
        self.h_space.validate(downward_state)

        # Energy function is linear so it doesn't matter if we're averaging or not
        # Specifically, our terms are -u^T W d - b^T d where u is the upward state of layer below
        # and d is the downward state of this layer

        bias_term = T.dot(downward_state, self.b)
        weights_term = (self.transformer.lmul(state_below) * downward_state).sum(axis=1)

        rval = -bias_term - weights_term

        assert rval.ndim == 1

        return rval * self.copies

    def linear_feed_forward_approximation(self, state_below):
        """
        Used to implement TorontoSparsity. Unclear exactly what properties of it are
        important or how to implement it for other layers.

        Properties it must have:
            output is same kind of data structure (ie, tuple of theano 2-tensors)
            as mf_update

        Properties it probably should have for other layer types:
            An infinitesimal change in state_below or the parameters should cause the same sign of change
            in the output of linear_feed_forward_approximation and in mf_update

            Should not have any non-linearities that cause the gradient to shrink

            Should disregard top-down feedback

        Not implemented: does not yet account for beta.
        """
        raise NotImplementedError("need to account for beta")

        z = self.transformer.lmul(state_below) + self.b

        if self.pool_size != 1:
            # Should probably implement sum pooling for the non-pooled version,
            # but in reality it's not totally clear what the right answer is
            raise NotImplementedError()

        return z, z

    def beta_bias(self):
        """
        Bias correction induced by the Gaussian visible layer's precision:
        -0.5 * beta^T W^2, one value per hidden unit (see gaussian.lyx).
        """
        W, = self.transformer.get_params()
        beta = self.input_layer.beta
        assert beta.ndim == 1
        return - 0.5 * T.dot(beta, T.sqr(W))

    def mf_update(self, state_below, state_above, layer_above = None, double_weights = False, iter_name = None):
        """
        One mean field update of this layer, including the beta-dependent
        bias correction.  Returns the updated (p, h) pair.
        """
        self.input_space.validate(state_below)

        if self.requires_reformat:
            if not isinstance(state_below, tuple):
                for sb in get_debug_values(state_below):
                    if sb.shape[0] != self.dbm.batch_size:
                        raise ValueError("self.dbm.batch_size is %d but got shape of %d" % (self.dbm.batch_size, sb.shape[0]))
                    assert reduce(operator.mul, sb.shape[1:]) == self.input_dim

            state_below = self.input_space.format_as(state_below, self.desired_space)

        if iter_name is None:
            iter_name = 'anon'

        if state_above is not None:
            assert layer_above is not None
            msg = layer_above.downward_message(state_above)
            msg.name = 'msg_from_'+layer_above.layer_name+'_to_'+self.layer_name+'['+iter_name+']'
        else:
            msg = None

        if double_weights:
            state_below = 2. * state_below
            state_below.name = self.layer_name + '_'+iter_name + '_2state'
        z = self.transformer.lmul(state_below) + self.b + self.beta_bias()
        if self.layer_name is not None and iter_name is not None:
            z.name = self.layer_name + '_' + iter_name + '_z'
        p,h = max_pool_channels(z, self.pool_size, msg)

        p.name = self.layer_name + '_p_' + iter_name
        h.name = self.layer_name + '_h_' + iter_name

        return p, h
class CompositeLayer(HiddenLayer):
    """
    A Layer constructed by aligning several other Layer
    objects side by side

    Parameters
    ----------
    components : WRITEME
        A list of layers that are combined to form this layer
    inputs_to_components : None or dict mapping int to list of int
        Should be None unless the input space is a CompositeSpace
        If inputs_to_components[i] contains j, it means input i will
        be given as input to component j.
        If an input does not appear in the dictionary, it will be given
        to all components.

        This field allows one CompositeLayer to have another as input
        without forcing each component to connect to all members
        of the CompositeLayer below. For example, you might want to
        have both densely connected and convolutional units in all
        layers, but a convolutional unit is incapable of taking a
        non-topological input space.
    """

    class _RoutingLayer(object):
        """
        Adapter that makes a composite `layer_above` look, to a single
        component, like a layer connected only to that component: its
        downward message is the idx-th element of the composite message.
        """

        def __init__(self, idx, layer):
            self.idx = idx
            self.layer = layer
            self.layer_name = 'route_' + str(idx) + '_' + layer.layer_name

        def downward_message(self, state):
            return self.layer.downward_message(state)[self.idx]

    def __init__(self, layer_name, components, inputs_to_components = None):
        self.layer_name = layer_name

        assert isinstance(components, list)
        for component in components:
            assert isinstance(component, HiddenLayer)
        self.num_components = len(components)
        # Copy so later mutation of the caller's list cannot affect us.
        # (The original assigned this twice; once is enough.)
        self.components = list(components)

        if inputs_to_components is None:
            self.inputs_to_components = None
        else:
            if not isinstance(inputs_to_components, dict):
                raise TypeError("CompositeLayer expected inputs_to_components to be a dict, got "+str(type(inputs_to_components)))
            self.inputs_to_components = OrderedDict()
            for key in inputs_to_components:
                assert isinstance(key, int)
                assert key >= 0
                value = inputs_to_components[key]
                assert isinstance(value, list)
                assert all([isinstance(elem, int) for elem in value])
                assert min(value) >= 0
                assert max(value) < self.num_components
                self.inputs_to_components[key] = list(value)

    def set_input_space(self, space):
        """
        Record the input space, work out the input -> component routing,
        and configure each component's input space.
        """
        self.input_space = space

        if not isinstance(space, CompositeSpace):
            assert self.inputs_to_components is None
            self.routing_needed = False
        else:
            if self.inputs_to_components is None:
                self.routing_needed = False
            else:
                self.routing_needed = True
                assert max(self.inputs_to_components) < space.num_components
                # Invert the dictionary: components_to_inputs[i] lists the
                # indices of the inputs that feed component i.
                self.components_to_inputs = OrderedDict()
                for i in xrange(self.num_components):
                    inputs = []
                    for j in xrange(space.num_components):
                        # Per the class docstring, an input absent from
                        # inputs_to_components is routed to every component
                        # (the original indexed the dict directly, raising
                        # KeyError for such inputs).
                        targets = self.inputs_to_components.get(j, None)
                        if targets is None or i in targets:
                            # Fixed: the original appended `i` (the
                            # component index) instead of `j` (the input
                            # index), producing a nonsensical restriction
                            # list like [i, i, ...].
                            inputs.append(j)
                    if len(inputs) < space.num_components:
                        self.components_to_inputs[i] = inputs

        for i, component in enumerate(self.components):
            if self.routing_needed and i in self.components_to_inputs:
                cur_space = space.restrict(self.components_to_inputs[i])
            else:
                cur_space = space

            component.set_input_space(cur_space)

        self.output_space = CompositeSpace([ component.get_output_space() for component in self.components ])

    def make_state(self, num_examples, numpy_rng):
        """
        A tuple containing one state per component.
        """
        return tuple(component.make_state(num_examples, numpy_rng) for
                component in self.components)

    def get_total_state_space(self):
        """
        Composite of the components' total state spaces.
        """
        return CompositeSpace([component.get_total_state_space() for component in self.components])

    def set_batch_size(self, batch_size):
        """
        Propagate the batch size to every component.
        """
        for component in self.components:
            component.set_batch_size(batch_size)

    def set_dbm(self, dbm):
        """
        Propagate the owning DBM to every component.
        """
        for component in self.components:
            component.set_dbm(dbm)

    def mf_update(self, state_below, state_above, layer_above = None, double_weights = False, iter_name = None):
        """
        Run each component's mean field update on its (possibly restricted)
        slice of `state_below` and return the tuple of results.
        """
        rval = []

        for i, component in enumerate(self.components):
            if self.routing_needed and i in self.components_to_inputs:
                cur_state_below = self.input_space.restrict_batch(state_below, self.components_to_inputs[i])
            else:
                cur_state_below = state_below

            if layer_above is not None:
                cur_layer_above = self._RoutingLayer(i, layer_above)
            else:
                cur_layer_above = None

            mf_update = component.mf_update(state_below = cur_state_below,
                                            state_above = state_above,
                                            layer_above = cur_layer_above,
                                            double_weights = double_weights,
                                            iter_name = iter_name)

            rval.append(mf_update)

        return tuple(rval)

    def init_mf_state(self):
        """
        Tuple of the components' initial mean field states.
        """
        return tuple([component.init_mf_state() for component in self.components])

    def get_weight_decay(self, coeffs):
        """
        Sum of each component's weight decay, using one coefficient per
        component.
        """
        return sum([component.get_weight_decay(coeff) for component, coeff
            in safe_zip(self.components, coeffs)])

    def upward_state(self, total_state):
        """
        Tuple of each component's upward state.
        """
        return tuple([component.upward_state(elem)
            for component, elem in
            safe_zip(self.components, total_state)])

    def downward_state(self, total_state):
        """
        Tuple of each component's downward state.
        """
        return tuple([component.downward_state(elem)
            for component, elem in
            safe_zip(self.components, total_state)])

    def downward_message(self, downward_state):
        """
        Collect each component's downward message, route it back to the
        inputs that feed that component, and sum messages that target the
        same input.
        """
        if isinstance(self.input_space, CompositeSpace):
            num_input_components = self.input_space.num_components
        else:
            num_input_components = 1

        rval = [ None ] * num_input_components

        def add(x, y):
            # Sum that treats None as "no message yet".
            if x is None:
                return y
            if y is None:
                return x
            return x + y

        for i, packed in enumerate(safe_zip(self.components, downward_state)):
            component, state = packed
            if self.routing_needed and i in self.components_to_inputs:
                input_idx = self.components_to_inputs[i]
            else:
                input_idx = range(num_input_components)

            partial_message = component.downward_message(state)

            if len(input_idx) == 1:
                partial_message = [ partial_message ]

            assert len(input_idx) == len(partial_message)

            for idx, msg in safe_zip(input_idx, partial_message):
                rval[idx] = add(rval[idx], msg)

        if len(rval) == 1:
            rval = rval[0]
        else:
            rval = tuple(rval)

        self.input_space.validate(rval)

        return rval

    def get_l1_act_cost(self, state, target, coeff, eps):
        """
        Sum of the components' L1 activation costs, one (state, target,
        coeff, eps) quadruple per component.
        """
        return sum([ comp.get_l1_act_cost(s, t, c, e) \
            for comp, s, t, c, e in safe_zip(self.components, state, target, coeff, eps)])

    def get_range_rewards(self, state, coeffs):
        """
        Sum of the components' range rewards.
        """
        return sum([comp.get_range_rewards(s, c)
            for comp, s, c in safe_zip(self.components, state, coeffs)])

    def get_params(self):
        """
        Union of all components' parameters (duplicates removed).
        """
        return reduce(lambda x, y: safe_union(x, y),
                [component.get_params() for component in self.components])

    def get_weights_topo(self):
        """
        Interactively ask which component's topological weights to return.
        """
        logger.info('Get topological weights for which layer?')
        for i, component in enumerate(self.components):
            logger.info('{0} {1}'.format(i, component.layer_name))
        x = input()
        return self.components[int(x)].get_weights_topo()

    def get_monitoring_channels_from_state(self, state):
        """
        Each component's monitoring channels, prefixed with its layer name.
        """
        rval = OrderedDict()

        for layer, s in safe_zip(self.components, state):
            d = layer.get_monitoring_channels_from_state(s)
            for key in d:
                rval[layer.layer_name+'_'+key] = d[key]

        return rval

    def sample(self, state_below = None, state_above = None,
            layer_above = None,
            theano_rng = None):
        """
        Sample each component given its (possibly restricted) slice of
        `state_below` and return the tuple of samples.
        """
        rval = []

        for i, component in enumerate(self.components):
            if self.routing_needed and i in self.components_to_inputs:
                cur_state_below = self.input_space.restrict_batch(state_below, self.components_to_inputs[i])
            else:
                cur_state_below = state_below

            if layer_above is not None:
                cur_layer_above = self._RoutingLayer(i, layer_above)
            else:
                cur_layer_above = None

            sample = component.sample(state_below = cur_state_below,
                                      state_above = state_above,
                                      layer_above = cur_layer_above,
                                      theano_rng = theano_rng)

            rval.append(sample)

        return tuple(rval)
| 30.601683 | 315 | 0.55708 | from __future__ import print_function
__authors__ = ["Ian Goodfellow", "Vincent Dumoulin"]
__copyright__ = "Copyright 2012-2013, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
import functools
import logging
import numpy as np
import operator
from theano.compat.six.moves import input, reduce, xrange
import time
import warnings
from theano import tensor as T, function, config
import theano
from theano.gof.op import get_debug_values
from theano.printing import Print
from pylearn2.compat import OrderedDict
from pylearn2.expr.nnet import sigmoid_numpy
from pylearn2.expr.probabilistic_max_pooling import max_pool_channels, max_pool_b01c, max_pool, max_pool_c01b
from pylearn2.linear.conv2d import make_random_conv2D, make_sparse_random_conv2D
from pylearn2.linear.conv2d_c01b import setup_detector_layer_c01b
from pylearn2.linear.matrixmul import MatrixMul
from pylearn2.models import Model
from pylearn2.models.dbm import init_sigmoid_bias_from_marginals
from pylearn2.space import VectorSpace, CompositeSpace, Conv2DSpace, Space
from pylearn2.utils import is_block_gradient
from pylearn2.utils import sharedX, safe_zip, py_integer_types, block_gradient
from pylearn2.utils.exc import reraise_as
from pylearn2.utils.rng import make_theano_rng
from pylearn2.utils import safe_union
logger = logging.getLogger(__name__)
class Layer(Model):
    """
    Abstract base class for a layer of a deep Boltzmann machine.

    Subclasses must implement state creation, sampling, and the layer's
    contribution to the energy function; the base class provides the
    DBM back-reference plumbing and no-op defaults for monitoring.
    """

    def get_dbm(self):
        """
        Return the DBM this layer belongs to, or None if not set yet.
        """
        if hasattr(self, 'dbm'):
            return self.dbm
        return None

    def set_dbm(self, dbm):
        """
        Attach this layer to a DBM.  May only be called once.
        """
        assert self.get_dbm() is None
        self.dbm = dbm

    def get_total_state_space(self):
        """
        Return the Space containing the layer's complete state
        (which may include auxiliary variables, e.g. pooling units).
        """
        raise NotImplementedError(str(type(self))+" does not implement " +\
            "get_total_state_space()")

    def get_monitoring_channels(self):
        """
        Parameter-based monitoring channels; none by default.
        """
        return OrderedDict()

    def get_monitoring_channels_from_state(self, state):
        """
        State-based monitoring channels; none by default.
        """
        return OrderedDict()

    def upward_state(self, total_state):
        """
        The portion of the total state sent to the layer above;
        by default, the whole state.
        """
        return total_state

    def make_state(self, num_examples, numpy_rng):
        """
        Return a shared variable holding an actual sampled state.
        """
        raise NotImplementedError("%s doesn't implement make_state" %
                type(self))

    def make_symbolic_state(self, num_examples, theano_rng):
        """
        Return a symbolic (non-shared) sampled state.
        """
        raise NotImplementedError("%s doesn't implement make_symbolic_state" %
                                  type(self))

    def sample(self, state_below = None, state_above = None,
            layer_above = None,
            theano_rng = None):
        """
        Sample this layer's state conditioned on its neighbors.
        """
        # Guard against subclasses that still use the pre-rename API.
        if hasattr(self, 'get_sampling_updates'):
            raise AssertionError("Looks like "+str(type(self))+" needs to rename get_sampling_updates to sample.")
        raise NotImplementedError("%s doesn't implement sample" %
                type(self))

    def expected_energy_term(self, state,
                                average,
                                state_below,
                                average_below):
        """
        This layer's terms of the expected energy, one value per example.
        """
        raise NotImplementedError(str(type(self))+" does not implement expected_energy_term.")

    def finalize_initialization(self):
        """
        Hook called after the whole DBM is assembled; no-op by default.
        """
        pass
class VisibleLayer(Layer):
    """
    Abstract base class for the visible layer of a DBM.
    """

    def get_total_state_space(self):
        """
        A visible layer's total state lives in its input space.
        """
        return self.get_input_space()
class HiddenLayer(Layer):
    """
    Abstract base class for a hidden layer of a DBM.

    Adds the downward-state interface and optional activation-based
    regularization hooks on top of Layer.
    """

    def downward_state(self, total_state):
        """
        The portion of the total state sent to the layer below;
        by default, the whole state.
        """
        return total_state

    def get_stdev_rewards(self, state, coeffs):
        """
        Optional cost rewarding activation standard deviation.
        """
        raise NotImplementedError(str(type(self))+" does not implement get_stdev_rewards")

    def get_range_rewards(self, state, coeffs):
        """
        Optional cost rewarding activation range.
        """
        raise NotImplementedError(str(type(self))+" does not implement get_range_rewards")

    def get_l1_act_cost(self, state, target, coeff, eps):
        """
        Optional L1 penalty on deviations of mean activation from target.
        """
        raise NotImplementedError(str(type(self))+" does not implement get_l1_act_cost")

    def get_l2_act_cost(self, state, target, coeff):
        """
        Optional L2 penalty on deviations of mean activation from target.
        """
        raise NotImplementedError(str(type(self))+" does not implement get_l2_act_cost")
class BinaryVector(VisibleLayer):
def __init__(self,
nvis,
bias_from_marginals = None,
center = False,
copies = 1, learn_init_inpainting_state = False):
super(BinaryVector, self).__init__()
self.__dict__.update(locals())
del self.self
# Don't serialize the dataset
del self.bias_from_marginals
self.space = VectorSpace(nvis)
self.input_space = self.space
origin = self.space.get_origin()
if bias_from_marginals is None:
init_bias = np.zeros((nvis,))
else:
init_bias = init_sigmoid_bias_from_marginals(bias_from_marginals)
self.bias = sharedX(init_bias, 'visible_bias')
if center:
self.offset = sharedX(sigmoid_numpy(init_bias))
def get_biases(self):
return self.bias.get_value()
def set_biases(self, biases, recenter=False):
self.bias.set_value(biases)
if recenter:
assert self.center
self.offset.set_value(sigmoid_numpy(self.bias.get_value()))
def upward_state(self, total_state):
if not hasattr(self, 'center'):
self.center = False
if self.center:
rval = total_state - self.offset
else:
rval = total_state
if not hasattr(self, 'copies'):
self.copies = 1
return rval * self.copies
def get_params(self):
return [self.bias]
def sample(self, state_below = None, state_above = None,
layer_above = None,
theano_rng = None):
assert state_below is None
if self.copies != 1:
raise NotImplementedError()
msg = layer_above.downward_message(state_above)
bias = self.bias
z = msg + bias
phi = T.nnet.sigmoid(z)
rval = theano_rng.binomial(size = phi.shape, p = phi, dtype = phi.dtype,
n = 1 )
return rval
def mf_update(self, state_above, layer_above):
msg = layer_above.downward_message(state_above)
mu = self.bias
z = msg + mu
rval = T.nnet.sigmoid(z)
return rval
def make_state(self, num_examples, numpy_rng):
if not hasattr(self, 'copies'):
self.copies = 1
if self.copies != 1:
raise NotImplementedError()
driver = numpy_rng.uniform(0.,1., (num_examples, self.nvis))
mean = sigmoid_numpy(self.bias.get_value())
sample = driver < mean
rval = sharedX(sample, name = 'v_sample_shared')
return rval
def make_symbolic_state(self, num_examples, theano_rng):
if not hasattr(self, 'copies'):
self.copies = 1
if self.copies != 1:
raise NotImplementedError()
mean = T.nnet.sigmoid(self.bias)
rval = theano_rng.binomial(size=(num_examples, self.nvis), p=mean,
dtype=theano.config.floatX)
return rval
    def expected_energy_term(self, state, average, state_below = None, average_below = None):
        """Per-example contribution of this layer to the expected energy:
        -state . bias.

        `average` is accepted for interface compatibility but ignored; the
        term is linear in the state, so samples and mean-field states are
        treated identically.
        """
        if self.center:
            state = state - self.offset
        assert state_below is None
        assert average_below is None
        assert average in [True, False]
        self.space.validate(state)
        rval = -T.dot(state, self.bias)
        assert rval.ndim == 1
        return rval * self.copies
def init_inpainting_state(self, V, drop_mask, noise = False, return_unmasked = False):
assert drop_mask is None or drop_mask.ndim > 1
unmasked = T.nnet.sigmoid(self.bias.dimshuffle('x',0))
assert unmasked.ndim == 2
assert hasattr(unmasked.owner.op, 'scalar_op')
if drop_mask is not None:
masked_mean = unmasked * drop_mask
else:
masked_mean = unmasked
if not hasattr(self, 'learn_init_inpainting_state'):
self.learn_init_inpainting_state = 0
if not self.learn_init_inpainting_state:
masked_mean = block_gradient(masked_mean)
masked_mean.name = 'masked_mean'
if noise:
theano_rng = theano.sandbox.rng_mrg.MRG_RandomStreams(42)
unmasked = T.nnet.sigmoid(theano_rng.normal(avg = 0.,
std = 1., size = masked_mean.shape,
dtype = masked_mean.dtype))
masked_mean = unmasked * drop_mask
masked_mean.name = 'masked_noise'
if drop_mask is None:
rval = masked_mean
else:
masked_V = V * (1-drop_mask)
rval = masked_mean + masked_V
rval.name = 'init_inpainting_state'
if return_unmasked:
assert unmasked.ndim > 1
return rval, unmasked
return rval
    def inpaint_update(self, state_above, layer_above, drop_mask = None, V = None, return_unmasked = False):
        """One inpainting mean-field step for the visible units.

        Dropped entries (drop_mask == 1) are recomputed as
        sigmoid(downward message + bias); observed entries of V are kept.
        When `return_unmasked`, also returns the raw sigmoid output, which
        recons_cost later requires to be a literal sigmoid op.
        """
        msg = layer_above.downward_message(state_above)
        mu = self.bias
        z = msg + mu
        z.name = 'inpainting_z_[unknown_iter]'
        unmasked = T.nnet.sigmoid(z)
        if drop_mask is not None:
            rval = drop_mask * unmasked + (1-drop_mask) * V
        else:
            rval = unmasked
        rval.name = 'inpainted_V[unknown_iter]'
        if return_unmasked:
            # Verify the graph structure recons_cost depends on.
            owner = unmasked.owner
            assert owner is not None
            op = owner.op
            assert hasattr(op, 'scalar_op')
            assert isinstance(op.scalar_op, T.nnet.sigm.ScalarSigmoid)
            return rval, unmasked
        return rval
    def recons_cost(self, V, V_hat_unmasked, drop_mask = None, use_sum=False):
        """Mean masked cross-entropy between targets V and reconstruction
        V_hat_unmasked.

        V_hat_unmasked must literally be the output of a sigmoid op
        (possibly wrapped in block_gradient); its pre-activation z is
        recovered from the graph so the cost can be written with
        numerically stable softplus terms instead of log(sigmoid(z)).
        """
        if use_sum:
            raise NotImplementedError()
        V_hat = V_hat_unmasked
        assert hasattr(V_hat, 'owner')
        owner = V_hat.owner
        assert owner is not None
        op = owner.op
        block_grad = False
        if is_block_gradient(op):
            # Unwrap the gradient-blocking Identity to reach the sigmoid.
            assert isinstance(op.scalar_op, theano.scalar.Identity)
            block_grad = True
            real, = owner.inputs
            owner = real.owner
            op = owner.op
        if not hasattr(op, 'scalar_op'):
            raise ValueError("Expected V_hat_unmasked to be generated by an Elemwise op, got "+str(op)+" of type "+str(type(op)))
        assert isinstance(op.scalar_op, T.nnet.sigm.ScalarSigmoid)
        z ,= owner.inputs
        if block_grad:
            z = block_gradient(z)
        if V.ndim != V_hat.ndim:
            raise ValueError("V and V_hat_unmasked should have same ndim, but are %d and %d." % (V.ndim, V_hat.ndim))
        # -V*log(sigmoid(z)) - (1-V)*log(1-sigmoid(z)) in softplus form.
        unmasked_cost = V * T.nnet.softplus(-z) + (1 - V) * T.nnet.softplus(z)
        assert unmasked_cost.ndim == V_hat.ndim
        if drop_mask is None:
            masked_cost = unmasked_cost
        else:
            masked_cost = drop_mask * unmasked_cost
        return masked_cost.mean()
class BinaryVectorMaxPool(HiddenLayer):
    """
    DBM hidden layer of binary detector units with non-overlapping max
    pooling: each disjoint group of `pool_size` detector units is
    summarized by one pooling unit.  The layer's total state is the pair
    (pooled state P, detector state H).
    """

    def __init__(self,
            detector_layer_dim,
            pool_size,
            layer_name,
            irange = None,
            sparse_init = None,
            sparse_stdev = 1.,
            include_prob = 1.0,
            init_bias = 0.,
            W_lr_scale = None,
            b_lr_scale = None,
            center = False,
            mask_weights = None,
            max_col_norm = None,
            copies = 1):
        """
        detector_layer_dim: number of detector units (must be divisible by
            pool_size).
        irange / sparse_init: mutually exclusive weight-init schemes
            (uniform in [-irange, irange] with keep prob `include_prob`,
            vs. `sparse_init` nonzero gaussian entries per column).
        W_lr_scale, b_lr_scale: optional per-parameter LR multipliers.
        center: subtract sigmoid(bias) from states (centering trick);
            only implemented for pool_size == 1.
        mask_weights: optional 0/1 mask applied to W after each update.
        max_col_norm: if set, clip columns of W to this norm after updates.
        copies: number of tied copies of this layer.
        """
        super(BinaryVectorMaxPool, self).__init__()
        self.__dict__.update(locals())
        del self.self
        self.b = sharedX( np.zeros((self.detector_layer_dim,)) + init_bias, name = layer_name + '_b')
        if self.center:
            if self.pool_size != 1:
                raise NotImplementedError()
            self.offset = sharedX(sigmoid_numpy(self.b.get_value()))

    def get_lr_scalers(self):
        """Return OrderedDict mapping parameters to learning-rate scales."""
        # Patch old pickle files.
        if not hasattr(self, 'W_lr_scale'):
            self.W_lr_scale = None
        if not hasattr(self, 'b_lr_scale'):
            self.b_lr_scale = None
        rval = OrderedDict()
        if self.W_lr_scale is not None:
            W, = self.transformer.get_params()
            rval[W] = self.W_lr_scale
        if self.b_lr_scale is not None:
            rval[self.b] = self.b_lr_scale
        return rval

    def set_input_space(self, space):
        """Configure the layer for input `space`: build spaces and init W.

        Note that this resets the parameters.
        """
        self.input_space = space
        if isinstance(space, VectorSpace):
            self.requires_reformat = False
            self.input_dim = space.dim
        else:
            self.requires_reformat = True
            self.input_dim = space.get_total_dimension()
            self.desired_space = VectorSpace(self.input_dim)
        if not (self.detector_layer_dim % self.pool_size == 0):
            raise ValueError("detector_layer_dim = %d, pool_size = %d. Should be divisible but remainder is %d" %
                    (self.detector_layer_dim, self.pool_size, self.detector_layer_dim % self.pool_size))
        self.h_space = VectorSpace(self.detector_layer_dim)
        # // keeps this an int under Python 3 as well; exact by the check above.
        self.pool_layer_dim = self.detector_layer_dim // self.pool_size
        self.output_space = VectorSpace(self.pool_layer_dim)
        rng = self.dbm.rng
        if self.irange is not None:
            assert self.sparse_init is None
            W = rng.uniform(-self.irange,
                            self.irange,
                            (self.input_dim, self.detector_layer_dim)) * \
                (rng.uniform(0., 1., (self.input_dim, self.detector_layer_dim))
                 < self.include_prob)
        else:
            assert self.sparse_init is not None
            W = np.zeros((self.input_dim, self.detector_layer_dim))
            def mask_rejects(idx, i):
                # True when the weight mask forbids a nonzero at (idx, i).
                if self.mask_weights is None:
                    return False
                return self.mask_weights[idx, i] == 0.
            for i in xrange(self.detector_layer_dim):
                assert self.sparse_init <= self.input_dim
                for j in xrange(self.sparse_init):
                    idx = rng.randint(0, self.input_dim)
                    while W[idx, i] != 0 or mask_rejects(idx, i):
                        idx = rng.randint(0, self.input_dim)
                    W[idx, i] = rng.randn()
            W *= self.sparse_stdev
        W = sharedX(W)
        W.name = self.layer_name + '_W'
        self.transformer = MatrixMul(W)
        W, = self.transformer.get_params()
        assert W.name is not None
        if self.mask_weights is not None:
            expected_shape = (self.input_dim, self.detector_layer_dim)
            if expected_shape != self.mask_weights.shape:
                raise ValueError("Expected mask with shape "+str(expected_shape)+" but got "+str(self.mask_weights.shape))
            self.mask = sharedX(self.mask_weights)

    @functools.wraps(Model._modify_updates)
    def _modify_updates(self, updates):
        # Enforce the weight mask and the max column norm constraint.
        if not hasattr(self, 'mask_weights'):
            self.mask_weights = None
        if not hasattr(self, 'max_col_norm'):
            self.max_col_norm = None
        if self.mask_weights is not None:
            W, = self.transformer.get_params()
            if W in updates:
                updates[W] = updates[W] * self.mask
        if self.max_col_norm is not None:
            W, = self.transformer.get_params()
            if W in updates:
                updated_W = updates[W]
                col_norms = T.sqrt(T.sum(T.sqr(updated_W), axis=0))
                desired_norms = T.clip(col_norms, 0, self.max_col_norm)
                updates[W] = updated_W * (desired_norms / (1e-7 + col_norms))

    def get_total_state_space(self):
        """Total state is (pooled state, detector state)."""
        return CompositeSpace((self.output_space, self.h_space))

    def get_params(self):
        """Return the learnable parameters: W (via transformer) and b."""
        assert self.b.name is not None
        W, = self.transformer.get_params()
        assert W.name is not None
        rval = self.transformer.get_params()
        assert not isinstance(rval, set)
        rval = list(rval)
        assert self.b not in rval
        rval.append(self.b)
        return rval

    def get_weight_decay(self, coeff):
        """Return coeff * sum of squared weights."""
        if isinstance(coeff, str):
            coeff = float(coeff)
        assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
        W, = self.transformer.get_params()
        return coeff * T.sqr(W).sum()

    def get_weights(self):
        """Return W as a numpy array (VectorSpace input only)."""
        if self.requires_reformat:
            # in design space. We got the data in topo space
            # and we don't have access to the dataset
            raise NotImplementedError()
        W, = self.transformer.get_params()
        return W.get_value()

    def set_weights(self, weights):
        """Set W from a numpy array."""
        W, = self.transformer.get_params()
        W.set_value(weights)

    def set_biases(self, biases, recenter = False):
        """Set b; optionally recompute the centering offset (center=True,
        pool_size == 1 only)."""
        self.b.set_value(biases)
        if recenter:
            assert self.center
            if self.pool_size != 1:
                raise NotImplementedError()
            self.offset.set_value(sigmoid_numpy(self.b.get_value()))

    def get_biases(self):
        """Return b as a numpy array."""
        return self.b.get_value()

    def get_weights_format(self):
        return ('v', 'h')

    def get_weights_view_shape(self):
        """Shape for viewing weights grouped by pooling unit: one row per
        pool, `pool_size` columns."""
        total = self.detector_layer_dim
        cols = self.pool_size
        if cols == 1:
            # Let the PatchViewer decide how to arrange the units
            # when they're not pooled
            raise NotImplementedError()
        # When they are pooled, make each pooling unit have one row
        rows = total // cols
        return rows, cols

    def get_weights_topo(self):
        """Return W reshaped to ('b', 0, 1, 'c') topology (Conv2DSpace
        input only)."""
        if not isinstance(self.input_space, Conv2DSpace):
            raise NotImplementedError()
        W, = self.transformer.get_params()
        W = W.T
        W = W.reshape((self.detector_layer_dim, self.input_space.shape[0],
                       self.input_space.shape[1], self.input_space.num_channels))
        W = Conv2DSpace.convert(W, self.input_space.axes, ('b', 0, 1, 'c'))
        return function([], W)()

    def upward_state(self, total_state):
        """State passed to the layer above: the pooled units P (centered
        if enabled, scaled by copies)."""
        p, h = total_state
        self.h_space.validate(h)
        self.output_space.validate(p)
        if not hasattr(self, 'center'):
            self.center = False
        if self.center:
            return p - self.offset
        if not hasattr(self, 'copies'):
            self.copies = 1
        return p * self.copies

    def downward_state(self, total_state):
        """State passed to the layer below: the detector units H (centered
        if enabled, scaled by copies)."""
        p, h = total_state
        if not hasattr(self, 'center'):
            self.center = False
        if self.center:
            return h - self.offset
        return h * self.copies

    def get_monitoring_channels(self):
        """Row/column norm statistics of W."""
        W, = self.transformer.get_params()
        assert W.ndim == 2
        sq_W = T.sqr(W)
        row_norms = T.sqrt(sq_W.sum(axis=1))
        col_norms = T.sqrt(sq_W.sum(axis=0))
        return OrderedDict([
            ('row_norms_min', row_norms.min()),
            ('row_norms_mean', row_norms.mean()),
            ('row_norms_max', row_norms.max()),
            ('col_norms_min', col_norms.min()),
            ('col_norms_mean', col_norms.mean()),
            ('col_norms_max', col_norms.max()),
        ])

    def get_monitoring_channels_from_state(self, state):
        """Summary statistics of the pooled (and, if distinct, detector)
        unit activations."""
        P, H = state
        rval = OrderedDict()
        if self.pool_size == 1:
            vars_and_prefixes = [(P, '')]
        else:
            vars_and_prefixes = [(P, 'p_'), (H, 'h_')]
        for var, prefix in vars_and_prefixes:
            v_max = var.max(axis=0)
            v_min = var.min(axis=0)
            v_mean = var.mean(axis=0)
            v_range = v_max - v_min
            # max_x.mean_u is "the mean over *u*nits of the max over e*x*amples"
            # The x and u are included in the name because otherwise its hard
            # to remember which axis is which when reading the monitor
            # I use inner.outer rather than outer_of_inner or something like that
            # because I want mean_x.* to appear next to each other in the alphabetical
            # list, as these are commonly plotted together
            for key, val in [
                    ('max_x.max_u', v_max.max()),
                    ('max_x.mean_u', v_max.mean()),
                    ('max_x.min_u', v_max.min()),
                    ('min_x.max_u', v_min.max()),
                    ('min_x.mean_u', v_min.mean()),
                    ('min_x.min_u', v_min.min()),
                    ('range_x.max_u', v_range.max()),
                    ('range_x.mean_u', v_range.mean()),
                    ('range_x.min_u', v_range.min()),
                    ('mean_x.max_u', v_mean.max()),
                    ('mean_x.mean_u', v_mean.mean()),
                    ('mean_x.min_u', v_mean.min())
                    ]:
                rval[prefix+key] = val
        return rval

    def get_stdev_rewards(self, state, coeffs):
        """Penalty rewarding per-unit standard deviation 0.5 across the
        batch; `coeffs` is a float (pool_size 1) or a pair for (P, H)."""
        rval = 0.
        P, H = state
        self.output_space.validate(P)
        self.h_space.validate(H)
        if self.pool_size == 1:
            # If the pool size is 1 then pools = detectors
            # and we should not penalize pools and detectors separately
            assert len(state) == 2
            if isinstance(coeffs, str):
                coeffs = float(coeffs)
            assert isinstance(coeffs, float)
            _, state = state
            state = [state]
            coeffs = [coeffs]
        else:
            assert all([len(elem) == 2 for elem in [state, coeffs]])
        for s, c in safe_zip(state, coeffs):
            assert all([isinstance(elem, float) for elem in [c]])
            if c == 0.:
                continue
            mn = s.mean(axis=0)
            dev = s - mn
            stdev = T.sqrt(T.sqr(dev).mean(axis=0))
            rval += (0.5 - stdev).mean()*c
        return rval

    def get_range_rewards(self, state, coeffs):
        """Penalty rewarding per-unit activation range 1 across the batch;
        `coeffs` as in get_stdev_rewards."""
        rval = 0.
        P, H = state
        self.output_space.validate(P)
        self.h_space.validate(H)
        if self.pool_size == 1:
            # If the pool size is 1 then pools = detectors
            # and we should not penalize pools and detectors separately
            assert len(state) == 2
            if isinstance(coeffs, str):
                coeffs = float(coeffs)
            assert isinstance(coeffs, float)
            _, state = state
            state = [state]
            coeffs = [coeffs]
        else:
            assert all([len(elem) == 2 for elem in [state, coeffs]])
        for s, c in safe_zip(state, coeffs):
            assert all([isinstance(elem, float) for elem in [c]])
            if c == 0.:
                continue
            mx = s.max(axis=0)
            assert hasattr(mx.owner.op, 'grad')
            assert mx.ndim == 1
            mn = s.min(axis=0)
            assert hasattr(mn.owner.op, 'grad')
            assert mn.ndim == 1
            r = mx - mn
            rval += (1 - r).mean()*c
        return rval

    def get_l1_act_cost(self, state, target, coeff, eps = None):
        """L1 penalty on deviation of mean activations from `target`, with
        dead zone `eps`; scalars for pool_size 1, pairs for (P, H)."""
        rval = 0.
        P, H = state
        self.output_space.validate(P)
        self.h_space.validate(H)
        if self.pool_size == 1:
            # If the pool size is 1 then pools = detectors
            # and we should not penalize pools and detectors separately
            assert len(state) == 2
            if not isinstance(target, float):
                raise TypeError("BinaryVectorMaxPool.get_l1_act_cost expected target of type float " + \
                        " but an instance named "+self.layer_name + " got target "+str(target) + " of type "+str(type(target)))
            assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
            _, state = state
            state = [state]
            target = [target]
            coeff = [coeff]
            if eps is None:
                eps = [0.]
            else:
                eps = [eps]
        else:
            assert all([len(elem) == 2 for elem in [state, target, coeff]])
            if eps is None:
                eps = [0., 0.]
            if target[1] > target[0]:
                warnings.warn("Do you really want to regularize the detector units to be more active than the pooling units?")
        for s, t, c, e in safe_zip(state, target, coeff, eps):
            assert all([isinstance(elem, float) or hasattr(elem, 'dtype') for elem in [t, c, e]])
            if c == 0.:
                continue
            m = s.mean(axis=0)
            assert m.ndim == 1
            rval += T.maximum(abs(m-t)-e, 0.).mean()*c
        return rval

    def get_l2_act_cost(self, state, target, coeff):
        """L2 penalty on deviation of mean activations from `target`;
        scalars for pool_size 1, pairs for (P, H)."""
        rval = 0.
        P, H = state
        self.output_space.validate(P)
        self.h_space.validate(H)
        if self.pool_size == 1:
            # If the pool size is 1 then pools = detectors
            # and we should not penalize pools and detectors separately
            assert len(state) == 2
            if not isinstance(target, float):
                # Bug fix: the message previously named get_l1_act_cost.
                raise TypeError("BinaryVectorMaxPool.get_l2_act_cost expected target of type float " + \
                        " but an instance named "+self.layer_name + " got target "+str(target) + " of type "+str(type(target)))
            assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
            _, state = state
            state = [state]
            target = [target]
            coeff = [coeff]
        else:
            assert all([len(elem) == 2 for elem in [state, target, coeff]])
            if target[1] > target[0]:
                warnings.warn("Do you really want to regularize the detector units to be more active than the pooling units?")
        for s, t, c in safe_zip(state, target, coeff):
            assert all([isinstance(elem, float) or hasattr(elem, 'dtype') for elem in [t, c]])
            if c == 0.:
                continue
            m = s.mean(axis=0)
            assert m.ndim == 1
            rval += T.square(m-t).mean()*c
        return rval

    def sample(self, state_below = None, state_above = None,
                layer_above = None,
                theano_rng = None):
        """Sample (P, H) given the layers below and (optionally) above."""
        if self.copies != 1:
            raise NotImplementedError()
        if theano_rng is None:
            raise ValueError("theano_rng is required; it just defaults to None so that it may appear after layer_above / state_above in the list.")
        if state_above is not None:
            msg = layer_above.downward_message(state_above)
        else:
            msg = None
        if self.requires_reformat:
            state_below = self.input_space.format_as(state_below, self.desired_space)
        z = self.transformer.lmul(state_below) + self.b
        p, h, p_sample, h_sample = max_pool_channels(z,
                self.pool_size, msg, theano_rng)
        return p_sample, h_sample

    def downward_message(self, downward_state):
        """Message sent to the layer below: W^T h (reformatted to the
        input space if needed, scaled by copies)."""
        self.h_space.validate(downward_state)
        rval = self.transformer.lmul_T(downward_state)
        if self.requires_reformat:
            rval = self.desired_space.format_as(rval, self.input_space)
        return rval * self.copies

    def init_mf_state(self):
        """Initial mean-field state from the biases alone."""
        # work around theano bug with broadcasted vectors
        z = T.alloc(0., self.dbm.batch_size, self.detector_layer_dim).astype(self.b.dtype) + \
            self.b.dimshuffle('x', 0)
        rval = max_pool_channels(z = z,
                pool_size = self.pool_size)
        return rval

    def make_state(self, num_examples, numpy_rng):
        """Sample (P, H) shared variables from the bias-only distribution
        (numpy-level; the sampling function is compiled and run here)."""
        if not hasattr(self, 'copies'):
            self.copies = 1
        if self.copies != 1:
            raise NotImplementedError()
        empty_input = self.h_space.get_origin_batch(num_examples)
        empty_output = self.output_space.get_origin_batch(num_examples)
        h_state = sharedX(empty_input)
        p_state = sharedX(empty_output)
        theano_rng = make_theano_rng(None, numpy_rng.randint(2 ** 16), which_method="binomial")
        default_z = T.zeros_like(h_state) + self.b
        p_exp, h_exp, p_sample, h_sample = max_pool_channels(
                z = default_z,
                pool_size = self.pool_size,
                theano_rng = theano_rng)
        assert h_sample.dtype == default_z.dtype
        f = function([], updates = [
            (p_state, p_sample),
            (h_state, h_sample)
            ])
        f()
        p_state.name = 'p_sample_shared'
        h_state.name = 'h_sample_shared'
        return p_state, h_state

    def make_symbolic_state(self, num_examples, theano_rng):
        """Symbolic counterpart of make_state."""
        if not hasattr(self, 'copies'):
            self.copies = 1
        if self.copies != 1:
            raise NotImplementedError()
        default_z = T.alloc(self.b, num_examples, self.detector_layer_dim)
        p_exp, h_exp, p_sample, h_sample = max_pool_channels(z=default_z,
                                                             pool_size=self.pool_size,
                                                             theano_rng=theano_rng)
        assert h_sample.dtype == default_z.dtype
        return p_sample, h_sample

    def expected_energy_term(self, state, average, state_below, average_below):
        """Per-example energy contribution: -h.b - (u W).h, where u is the
        upward state of the layer below and h this layer's downward state."""
        # Don't need to do anything special for centering, upward_state / downward state
        # make it all just work
        self.input_space.validate(state_below)
        if self.requires_reformat:
            if not isinstance(state_below, tuple):
                for sb in get_debug_values(state_below):
                    if sb.shape[0] != self.dbm.batch_size:
                        raise ValueError("self.dbm.batch_size is %d but got shape of %d" % (self.dbm.batch_size, sb.shape[0]))
                    assert reduce(operator.mul, sb.shape[1:]) == self.input_dim
            state_below = self.input_space.format_as(state_below, self.desired_space)
        downward_state = self.downward_state(state)
        self.h_space.validate(downward_state)
        bias_term = T.dot(downward_state, self.b)
        weights_term = (self.transformer.lmul(state_below) * downward_state).sum(axis=1)
        rval = -bias_term - weights_term
        assert rval.ndim == 1
        return rval * self.copies

    def linear_feed_forward_approximation(self, state_below):
        """Linear pre-activation z = W u + b, returned for both P and H
        (pool_size 1 only)."""
        z = self.transformer.lmul(state_below) + self.b
        if self.pool_size != 1:
            raise NotImplementedError()
        return z, z

    def mf_update(self, state_below, state_above, layer_above = None, double_weights = False, iter_name = None):
        """Mean-field update of (P, H) given the layers below and above.

        `double_weights` doubles the bottom-up input (used when the top-down
        term is missing on the first pass of some inference schemes --
        NOTE(review): inferred from usage elsewhere in pylearn2; confirm).
        """
        self.input_space.validate(state_below)
        if self.requires_reformat:
            if not isinstance(state_below, tuple):
                for sb in get_debug_values(state_below):
                    if sb.shape[0] != self.dbm.batch_size:
                        raise ValueError("self.dbm.batch_size is %d but got shape of %d" % (self.dbm.batch_size, sb.shape[0]))
                    assert reduce(operator.mul, sb.shape[1:]) == self.input_dim
            state_below = self.input_space.format_as(state_below, self.desired_space)
        if iter_name is None:
            iter_name = 'anon'
        if state_above is not None:
            assert layer_above is not None
            msg = layer_above.downward_message(state_above)
            msg.name = 'msg_from_'+layer_above.layer_name+'_to_'+self.layer_name+'['+iter_name+']'
        else:
            msg = None
        if double_weights:
            state_below = 2. * state_below
            state_below.name = self.layer_name + '_'+iter_name + '_2state'
        z = self.transformer.lmul(state_below) + self.b
        if self.layer_name is not None and iter_name is not None:
            z.name = self.layer_name + '_' + iter_name + '_z'
        p, h = max_pool_channels(z, self.pool_size, msg)
        p.name = self.layer_name + '_p_' + iter_name
        h.name = self.layer_name + '_h_' + iter_name
        return p, h
class Softmax(HiddenLayer):
    """
    Softmax (multinomial) layer for a DBM: n_classes mutually exclusive
    units, typically used as the label layer.
    """

    # Key under which install_presynaptic_outputs publishes its shared batch.
    presynaptic_name = "presynaptic_Y_hat"

    def __init__(self, n_classes, layer_name, irange = None,
                 sparse_init = None, sparse_istdev = 1., W_lr_scale = None,
                 b_lr_scale = None,
                 max_col_norm = None,
                 copies = 1, center = False,
                 learn_init_inpainting_state = True):
        """
        n_classes: number of classes.
        irange / sparse_init: mutually exclusive weight-init schemes.
        W_lr_scale, b_lr_scale: optional per-parameter LR multipliers.
        max_col_norm: if set, clip columns of W to this norm after updates.
        center: subtract softmax(b) from states (centering trick).
        """
        super(Softmax, self).__init__()
        if isinstance(W_lr_scale, str):
            W_lr_scale = float(W_lr_scale)
        self.__dict__.update(locals())
        del self.self
        assert isinstance(n_classes, py_integer_types)
        self.output_space = VectorSpace(n_classes)
        self.b = sharedX( np.zeros((n_classes,)), name = 'softmax_b')
        if self.center:
            b = self.b.get_value()
            self.offset = sharedX(np.exp(b) / np.exp(b).sum())

    @functools.wraps(Model._modify_updates)
    def _modify_updates(self, updates):
        # Enforce the max column norm constraint on W, if configured.
        if not hasattr(self, 'max_col_norm'):
            self.max_col_norm = None
        if self.max_col_norm is not None:
            W = self.W
            if W in updates:
                updated_W = updates[W]
                col_norms = T.sqrt(T.sum(T.sqr(updated_W), axis=0))
                desired_norms = T.clip(col_norms, 0, self.max_col_norm)
                updates[W] = updated_W * (desired_norms / (1e-7 + col_norms))

    @functools.wraps(Model.get_lr_scalers)
    def get_lr_scalers(self):
        rval = OrderedDict()
        # Patch old pickle files
        if not hasattr(self, 'W_lr_scale'):
            self.W_lr_scale = None
        if self.W_lr_scale is not None:
            assert isinstance(self.W_lr_scale, float)
            rval[self.W] = self.W_lr_scale
        if not hasattr(self, 'b_lr_scale'):
            self.b_lr_scale = None
        if self.b_lr_scale is not None:
            assert isinstance(self.b_lr_scale, float)
            rval[self.b] = self.b_lr_scale
        return rval

    def get_total_state_space(self):
        """A softmax layer's total state is just its output distribution."""
        return self.output_space

    def get_monitoring_channels_from_state(self, state):
        """Statistics of the per-example maximum class probability."""
        mx = state.max(axis=1)
        return OrderedDict([
            ('mean_max_class', mx.mean()),
            ('max_max_class', mx.max()),
            ('min_max_class', mx.min())
        ])

    def set_input_space(self, space):
        """Configure the layer for input `space` and initialize W.

        Note that this resets the parameters.
        """
        self.input_space = space
        if not isinstance(space, Space):
            raise TypeError("Expected Space, got " +
                            str(space)+" of type "+str(type(space)))
        self.input_dim = space.get_total_dimension()
        self.needs_reformat = not isinstance(space, VectorSpace)
        self.desired_space = VectorSpace(self.input_dim)
        if not self.needs_reformat:
            assert self.desired_space == self.input_space
        rng = self.dbm.rng
        if self.irange is not None:
            assert self.sparse_init is None
            W = rng.uniform(-self.irange, self.irange, (self.input_dim, self.n_classes))
        else:
            assert self.sparse_init is not None
            W = np.zeros((self.input_dim, self.n_classes))
            for i in xrange(self.n_classes):
                for j in xrange(self.sparse_init):
                    idx = rng.randint(0, self.input_dim)
                    while W[idx, i] != 0.:
                        idx = rng.randint(0, self.input_dim)
                    W[idx, i] = rng.randn() * self.sparse_istdev
        self.W = sharedX(W, 'softmax_W' )
        self._params = [ self.b, self.W ]

    def get_weights_topo(self):
        """Return W reshaped to ('b', 0, 1, 'c') topology (Conv2DSpace
        input only)."""
        if not isinstance(self.input_space, Conv2DSpace):
            raise NotImplementedError()
        desired = self.W.get_value().T
        ipt = self.desired_space.format_as(desired, self.input_space)
        rval = Conv2DSpace.convert_numpy(ipt, self.input_space.axes, ('b', 0, 1, 'c'))
        return rval

    def get_weights(self):
        """Return W as a numpy array (VectorSpace input only)."""
        if not isinstance(self.input_space, VectorSpace):
            raise NotImplementedError()
        return self.W.get_value()

    def set_weights(self, weights):
        """Set W from a numpy array."""
        self.W.set_value(weights)

    def set_biases(self, biases, recenter=False):
        """Set b; optionally recompute the centering offset softmax(b)."""
        self.b.set_value(biases)
        if recenter:
            assert self.center
            self.offset.set_value( (np.exp(biases) / np.exp(biases).sum()).astype(self.offset.dtype))

    def get_biases(self):
        """Return b as a numpy array."""
        return self.b.get_value()

    def get_weights_format(self):
        return ('v', 'h')

    def sample(self, state_below = None, state_above = None,
                layer_above = None,
                theano_rng = None):
        """Draw a one-hot sample from softmax(W u + b)."""
        if self.copies != 1:
            raise NotImplementedError("need to draw self.copies samples and average them together.")
        if state_above is not None:
            # If you implement this case, also add a unit test for it.
            # Or at least add a warning that it is not tested.
            raise NotImplementedError()
        if theano_rng is None:
            raise ValueError("theano_rng is required; it just defaults to None so that it may appear after layer_above / state_above in the list.")
        self.input_space.validate(state_below)
        # patch old pickle files
        if not hasattr(self, 'needs_reformat'):
            self.needs_reformat = self.needs_reshape
            del self.needs_reshape
        if self.needs_reformat:
            state_below = self.input_space.format_as(state_below, self.desired_space)
        self.desired_space.validate(state_below)
        z = T.dot(state_below, self.W) + self.b
        h_exp = T.nnet.softmax(z)
        h_sample = theano_rng.multinomial(pvals = h_exp, dtype = h_exp.dtype)
        return h_sample

    def mf_update(self, state_below, state_above = None, layer_above = None, double_weights = False, iter_name = None):
        """Mean-field update: softmax(W u + b).  Top-down input and
        double_weights are not implemented for this layer."""
        if state_above is not None:
            raise NotImplementedError()
        if double_weights:
            raise NotImplementedError()
        self.input_space.validate(state_below)
        # patch old pickle files
        if not hasattr(self, 'needs_reformat'):
            self.needs_reformat = self.needs_reshape
            del self.needs_reshape
        if self.needs_reformat:
            state_below = self.input_space.format_as(state_below, self.desired_space)
        for value in get_debug_values(state_below):
            if value.shape[0] != self.dbm.batch_size:
                raise ValueError("state_below should have batch size "+str(self.dbm.batch_size)+" but has "+str(value.shape[0]))
        self.desired_space.validate(state_below)
        assert self.W.ndim == 2
        assert state_below.ndim == 2
        b = self.b
        Z = T.dot(state_below, self.W) + b
        rval = T.nnet.softmax(Z)
        for value in get_debug_values(rval):
            assert value.shape[0] == self.dbm.batch_size
        return rval

    def downward_message(self, downward_state):
        """Message to the layer below: y W^T (scaled by copies)."""
        if not hasattr(self, 'copies'):
            self.copies = 1
        rval = T.dot(downward_state, self.W.T) * self.copies
        rval = self.desired_space.format_as(rval, self.input_space)
        return rval

    def recons_cost(self, Y, Y_hat_unmasked, drop_mask_Y, scale):
        """Masked negative log-likelihood of targets Y under prediction
        Y_hat_unmasked.

        Y_hat_unmasked must literally be the output of a softmax op
        (optionally behind a Print op); its pre-activation z is recovered
        from the graph so log-probabilities can be computed stably.
        """
        Y_hat = Y_hat_unmasked
        assert hasattr(Y_hat, 'owner')
        owner = Y_hat.owner
        assert owner is not None
        op = owner.op
        if isinstance(op, Print):
            assert len(owner.inputs) == 1
            Y_hat, = owner.inputs
            owner = Y_hat.owner
            op = owner.op
        assert isinstance(op, T.nnet.Softmax)
        z, = owner.inputs
        assert z.ndim == 2
        # Stable log-softmax: shift by the rowwise max first.
        z = z - z.max(axis=1).dimshuffle(0, 'x')
        log_prob = z - T.log(T.exp(z).sum(axis=1).dimshuffle(0, 'x'))
        # we use sum and not mean because this is really one variable per row
        log_prob_of = (Y * log_prob).sum(axis=1)
        masked = log_prob_of * drop_mask_Y
        assert masked.ndim == 1
        rval = masked.mean() * scale * self.copies
        return - rval

    def init_mf_state(self):
        """Initial mean-field state: softmax(b), broadcast to the batch."""
        rval = T.nnet.softmax(self.b.dimshuffle('x', 0)) + T.alloc(0., self.dbm.batch_size, self.n_classes).astype(config.floatX)
        return rval

    def make_state(self, num_examples, numpy_rng):
        """Sample `num_examples` one-hot states from softmax(b) and return
        them in a shared variable (compiles and runs a Theano function)."""
        if self.copies != 1:
            raise NotImplementedError("need to make self.copies samples and average them together.")
        t1 = time.time()
        empty_input = self.output_space.get_origin_batch(num_examples)
        h_state = sharedX(empty_input)
        default_z = T.zeros_like(h_state) + self.b
        theano_rng = make_theano_rng(None, numpy_rng.randint(2 ** 16),
                                     which_method="binomial")
        h_exp = T.nnet.softmax(default_z)
        h_sample = theano_rng.multinomial(pvals = h_exp, dtype = h_exp.dtype)
        # Bug fix: previously a second, redundant sharedX buffer was
        # allocated here and the first one discarded.
        t2 = time.time()
        f = function([], updates = [(
            h_state, h_sample
            )])
        t3 = time.time()
        f()
        t4 = time.time()
        logger.info('{0}.make_state took {1}'.format(self, t4-t1))
        logger.info('\tcompose time: {0}'.format(t2-t1))
        logger.info('\tcompile time: {0}'.format(t3-t2))
        logger.info('\texecute time: {0}'.format(t4-t3))
        h_state.name = 'softmax_sample_shared'
        return h_state

    def make_symbolic_state(self, num_examples, theano_rng):
        """Symbolic counterpart of make_state."""
        if self.copies != 1:
            raise NotImplementedError("need to make self.copies samples and average them together.")
        default_z = T.alloc(self.b, num_examples, self.n_classes)
        h_exp = T.nnet.softmax(default_z)
        h_sample = theano_rng.multinomial(pvals=h_exp, dtype=h_exp.dtype)
        return h_sample

    def get_weight_decay(self, coeff):
        """Return coeff * sum of squared weights."""
        if isinstance(coeff, str):
            coeff = float(coeff)
        assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
        return coeff * T.sqr(self.W).sum()

    def upward_state(self, state):
        """State sent upward; centered if enabled.  (This layer is usually
        topmost, so this mostly matters for centering.)"""
        if self.center:
            return state - self.offset
        return state

    def downward_state(self, state):
        """State sent downward; centered if enabled."""
        if not hasattr(self, 'center'):
            self.center = False
        if self.center:
            return state - self.offset
        return state

    def expected_energy_term(self, state, average, state_below, average_below):
        """Per-example energy contribution: -y.b - (u W).y."""
        if self.center:
            state = state - self.offset
        self.input_space.validate(state_below)
        if self.needs_reformat:
            state_below = self.input_space.format_as(state_below, self.desired_space)
        self.desired_space.validate(state_below)
        # Energy function is linear so it doesn't matter if we're averaging or not
        # Specifically, our terms are -u^T W d - b^T d where u is the upward state of layer below
        # and d is the downward state of this layer
        bias_term = T.dot(state, self.b)
        weights_term = (T.dot(state_below, self.W) * state).sum(axis=1)
        rval = -bias_term - weights_term
        rval *= self.copies
        assert rval.ndim == 1
        return rval

    def init_inpainting_state(self, Y, noise):
        """Initial inpainting guess for the labels: softmax(b), or a
        softmax of gaussian noise when `noise`."""
        if noise:
            theano_rng = make_theano_rng(None, 2012+10+30, which_method="binomial")
            return T.nnet.softmax(theano_rng.normal(avg=0., size=Y.shape, std=1., dtype='float32'))
        rval = T.nnet.softmax(self.b)
        if not hasattr(self, 'learn_init_inpainting_state'):
            self.learn_init_inpainting_state = 1
        if not self.learn_init_inpainting_state:
            rval = block_gradient(rval)
        return rval

    def install_presynaptic_outputs(self, outputs_dict, batch_size):
        """Register a shared batch for this layer's presynaptic output
        under `presynaptic_name` in `outputs_dict`."""
        assert self.presynaptic_name not in outputs_dict
        outputs_dict[self.presynaptic_name] = self.output_space.make_shared_batch(batch_size, self.presynaptic_name)
class GaussianVisLayer(VisibleLayer):
    def __init__(self,
            rows = None,
            cols = None,
            learn_init_inpainting_state=True,
            channels = None,
            nvis = None,
            init_beta = 1.,
            min_beta = 1.,
            init_mu = None,
            tie_beta = None,
            tie_mu = None,
            bias_from_marginals = None,
            beta_lr_scale = 'by_sharing',
            axes = ('b', 0, 1, 'c')):
        """Gaussian visible layer with learnable mean mu and precision beta.

        Either `nvis` (flat VectorSpace input) or all of `rows`, `cols`,
        `channels` (Conv2DSpace input with axis order `axes`) must be given.
        tie_beta / tie_mu == 'locations' shares the parameter across spatial
        locations (one value per channel; Conv2DSpace only).
        bias_from_marginals: dataset whose feature means initialize mu
        (only implemented for the nvis case, per the check below).
        """
        warnings.warn("GaussianVisLayer math very faith based, need to finish working through gaussian.lyx")
        self.__dict__.update(locals())
        del self.self
        if bias_from_marginals is not None:
            del self.bias_from_marginals
            if self.nvis is None:
                raise NotImplementedError()
            assert init_mu is None
            init_mu = bias_from_marginals.X.mean(axis=0)
        if init_mu is None:
            init_mu = 0.
        if nvis is None:
            assert rows is not None
            assert cols is not None
            assert channels is not None
            self.space = Conv2DSpace(shape=[rows,cols], num_channels=channels, axes=axes)
            # To make GaussianVisLayer compatible with any axis ordering
            self.batch_axis=list(axes).index('b')
            self.axes_to_sum = list(range(len(axes)))
            self.axes_to_sum.remove(self.batch_axis)
        else:
            assert rows is None
            assert cols is None
            assert channels is None
            self.space = VectorSpace(nvis)
            self.axes_to_sum = 1
            self.batch_axis = None
        self.input_space = self.space
        origin = self.space.get_origin()
        beta_origin = origin.copy()
        assert tie_beta in [ None, 'locations']
        if tie_beta == 'locations':
            assert nvis is None
            # One beta per channel, shared across locations.
            beta_origin = np.zeros((self.space.num_channels,))
        self.beta = sharedX(beta_origin + init_beta,name = 'beta')
        assert self.beta.ndim == beta_origin.ndim
        mu_origin = origin.copy()
        assert tie_mu in [None, 'locations']
        if tie_mu == 'locations':
            assert nvis is None
            # One mu per channel, shared across locations.
            mu_origin = np.zeros((self.space.num_channels,))
        self.mu = sharedX( mu_origin + init_mu, name = 'mu')
        assert self.mu.ndim == mu_origin.ndim
def get_monitoring_channels(self):
rval = OrderedDict()
rval['beta_min'] = self.beta.min()
rval['beta_mean'] = self.beta.mean()
rval['beta_max'] = self.beta.max()
return rval
def get_params(self):
if self.mu is None:
return [self.beta]
return [self.beta, self.mu]
def get_lr_scalers(self):
rval = OrderedDict()
if self.nvis is None:
rows, cols = self.space.shape
num_loc = float(rows * cols)
assert self.tie_beta in [None, 'locations']
if self.beta_lr_scale == 'by_sharing':
if self.tie_beta == 'locations':
assert self.nvis is None
rval[self.beta] = 1. / num_loc
elif self.beta_lr_scale == None:
pass
else:
rval[self.beta] = self.beta_lr_scale
assert self.tie_mu in [None, 'locations']
if self.tie_mu == 'locations':
warn = True
assert self.nvis is None
rval[self.mu] = 1./num_loc
logger.warning("mu lr_scaler hardcoded to 1/sharing")
return rval
    @functools.wraps(Model._modify_updates)
    def _modify_updates(self, updates):
        # Keep the precision beta within [min_beta, 1e6] after each update.
        if self.beta in updates:
            updated_beta = updates[self.beta]
            updates[self.beta] = T.clip(updated_beta,
                    self.min_beta,1e6)
    def set_biases(self, bias):
        """Replace mu with a fresh shared variable built from `bias`.

        NOTE(review): unlike other layers' set_biases this rebinds the
        shared variable (rather than set_value) and takes no recenter
        flag -- confirm callers expect the rebinding.
        """
        self.mu = sharedX(bias, name = 'mu')
    def broadcasted_mu(self):
        """Return mu dimshuffled so it broadcasts over a batch in the
        input space (handles tied/untied and conv/vector cases)."""
        if self.tie_mu == 'locations':
            # One value per channel: broadcast over batch and both spatial axes.
            def f(x):
                if x == 'c':
                    return 0
                return 'x'
            axes = [f(ax) for ax in self.axes]
            rval = self.mu.dimshuffle(*axes)
        else:
            assert self.tie_mu is None
            if self.nvis is None:
                # Full topological mu: just add a broadcastable batch axis.
                axes = [0, 1, 2]
                axes.insert(self.axes.index('b'), 'x')
                rval = self.mu.dimshuffle(*axes)
            else:
                rval = self.mu.dimshuffle('x', 0)
        self.input_space.validate(rval)
        return rval
    def broadcasted_beta(self):
        """Return self.beta broadcastable over a batch (see broadcast_beta)."""
        return self.broadcast_beta(self.beta)
    def broadcast_beta(self, beta):
        """Dimshuffle `beta` so it broadcasts over a batch in the input
        space (same axis logic as broadcasted_mu)."""
        if self.tie_beta == 'locations':
            # One value per channel: broadcast over batch and both spatial axes.
            def f(x):
                if x == 'c':
                    return 0
                return 'x'
            axes = [f(ax) for ax in self.axes]
            rval = beta.dimshuffle(*axes)
        else:
            assert self.tie_beta is None
            if self.nvis is None:
                # Full topological beta: just add a broadcastable batch axis.
                axes = [0, 1, 2]
                axes.insert(self.axes.index('b'), 'x')
                rval = beta.dimshuffle(*axes)
            else:
                rval = beta.dimshuffle('x', 0)
        self.input_space.validate(rval)
        return rval
    def init_inpainting_state(self, V, drop_mask, noise = False, return_unmasked = False):
        """Initial guess for inpainting: mu at dropped units, V elsewhere.

        `drop_mask` is 1 at units to be inpainted. When `noise` is True the
        dropped units start from N(0, 1) samples instead of mu. Returns the
        combined guess, optionally together with the unmasked initialization.
        """
        unmasked = self.broadcasted_mu()
        if drop_mask is None:
            # No mask: the entire batch is initialized to mu.
            assert not noise
            assert not return_unmasked
            return unmasked
        masked_mu = unmasked * drop_mask
        if not hasattr(self, 'learn_init_inpainting_state'):
            # Backward compatibility with pickles lacking this attribute.
            self.learn_init_inpainting_state = True
        if not self.learn_init_inpainting_state:
            # Prevent gradients from flowing into mu through the init value.
            masked_mu = block_gradient(masked_mu)
        masked_mu.name = 'masked_mu'
        if noise:
            theano_rng = make_theano_rng(None, 42, which_method="binomial")
            unmasked = theano_rng.normal(avg = 0.,
                    std = 1., size = masked_mu.shape,
                    dtype = masked_mu.dtype)
            masked_mu = unmasked * drop_mask
            masked_mu.name = 'masked_noise'
        masked_V = V * (1-drop_mask)
        rval = masked_mu + masked_V
        rval.name = 'init_inpainting_state'
        if return_unmasked:
            return rval, unmasked
        return rval
    def expected_energy_term(self, state, average, state_below = None, average_below = None):
        """Per-example energy contribution: 0.5 * beta * (state - mu)^2, summed over units.

        This is a visible layer, so there is no layer below (both *_below
        arguments must be None). Integrating out variational parameters
        (average=True) is not implemented.
        """
        assert state_below is None
        assert average_below is None
        self.space.validate(state)
        if average:
            raise NotImplementedError(str(type(self))+" doesn't support integrating out variational parameters yet.")
        else:
            rval = 0.5 * (self.beta * T.sqr(state - self.mu)).sum(axis=self.axes_to_sum)
        assert rval.ndim == 1
        return rval
    def inpaint_update(self, state_above, layer_above, drop_mask = None, V = None,
                        return_unmasked = False):
        """One inpainting step: mean-field value is mu plus the top-down message.

        Where drop_mask is 0 the observed data V is kept; where it is 1 the
        reconstruction z is used. Optionally also returns the unmasked z.
        """
        msg = layer_above.downward_message(state_above)
        mu = self.broadcasted_mu()
        z = msg + mu
        z.name = 'inpainting_z_[unknown_iter]'
        if drop_mask is not None:
            rval = drop_mask * z + (1-drop_mask) * V
        else:
            rval = z
        rval.name = 'inpainted_V[unknown_iter]'
        if return_unmasked:
            return rval, z
        return rval
    def sample(self, state_below = None, state_above = None,
                layer_above = None,
                theano_rng = None):
        """Sample V ~ N(mu + top-down message, 1/beta) (Gibbs step for a visible layer)."""
        assert state_below is None
        msg = layer_above.downward_message(state_above)
        mu = self.mu
        z = msg + mu
        # beta is a precision, so the standard deviation is 1/sqrt(beta).
        rval = theano_rng.normal(size = z.shape, avg = z, dtype = z.dtype,
                       std = 1. / T.sqrt(self.beta))
        return rval
def recons_cost(self, V, V_hat_unmasked, drop_mask = None, use_sum=False):
return self._recons_cost(V=V, V_hat_unmasked=V_hat_unmasked, drop_mask=drop_mask, use_sum=use_sum, beta=self.beta)
def _recons_cost(self, V, V_hat_unmasked, beta, drop_mask=None, use_sum=False):
V_hat = V_hat_unmasked
assert V.ndim == V_hat.ndim
beta = self.broadcasted_beta()
unmasked_cost = 0.5 * beta * T.sqr(V-V_hat) - 0.5*T.log(beta / (2*np.pi))
assert unmasked_cost.ndim == V_hat.ndim
if drop_mask is None:
masked_cost = unmasked_cost
else:
masked_cost = drop_mask * unmasked_cost
if use_sum:
return masked_cost.mean(axis=0).sum()
return masked_cost.mean()
return masked_cost.mean()
def upward_state(self, total_state):
if self.nvis is None and total_state.ndim != 4:
raise ValueError("total_state should have 4 dimensions, has "+str(total_state.ndim))
assert total_state is not None
V = total_state
self.input_space.validate(V)
upward_state = (V - self.broadcasted_mu()) * self.broadcasted_beta()
return upward_state
    def make_state(self, num_examples, numpy_rng):
        """Sample a batch of visible states from N(mu, 1/beta) as a shared variable."""
        shape = [num_examples]
        if self.nvis is None:
            # Topological layout: (b, rows, cols, channels).
            rows, cols = self.space.shape
            channels = self.space.num_channels
            shape.append(rows)
            shape.append(cols)
            shape.append(channels)
        else:
            shape.append(self.nvis)
        sample = numpy_rng.randn(*shape)
        # Scale standard normal draws to std = 1/sqrt(beta), then shift by mu.
        sample *= 1./np.sqrt(self.beta.get_value())
        sample += self.mu.get_value()
        rval = sharedX(sample, name = 'v_sample_shared')
        return rval
def install_presynaptic_outputs(self, outputs_dict, batch_size):
outputs_dict['output_V_weighted_pred_sum'] = self.space.make_shared_batch(batch_size)
    def ensemble_prediction(self, symbolic, outputs_dict, ensemble):
        """Combine ensemble members' predictions as a beta-weighted average of V-hats."""
        # Numerator: running sum of beta_k * V_hat_k plus this member's term.
        weighted_pred_sum = outputs_dict['output_V_weighted_pred_sum'] \
                + self.broadcasted_beta() * symbolic
        # Denominator: the sum of all ensemble members' betas.
        beta_sum = sum(ensemble.get_ensemble_variants(self.beta))
        unmasked_V_hat = weighted_pred_sum / self.broadcast_beta(beta_sum)
        return unmasked_V_hat
    def ensemble_recons_cost(self, V, V_hat_unmasked, drop_mask=None,
                             use_sum=False, ensemble=None):
        """Reconstruction cost evaluated under the ensemble-averaged beta.

        NOTE(review): as written, _recons_cost overwrites its `beta` argument
        with self.broadcasted_beta(), so the averaged beta computed here is
        silently ignored — verify _recons_cost actually uses the argument.
        """
        beta = sum(ensemble.get_ensemble_variants(self.beta)) / ensemble.num_copies
        return self._recons_cost(V=V, V_hat_unmasked=V_hat_unmasked, beta=beta, drop_mask=drop_mask,
                use_sum=use_sum)
class ConvMaxPool(HiddenLayer):
    """Convolutional DBM hidden layer with probabilistic max pooling.

    The layer state is a pair (p, h): detector units h produced by a 2D
    convolution, and pooling units p obtained by max pooling h in
    non-overlapping pool_rows x pool_cols regions.
    """

    def __init__(self,
            output_channels,
            kernel_rows,
            kernel_cols,
            pool_rows,
            pool_cols,
            layer_name,
            center = False,
            irange = None,
            sparse_init = None,
            scale_by_sharing = True,
            init_bias = 0.,
            border_mode = 'valid',
            output_axes = ('b', 'c', 0, 1)):
        """Store hyperparameters and create the (per-channel) bias.

        Exactly one of `irange` (uniform init) and `sparse_init` must be
        given; the filters themselves are built later in set_input_space.
        """
        self.__dict__.update(locals())
        del self.self

        assert (irange is None) != (sparse_init is None)

        self.b = sharedX( np.zeros((output_channels,)) + init_bias, name = layer_name + '_b')
        assert border_mode in ['full','valid']
def broadcasted_bias(self):
assert self.b.ndim == 1
shuffle = [ 'x' ] * 4
shuffle[self.output_axes.index('c')] = 0
return self.b.dimshuffle(*shuffle)
def get_total_state_space(self):
return CompositeSpace((self.h_space, self.output_space))
    def set_input_space(self, space):
        """Configure the layer for `space`: build detector/pool spaces and filters.

        Raises TypeError unless `space` is a Conv2DSpace, and ValueError if
        the detector shape is not divisible by the pooling shape.
        """
        if not isinstance(space, Conv2DSpace):
            raise TypeError("ConvMaxPool can only act on a Conv2DSpace, but received " +
                    str(type(space))+" as input.")
        self.input_space = space
        self.input_rows, self.input_cols = space.shape
        self.input_channels = space.num_channels

        # Detector shape follows standard convolution arithmetic.
        if self.border_mode == 'valid':
            self.h_rows = self.input_rows - self.kernel_rows + 1
            self.h_cols = self.input_cols - self.kernel_cols + 1
        else:
            assert self.border_mode == 'full'
            self.h_rows = self.input_rows + self.kernel_rows - 1
            self.h_cols = self.input_cols + self.kernel_cols - 1

        if not( self.h_rows % self.pool_rows == 0):
            raise ValueError("h_rows = %d, pool_rows = %d. Should be divisible but remainder is %d" %
                    (self.h_rows, self.pool_rows, self.h_rows % self.pool_rows))
        assert self.h_cols % self.pool_cols == 0

        self.h_space = Conv2DSpace(shape = (self.h_rows, self.h_cols), num_channels = self.output_channels,
                axes = self.output_axes)
        # NOTE(review): `/` here is integer division under Python 2 (this file
        # uses xrange elsewhere); under Python 3 it would yield floats.
        self.output_space = Conv2DSpace(shape = (self.h_rows / self.pool_rows,
                                                self.h_cols / self.pool_cols),
                                                num_channels = self.output_channels,
                axes = self.output_axes)

        logger.info('{0}: detector shape: {1} '
                    'pool shape: {2}'.format(self.layer_name,
                                             self.h_space.shape,
                                             self.output_space.shape))

        # Pick the pooling implementation matching the axis layout.
        if tuple(self.output_axes) == ('b', 0, 1, 'c'):
            self.max_pool = max_pool_b01c
        elif tuple(self.output_axes) == ('b', 'c', 0, 1):
            self.max_pool = max_pool
        else:
            raise NotImplementedError()

        # Build the convolutional transformer (uniform or sparse init).
        if self.irange is not None:
            self.transformer = make_random_conv2D(self.irange, input_space = space,
                    output_space = self.h_space, kernel_shape = (self.kernel_rows, self.kernel_cols),
                    batch_size = self.dbm.batch_size, border_mode = self.border_mode, rng = self.dbm.rng)
        else:
            self.transformer = make_sparse_random_conv2D(self.sparse_init, input_space = space,
                    output_space = self.h_space, kernel_shape = (self.kernel_rows, self.kernel_cols),
                    batch_size = self.dbm.batch_size, border_mode = self.border_mode, rng = self.dbm.rng)
        self.transformer._filters.name = self.layer_name + '_W'

        W ,= self.transformer.get_params()
        assert W.name is not None

        if self.center:
            # Cache the initial mean-field state so states can be centered.
            p_ofs, h_ofs = self.init_mf_state()
            self.p_offset = sharedX(self.output_space.get_origin(), 'p_offset')
            self.h_offset = sharedX(self.h_space.get_origin(), 'h_offset')
            f = function([], updates={self.p_offset: p_ofs[0,:,:,:], self.h_offset: h_ofs[0,:,:,:]})
            f()
def get_params(self):
assert self.b.name is not None
W ,= self.transformer.get_params()
assert W.name is not None
return [ W, self.b]
def state_to_b01c(self, state):
if tuple(self.output_axes) == ('b',0,1,'c'):
return state
return [ Conv2DSpace.convert(elem, self.output_axes, ('b', 0, 1, 'c'))
for elem in state ]
    def get_range_rewards(self, state, coeffs):
        """Penalize small dynamic range of the (p, h) activations, per channel.

        For each requested state (pooling and/or detector), rewards a large
        max - min spread across examples and locations; the penalty per
        state is coeff * mean(1 - range).
        """
        rval = 0.

        if self.pool_rows == 1 and self.pool_cols == 1:
            # Pools of size 1 mean p == h: penalize only once.
            assert len(state) == 2
            assert isinstance(coeffs, float)
            _, state = state
            state = [state]
            coeffs = [coeffs]
        else:
            assert all([len(elem) == 2 for elem in [state, coeffs]])

        for s, c in safe_zip(state, coeffs):
            if c == 0.:
                continue
            # Theano can only differentiate max over one axis or all axes,
            # so the reduction over (0, 2, 3) is unrolled one axis at a time.
            assert self.h_space.axes == ('b', 'c', 0, 1)
            assert self.output_space.axes == ('b', 'c', 0, 1)
            mx = s.max(axis=3).max(axis=2).max(axis=0)
            assert hasattr(mx.owner.op, 'grad')
            # NOTE(review): `mn` uses .min then .max twice — presumably the
            # per-channel minimum was intended (.min on all three axes); verify.
            mn = s.min(axis=3).max(axis=2).max(axis=0)
            assert hasattr(mn.owner.op, 'grad')
            assert mx.ndim == 1
            assert mn.ndim == 1
            r = mx - mn
            rval += (1. - r).mean() * c

        return rval
    def get_l1_act_cost(self, state, target, coeff, eps):
        """L1 penalty pulling mean activations toward `target`, within tolerance `eps`.

        Applies coeff * mean(max(|mean_act - target| - eps, 0)) per requested
        state (pooling and/or detector), with per-channel means.
        """
        rval = 0.

        if self.pool_rows == 1 and self.pool_cols == 1:
            # Pools of size 1 mean p == h: penalize only once.
            assert len(state) == 2
            assert isinstance(target, float)
            assert isinstance(coeff, float)
            _, state = state
            state = [state]
            target = [target]
            coeff = [coeff]
            if eps is None:
                eps = 0.
            eps = [eps]
        else:
            if eps is None:
                eps = [0., 0.]
            assert all([len(elem) == 2 for elem in [state, target, coeff]])
            p_target, h_target = target
            # Within a pool, E[p] is the sum of E[h], so asking for more
            # active detectors than pools is usually a configuration mistake.
            if h_target > p_target and (coeff[0] != 0. and coeff[1] != 0.):
                warnings.warn("Do you really want to regularize the detector units to be more active than the pooling units?")

        for s, t, c, e in safe_zip(state, target, coeff, eps):
            if c == 0.:
                continue
            # Average over everything but the channel axis.
            m = s.mean(axis= [ ax for ax in range(4) if self.output_axes[ax] != 'c' ])
            assert m.ndim == 1
            rval += T.maximum(abs(m-t)-e,0.).mean()*c

        return rval
def get_lr_scalers(self):
if self.scale_by_sharing:
cols = self.h_space.shape
num_h = float(h_rows * h_cols)
return OrderedDict([(self.transformer._filters, 1./num_h),
(self.b, 1. / num_h)])
else:
return OrderedDict()
def upward_state(self, total_state):
p,h = total_state
if not hasattr(self, 'center'):
self.center = False
if self.center:
p -= self.p_offset
h -= self.h_offset
return p
def downward_state(self, total_state):
p,h = total_state
if not hasattr(self, 'center'):
self.center = False
if self.center:
p -= self.p_offset
h -= self.h_offset
return h
def get_monitoring_channels_from_state(self, state):
P, H = state
if tuple(self.output_axes) == ('b',0,1,'c'):
p_max = P.max(axis=(0,1,2))
p_min = P.min(axis=(0,1,2))
p_mean = P.mean(axis=(0,1,2))
else:
assert tuple(self.output_axes) == ('b','c',0,1)
p_max = P.max(axis=(0,2,3))
p_min = P.min(axis=(0,2,3))
p_mean = P.mean(axis=(0,2,3))
p_range = p_max - p_min
rval = {
'p_max_max' : p_max.max(),
'p_max_mean' : p_max.mean(),
'p_max_min' : p_max.min(),
'p_min_max' : p_min.max(),
'p_min_mean' : p_min.mean(),
'p_min_max' : p_min.max(),
'p_range_max' : p_range.max(),
'p_range_mean' : p_range.mean(),
'p_range_min' : p_range.min(),
'p_mean_max' : p_mean.max(),
'p_mean_mean' : p_mean.mean(),
'p_mean_min' : p_mean.min()
}
return rval
def get_weight_decay(self, coeffs):
W , = self.transformer.get_params()
return coeffs * T.sqr(W).sum()
    def mf_update(self, state_below, state_above, layer_above = None, double_weights = False, iter_name = None):
        """One mean-field update of (p, h) from the layers below and above.

        z = conv(state_below) + bias, optionally combined with the top-down
        message from layer_above, then max-pooled. `double_weights` doubles
        the bottom-up drive (used when initializing a DBM's mean field).
        """
        self.input_space.validate(state_below)

        if iter_name is None:
            iter_name = 'anon'

        if state_above is not None:
            assert layer_above is not None
            msg = layer_above.downward_message(state_above)
            msg.name = 'msg_from_'+layer_above.layer_name+'_to_'+self.layer_name+'['+iter_name+']'
        else:
            msg = None

        if not hasattr(state_below, 'ndim'):
            raise TypeError("state_below should be a TensorType, got " +
                    str(state_below) + " of type " + str(type(state_below)))
        if state_below.ndim != 4:
            raise ValueError("state_below should have ndim 4, has "+str(state_below.ndim))

        if double_weights:
            state_below = 2. * state_below
            state_below.name = self.layer_name + '_'+iter_name + '_2state'
        z = self.transformer.lmul(state_below) + self.broadcasted_bias()
        if self.layer_name is not None and iter_name is not None:
            z.name = self.layer_name + '_' + iter_name + '_z'
        p,h = self.max_pool(z, (self.pool_rows, self.pool_cols), msg)

        p.name = self.layer_name + '_p_' + iter_name
        h.name = self.layer_name + '_h_' + iter_name

        return p, h
    def sample(self, state_below = None, state_above = None,
                layer_above = None,
                theano_rng = None):
        """Gibbs sampling step: sample (p, h) given the layers below and above."""
        if state_above is not None:
            msg = layer_above.downward_message(state_above)
            try:
                self.output_space.validate(msg)
            except TypeError as e:
                reraise_as(TypeError(str(type(layer_above))+".downward_message gave something that was not the right type: "+str(e)))
        else:
            msg = None

        z = self.transformer.lmul(state_below) + self.broadcasted_bias()
        # max_pool returns both expectations and samples; only samples are used.
        p, h, p_sample, h_sample = self.max_pool(z,
                (self.pool_rows, self.pool_cols), msg, theano_rng)

        return p_sample, h_sample
def downward_message(self, downward_state):
self.h_space.validate(downward_state)
return self.transformer.lmul_T(downward_state)
def set_batch_size(self, batch_size):
self.transformer.set_batch_size(batch_size)
def get_weights_topo(self):
outp, inp, rows, cols = range(4)
raw = self.transformer._filters.get_value()
return np.transpose(raw,(outp,rows,cols,inp))
    def init_mf_state(self):
        """Initial mean-field state: pool the broadcasted bias with no input drive."""
        default_z = self.broadcasted_bias()
        shape = {
                'b': self.dbm.batch_size,
                0: self.h_space.shape[0],
                1: self.h_space.shape[1],
                'c': self.h_space.num_channels
                }
        # Add an explicit zero tensor of the full batch shape to work around
        # a Theano bug with broadcasted tensors.
        default_z += T.alloc(*([0.]+[shape[elem] for elem in self.h_space.axes])).astype(default_z.dtype)
        assert default_z.ndim == 4

        p, h = self.max_pool(
                z = default_z,
                pool_shape = (self.pool_rows, self.pool_cols))

        return p, h
def make_state(self, num_examples, numpy_rng):
t1 = time.time()
empty_input = self.h_space.get_origin_batch(self.dbm.batch_size)
h_state = sharedX(empty_input)
default_z = T.zeros_like(h_state) + self.broadcasted_bias()
theano_rng = make_theano_rng(None, numpy_rng.randint(2 ** 16),
which_method="binomial")
p_exp, h_exp, p_sample, h_sample = self.max_pool(
z = default_z,
pool_shape = (self.pool_rows, self.pool_cols),
theano_rng = theano_rng)
p_state = sharedX( self.output_space.get_origin_batch(
self.dbm.batch_size))
t2 = time.time()
f = function([], updates = [
(p_state, p_sample),
(h_state, h_sample)
])
t3 = time.time()
f()
t4 = time.time()
logger.info('{0}.make_state took'.format(self, t4-t1))
logger.info('\tcompose time: {0}'.format(t2-t1))
logger.info('\tcompile time: {0}'.format(t3-t2))
logger.info('\texecute time: {0}'.format(t4-t3))
p_state.name = 'p_sample_shared'
h_state.name = 'h_sample_shared'
return p_state, h_state
    def expected_energy_term(self, state, average, state_below, average_below):
        """Per-example energy contribution: -(h . bias) - (h . conv(v)).

        The energy is linear in the states, so the same expression serves
        whether the inputs are samples or mean-field expectations.
        """
        self.input_space.validate(state_below)

        downward_state = self.downward_state(state)
        self.h_space.validate(downward_state)

        bias_term = (downward_state * self.broadcasted_bias()).sum(axis=(1,2,3))
        weights_term = (self.transformer.lmul(state_below) * downward_state).sum(axis=(1,2,3))

        rval = -bias_term - weights_term

        assert rval.ndim == 1

        return rval
class ConvC01B_MaxPool(HiddenLayer):
    """Convolutional max-pooling DBM layer using the ('c', 0, 1, 'b') layout.

    Variant of ConvMaxPool targeting the cuda-convnet ('c', 0, 1, 'b')
    axis ordering; several methods (sample, make_state,
    expected_energy_term) are not yet ported to this layout.
    """

    def __init__(self,
            output_channels,
            kernel_shape,
            pool_rows,
            pool_cols,
            layer_name,
            center = False,
            irange = None,
            sparse_init = None,
            scale_by_sharing = True,
            init_bias = 0.,
            pad = 0,
            partial_sum = 1):
        """Store hyperparameters; filters/biases are built in set_input_space.

        Exactly one of `irange` (uniform init) and `sparse_init` must be
        given. The axis order is fixed to ('c', 0, 1, 'b').
        """
        self.__dict__.update(locals())
        del self.self

        assert (irange is None) != (sparse_init is None)

        self.output_axes = ('c', 0, 1, 'b')
        # Attributes expected by setup_detector_layer_c01b.
        self.detector_channels = output_channels
        self.tied_b = 1
def broadcasted_bias(self):
if self.b.ndim != 1:
raise NotImplementedError()
shuffle = [ 'x' ] * 4
shuffle[self.output_axes.index('c')] = 0
return self.b.dimshuffle(*shuffle)
def get_total_state_space(self):
return CompositeSpace((self.h_space, self.output_space))
    def set_input_space(self, space):
        """Configure the layer for `space`; build detector/pool spaces and filters.

        Delegates filter construction to setup_detector_layer_c01b and
        enforces the c01b axis layout and pool-divisibility constraints.
        """
        setup_detector_layer_c01b(layer=self,
                input_space=space,
                rng=self.dbm.rng,)

        if not tuple(space.axes) == ('c', 0, 1, 'b'):
            raise AssertionError("You're not using c01b inputs. Ian is enforcing c01b inputs while developing his pipeline to make sure it runs at maximal speed. If you really don't want to use c01b inputs, you can remove this check and things should work. If they don't work it's only because they're not tested.")
        if self.dummy_channels != 0:
            raise NotImplementedError(str(type(self))+" does not support adding dummy channels for cuda-convnet compatibility yet, you must implement that feature or use inputs with <=3 channels or a multiple of 4 channels")

        self.input_rows = self.input_space.shape[0]
        self.input_cols = self.input_space.shape[1]
        self.h_rows = self.detector_space.shape[0]
        self.h_cols = self.detector_space.shape[1]

        if not(self.h_rows % self.pool_rows == 0):
            raise ValueError(self.layer_name + ": h_rows = %d, pool_rows = %d. Should be divisible but remainder is %d" %
                    (self.h_rows, self.pool_rows, self.h_rows % self.pool_rows))
        assert self.h_cols % self.pool_cols == 0

        self.h_space = Conv2DSpace(shape = (self.h_rows, self.h_cols), num_channels = self.output_channels,
                axes = self.output_axes)
        # NOTE(review): `/` here is integer division under Python 2 (this file
        # uses xrange elsewhere); under Python 3 it would yield floats.
        self.output_space = Conv2DSpace(shape = (self.h_rows / self.pool_rows,
                                                self.h_cols / self.pool_cols),
                                                num_channels = self.output_channels,
                axes = self.output_axes)

        logger.info('{0} : detector shape: {1} '
                    'pool shape: {2}'.format(self.layer_name,
                                             self.h_space.shape,
                                             self.output_space.shape))

        assert tuple(self.output_axes) == ('c', 0, 1, 'b')
        self.max_pool = max_pool_c01b

        if self.center:
            # Cache the initial mean-field state so states can be centered.
            p_ofs, h_ofs = self.init_mf_state()
            self.p_offset = sharedX(self.output_space.get_origin(), 'p_offset')
            self.h_offset = sharedX(self.h_space.get_origin(), 'h_offset')
            f = function([], updates={self.p_offset: p_ofs[:,:,:,0], self.h_offset: h_ofs[:,:,:,0]})
            f()
def get_params(self):
assert self.b.name is not None
W ,= self.transformer.get_params()
assert W.name is not None
return [ W, self.b]
def state_to_b01c(self, state):
if tuple(self.output_axes) == ('b',0,1,'c'):
return state
return [ Conv2DSpace.convert(elem, self.output_axes, ('b', 0, 1, 'c'))
for elem in state ]
    def get_range_rewards(self, state, coeffs):
        """Penalize small dynamic range of the (p, h) activations, per channel.

        NOTE(review): the asserts below require ('b', 'c', 0, 1) axes, but
        this class fixes output_axes to ('c', 0, 1, 'b') in __init__, so
        this method appears to have been copied from ConvMaxPool and never
        ported — it would trip its own assertions here. Verify before use.
        """
        rval = 0.

        if self.pool_rows == 1 and self.pool_cols == 1:
            # If the pool size is 1 then pools = detectors
            # and we should not penalize pools and detectors separately
            assert len(state) == 2
            assert isinstance(coeffs, float)
            _, state = state
            state = [state]
            coeffs = [coeffs]
        else:
            assert all([len(elem) == 2 for elem in [state, coeffs]])

        for s, c in safe_zip(state, coeffs):
            if c == 0.:
                continue
            # Range over everything but the channel index
            # theano can only take gradient through max if the max is over 1 axis or all axes
            # so I manually unroll the max for the case I use here
            assert self.h_space.axes == ('b', 'c', 0, 1)
            assert self.output_space.axes == ('b', 'c', 0, 1)
            mx = s.max(axis=3).max(axis=2).max(axis=0)
            assert hasattr(mx.owner.op, 'grad')
            mn = s.min(axis=3).max(axis=2).max(axis=0)
            assert hasattr(mn.owner.op, 'grad')
            assert mx.ndim == 1
            assert mn.ndim == 1
            r = mx - mn
            rval += (1. - r).mean() * c

        return rval
    def get_l1_act_cost(self, state, target, coeff, eps):
        """L1 penalty pulling mean activations toward `target`, within tolerance `eps`.

        Applies coeff * mean(max(|mean_act - target| - eps, 0)) per requested
        state (pooling and/or detector), with per-channel means.
        """
        rval = 0.

        if self.pool_rows == 1 and self.pool_cols == 1:
            # If the pool size is 1 then pools = detectors
            # and we should not penalize pools and detectors separately
            assert len(state) == 2
            assert isinstance(target, float)
            assert isinstance(coeff, float)
            _, state = state
            state = [state]
            target = [target]
            coeff = [coeff]
            if eps is None:
                eps = 0.
            eps = [eps]
        else:
            if eps is None:
                eps = [0., 0.]
            assert all([len(elem) == 2 for elem in [state, target, coeff]])
            p_target, h_target = target
            if h_target > p_target and (coeff[0] != 0. and coeff[1] != 0.):
                # note that, within each group, E[p] is the sum of E[h]
                warnings.warn("Do you really want to regularize the detector units to be more active than the pooling units?")

        for s, t, c, e in safe_zip(state, target, coeff, eps):
            if c == 0.:
                continue
            # Average over everything but the channel index
            m = s.mean(axis= [ ax for ax in range(4) if self.output_axes[ax] != 'c' ])
            assert m.ndim == 1
            rval += T.maximum(abs(m-t)-e,0.).mean()*c

        return rval
def get_lr_scalers(self):
rval = OrderedDict()
if self.scale_by_sharing:
# scale each learning rate by 1 / # times param is reused
h_rows, h_cols = self.h_space.shape
num_h = float(h_rows * h_cols)
rval[self.transformer._filters] = 1. /num_h
rval[self.b] = 1. / num_h
return rval
def upward_state(self, total_state):
p,h = total_state
if not hasattr(self, 'center'):
self.center = False
if self.center:
p -= self.p_offset
h -= self.h_offset
return p
def downward_state(self, total_state):
p,h = total_state
if not hasattr(self, 'center'):
self.center = False
if self.center:
p -= self.p_offset
h -= self.h_offset
return h
def get_monitoring_channels_from_state(self, state):
P, H = state
axes = tuple([i for i, ax in enumerate(self.output_axes) if ax != 'c'])
p_max = P.max(axis=(0,1,2))
p_min = P.min(axis=(0,1,2))
p_mean = P.mean(axis=(0,1,2))
p_range = p_max - p_min
rval = {
'p_max_max' : p_max.max(),
'p_max_mean' : p_max.mean(),
'p_max_min' : p_max.min(),
'p_min_max' : p_min.max(),
'p_min_mean' : p_min.mean(),
'p_min_max' : p_min.max(),
'p_range_max' : p_range.max(),
'p_range_mean' : p_range.mean(),
'p_range_min' : p_range.min(),
'p_mean_max' : p_mean.max(),
'p_mean_mean' : p_mean.mean(),
'p_mean_min' : p_mean.min()
}
return rval
def get_weight_decay(self, coeffs):
W , = self.transformer.get_params()
return coeffs * T.sqr(W).sum()
    def mf_update(self, state_below, state_above, layer_above = None, double_weights = False, iter_name = None):
        """One mean-field update of (p, h) from the layers below and above.

        z = conv(state_below) + bias, optionally combined with the top-down
        message from layer_above, then max-pooled. `double_weights` doubles
        the bottom-up drive (used when initializing a DBM's mean field).
        """
        self.input_space.validate(state_below)

        if iter_name is None:
            iter_name = 'anon'

        if state_above is not None:
            assert layer_above is not None
            msg = layer_above.downward_message(state_above)
            msg.name = 'msg_from_'+layer_above.layer_name+'_to_'+self.layer_name+'['+iter_name+']'
        else:
            msg = None

        if not hasattr(state_below, 'ndim'):
            raise TypeError("state_below should be a TensorType, got " +
                    str(state_below) + " of type " + str(type(state_below)))
        if state_below.ndim != 4:
            raise ValueError("state_below should have ndim 4, has "+str(state_below.ndim))

        if double_weights:
            state_below = 2. * state_below
            state_below.name = self.layer_name + '_'+iter_name + '_2state'
        z = self.transformer.lmul(state_below) + self.broadcasted_bias()
        if self.layer_name is not None and iter_name is not None:
            z.name = self.layer_name + '_' + iter_name + '_z'
        p,h = self.max_pool(z, (self.pool_rows, self.pool_cols), msg)

        p.name = self.layer_name + '_p_' + iter_name
        h.name = self.layer_name + '_h_' + iter_name

        return p, h
    def sample(self, state_below = None, state_above = None,
                layer_above = None,
                theano_rng = None):
        """Gibbs sampling step — not yet ported to the c01b layout.

        The code after the raise is dead; it is kept as a template for the
        eventual port (copied from ConvMaxPool.sample).
        """
        raise NotImplementedError("Need to update for C01B")

        if state_above is not None:
            msg = layer_above.downward_message(state_above)
            try:
                self.output_space.validate(msg)
            except TypeError as e:
                reraise_as(TypeError(str(type(layer_above))+".downward_message gave something that was not the right type: "+str(e)))
        else:
            msg = None

        z = self.transformer.lmul(state_below) + self.broadcasted_bias()
        p, h, p_sample, h_sample = self.max_pool(z,
                (self.pool_rows, self.pool_cols), msg, theano_rng)

        return p_sample, h_sample
def downward_message(self, downward_state):
self.h_space.validate(downward_state)
return self.transformer.lmul_T(downward_state)
def set_batch_size(self, batch_size):
self.transformer.set_batch_size(batch_size)
def get_weights_topo(self):
return self.transformer.get_weights_topo()
    def init_mf_state(self):
        """Initial mean-field state: pool the broadcasted bias with no input drive."""
        default_z = self.broadcasted_bias()
        shape = {
                'b': self.dbm.batch_size,
                0: self.h_space.shape[0],
                1: self.h_space.shape[1],
                'c': self.h_space.num_channels
                }
        # work around theano bug with broadcasted stuff
        default_z += T.alloc(*([0.]+[shape[elem] for elem in self.h_space.axes])).astype(default_z.dtype)
        assert default_z.ndim == 4

        p, h = self.max_pool(
                z = default_z,
                pool_shape = (self.pool_rows, self.pool_cols))

        return p, h
    def make_state(self, num_examples, numpy_rng):
        """Sample an initial (p, h) state — not yet ported to the c01b layout.

        The code after the raise is dead; it is kept as a template for the
        eventual port (copied from ConvMaxPool.make_state).
        """
        raise NotImplementedError("Need to update for C01B")
        t1 = time.time()

        empty_input = self.h_space.get_origin_batch(self.dbm.batch_size)
        h_state = sharedX(empty_input)

        default_z = T.zeros_like(h_state) + self.broadcasted_bias()

        theano_rng = make_theano_rng(None, numpy_rng.randint(2 ** 16),
                                     which_method="binomial")

        p_exp, h_exp, p_sample, h_sample = self.max_pool(
                z = default_z,
                pool_shape = (self.pool_rows, self.pool_cols),
                theano_rng = theano_rng)

        p_state = sharedX( self.output_space.get_origin_batch(
            self.dbm.batch_size))

        t2 = time.time()

        f = function([], updates = [
            (p_state, p_sample),
            (h_state, h_sample)
            ])

        t3 = time.time()

        f()

        t4 = time.time()

        logger.info('{0}.make_state took {1}'.format(self, t4-t1))
        logger.info('\tcompose time: {0}'.format(t2-t1))
        logger.info('\tcompile time: {0}'.format(t3-t2))
        logger.info('\texecute time: {0}'.format(t4-t3))

        p_state.name = 'p_sample_shared'
        h_state.name = 'h_sample_shared'

        return p_state, h_state
    def expected_energy_term(self, state, average, state_below, average_below):
        """Energy contribution — not yet ported to the c01b layout.

        The code after the raise is dead; it is kept as a template for the
        eventual port (copied from ConvMaxPool.expected_energy_term).
        """
        raise NotImplementedError("Need to update for C01B")
        self.input_space.validate(state_below)

        downward_state = self.downward_state(state)
        self.h_space.validate(downward_state)

        # Energy function is linear so it doesn't matter if we're averaging or not
        # Specifically, our terms are -u^T W d - b^T d where u is the upward state of layer below
        # and d is the downward state of this layer

        bias_term = (downward_state * self.broadcasted_bias()).sum(axis=(1,2,3))
        weights_term = (self.transformer.lmul(state_below) * downward_state).sum(axis=(1,2,3))

        rval = -bias_term - weights_term

        assert rval.ndim == 1

        return rval
class BVMP_Gaussian(BinaryVectorMaxPool):
    """BinaryVectorMaxPool variant sitting on top of a Gaussian visible layer.

    Adds a beta-dependent correction (`beta_bias`) to the hidden biases to
    account for the Gaussian input layer's precision. The math is flagged
    by its author as unverified ("faith-based").
    """

    def __init__(self,
            input_layer,
            detector_layer_dim,
            pool_size,
            layer_name,
            irange = None,
            sparse_init = None,
            sparse_stdev = 1.,
            include_prob = 1.0,
            init_bias = 0.,
            W_lr_scale = None,
            b_lr_scale = None,
            center = False,
            mask_weights = None,
            max_col_norm = None,
            copies = 1):
        """Forward all arguments except `input_layer` to BinaryVectorMaxPool.

        `input_layer` is the Gaussian visible layer whose beta parameter
        this layer reads; it is stored on self rather than passed upward.
        """
        warnings.warn("BVMP_Gaussian math is very faith-based, need to complete gaussian.lyx")

        args = locals()

        del args['input_layer']
        del args['self']
        super(BVMP_Gaussian, self).__init__(**args)
        self.input_layer = input_layer
    def get_weights(self):
        """Return the weight values, optionally scaled by the input layer's beta.

        Interactively asks ('y'/'n') whether to multiply each weight row by
        the corresponding beta before returning.

        NOTE(review): under Python 2 (this file uses xrange elsewhere),
        `input()` evaluates the typed expression — `raw_input` (or a
        six-style compat import) is presumably intended; verify.
        """
        if self.requires_reformat:
            # This is not really an unimplemented case.
            # We actually don't know how to format the weights
            # as a single matrix, because the input is not a matrix
            raise NotImplementedError()

        W ,= self.transformer.get_params()
        W = W.get_value()

        x = input("multiply by beta?")
        if x == 'y':
            beta = self.input_layer.beta.get_value()
            return (W.T * beta).T
        assert x == 'n'
        return W
    def set_weights(self, weights):
        """Deliberately disabled: setting raw weights would desynchronize the
        beta-scaled view returned by get_weights. The code after the raise
        is dead (kept from the parent implementation).
        """
        raise NotImplementedError("beta would make get_weights for visualization not correspond to set_weights")

        W, = self.transformer.get_params()
        W.set_value(weights)
    def set_biases(self, biases, recenter = False):
        """Set the hidden biases; optionally recompute the centering offset.

        Recentering requires centering to be enabled and is only implemented
        for pool_size == 1 (offset = sigmoid(b)).
        """
        self.b.set_value(biases)
        if recenter:
            assert self.center
            if self.pool_size != 1:
                raise NotImplementedError()
            self.offset.set_value(sigmoid_numpy(self.b.get_value()))
def get_biases(self):
return self.b.get_value() - self.beta_bias().eval()
    def sample(self, state_below = None, state_above = None,
                layer_above = None,
                theano_rng = None):
        """Gibbs step — disabled until the beta correction is derived.

        The code after the raise is dead; it is the parent's sampling logic
        kept as a template (it does not include beta_bias).
        """
        raise NotImplementedError("need to account for beta")
        if self.copies != 1:
            raise NotImplementedError()

        if theano_rng is None:
            raise ValueError("theano_rng is required; it just defaults to None so that it may appear after layer_above / state_above in the list.")

        if state_above is not None:
            msg = layer_above.downward_message(state_above)
        else:
            msg = None

        if self.requires_reformat:
            state_below = self.input_space.format_as(state_below, self.desired_space)

        z = self.transformer.lmul(state_below) + self.b
        p, h, p_sample, h_sample = max_pool_channels(z,
                self.pool_size, msg, theano_rng)

        return p_sample, h_sample
def downward_message(self, downward_state):
rval = self.transformer.lmul_T(downward_state)
if self.requires_reformat:
rval = self.desired_space.format_as(rval, self.input_space)
return rval * self.copies
    def init_mf_state(self):
        """Initial mean-field state from the (beta-corrected) biases alone."""
        # work around theano bug with broadcasted vectors
        z = T.alloc(0., self.dbm.batch_size, self.detector_layer_dim).astype(self.b.dtype) + \
                self.b.dimshuffle('x', 0) + self.beta_bias()
        rval = max_pool_channels(z = z,
                pool_size = self.pool_size)
        return rval
    def make_state(self, num_examples, numpy_rng):
        """Sample an initial state — disabled until the beta correction is derived.

        The code after the raise is dead; it is the parent's implementation
        kept as a template (it does not include beta_bias).
        """
        raise NotImplementedError("need to account for beta")
        if not hasattr(self, 'copies'):
            self.copies = 1

        if self.copies != 1:
            raise NotImplementedError()

        empty_input = self.h_space.get_origin_batch(num_examples)
        empty_output = self.output_space.get_origin_batch(num_examples)

        h_state = sharedX(empty_input)
        p_state = sharedX(empty_output)

        theano_rng = make_theano_rng(None, numpy_rng.randint(2 ** 16),
                                     which_method="binomial")

        default_z = T.zeros_like(h_state) + self.b

        p_exp, h_exp, p_sample, h_sample = max_pool_channels(
                z = default_z,
                pool_size = self.pool_size,
                theano_rng = theano_rng)

        assert h_sample.dtype == default_z.dtype

        f = function([], updates = [
            (p_state , p_sample),
            (h_state , h_sample)
            ])

        f()

        p_state.name = 'p_sample_shared'
        h_state.name = 'h_sample_shared'

        return p_state, h_state
    def expected_energy_term(self, state, average, state_below, average_below):
        """Energy contribution — disabled until the beta correction is derived.

        The code after the raise is dead; it is the parent's implementation
        kept as a template.
        """
        raise NotImplementedError("need to account for beta, and maybe some oether stuff")

        # Don't need to do anything special for centering, upward_state / downward state
        # make it all just work

        self.input_space.validate(state_below)

        if self.requires_reformat:
            if not isinstance(state_below, tuple):
                for sb in get_debug_values(state_below):
                    if sb.shape[0] != self.dbm.batch_size:
                        raise ValueError("self.dbm.batch_size is %d but got shape of %d" % (self.dbm.batch_size, sb.shape[0]))
                    assert reduce(operator.mul, sb.shape[1:]) == self.input_dim

            state_below = self.input_space.format_as(state_below, self.desired_space)

        downward_state = self.downward_state(state)
        self.h_space.validate(downward_state)

        bias_term = T.dot(downward_state, self.b)
        weights_term = (self.transformer.lmul(state_below) * downward_state).sum(axis=1)

        rval = -bias_term - weights_term

        assert rval.ndim == 1

        return rval * self.copies
    def linear_feed_forward_approximation(self, state_below):
        """Linearized forward pass — disabled until the beta correction is derived.

        The code after the raise is dead; kept as a template from the parent.
        """
        raise NotImplementedError("need to account for beta")
        z = self.transformer.lmul(state_below) + self.b

        if self.pool_size != 1:
            raise NotImplementedError()

        return z, z
def beta_bias(self):
W, = self.transformer.get_params()
beta = self.input_layer.beta
assert beta.ndim == 1
return - 0.5 * T.dot(beta, T.sqr(W))
    def mf_update(self, state_below, state_above, layer_above = None, double_weights = False, iter_name = None):
        """Mean-field update of (p, h), adding the Gaussian beta_bias correction.

        Identical to the parent's update except that z also includes
        self.beta_bias(); `double_weights` doubles the bottom-up drive.
        """
        self.input_space.validate(state_below)

        if self.requires_reformat:
            if not isinstance(state_below, tuple):
                for sb in get_debug_values(state_below):
                    if sb.shape[0] != self.dbm.batch_size:
                        raise ValueError("self.dbm.batch_size is %d but got shape of %d" % (self.dbm.batch_size, sb.shape[0]))
                    assert reduce(operator.mul, sb.shape[1:]) == self.input_dim

            state_below = self.input_space.format_as(state_below, self.desired_space)

        if iter_name is None:
            iter_name = 'anon'

        if state_above is not None:
            assert layer_above is not None
            msg = layer_above.downward_message(state_above)
            msg.name = 'msg_from_'+layer_above.layer_name+'_to_'+self.layer_name+'['+iter_name+']'
        else:
            msg = None

        if double_weights:
            state_below = 2. * state_below
            state_below.name = self.layer_name + '_'+iter_name + '_2state'
        z = self.transformer.lmul(state_below) + self.b + self.beta_bias()
        if self.layer_name is not None and iter_name is not None:
            z.name = self.layer_name + '_' + iter_name + '_z'
        p,h = max_pool_channels(z, self.pool_size, msg)

        p.name = self.layer_name + '_p_' + iter_name
        h.name = self.layer_name + '_h_' + iter_name

        return p, h
class CompositeLayer(HiddenLayer):
    """A DBM hidden layer made of several parallel component layers.

    The layer's state is a tuple of the components' states. When the input
    is a CompositeSpace, `inputs_to_components` may route subsets of the
    input components to specific sub-layers.
    """

    def __init__(self, layer_name, components, inputs_to_components = None):
        """Store the components and validate the routing map.

        Parameters
        ----------
        layer_name : str
        components : list of HiddenLayer
        inputs_to_components : dict mapping input-component index -> list of
            component indices that input feeds, or None to route everything
            to every component.
        """
        self.layer_name = layer_name

        assert isinstance(components, list)
        for component in components:
            assert isinstance(component, HiddenLayer)
        self.num_components = len(components)
        # (the original assigned self.components twice; once suffices)
        self.components = list(components)

        if inputs_to_components is None:
            self.inputs_to_components = None
        else:
            if not isinstance(inputs_to_components, dict):
                raise TypeError("CompositeLayer expected inputs_to_components to be a dict, got "+str(type(inputs_to_components)))
            self.inputs_to_components = OrderedDict()
            for key in inputs_to_components:
                assert isinstance(key, int)
                assert key >= 0
                value = inputs_to_components[key]
                assert isinstance(value, list)
                assert all([isinstance(elem, int) for elem in value])
                assert min(value) >= 0
                assert max(value) < self.num_components
                self.inputs_to_components[key] = list(value)
def set_input_space(self, space):
self.input_space = space
if not isinstance(space, CompositeSpace):
assert self.inputs_to_components is None
self.routing_needed = False
else:
if self.inputs_to_components is None:
self.routing_needed = False
else:
self.routing_needed = True
assert max(self.inputs_to_components) < space.num_components
# Invert the dictionary
self.components_to_inputs = OrderedDict()
for i in xrange(self.num_components):
inputs = []
for j in xrange(space.num_components):
if i in self.inputs_to_components[j]:
inputs.append(i)
if len(inputs) < space.num_components:
self.components_to_inputs[i] = inputs
for i, component in enumerate(self.components):
if self.routing_needed and i in self.components_to_inputs:
cur_space = space.restrict(self.components_to_inputs[i])
else:
cur_space = space
component.set_input_space(cur_space)
self.output_space = CompositeSpace([ component.get_output_space() for component in self.components ])
def make_state(self, num_examples, numpy_rng):
return tuple(component.make_state(num_examples, numpy_rng) for
component in self.components)
def get_total_state_space(self):
return CompositeSpace([component.get_total_state_space() for component in self.components])
def set_batch_size(self, batch_size):
for component in self.components:
component.set_batch_size(batch_size)
def set_dbm(self, dbm):
for component in self.components:
component.set_dbm(dbm)
def mf_update(self, state_below, state_above, layer_above = None, double_weights = False, iter_name = None):
rval = []
for i, component in enumerate(self.components):
if self.routing_needed and i in self.components_to_inputs:
cur_state_below =self.input_space.restrict_batch(state_below, self.components_to_inputs[i])
else:
cur_state_below = state_below
class RoutingLayer(object):
def __init__(self, idx, layer):
self.__dict__.update(locals())
del self.self
self.layer_name = 'route_'+str(idx)+'_'+layer.layer_name
def downward_message(self, state):
return self.layer.downward_message(state)[self.idx]
if layer_above is not None:
cur_layer_above = RoutingLayer(i, layer_above)
else:
cur_layer_above = None
mf_update = component.mf_update(state_below = cur_state_below,
state_above = state_above,
layer_above = cur_layer_above,
double_weights = double_weights,
iter_name = iter_name)
rval.append(mf_update)
return tuple(rval)
def init_mf_state(self):
return tuple([component.init_mf_state() for component in self.components])
def get_weight_decay(self, coeffs):
return sum([component.get_weight_decay(coeff) for component, coeff
in safe_zip(self.components, coeffs)])
def upward_state(self, total_state):
return tuple([component.upward_state(elem)
for component, elem in
safe_zip(self.components, total_state)])
def downward_state(self, total_state):
return tuple([component.downward_state(elem)
for component, elem in
safe_zip(self.components, total_state)])
def downward_message(self, downward_state):
if isinstance(self.input_space, CompositeSpace):
num_input_components = self.input_space.num_components
else:
num_input_components = 1
rval = [ None ] * num_input_components
def add(x, y):
if x is None:
return y
if y is None:
return x
return x + y
for i, packed in enumerate(safe_zip(self.components, downward_state)):
component, state = packed
if self.routing_needed and i in self.components_to_inputs:
input_idx = self.components_to_inputs[i]
else:
input_idx = range(num_input_components)
partial_message = component.downward_message(state)
if len(input_idx) == 1:
partial_message = [ partial_message ]
assert len(input_idx) == len(partial_message)
for idx, msg in safe_zip(input_idx, partial_message):
rval[idx] = add(rval[idx], msg)
if len(rval) == 1:
rval = rval[0]
else:
rval = tuple(rval)
self.input_space.validate(rval)
return rval
def get_l1_act_cost(self, state, target, coeff, eps):
return sum([ comp.get_l1_act_cost(s, t, c, e) \
for comp, s, t, c, e in safe_zip(self.components, state, target, coeff, eps)])
def get_range_rewards(self, state, coeffs):
return sum([comp.get_range_rewards(s, c)
for comp, s, c in safe_zip(self.components, state, coeffs)])
def get_params(self):
return reduce(lambda x, y: safe_union(x, y),
[component.get_params() for component in self.components])
def get_weights_topo(self):
logger.info('Get topological weights for which layer?')
for i, component in enumerate(self.components):
logger.info('{0} {1}'.format(i, component.layer_name))
x = input()
return self.components[int(x)].get_weights_topo()
def get_monitoring_channels_from_state(self, state):
rval = OrderedDict()
for layer, s in safe_zip(self.components, state):
d = layer.get_monitoring_channels_from_state(s)
for key in d:
rval[layer.layer_name+'_'+key] = d[key]
return rval
def sample(self, state_below = None, state_above = None,
layer_above = None,
theano_rng = None):
rval = []
for i, component in enumerate(self.components):
if self.routing_needed and i in self.components_to_inputs:
cur_state_below =self.input_space.restrict_batch(state_below, self.components_to_inputs[i])
else:
cur_state_below = state_below
class RoutingLayer(object):
def __init__(self, idx, layer):
self.__dict__.update(locals())
del self.self
self.layer_name = 'route_'+str(idx)+'_'+layer.layer_name
def downward_message(self, state):
return self.layer.downward_message(state)[self.idx]
if layer_above is not None:
cur_layer_above = RoutingLayer(i, layer_above)
else:
cur_layer_above = None
sample = component.sample(state_below = cur_state_below,
state_above = state_above,
layer_above = cur_layer_above,
theano_rng = theano_rng)
rval.append(sample)
return tuple(rval)
| true | true |
f731cf8514305b867ae3294d7fcffca29ab4ef7a | 8,011 | py | Python | stdplugins/update.py | birhan-15/PepeBot | e76f80eab21ce396a785aff271bcbae9cce6d2ba | [
"Apache-2.0"
] | 1 | 2020-08-09T11:43:20.000Z | 2020-08-09T11:43:20.000Z | stdplugins/update.py | mikisol95/UniDragon | 55d8aa0659c8406fe1681e977fab27e902dd44f6 | [
"Apache-2.0"
] | null | null | null | stdplugins/update.py | mikisol95/UniDragon | 55d8aa0659c8406fe1681e977fab27e902dd44f6 | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
# Credits goes to @AvinashReddy3108 for creating this plugin
# edited to work on Uniborg by @Mayur_Karaniya
#
"""
This module updates the userbot based on the upstream revision.

Command: .update
Usage: Checks whether the main userbot repository has any updates and
shows a changelog if so.

Command: .update now
Usage: Updates your userbot if there are any updates in the main
userbot repository.

Credits go to @AvinashReddy3108 for creating this plugin;
edited to work on Uniborg by @Mayur_Karaniya.
This includes a huge fix thanks to @SpEcHiDe and @devpatel_73.
"""
from os import environ, execle, path, remove
import asyncio
import sys
from git import Repo
from git.exc import GitCommandError, InvalidGitRepositoryError, NoSuchPathError
from os import remove
import sys
from git import Repo
from git.exc import GitCommandError
from git.exc import InvalidGitRepositoryError
from git.exc import NoSuchPathError
import asyncio
from uniborg.util import admin_cmd
import sys
import asyncio
from sample_config import Config
#
# =============================== Basic Constants ===========================
# All values below come from sample_config.Config (user configuration).
# Upstream repository URL, e.g. "https://github.com/prono69/PepeBot.git".
UPSTREAM_REPO_URL = Config.UPSTREAM_REPO_URL
# Repository link, e.g. "https://github.com/prono69/PepeBot.git".
REPO_LINK = Config.REPO_LINK
# Heroku API key; only needed when updating a Heroku-hosted deployment.
HEROKU_API_KEY = Config.HEROKU_API_KEY
# Heroku app name; only needed when updating a Heroku-hosted deployment.
HEROKU_APP_NAME = Config.HEROKU_APP_NAME
# Heroku memes toggle (not referenced elsewhere in this module).
HEROKU_MEMEZ = Config.HEROKU_MEMEZ
# Name of your git repository (shown in the force-update hint message).
GIT_REPO_NAME = Config.GIT_REPO_NAME
# =============================== Basic Constants ===========================
# Path to requirements.txt at the repository root (three directories up).
requirements_path = path.join(
    path.dirname(path.dirname(path.dirname(__file__))), 'requirements.txt')
async def gen_chlog(repo, diff):
    """Build a changelog string from the commits selected by ``diff``.

    Each commit becomes one newline-terminated line of the form
    ``•[dd/mm/yy]: <summary> by <author>``; returns '' for no commits.
    """
    date_fmt = "%d/%m/%y"
    lines = [
        f'•[{commit.committed_datetime.strftime(date_fmt)}]: '
        f'{commit.summary} by <{commit.author}>\n'
        for commit in repo.iter_commits(diff)
    ]
    return ''.join(lines)
async def updateme_requirements():
    """Reinstall the bot's requirements via ``pip install -r``.

    Returns the subprocess return code, or ``repr`` of the exception if
    the subprocess could not be launched or awaited.
    """
    command = ' '.join(
        [sys.executable, "-m", "pip", "install", "-r",
         str(requirements_path)])
    try:
        proc = await asyncio.create_subprocess_shell(
            command,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE)
        await proc.communicate()
    except Exception as exc:
        return repr(exc)
    return proc.returncode
@borg.on(admin_cmd(pattern="update ?(.*)", outgoing=True, allow_sudo=True))
async def upstream(ups):
    """Handle the ``.update`` command.

    ``.update`` checks whether the upstream repository has new commits and
    shows a changelog; ``.update now`` pulls (or force-syncs) the new code,
    reinstalls requirements, and restarts the bot.  On Heroku deployments
    the update is instead pushed to the Heroku git remote, triggering a
    dyno rebuild.
    """
    await ups.edit("`Checking for updates, please wait....`")
    # Optional argument after ".update"; "now" actually performs the update.
    conf = ups.pattern_match.group(1)
    off_repo = UPSTREAM_REPO_URL
    force_updateme = False
    # Open the working directory as a git repository, reporting any failure
    # back into the chat message.
    try:
        txt = "`Oops.. Updater cannot continue due to "
        txt += "some problems occured`\n\n**LOGTRACE:**\n"
        repo = Repo()
    except NoSuchPathError as error:
        # FIX: the original called repo.__del__() here, but `repo` is never
        # bound when Repo() raises, which guaranteed a NameError.
        await ups.edit(f'{txt}\n`directory {error} is not found`')
        return
    except GitCommandError as error:
        # FIX: same unbound-`repo` cleanup bug removed here.
        await ups.edit(f'{txt}\n`Early failure! {error}`')
        return
    except InvalidGitRepositoryError as error:
        # Not a git checkout: offer a forced sync, or perform it when the
        # user already passed "now".
        if conf != "now":
            await ups.edit(
                f"**Unfortunately, the directory {error} does not seem to be a git repository.\
\nOr Maybe it just needs a sync verification with {GIT_REPO_NAME}\
\nBut we can fix that by force updating the userbot using** `.update now.`"
            )
            return
        # Initialize a fresh repository tracking upstream/master.
        repo = Repo.init()
        origin = repo.create_remote('upstream', off_repo)
        origin.fetch()
        force_updateme = True
        repo.create_head('master', origin.refs.master)
        repo.heads.master.set_tracking_branch(origin.refs.master)
        repo.heads.master.checkout(True)
    # Only the official 'master' branch can be updated automatically.
    ac_br = repo.active_branch.name
    if ac_br != 'master':
        await ups.edit(
            f'**[UPDATER]:**` Looks like you are using your own custom branch ({ac_br}). '
            'in that case, Updater is unable to identify '
            'which branch is to be merged. '
            'please checkout to any official branch`')
        repo.__del__()
        return
    # Ensure an 'upstream' remote exists; ignore "already exists" errors.
    try:
        repo.create_remote('upstream', off_repo)
    except BaseException:
        pass
    ups_rem = repo.remote('upstream')
    ups_rem.fetch(ac_br)
    # Build the changelog of commits we are behind by.
    changelog = await gen_chlog(repo, f'HEAD..upstream/{ac_br}')
    if not changelog and not force_updateme:
        await ups.edit(
            f'\n`Your BOT is` **up-to-date** `with` **{ac_br}**\n')
        repo.__del__()
        return
    # Plain ".update": only report the changelog, do not modify anything.
    if conf != "now" and not force_updateme:
        changelog_str = f'**New UPDATE available for [{ac_br}]:\n\nCHANGELOG:**\n`{changelog}`'
        if len(changelog_str) > 4096:
            # Telegram message limit: ship the changelog as a file instead.
            await ups.edit("`Changelog is too big, view the file to see it.`")
            file = open("output.txt", "w+")
            file.write(changelog_str)
            file.close()
            await ups.client.send_file(
                ups.chat_id,
                "output.txt",
                reply_to=ups.id,
            )
            remove("output.txt")
        else:
            await ups.edit(changelog_str)
        await ups.respond('do \"`.update now`\" to update')
        return
    if force_updateme:
        await ups.edit(
            '`Force-Syncing to latest stable userbot code, please wait...`')
    else:
        await ups.edit('`Updating userbot, please wait....`')
    # Heroku deployment: push the new code to the Heroku git remote, which
    # triggers a dyno rebuild (and thus a restart).
    if HEROKU_API_KEY is not None:
        import heroku3
        heroku = heroku3.from_key(HEROKU_API_KEY)
        heroku_app = None
        heroku_applications = heroku.apps()
        if not HEROKU_APP_NAME:
            await ups.edit(
                '`[HEROKU MEMEZ] Please set up the HEROKU_APP_NAME variable to be able to update userbot.`'
            )
            repo.__del__()
            return
        # Find the configured app among the account's applications.
        for app in heroku_applications:
            if app.name == HEROKU_APP_NAME:
                heroku_app = app
                break
        if heroku_app is None:
            await ups.edit(
                f'{txt}\n`Invalid Heroku credentials for updating userbot dyno.`'
            )
            repo.__del__()
            return
        await ups.edit('`[HEROKU MEMEZ]\
\nUserbot dyno build in progress, please wait for it to complete.`'
                       )
        # Hard-reset the local tree to upstream before pushing.
        ups_rem.fetch(ac_br)
        repo.git.reset("--hard", "FETCH_HEAD")
        # Embed the API key into the Heroku git URL for authentication.
        heroku_git_url = heroku_app.git_url.replace(
            "https://", "https://api:" + HEROKU_API_KEY + "@")
        if "heroku" in repo.remotes:
            remote = repo.remote("heroku")
            remote.set_url(heroku_git_url)
        else:
            remote = repo.create_remote("heroku", heroku_git_url)
        try:
            remote.push(refspec="HEAD:refs/heads/master", force=True)
        except GitCommandError as error:
            await ups.edit(f'{txt}\n`Here is the error log:\n{error}`')
            repo.__del__()
            return
        await ups.edit('`Successfully Updated!\n'
                       'Restarting, please wait...`')
    else:
        # Classic (non-Heroku) updater: pull, falling back to a hard reset
        # when the pull cannot fast-forward.
        try:
            ups_rem.pull(ac_br)
        except GitCommandError:
            repo.git.reset("--hard", "FETCH_HEAD")
    await updateme_requirements()
    await ups.edit('`Successfully Updated!\n'
                   'Bot is restarting... Wait for a second!`')
    # Replace the current process with a fresh bot instance.
    args = [sys.executable, "-m", "stdborg"]
    execle(sys.executable, *args, environ)
    return
| 35.135965 | 107 | 0.615653 |
from os import environ, execle, path, remove
import asyncio
import sys
from git import Repo
from git.exc import GitCommandError, InvalidGitRepositoryError, NoSuchPathError
from os import remove
import sys
from git import Repo
from git.exc import GitCommandError
from git.exc import InvalidGitRepositoryError
from git.exc import NoSuchPathError
import asyncio
from uniborg.util import admin_cmd
import sys
import asyncio
from sample_config import Config
UPSTREAM_REPO_URL = Config.UPSTREAM_REPO_URL
REPO_LINK = Config.REPO_LINK
HEROKU_API_KEY = Config.HEROKU_API_KEY
HEROKU_APP_NAME = Config.HEROKU_APP_NAME
HEROKU_MEMEZ = Config.HEROKU_MEMEZ
GIT_REPO_NAME = Config.GIT_REPO_NAME
requirements_path = path.join(
path.dirname(path.dirname(path.dirname(__file__))), 'requirements.txt')
async def gen_chlog(repo, diff):
ch_log = ''
d_form = "%d/%m/%y"
for c in repo.iter_commits(diff):
ch_log += f'•[{c.committed_datetime.strftime(d_form)}]: {c.summary} by <{c.author}>\n'
return ch_log
async def updateme_requirements():
reqs = str(requirements_path)
try:
process = await asyncio.create_subprocess_shell(
' '.join([sys.executable, "-m", "pip", "install", "-r", reqs]),
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE)
await process.communicate()
return process.returncode
except Exception as e:
return repr(e)
@borg.on(admin_cmd(pattern="update ?(.*)", outgoing=True, allow_sudo=True))
async def upstream(ups):
await ups.edit("`Checking for updates, please wait....`")
conf = ups.pattern_match.group(1)
off_repo = UPSTREAM_REPO_URL
force_updateme = False
try:
txt = "`Oops.. Updater cannot continue due to "
txt += "some problems occured`\n\n**LOGTRACE:**\n"
repo = Repo()
except NoSuchPathError as error:
await ups.edit(f'{txt}\n`directory {error} is not found`')
repo.__del__()
return
except GitCommandError as error:
await ups.edit(f'{txt}\n`Early failure! {error}`')
repo.__del__()
return
except InvalidGitRepositoryError as error:
if conf != "now":
await ups.edit(
f"**Unfortunately, the directory {error} does not seem to be a git repository.\
\nOr Maybe it just needs a sync verification with {GIT_REPO_NAME}\
\nBut we can fix that by force updating the userbot using** `.update now.`"
)
return
repo = Repo.init()
origin = repo.create_remote('upstream', off_repo)
origin.fetch()
force_updateme = True
repo.create_head('master', origin.refs.master)
repo.heads.master.set_tracking_branch(origin.refs.master)
repo.heads.master.checkout(True)
ac_br = repo.active_branch.name
if ac_br != 'master':
await ups.edit(
f'**[UPDATER]:**` Looks like you are using your own custom branch ({ac_br}). '
'in that case, Updater is unable to identify '
'which branch is to be merged. '
'please checkout to any official branch`')
repo.__del__()
return
try:
repo.create_remote('upstream', off_repo)
except BaseException:
pass
ups_rem = repo.remote('upstream')
ups_rem.fetch(ac_br)
changelog = await gen_chlog(repo, f'HEAD..upstream/{ac_br}')
if not changelog and not force_updateme:
await ups.edit(
f'\n`Your BOT is` **up-to-date** `with` **{ac_br}**\n')
repo.__del__()
return
if conf != "now" and not force_updateme:
changelog_str = f'**New UPDATE available for [{ac_br}]:\n\nCHANGELOG:**\n`{changelog}`'
if len(changelog_str) > 4096:
await ups.edit("`Changelog is too big, view the file to see it.`")
file = open("output.txt", "w+")
file.write(changelog_str)
file.close()
await ups.client.send_file(
ups.chat_id,
"output.txt",
reply_to=ups.id,
)
remove("output.txt")
else:
await ups.edit(changelog_str)
await ups.respond('do \"`.update now`\" to update')
return
if force_updateme:
await ups.edit(
'`Force-Syncing to latest stable userbot code, please wait...`')
else:
await ups.edit('`Updating userbot, please wait....`')
if HEROKU_API_KEY is not None:
import heroku3
heroku = heroku3.from_key(HEROKU_API_KEY)
heroku_app = None
heroku_applications = heroku.apps()
if not HEROKU_APP_NAME:
await ups.edit(
'`[HEROKU MEMEZ] Please set up the HEROKU_APP_NAME variable to be able to update userbot.`'
)
repo.__del__()
return
for app in heroku_applications:
if app.name == HEROKU_APP_NAME:
heroku_app = app
break
if heroku_app is None:
await ups.edit(
f'{txt}\n`Invalid Heroku credentials for updating userbot dyno.`'
)
repo.__del__()
return
await ups.edit('`[HEROKU MEMEZ]\
\nUserbot dyno build in progress, please wait for it to complete.`'
)
ups_rem.fetch(ac_br)
repo.git.reset("--hard", "FETCH_HEAD")
heroku_git_url = heroku_app.git_url.replace(
"https://", "https://api:" + HEROKU_API_KEY + "@")
if "heroku" in repo.remotes:
remote = repo.remote("heroku")
remote.set_url(heroku_git_url)
else:
remote = repo.create_remote("heroku", heroku_git_url)
try:
remote.push(refspec="HEAD:refs/heads/master", force=True)
except GitCommandError as error:
await ups.edit(f'{txt}\n`Here is the error log:\n{error}`')
repo.__del__()
return
await ups.edit('`Successfully Updated!\n'
'Restarting, please wait...`')
else:
try:
ups_rem.pull(ac_br)
except GitCommandError:
repo.git.reset("--hard", "FETCH_HEAD")
await updateme_requirements()
await ups.edit('`Successfully Updated!\n'
'Bot is restarting... Wait for a second!`')
args = [sys.executable, "-m", "stdborg"]
execle(sys.executable, *args, environ)
return
| true | true |
f731cf92be64b50cb66f64fd5686247ba4b5e5d3 | 518 | py | Python | params.py | Jensbeltman/ai2ludo | b4acbdb319e044cc3db07e5dc8c8eba7955aa9f7 | [
"MIT"
] | null | null | null | params.py | Jensbeltman/ai2ludo | b4acbdb319e044cc3db07e5dc8c8eba7955aa9f7 | [
"MIT"
] | null | null | null | params.py | Jensbeltman/ai2ludo | b4acbdb319e044cc3db07e5dc8c8eba7955aa9f7 | [
"MIT"
] | null | null | null | ##Local indexes
# Local (player-relative) board indexes.
homeI = 0   # home position
startI = 1  # start square
# Goal area
goalI = 57  # local goal index
goalAreaStartI = 52  # local index where the goal area starts
# Stars
starI = [6,12,19,25,32,38,45,51]  # star square indexes
starAtGoalI = 51  # star directly in front of the goal area
# Globes
globeAtStartI = [1]  # globe on the start square
globeEnemy = [14,27,40]  # NOTE(review): presumably globes on enemy start squares -- confirm
globeSafeI = [1,9,22,35,48]  # NOTE(review): presumably globes safe for this player -- confirm
globeI = [1,9,14,22,27,35,40,48]  # all globe square indexes
# Global game parameters: number of players, pieces, per-piece params, etc.
n_players = 4
n_pieces = 4
n_pieceParams = 10
# NOTE(review): the semantics of the four values below (special die values /
# sent-home flags?) are not evident from this file -- confirm before relying
# on these comments.
globeDie = 3
starDie = 5
sentHomePlayer = 1
sentHomeEnemy = 2
| 17.862069 | 55 | 0.706564 | rtI = 1
goalI = 57
goalAreaStartI = 52
starI = [6,12,19,25,32,38,45,51]
starAtGoalI = 51
globeAtStartI = [1]
globeEnemy = [14,27,40]
globeSafeI = [1,9,22,35,48]
globeI = [1,9,14,22,27,35,40,48]
n_players = 4
n_pieces = 4
n_pieceParams = 10
globeDie = 3
starDie = 5
sentHomePlayer = 1
sentHomeEnemy = 2
| true | true |
f731d08063ca1f99f82b11fcdb5162109f2bc026 | 448 | py | Python | applications/signals.py | mattorgill/registration | 96effc1a826e81365f5f01ac0435e80419313b07 | [
"MIT"
] | 5 | 2020-07-22T19:18:09.000Z | 2021-02-05T21:30:42.000Z | applications/signals.py | mattorgill/registration | 96effc1a826e81365f5f01ac0435e80419313b07 | [
"MIT"
] | 9 | 2021-01-27T21:42:51.000Z | 2022-03-12T00:20:25.000Z | applications/signals.py | mattorgill/registration | 96effc1a826e81365f5f01ac0435e80419313b07 | [
"MIT"
] | 1 | 2018-02-25T02:05:30.000Z | 2018-02-25T02:05:30.000Z | from django.db.models.signals import post_save
from django.dispatch import receiver
from applications import models
# Delete DraftApplication when application submitted
@receiver(post_save, sender=models.Application)
def clean_draft_application(sender, instance, created, *args, **kwargs):
    """Remove a user's DraftApplication once a real Application is created.

    Fires on every ``Application`` post_save but acts only on the initial
    creation; plain updates leave any draft untouched.
    """
    if not created:
        return None
    # The submitted application supersedes any saved draft for this user.
    models.DraftApplication.objects.filter(user=instance.user).delete()
| 32 | 72 | 0.785714 | from django.db.models.signals import post_save
from django.dispatch import receiver
from applications import models
@receiver(post_save, sender=models.Application)
def clean_draft_application(sender, instance, created, *args, **kwargs):
if not created:
return None
models.DraftApplication.objects.filter(user=instance.user).delete()
| true | true |
f731d0a14032c4b2950bf53f828df39fd8c4fedd | 12,274 | py | Python | nn_dataflow/core/layer.py | joeshow79/nn_dataflow | 279440452148ebf327992bd178a37cd5fd5330c5 | [
"BSD-3-Clause"
] | null | null | null | nn_dataflow/core/layer.py | joeshow79/nn_dataflow | 279440452148ebf327992bd178a37cd5fd5330c5 | [
"BSD-3-Clause"
] | null | null | null | nn_dataflow/core/layer.py | joeshow79/nn_dataflow | 279440452148ebf327992bd178a37cd5fd5330c5 | [
"BSD-3-Clause"
] | null | null | null | """ $lic$
Copyright (C) 2016-2019 by The Board of Trustees of Stanford University
This program is free software: you can redistribute it and/or modify it under
the terms of the Modified BSD-3 License as published by the Open Source
Initiative.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the BSD-3 License for more details.
You should have received a copy of the Modified BSD-3 License along with this
program. If not, see <https://opensource.org/licenses/BSD-3-Clause>.
"""
from . import data_category_enum as de
from . import loop_enum as le
from .. import util
from .data_dim_loops import DataDimLoops
class Layer(util.ContentHashClass):
    '''
    Base NN layer.
    Includes only the output neuron parameters.
    nofm: # ofmap channels
    hofm, wofm: ofmap height/width
    htrd, wtrd: stride height/width
    '''
    def __init__(self, nofm, sofm, strd=1):
        '''
        nofm: number of output fmap channels.
        sofm: output fmap spatial size; an int (square) or an (h, w) pair.
        strd: stride; an int (same both dims) or an (h, w) pair.
        '''
        # Accept either a scalar (square fmap) or an explicit (h, w) pair.
        if isinstance(sofm, int):
            hofm = sofm
            wofm = sofm
        elif len(sofm) == 2:
            hofm = sofm[0]
            wofm = sofm[1]
        else:
            raise ValueError('Layer: sofm is invalid ({}), '
                             'needs to be either one integer or '
                             'a pair of integers'.format(sofm))
        assert hofm > 0 and wofm > 0
        # Same scalar-or-pair convention for the stride.
        if isinstance(strd, int):
            htrd = strd
            wtrd = strd
        elif len(strd) == 2:
            htrd = strd[0]
            wtrd = strd[1]
        else:
            raise ValueError('Layer: strd is invalid ({}), '
                             'needs to be either one integer or '
                             'a pair of integers'.format(strd))
        assert htrd > 0 and wtrd > 0
        self.nofm = nofm
        self.hofm = hofm
        self.wofm = wofm
        self.htrd = htrd
        self.wtrd = wtrd
    @staticmethod
    def data_loops():
        ''' Dimension loops of the data. '''
        raise NotImplementedError
    def input_layer(self):
        ''' Get the input layer parameters. '''
        raise NotImplementedError(self.__class__.__name__)
    @property
    def nifm(self):
        ''' Number of fmap channels of input layer. '''
        return self.input_layer().nofm
    @property
    def hifm(self):
        ''' Fmap height of input layer. '''
        return self.input_layer().hofm
    @property
    def wifm(self):
        ''' Fmap width of input layer. '''
        return self.input_layer().wofm
    def ofmap_size(self, batch_size=1, word_size=1):
        '''
        Get size of one output fmap with `batch_size`.
        If `word_size` is set to word byte size, return size in bytes.
        '''
        return self.hofm * self.wofm * batch_size * word_size
    def total_ofmap_size(self, batch_size=1, word_size=1):
        '''
        Get total size of all output fmaps with `batch_size`.
        If `word_size` is set to word byte size, return size in bytes.
        '''
        return self.nofm * self.ofmap_size(batch_size, word_size)
    def ifmap_size(self, batch_size=1, word_size=1):
        '''
        Get size of one input fmap with `batch_size`.
        If `word_size` is set to word byte size, return size in bytes.
        '''
        # This layer's ifmap is the input layer's ofmap.
        return self.input_layer().ofmap_size(batch_size, word_size)
    def total_ifmap_size(self, batch_size=1, word_size=1):
        '''
        Get total size of all input fmaps with `batch_size`.
        If `word_size` is set to word byte size, return size in bytes.
        '''
        return self.input_layer().total_ofmap_size(batch_size, word_size)
    def ops_per_neuron(self):
        ''' Number of operations per neuron. '''
        raise NotImplementedError(self.__class__.__name__)
    def total_ops(self, batch_size=1):
        ''' Get total number of operations. '''
        return self.total_ofmap_size() * self.ops_per_neuron() * batch_size
    def is_valid_padding_sifm(self, sifm):
        ''' Whether the given `sifm` is valid when allowing padding. '''
        if isinstance(sifm, int):
            hifm = sifm
            wifm = sifm
        elif len(sifm) == 2:
            hifm = sifm[0]
            wifm = sifm[1]
        else:
            raise ValueError('Layer: sifm is invalid ({}), '
                             'needs to be either one integer or '
                             'a pair of integers'.format(sifm))
        # The candidate size must lie between the size implied by the
        # ofmap/stride and the actual (possibly padded) ifmap size, in
        # either order -- hence the sorted() range.
        h_padding_rng = sorted((self.hofm * self.htrd, self.hifm))
        w_padding_rng = sorted((self.wofm * self.wtrd, self.wifm))
        return (h_padding_rng[0] <= hifm <= h_padding_rng[1]
                and w_padding_rng[0] <= wifm <= w_padding_rng[1])
    def __repr__(self):
        return '{}({})'.format(
            self.__class__.__name__,
            ', '.join([
                'nofm={}'.format(repr(self.nofm)),
                'sofm={}'.format(repr((self.hofm, self.wofm))),
                'strd={}'.format(repr((self.htrd, self.wtrd)))]))
class InputLayer(Layer):
    '''
    NN input layer parameters.

    Has no input layer of its own and performs no computation; only its
    ofmaps exist as data.
    '''
    @staticmethod
    def data_loops():
        ''' Dimension loops of the data. '''
        # No filters or ifmaps; ofmaps loop over output channels and batch.
        loops = [None] * de.NUM
        loops[de.OFM] = DataDimLoops(le.OFM, le.BAT)
        loops[de.FIL] = DataDimLoops()
        loops[de.IFM] = DataDimLoops()
        return tuple(loops)
    def input_layer(self):
        ''' The input layer has no input layer of its own. '''
        return None
    def ops_per_neuron(self):
        ''' No computation is performed by the input layer. '''
        return 0
class ConvLayer(Layer):
    '''
    NN convolutional layer parameters.
    nifm (C): # ifmap channels
    nofm (M): # ofmap channels
    hifm, wifm (H): ifmap height/width
    hofm, wofm (E): ofmap height/width
    hfil, wfil (R): weight filter width/height
    htrd, wtrd (U): stride height/width
    '''
    def __init__(self, nifm, nofm, sofm, sfil, strd=1):
        '''
        nifm: number of input fmap channels.
        nofm: number of output fmap channels.
        sofm: output fmap spatial size; an int or an (h, w) pair.
        sfil: filter spatial size; an int or an (h, w) pair.
        strd: stride; an int or an (h, w) pair.
        '''
        super(ConvLayer, self).__init__(nofm, sofm, strd=strd)
        # Accept either a scalar (square filter) or an explicit (h, w) pair.
        if isinstance(sfil, int):
            hfil = sfil
            wfil = sfil
        elif len(sfil) == 2:
            hfil = sfil[0]
            wfil = sfil[1]
        else:
            raise ValueError('ConvLayer: sfil is invalid ({}), '
                             'needs to be either one integer or '
                             'a pair of integers'.format(sfil))
        self.hfil = hfil
        self.wfil = wfil
        # Derive the (unpadded) ifmap size from ofmap size, filter, stride.
        hifm = self.hfil + (self.hofm - 1) * self.htrd
        wifm = self.wfil + (self.wofm - 1) * self.wtrd
        self.inlayer = Layer(nifm, (hifm, wifm))
    @staticmethod
    def data_loops():
        ''' Dimension loops of the data. '''
        dls = [None] * de.NUM
        dls[de.FIL] = DataDimLoops(le.IFM, le.OFM)
        dls[de.IFM] = DataDimLoops(le.IFM, le.BAT)
        dls[de.OFM] = DataDimLoops(le.OFM, le.BAT)
        return tuple(dls)
    def input_layer(self):
        ''' Get the input layer parameters. '''
        return self.inlayer
    def ops_per_neuron(self):
        # 2D convolution across all ifmap channels.
        return self.hfil * self.wfil * self.nifm
    def filter_size(self, word_size=1):
        '''
        Get size of one weight filter.
        If `word_size` is set to word byte size, return size in bytes.
        '''
        return self.hfil * self.wfil * word_size
    def total_filter_size(self, word_size=1):
        '''
        Get total size of all weight filters.
        If `word_size` is set to word byte size, return size in bytes.
        '''
        # One filter per (ifmap channel, ofmap channel) pair.
        return self.nifm * self.nofm * self.filter_size(word_size)
    def __repr__(self):
        return '{}({})'.format(
            self.__class__.__name__,
            ', '.join([
                'nifm={}'.format(repr(self.nifm)),
                'nofm={}'.format(repr(self.nofm)),
                'sofm={}'.format(repr((self.hofm, self.wofm))),
                'sfil={}'.format(repr((self.hfil, self.wfil))),
                'strd={}'.format(repr((self.htrd, self.wtrd)))]))
class FCLayer(ConvLayer):
    '''
    NN fully-connected layer parameters.

    Modeled as a degenerate convolution: the filter covers the whole
    ifmap (hifm = hfil, wifm = wfil), the stride is 1, and every ofmap
    is a single point (hofm = wofm = 1).
    '''
    def __init__(self, nifm, nofm, sfil=1):
        '''
        nifm: number of input fmap channels.
        nofm: number of output fmap channels.
        sfil: filter (= ifmap) spatial size; an int or an (h, w) pair.
        '''
        super(FCLayer, self).__init__(nifm, nofm, 1, sfil)
        assert self.hofm == 1
        assert self.wofm == 1
    def __repr__(self):
        attrs = ', '.join([
            'nifm={}'.format(repr(self.nifm)),
            'nofm={}'.format(repr(self.nofm)),
            'sfil={}'.format(repr((self.hfil, self.wfil)))])
        return '{}({})'.format(self.__class__.__name__, attrs)
class LocalRegionLayer(Layer):
    '''
    NN layer which computes on a local region. The layer has no or limited
    shared weights, whose impact can be ignored during scheduling.
    Includes pooling layer, normalization layer, and element-wise layer.
    '''
    def __init__(self, nofm, sofm, nreg, sreg, ntrd=1, strd=1):
        '''
        nofm: number of output fmap channels.
        sofm: output fmap spatial size; an int or an (h, w) pair.
        nreg: local region size across fmap channels.
        sreg: local region spatial size; an int or an (h, w) pair.
        ntrd: stride across fmap channels.
        strd: spatial stride; an int or an (h, w) pair.
        '''
        super(LocalRegionLayer, self).__init__(nofm, sofm, strd=strd)
        # Accept either a scalar (square region) or an explicit (h, w) pair.
        if isinstance(sreg, int):
            hreg = sreg
            wreg = sreg
        elif len(sreg) == 2:
            hreg = sreg[0]
            wreg = sreg[1]
        else:
            raise ValueError('LocalRegionLayer: sreg is invalid ({}), '
                             'needs to be either one integer or '
                             'a pair of integers'.format(sreg))
        # A region spans either fmap channels (nreg) or space (hreg x wreg),
        # never both at once.
        if nreg > 1 and (hreg * wreg) > 1:
            raise ValueError('LocalRegionLayer: local region cannot be a mix '
                             'of both n ({}) and h & w ({}, {})'
                             .format(nreg, hreg, wreg))
        self.nreg = nreg
        self.hreg = hreg
        self.wreg = wreg
        self.ntrd = ntrd
        nifm = self.nofm * self.ntrd  # ignore all-zero padding channels.
        # Derive the (unpadded) ifmap size from ofmap size, region, stride.
        hifm = self.hreg + (self.hofm - 1) * self.htrd
        wifm = self.wreg + (self.wofm - 1) * self.wtrd
        self.inlayer = Layer(nifm, (hifm, wifm))
    @staticmethod
    def data_loops():
        ''' Dimension loops of the data. '''
        dls = [None] * de.NUM
        dls[de.FIL] = DataDimLoops()
        dls[de.IFM] = DataDimLoops(le.OFM, le.BAT)
        dls[de.OFM] = DataDimLoops(le.OFM, le.BAT)
        return tuple(dls)
    def input_layer(self):
        ''' Get the input layer parameters. '''
        return self.inlayer
    def ops_per_neuron(self):
        # Each output point corresponds to merging a local region.
        return self.region_size()
    def region_size(self):
        ''' The size of the local region corresponding to one output point. '''
        return self.nreg * self.hreg * self.wreg
    def __repr__(self):
        return '{}({})'.format(
            self.__class__.__name__,
            ', '.join([
                'nofm={}'.format(repr(self.nofm)),
                'sofm={}'.format(repr((self.hofm, self.wofm))),
                'nreg={}'.format(repr(self.nreg)),
                'sreg={}'.format(repr((self.hreg, self.wreg))),
                'ntrd={}'.format(repr(self.ntrd)),
                'strd={}'.format(repr((self.htrd, self.wtrd)))]))
class PoolingLayer(LocalRegionLayer):
    '''
    NN pooling layer parameters.

    A LocalRegionLayer restricted to a purely spatial region:
    nreg = ntrd = 1, so pooling never crosses fmap channels.
    '''
    def __init__(self, nofm, sofm, sreg, strd=None):
        '''
        nofm: number of output fmap channels.
        sofm: output fmap spatial size; an int or an (h, w) pair.
        sreg: pooling region spatial size; an int or an (h, w) pair.
        strd: spatial stride; defaults to `sreg` (non-overlapping windows).
        '''
        eff_strd = sreg if strd is None else strd
        super(PoolingLayer, self).__init__(nofm, sofm, 1, sreg,
                                           ntrd=1, strd=eff_strd)
        assert self.nreg == 1
        assert self.ntrd == 1
    def __repr__(self):
        attrs = ', '.join([
            'nofm={}'.format(repr(self.nofm)),
            'sofm={}'.format(repr((self.hofm, self.wofm))),
            'sreg={}'.format(repr((self.hreg, self.wreg))),
            'strd={}'.format(repr((self.htrd, self.wtrd)))])
        return '{}({})'.format(self.__class__.__name__, attrs)
class EltwiseLayer(LocalRegionLayer):
    '''
    NN element-wise layer parameters.

    A LocalRegionLayer whose region spans only channels: sreg = 1 (a
    single spatial point) and nreg = ntrd, so each output point merges
    `nreg` input channels element-wise.
    '''
    def __init__(self, nofm, sofm, nreg):
        '''
        nofm: number of output fmap channels.
        sofm: output fmap spatial size; an int or an (h, w) pair.
        nreg: number of input channels merged into each output channel.
        '''
        super(EltwiseLayer, self).__init__(nofm, sofm, nreg, 1,
                                           ntrd=nreg, strd=1)
        assert self.hreg == 1
        assert self.wreg == 1
    def __repr__(self):
        attrs = ', '.join([
            'nofm={}'.format(repr(self.nofm)),
            'sofm={}'.format(repr((self.hofm, self.wofm))),
            'nreg={}'.format(repr(self.nreg))])
        return '{}({})'.format(self.__class__.__name__, attrs)
| 31.715762 | 79 | 0.554913 |
from . import data_category_enum as de
from . import loop_enum as le
from .. import util
from .data_dim_loops import DataDimLoops
class Layer(util.ContentHashClass):
def __init__(self, nofm, sofm, strd=1):
if isinstance(sofm, int):
hofm = sofm
wofm = sofm
elif len(sofm) == 2:
hofm = sofm[0]
wofm = sofm[1]
else:
raise ValueError('Layer: sofm is invalid ({}), '
'needs to be either one integer or '
'a pair of integers'.format(sofm))
assert hofm > 0 and wofm > 0
if isinstance(strd, int):
htrd = strd
wtrd = strd
elif len(strd) == 2:
htrd = strd[0]
wtrd = strd[1]
else:
raise ValueError('Layer: strd is invalid ({}), '
'needs to be either one integer or '
'a pair of integers'.format(strd))
assert htrd > 0 and wtrd > 0
self.nofm = nofm
self.hofm = hofm
self.wofm = wofm
self.htrd = htrd
self.wtrd = wtrd
@staticmethod
def data_loops():
raise NotImplementedError
def input_layer(self):
raise NotImplementedError(self.__class__.__name__)
@property
def nifm(self):
return self.input_layer().nofm
@property
def hifm(self):
return self.input_layer().hofm
@property
def wifm(self):
return self.input_layer().wofm
def ofmap_size(self, batch_size=1, word_size=1):
return self.hofm * self.wofm * batch_size * word_size
def total_ofmap_size(self, batch_size=1, word_size=1):
return self.nofm * self.ofmap_size(batch_size, word_size)
def ifmap_size(self, batch_size=1, word_size=1):
return self.input_layer().ofmap_size(batch_size, word_size)
def total_ifmap_size(self, batch_size=1, word_size=1):
return self.input_layer().total_ofmap_size(batch_size, word_size)
def ops_per_neuron(self):
raise NotImplementedError(self.__class__.__name__)
def total_ops(self, batch_size=1):
return self.total_ofmap_size() * self.ops_per_neuron() * batch_size
def is_valid_padding_sifm(self, sifm):
if isinstance(sifm, int):
hifm = sifm
wifm = sifm
elif len(sifm) == 2:
hifm = sifm[0]
wifm = sifm[1]
else:
raise ValueError('Layer: sifm is invalid ({}), '
'needs to be either one integer or '
'a pair of integers'.format(sifm))
h_padding_rng = sorted((self.hofm * self.htrd, self.hifm))
w_padding_rng = sorted((self.wofm * self.wtrd, self.wifm))
return (h_padding_rng[0] <= hifm <= h_padding_rng[1]
and w_padding_rng[0] <= wifm <= w_padding_rng[1])
def __repr__(self):
return '{}({})'.format(
self.__class__.__name__,
', '.join([
'nofm={}'.format(repr(self.nofm)),
'sofm={}'.format(repr((self.hofm, self.wofm))),
'strd={}'.format(repr((self.htrd, self.wtrd)))]))
class InputLayer(Layer):
    """Dummy first layer of a network: it only produces the network input
    ofmaps, consumes nothing, and performs no ops."""

    @staticmethod
    def data_loops():
        """Only OFM data exist, looping over the OFM and BAT dimensions."""
        dls = [None] * de.NUM
        dls[de.OFM] = DataDimLoops(le.OFM, le.BAT)
        dls[de.FIL] = DataDimLoops()
        dls[de.IFM] = DataDimLoops()
        return tuple(dls)

    def input_layer(self):
        """An input layer has no predecessor."""
        return None

    def ops_per_neuron(self):
        """Producing the network input costs no ops."""
        return 0
class ConvLayer(Layer):
    """Convolutional layer: nofm 2D ofmaps computed from nifm ifmaps using a
    hfil x wfil 2D filter per (ifm, ofm) pair."""

    def __init__(self, nifm, nofm, sofm, sfil, strd=1):
        super(ConvLayer, self).__init__(nofm, sofm, strd=strd)
        if isinstance(sfil, int):
            hfil = wfil = sfil
        elif len(sfil) == 2:
            hfil, wfil = sfil
        else:
            raise ValueError('ConvLayer: sfil is invalid ({}), '
                             'needs to be either one integer or '
                             'a pair of integers'.format(sfil))
        self.hfil = hfil
        self.wfil = wfil
        # Required ifmap extent, assuming no padding.
        hifm = (self.hofm - 1) * self.htrd + self.hfil
        wifm = (self.wofm - 1) * self.wtrd + self.wfil
        self.inlayer = Layer(nifm, (hifm, wifm))

    @staticmethod
    def data_loops():
        """FIL loops over IFM/OFM; IFM over IFM/BAT; OFM over OFM/BAT."""
        dls = [None] * de.NUM
        dls[de.OFM] = DataDimLoops(le.OFM, le.BAT)
        dls[de.IFM] = DataDimLoops(le.IFM, le.BAT)
        dls[de.FIL] = DataDimLoops(le.IFM, le.OFM)
        return tuple(dls)

    def input_layer(self):
        return self.inlayer

    def ops_per_neuron(self):
        # One MAC per filter element per input channel.
        return self.nifm * self.hfil * self.wfil

    def filter_size(self, word_size=1):
        """Size of one 2D filter."""
        return word_size * self.hfil * self.wfil

    def total_filter_size(self, word_size=1):
        """Total size of all nifm x nofm filters."""
        return self.filter_size(word_size) * self.nifm * self.nofm

    def __repr__(self):
        args = ['nifm={}'.format(repr(self.nifm)),
                'nofm={}'.format(repr(self.nofm)),
                'sofm={}'.format(repr((self.hofm, self.wofm))),
                'sfil={}'.format(repr((self.hfil, self.wfil))),
                'strd={}'.format(repr((self.htrd, self.wtrd)))]
        return '{}({})'.format(self.__class__.__name__, ', '.join(args))
class FCLayer(ConvLayer):
    """Fully-connected layer: a ConvLayer whose ofmap is 1x1, so every filter
    covers the entire input fmap."""

    def __init__(self, nifm, nofm, sfil=1):
        super(FCLayer, self).__init__(nifm, nofm, 1, sfil)
        assert self.hofm == 1 and self.wofm == 1

    def __repr__(self):
        args = ['nifm={}'.format(repr(self.nifm)),
                'nofm={}'.format(repr(self.nofm)),
                'sfil={}'.format(repr((self.hfil, self.wfil)))]
        return '{}({})'.format(self.__class__.__name__, ', '.join(args))
class LocalRegionLayer(Layer):
    """Layer whose output neurons each depend only on a local input region:
    either across neighboring channels (nreg) or a spatial window (sreg),
    but not both. Base for pooling and element-wise layers."""

    def __init__(self, nofm, sofm, nreg, sreg, ntrd=1, strd=1):
        super(LocalRegionLayer, self).__init__(nofm, sofm, strd=strd)
        if isinstance(sreg, int):
            hreg = wreg = sreg
        elif len(sreg) == 2:
            hreg, wreg = sreg
        else:
            raise ValueError('LocalRegionLayer: sreg is invalid ({}), '
                             'needs to be either one integer or '
                             'a pair of integers'.format(sreg))
        if nreg > 1 and (hreg * wreg) > 1:
            raise ValueError('LocalRegionLayer: local region cannot be a mix '
                             'of both n ({}) and h & w ({}, {})'
                             .format(nreg, hreg, wreg))
        self.nreg = nreg
        self.hreg = hreg
        self.wreg = wreg
        self.ntrd = ntrd
        # Required ifmap extent, assuming no padding.
        nifm = self.nofm * self.ntrd
        hifm = (self.hofm - 1) * self.htrd + self.hreg
        wifm = (self.wofm - 1) * self.wtrd + self.wreg
        self.inlayer = Layer(nifm, (hifm, wifm))

    @staticmethod
    def data_loops():
        """No filters; IFM and OFM both loop over the OFM and BAT dims."""
        dls = [None] * de.NUM
        dls[de.OFM] = DataDimLoops(le.OFM, le.BAT)
        dls[de.IFM] = DataDimLoops(le.OFM, le.BAT)
        dls[de.FIL] = DataDimLoops()
        return tuple(dls)

    def input_layer(self):
        return self.inlayer

    def ops_per_neuron(self):
        # Assume one op per input element in the local region.
        return self.region_size()

    def region_size(self):
        """Number of input elements inside one local region."""
        return self.nreg * self.hreg * self.wreg

    def __repr__(self):
        args = ['nofm={}'.format(repr(self.nofm)),
                'sofm={}'.format(repr((self.hofm, self.wofm))),
                'nreg={}'.format(repr(self.nreg)),
                'sreg={}'.format(repr((self.hreg, self.wreg))),
                'ntrd={}'.format(repr(self.ntrd)),
                'strd={}'.format(repr((self.htrd, self.wtrd)))]
        return '{}({})'.format(self.__class__.__name__, ', '.join(args))
class PoolingLayer(LocalRegionLayer):
    """Pooling layer: a purely spatial local-region layer; the stride
    defaults to the region size (non-overlapping windows)."""

    def __init__(self, nofm, sofm, sreg, strd=None):
        effective_strd = sreg if strd is None else strd
        super(PoolingLayer, self).__init__(nofm, sofm, 1, sreg,
                                           ntrd=1, strd=effective_strd)
        assert self.nreg == 1
        assert self.ntrd == 1

    def __repr__(self):
        args = ['nofm={}'.format(repr(self.nofm)),
                'sofm={}'.format(repr((self.hofm, self.wofm))),
                'sreg={}'.format(repr((self.hreg, self.wreg))),
                'strd={}'.format(repr((self.htrd, self.wtrd)))]
        return '{}({})'.format(self.__class__.__name__, ', '.join(args))
class EltwiseLayer(LocalRegionLayer):
    """Element-wise layer: combines nreg input channels per output channel,
    with a 1x1 spatial region and unit stride."""

    def __init__(self, nofm, sofm, nreg):
        super(EltwiseLayer, self).__init__(nofm, sofm, nreg, 1,
                                           ntrd=nreg, strd=1)
        assert self.hreg == self.wreg == 1

    def __repr__(self):
        args = ['nofm={}'.format(repr(self.nofm)),
                'sofm={}'.format(repr((self.hofm, self.wofm))),
                'nreg={}'.format(repr(self.nreg))]
        return '{}({})'.format(self.__class__.__name__, ', '.join(args))
| true | true |
f731d0bb38308f507a437ba37cd55c59966a1f08 | 3,594 | py | Python | Ui_DDSMonitor.py | zhangyintai/Experiment_Manager | 800f95068a12b64d4a7e524fe406d5ef3b47f521 | [
"MIT"
] | null | null | null | Ui_DDSMonitor.py | zhangyintai/Experiment_Manager | 800f95068a12b64d4a7e524fe406d5ef3b47f521 | [
"MIT"
] | null | null | null | Ui_DDSMonitor.py | zhangyintai/Experiment_Manager | 800f95068a12b64d4a7e524fe406d5ef3b47f521 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Z:\Users\Yintai Zhang\Research\ExperimentManger_Test_2\DDSMonitor.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_DDSMonitor(object):
    """pyuic5-generated UI layout for the DDS Monitor dialog.

    NOTE(review): this class is generated from DDSMonitor.ui; edit the .ui
    file and regenerate with pyuic5 instead of changing this code by hand.
    """
    def setupUi(self, DDSMonitor):
        """Create the output browser, DDS selector, refresh-time spin box and
        Start/Stop/Current buttons on the given dialog."""
        DDSMonitor.setObjectName("DDSMonitor")
        DDSMonitor.resize(1179, 846)
        # Large read-only area where monitor output is rendered as rich text.
        self.Monitor_TextBrowser = QtWidgets.QTextBrowser(DDSMonitor)
        self.Monitor_TextBrowser.setGeometry(QtCore.QRect(20, 90, 1141, 731))
        self.Monitor_TextBrowser.setObjectName("Monitor_TextBrowser")
        self.ChooseDDS_ComboBox = QtWidgets.QComboBox(DDSMonitor)
        self.ChooseDDS_ComboBox.setGeometry(QtCore.QRect(100, 30, 181, 22))
        self.ChooseDDS_ComboBox.setObjectName("ChooseDDS_ComboBox")
        self.ChooseDDS_label = QtWidgets.QLabel(DDSMonitor)
        self.ChooseDDS_label.setGeometry(QtCore.QRect(30, 30, 61, 16))
        self.ChooseDDS_label.setObjectName("ChooseDDS_label")
        self.RefreshTime_label = QtWidgets.QLabel(DDSMonitor)
        self.RefreshTime_label.setGeometry(QtCore.QRect(310, 30, 71, 16))
        self.RefreshTime_label.setObjectName("RefreshTime_label")
        self.RefreshTime_SpinBox = QtWidgets.QDoubleSpinBox(DDSMonitor)
        self.RefreshTime_SpinBox.setGeometry(QtCore.QRect(390, 30, 101, 22))
        self.RefreshTime_SpinBox.setObjectName("RefreshTime_SpinBox")
        self.Start_Button = QtWidgets.QPushButton(DDSMonitor)
        self.Start_Button.setGeometry(QtCore.QRect(530, 30, 75, 23))
        self.Start_Button.setObjectName("Start_Button")
        self.Stop_Button = QtWidgets.QPushButton(DDSMonitor)
        self.Stop_Button.setGeometry(QtCore.QRect(620, 30, 75, 23))
        self.Stop_Button.setObjectName("Stop_Button")
        self.Current_Button = QtWidgets.QPushButton(DDSMonitor)
        self.Current_Button.setGeometry(QtCore.QRect(710, 30, 75, 23))
        self.Current_Button.setObjectName("Current_Button")
        self.retranslateUi(DDSMonitor)
        QtCore.QMetaObject.connectSlotsByName(DDSMonitor)
    def retranslateUi(self, DDSMonitor):
        """Assign all user-visible strings via the Qt translation layer."""
        _translate = QtCore.QCoreApplication.translate
        DDSMonitor.setWindowTitle(_translate("DDSMonitor", "DDS Monitor"))
        self.Monitor_TextBrowser.setHtml(_translate("DDSMonitor", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'MS Shell Dlg 2\'; font-size:8.25pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:16pt;\">---DDS Monitor---</span></p></body></html>"))
        self.ChooseDDS_label.setText(_translate("DDSMonitor", "Choose DDS"))
        self.RefreshTime_label.setText(_translate("DDSMonitor", "Refresh Time"))
        self.Start_Button.setText(_translate("DDSMonitor", "Start"))
        self.Stop_Button.setText(_translate("DDSMonitor", "Stop"))
        self.Current_Button.setText(_translate("DDSMonitor", "Current"))
if __name__ == "__main__":
    # Standalone preview: show the dialog with no application logic attached.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    DDSMonitor = QtWidgets.QDialog()
    ui = Ui_DDSMonitor()
    ui.setupUi(DDSMonitor)
    DDSMonitor.show()
    sys.exit(app.exec_())
| 53.641791 | 201 | 0.698664 |
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_DDSMonitor(object):
    """pyuic5-generated UI layout for the DDS Monitor dialog.

    NOTE(review): this class is generated from DDSMonitor.ui; edit the .ui
    file and regenerate with pyuic5 instead of changing this code by hand.
    """
    def setupUi(self, DDSMonitor):
        """Create the output browser, DDS selector, refresh-time spin box and
        Start/Stop/Current buttons on the given dialog."""
        DDSMonitor.setObjectName("DDSMonitor")
        DDSMonitor.resize(1179, 846)
        # Large read-only area where monitor output is rendered as rich text.
        self.Monitor_TextBrowser = QtWidgets.QTextBrowser(DDSMonitor)
        self.Monitor_TextBrowser.setGeometry(QtCore.QRect(20, 90, 1141, 731))
        self.Monitor_TextBrowser.setObjectName("Monitor_TextBrowser")
        self.ChooseDDS_ComboBox = QtWidgets.QComboBox(DDSMonitor)
        self.ChooseDDS_ComboBox.setGeometry(QtCore.QRect(100, 30, 181, 22))
        self.ChooseDDS_ComboBox.setObjectName("ChooseDDS_ComboBox")
        self.ChooseDDS_label = QtWidgets.QLabel(DDSMonitor)
        self.ChooseDDS_label.setGeometry(QtCore.QRect(30, 30, 61, 16))
        self.ChooseDDS_label.setObjectName("ChooseDDS_label")
        self.RefreshTime_label = QtWidgets.QLabel(DDSMonitor)
        self.RefreshTime_label.setGeometry(QtCore.QRect(310, 30, 71, 16))
        self.RefreshTime_label.setObjectName("RefreshTime_label")
        self.RefreshTime_SpinBox = QtWidgets.QDoubleSpinBox(DDSMonitor)
        self.RefreshTime_SpinBox.setGeometry(QtCore.QRect(390, 30, 101, 22))
        self.RefreshTime_SpinBox.setObjectName("RefreshTime_SpinBox")
        self.Start_Button = QtWidgets.QPushButton(DDSMonitor)
        self.Start_Button.setGeometry(QtCore.QRect(530, 30, 75, 23))
        self.Start_Button.setObjectName("Start_Button")
        self.Stop_Button = QtWidgets.QPushButton(DDSMonitor)
        self.Stop_Button.setGeometry(QtCore.QRect(620, 30, 75, 23))
        self.Stop_Button.setObjectName("Stop_Button")
        self.Current_Button = QtWidgets.QPushButton(DDSMonitor)
        self.Current_Button.setGeometry(QtCore.QRect(710, 30, 75, 23))
        self.Current_Button.setObjectName("Current_Button")
        self.retranslateUi(DDSMonitor)
        QtCore.QMetaObject.connectSlotsByName(DDSMonitor)
    def retranslateUi(self, DDSMonitor):
        """Assign all user-visible strings via the Qt translation layer."""
        _translate = QtCore.QCoreApplication.translate
        DDSMonitor.setWindowTitle(_translate("DDSMonitor", "DDS Monitor"))
        self.Monitor_TextBrowser.setHtml(_translate("DDSMonitor", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'MS Shell Dlg 2\'; font-size:8.25pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:16pt;\">---DDS Monitor---</span></p></body></html>"))
        self.ChooseDDS_label.setText(_translate("DDSMonitor", "Choose DDS"))
        self.RefreshTime_label.setText(_translate("DDSMonitor", "Refresh Time"))
        self.Start_Button.setText(_translate("DDSMonitor", "Start"))
        self.Stop_Button.setText(_translate("DDSMonitor", "Stop"))
        self.Current_Button.setText(_translate("DDSMonitor", "Current"))
if __name__ == "__main__":
    # Standalone preview: show the dialog with no application logic attached.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    DDSMonitor = QtWidgets.QDialog()
    ui = Ui_DDSMonitor()
    ui.setupUi(DDSMonitor)
    DDSMonitor.show()
    sys.exit(app.exec_())
| true | true |
f731d291e80cc193af3a536212a228f8cb28fbe4 | 13,723 | py | Python | selfdrive/monitoring/driver_monitor.py | flyingforyou/openpilot | 4667084dcfd9d5f01b39b3858fb8d5328840d5df | [
"MIT"
] | null | null | null | selfdrive/monitoring/driver_monitor.py | flyingforyou/openpilot | 4667084dcfd9d5f01b39b3858fb8d5328840d5df | [
"MIT"
] | null | null | null | selfdrive/monitoring/driver_monitor.py | flyingforyou/openpilot | 4667084dcfd9d5f01b39b3858fb8d5328840d5df | [
"MIT"
] | null | null | null | from math import atan2, sqrt
from cereal import car
from common.numpy_fast import interp
from common.realtime import DT_DMON
from selfdrive.hardware import TICI
from common.filter_simple import FirstOrderFilter
from common.stat_live import RunningStatFilter
EventName = car.CarEvent.EventName  # shorthand for the cereal event identifiers used below
# ******************************************************************************************
# NOTE: To fork maintainers.
# Disabling or nerfing safety features may get you and your users banned from our servers.
# We recommend that you do not change these numbers from the defaults.
# ******************************************************************************************
class DRIVER_MONITOR_SETTINGS():
  """Thresholds and timing constants for the driver-monitoring policy.

  Values differ between TICI and older hardware. Per upstream policy these
  safety numbers should not be changed from their defaults.
  """
  def __init__(self, TICI=TICI, DT_DMON=DT_DMON):
    self._DT_DMON = DT_DMON  # monitoring update period, seconds
    # Countdown budgets (s) until a terminal alert, and how long before
    # terminal the pre (green) and prompt (orange) stages begin.
    self._AWARENESS_TIME = 35.  # passive wheeltouch total timeout
    self._AWARENESS_PRE_TIME_TILL_TERMINAL = 12.
    self._AWARENESS_PROMPT_TIME_TILL_TERMINAL = 6.
    self._DISTRACTED_TIME = 11.  # active monitoring total timeout
    self._DISTRACTED_PRE_TIME_TILL_TERMINAL = 8.
    self._DISTRACTED_PROMPT_TIME_TILL_TERMINAL = 6.
    # Model-output probability thresholds.
    self._FACE_THRESHOLD = 0.5
    self._PARTIAL_FACE_THRESHOLD = 0.75 if TICI else 0.5
    self._EYE_THRESHOLD = 0.5
    self._SG_THRESHOLD = 0.5  # sunglasses probability
    self._BLINK_THRESHOLD = 0.88 if TICI else 0.5
    self._BLINK_THRESHOLD_SLACK = 0.98 if TICI else 0.65
    self._BLINK_THRESHOLD_STRICT = 0.88 if TICI else 0.5
    self._PITCH_WEIGHT = 1.175 if TICI else 1.35  # pitch matters a lot more
    self._POSESTD_THRESHOLD = 0.318 if TICI else 0.14
    self._E2E_POSE_THRESHOLD = 0.95 if TICI else 0.9
    self._E2E_EYES_THRESHOLD = 0.75
    self._METRIC_THRESHOLD = 0.5 if TICI else 0.4
    self._METRIC_THRESHOLD_SLACK = 0.6875 if TICI else 0.55
    self._METRIC_THRESHOLD_STRICT = 0.5 if TICI else 0.4
    self._PITCH_POS_ALLOWANCE = 0.12  # rad, to not be too sensitive on positive pitch
    self._PITCH_NATURAL_OFFSET = 0.02  # people don't seem to look straight when they drive relaxed, rather a bit up
    self._YAW_NATURAL_OFFSET = 0.08  # people don't seem to look straight when they drive relaxed, rather a bit to the right (center of car)
    self._HI_STD_FALLBACK_TIME = int(10 / self._DT_DMON)  # fall back to wheel touch if model is uncertain for 10s
    self._DISTRACTED_FILTER_TS = 0.25  # 0.6Hz
    self._POSE_CALIB_MIN_SPEED = 13  # 30 mph
    self._POSE_OFFSET_MIN_COUNT = int(60 / self._DT_DMON)  # valid data counts before calibration completes, 1min cumulative
    self._POSE_OFFSET_MAX_COUNT = int(360 / self._DT_DMON)  # stop deweighting new data after 6 min, aka "short term memory"
    self._RECOVERY_FACTOR_MAX = 5.  # relative to minus step change
    self._RECOVERY_FACTOR_MIN = 1.25  # relative to minus step change
    self._MAX_TERMINAL_ALERTS = 3  # not allowed to engage after 3 terminal alerts
    self._MAX_TERMINAL_DURATION = int(30 / self._DT_DMON)  # not allowed to engage after 30s of terminal alerts
# model output refers to center of cropped image, so need to apply the x displacement offset
RESIZED_FOCAL = 320.0  # focal length, in pixels, of the resized model input
H, W, FULL_W = 320, 160, 426  # crop height/width and full frame width (px) -- presumably the dmonitoring model input geometry; confirm against the model
class DistractedType:
  """Integer reason codes for why a driver counts as distracted."""
  NOT_DISTRACTED, BAD_POSE, BAD_BLINK = 0, 1, 2
def face_orientation_from_net(angles_desc, pos_desc, rpy_calib, is_rhd):
  """Convert net outputs (device-frame angles plus face position in the crop)
  into calibrated roll/pitch/yaw from the driver's perspective, where pitch
  is up and yaw is right."""
  pitch_net, yaw_net, roll_net = angles_desc
  # Map the face position (relative to crop center) back into the full frame
  # before computing the focal-plane correction angles.
  face_px = (pos_desc[0] + .5) * W - W + FULL_W
  face_py = (pos_desc[1] + .5) * H
  yaw_focal_angle = atan2(face_px - FULL_W // 2, RESIZED_FOCAL)
  pitch_focal_angle = atan2(face_py - H // 2, RESIZED_FOCAL)
  pitch = pitch_net + pitch_focal_angle
  yaw = -yaw_net + yaw_focal_angle
  # Apply calibration; roll is left uncalibrated.
  pitch -= rpy_calib[1]
  yaw -= rpy_calib[2] * (1 - 2 * int(is_rhd))  # lhd -> -=, rhd -> +=
  return roll_net, pitch, yaw
class DriverPose():
  """Latest head-pose estimate plus running calibration of the driver's
  natural pitch/yaw offsets."""
  def __init__(self, max_trackable):
    self.pitch_offseter = RunningStatFilter(max_trackable=max_trackable)
    self.yaw_offseter = RunningStatFilter(max_trackable=max_trackable)
    self.low_std = True
    self.cfactor = 1.
    self.yaw = 0.
    self.pitch = 0.
    self.roll = 0.
    self.yaw_std = 0.
    self.pitch_std = 0.
    self.roll_std = 0.
class DriverBlink():
  """Per-frame blink probabilities for each eye plus a policy scale factor."""
  def __init__(self):
    self.cfactor = 1.
    self.left_blink = 0.
    self.right_blink = 0.
class DriverStatus():
  """Core driver-monitoring state machine.

  Consumes per-frame driver-state model output, maintains an 'awareness'
  level in [-0.1, 1], and emits pre/prompt/terminal distraction events.
  """
  def __init__(self, rhd=False, settings=DRIVER_MONITOR_SETTINGS()):
    # NOTE(review): the default settings instance is evaluated once at class
    # definition time and shared by all DriverStatus instances.
    # init policy settings
    self.settings = settings
    # init driver status
    self.is_rhd_region = rhd
    self.pose = DriverPose(self.settings._POSE_OFFSET_MAX_COUNT)
    self.pose_calibrated = False
    self.blink = DriverBlink()
    self.awareness = 1.
    self.awareness_active = 1.
    self.awareness_passive = 1.
    self.driver_distracted = False
    self.driver_distraction_filter = FirstOrderFilter(0., self.settings._DISTRACTED_FILTER_TS, self.settings._DT_DMON)
    self.face_detected = False
    self.face_partial = False
    self.terminal_alert_cnt = 0
    self.terminal_time = 0
    self.step_change = 0.
    self.active_monitoring_mode = True
    self.is_model_uncertain = False
    self.hi_stds = 0  # consecutive-ish frames with a face but high pose std
    self.threshold_pre = self.settings._DISTRACTED_PRE_TIME_TILL_TERMINAL / self.settings._DISTRACTED_TIME
    self.threshold_prompt = self.settings._DISTRACTED_PROMPT_TIME_TILL_TERMINAL / self.settings._DISTRACTED_TIME
    self._set_timers(active_monitoring=True)

  def _set_timers(self, active_monitoring):
    """Switch the awareness countdown between active (face-based) and
    passive (wheel-touch) monitoring, saving/restoring each mode's
    awareness so switching modes cannot be used to reset the timer."""
    if self.active_monitoring_mode and self.awareness <= self.threshold_prompt:
      if active_monitoring:
        self.step_change = self.settings._DT_DMON / self.settings._DISTRACTED_TIME
      else:
        self.step_change = 0.
      return  # no exploit after orange alert
    elif self.awareness <= 0.:
      return
    if active_monitoring:
      # when falling back from passive mode to active mode, reset awareness to avoid false alert
      if not self.active_monitoring_mode:
        self.awareness_passive = self.awareness
        self.awareness = self.awareness_active
      self.threshold_pre = self.settings._DISTRACTED_PRE_TIME_TILL_TERMINAL / self.settings._DISTRACTED_TIME
      self.threshold_prompt = self.settings._DISTRACTED_PROMPT_TIME_TILL_TERMINAL / self.settings._DISTRACTED_TIME
      self.step_change = self.settings._DT_DMON / self.settings._DISTRACTED_TIME
      self.active_monitoring_mode = True
    else:
      if self.active_monitoring_mode:
        self.awareness_active = self.awareness
        self.awareness = self.awareness_passive
      self.threshold_pre = self.settings._AWARENESS_PRE_TIME_TILL_TERMINAL / self.settings._AWARENESS_TIME
      self.threshold_prompt = self.settings._AWARENESS_PROMPT_TIME_TILL_TERMINAL / self.settings._AWARENESS_TIME
      self.step_change = self.settings._DT_DMON / self.settings._AWARENESS_TIME
      self.active_monitoring_mode = False

  def _is_driver_distracted(self, pose, blink):
    """Classify one frame as NOT_DISTRACTED / BAD_POSE / BAD_BLINK from the
    head pose (relative to the calibrated or default natural offsets) and
    the averaged blink probabilities."""
    if not self.pose_calibrated:
      pitch_error = pose.pitch - self.settings._PITCH_NATURAL_OFFSET
      yaw_error = pose.yaw - self.settings._YAW_NATURAL_OFFSET
    else:
      pitch_error = pose.pitch - self.pose.pitch_offseter.filtered_stat.mean()
      yaw_error = pose.yaw - self.pose.yaw_offseter.filtered_stat.mean()
    # positive pitch allowance
    if pitch_error > 0.:
      pitch_error = max(pitch_error - self.settings._PITCH_POS_ALLOWANCE, 0.)
    pitch_error *= self.settings._PITCH_WEIGHT
    pose_metric = sqrt(yaw_error**2 + pitch_error**2)
    if pose_metric > self.settings._METRIC_THRESHOLD*pose.cfactor:
      return DistractedType.BAD_POSE
    elif (blink.left_blink + blink.right_blink)*0.5 > self.settings._BLINK_THRESHOLD*blink.cfactor:
      return DistractedType.BAD_BLINK
    else:
      return DistractedType.NOT_DISTRACTED

  def set_policy(self, model_data):
    """Scale the pose/blink thresholds between strict and slack based on the
    driving model's engaged probability."""
    ep = min(model_data.meta.engagedProb, 0.8) / 0.8
    self.pose.cfactor = interp(ep, [0, 0.5, 1],
                               [self.settings._METRIC_THRESHOLD_STRICT,
                                self.settings. _METRIC_THRESHOLD,
                                self.settings._METRIC_THRESHOLD_SLACK]) / self.settings._METRIC_THRESHOLD
    self.blink.cfactor = interp(ep, [0, 0.5, 1],
                                [self.settings._BLINK_THRESHOLD_STRICT,
                                 self.settings._BLINK_THRESHOLD,
                                 self.settings._BLINK_THRESHOLD_SLACK]) / self.settings._BLINK_THRESHOLD

  def get_pose(self, driver_state, cal_rpy, car_speed, op_engaged):
    """Ingest one driverState message: update face/pose/blink state, the
    distraction filter, and the natural pose-offset calibration."""
    if not all(len(x) > 0 for x in [driver_state.faceOrientation, driver_state.facePosition,
                                    driver_state.faceOrientationStd, driver_state.facePositionStd]):
      return
    self.face_partial = driver_state.partialFace > self.settings._PARTIAL_FACE_THRESHOLD
    self.face_detected = driver_state.faceProb > self.settings._FACE_THRESHOLD or self.face_partial
    self.pose.roll, self.pose.pitch, self.pose.yaw = face_orientation_from_net(driver_state.faceOrientation, driver_state.facePosition, cal_rpy, self.is_rhd_region)
    self.pose.pitch_std = driver_state.faceOrientationStd[0]
    self.pose.yaw_std = driver_state.faceOrientationStd[1]
    # self.pose.roll_std = driver_state.faceOrientationStd[2]
    model_std_max = max(self.pose.pitch_std, self.pose.yaw_std)
    self.pose.low_std = model_std_max < self.settings._POSESTD_THRESHOLD and not self.face_partial
    # Blink probabilities are gated on the eye being visible and no sunglasses.
    self.blink.left_blink = driver_state.leftBlinkProb * (driver_state.leftEyeProb > self.settings._EYE_THRESHOLD) * (driver_state.sunglassesProb < self.settings._SG_THRESHOLD)
    self.blink.right_blink = driver_state.rightBlinkProb * (driver_state.rightEyeProb > self.settings._EYE_THRESHOLD) * (driver_state.sunglassesProb < self.settings._SG_THRESHOLD)
    # Distracted either via the geometric pose/blink rule or the e2e heads.
    distracted_normal = self._is_driver_distracted(self.pose, self.blink) > 0 and \
                        driver_state.faceProb > self.settings._FACE_THRESHOLD and self.pose.low_std
    distracted_E2E = (driver_state.distractedPose > self.settings._E2E_POSE_THRESHOLD or driver_state.distractedEyes > self.settings._E2E_EYES_THRESHOLD) and \
                     (self.face_detected and not self.face_partial)
    self.driver_distracted = distracted_normal or distracted_E2E
    self.driver_distraction_filter.update(self.driver_distracted)
    # update offseter
    # only update when driver is actively driving the car above a certain speed
    if self.face_detected and car_speed > self.settings._POSE_CALIB_MIN_SPEED and self.pose.low_std and (not op_engaged or not self.driver_distracted):
      self.pose.pitch_offseter.push_and_update(self.pose.pitch)
      self.pose.yaw_offseter.push_and_update(self.pose.yaw)
    self.pose_calibrated = self.pose.pitch_offseter.filtered_stat.n > self.settings._POSE_OFFSET_MIN_COUNT and \
                           self.pose.yaw_offseter.filtered_stat.n > self.settings._POSE_OFFSET_MIN_COUNT
    self.is_model_uncertain = self.hi_stds > self.settings._HI_STD_FALLBACK_TIME
    self._set_timers(self.face_detected and not self.is_model_uncertain)
    if self.face_detected and not self.pose.low_std and not self.driver_distracted:
      self.hi_stds += 1
    elif self.face_detected and self.pose.low_std:
      self.hi_stds = 0

  def update(self, events, driver_engaged, ctrl_active, standstill):
    """Advance the awareness countdown for this frame and append the
    appropriate pre/prompt/terminal event, if any, to events."""
    if (driver_engaged and self.awareness > 0) or not ctrl_active:
      # reset only when on disengagement if red reached
      self.awareness = 1.
      self.awareness_active = 1.
      self.awareness_passive = 1.
      return
    driver_attentive = self.driver_distraction_filter.x < 0.37
    awareness_prev = self.awareness
    if (driver_attentive and self.face_detected and self.pose.low_std and self.awareness > 0):
      # only restore awareness when paying attention and alert is not red
      self.awareness = min(self.awareness + ((self.settings._RECOVERY_FACTOR_MAX-self.settings._RECOVERY_FACTOR_MIN)*(1.-self.awareness)+self.settings._RECOVERY_FACTOR_MIN)*self.step_change, 1.)
      if self.awareness == 1.:
        self.awareness_passive = min(self.awareness_passive + self.step_change, 1.)
      # don't display alert banner when awareness is recovering and has cleared orange
      if self.awareness > self.threshold_prompt:
        return
    standstill_exemption = standstill and self.awareness - self.step_change <= self.threshold_prompt
    certainly_distracted = self.driver_distraction_filter.x > 0.63 and self.driver_distracted and self.face_detected
    maybe_distracted = self.hi_stds > self.settings._HI_STD_FALLBACK_TIME or not self.face_detected
    if certainly_distracted or maybe_distracted:
      # should always be counting if distracted unless at standstill and reaching orange
      if not standstill_exemption:
        self.awareness = max(self.awareness - self.step_change, -0.1)
    alert = None
    if self.awareness <= 0.:
      # terminal red alert: disengagement required
      alert = EventName.driverDistracted if self.active_monitoring_mode else EventName.driverUnresponsive
      self.terminal_time += 1
      if awareness_prev > 0.:
        self.terminal_alert_cnt += 1
    elif self.awareness <= self.threshold_prompt:
      # prompt orange alert
      alert = EventName.promptDriverDistracted if self.active_monitoring_mode else EventName.promptDriverUnresponsive
    elif self.awareness <= self.threshold_pre:
      # pre green alert
      alert = EventName.preDriverDistracted if self.active_monitoring_mode else EventName.preDriverUnresponsive
    if alert is not None:
      events.add(alert)
| 49.010714 | 194 | 0.722218 | from math import atan2, sqrt
from cereal import car
from common.numpy_fast import interp
from common.realtime import DT_DMON
from selfdrive.hardware import TICI
from common.filter_simple import FirstOrderFilter
from common.stat_live import RunningStatFilter
EventName = car.CarEvent.EventName  # shorthand for the cereal event identifiers used below
class DRIVER_MONITOR_SETTINGS():
  """Thresholds and timing constants for the driver-monitoring policy.

  Values differ between TICI and older hardware. Per upstream policy these
  safety numbers should not be changed from their defaults.
  """
  def __init__(self, TICI=TICI, DT_DMON=DT_DMON):
    self._DT_DMON = DT_DMON  # monitoring update period, seconds
    # Countdown budgets (s) until a terminal alert, and how long before
    # terminal the pre (green) and prompt (orange) stages begin.
    self._AWARENESS_TIME = 35.  # passive wheeltouch total timeout
    self._AWARENESS_PRE_TIME_TILL_TERMINAL = 12.
    self._AWARENESS_PROMPT_TIME_TILL_TERMINAL = 6.
    self._DISTRACTED_TIME = 11.  # active monitoring total timeout
    self._DISTRACTED_PRE_TIME_TILL_TERMINAL = 8.
    self._DISTRACTED_PROMPT_TIME_TILL_TERMINAL = 6.
    # Model-output probability thresholds.
    self._FACE_THRESHOLD = 0.5
    self._PARTIAL_FACE_THRESHOLD = 0.75 if TICI else 0.5
    self._EYE_THRESHOLD = 0.5
    self._SG_THRESHOLD = 0.5  # sunglasses probability
    self._BLINK_THRESHOLD = 0.88 if TICI else 0.5
    self._BLINK_THRESHOLD_SLACK = 0.98 if TICI else 0.65
    self._BLINK_THRESHOLD_STRICT = 0.88 if TICI else 0.5
    self._PITCH_WEIGHT = 1.175 if TICI else 1.35  # pitch matters a lot more
    self._POSESTD_THRESHOLD = 0.318 if TICI else 0.14
    self._E2E_POSE_THRESHOLD = 0.95 if TICI else 0.9
    self._E2E_EYES_THRESHOLD = 0.75
    self._METRIC_THRESHOLD = 0.5 if TICI else 0.4
    self._METRIC_THRESHOLD_SLACK = 0.6875 if TICI else 0.55
    self._METRIC_THRESHOLD_STRICT = 0.5 if TICI else 0.4
    self._PITCH_POS_ALLOWANCE = 0.12  # rad, to not be too sensitive on positive pitch
    self._PITCH_NATURAL_OFFSET = 0.02  # people don't seem to look straight when they drive relaxed, rather a bit up
    self._YAW_NATURAL_OFFSET = 0.08  # people don't seem to look straight when they drive relaxed, rather a bit to the right (center of car)
    self._HI_STD_FALLBACK_TIME = int(10 / self._DT_DMON)  # fall back to wheel touch if model is uncertain for 10s
    self._DISTRACTED_FILTER_TS = 0.25  # 0.6Hz
    self._POSE_CALIB_MIN_SPEED = 13  # 30 mph
    self._POSE_OFFSET_MIN_COUNT = int(60 / self._DT_DMON)  # valid data counts before calibration completes, 1min cumulative
    self._POSE_OFFSET_MAX_COUNT = int(360 / self._DT_DMON)  # stop deweighting new data after 6 min, aka "short term memory"
    self._RECOVERY_FACTOR_MAX = 5.  # relative to minus step change
    self._RECOVERY_FACTOR_MIN = 1.25  # relative to minus step change
    self._MAX_TERMINAL_ALERTS = 3  # not allowed to engage after 3 terminal alerts
    self._MAX_TERMINAL_DURATION = int(30 / self._DT_DMON)  # not allowed to engage after 30s of terminal alerts
RESIZED_FOCAL = 320.0  # focal length, in pixels, of the resized model input
H, W, FULL_W = 320, 160, 426  # crop height/width and full frame width (px) -- presumably the dmonitoring model input geometry; confirm against the model
class DistractedType:
  """Integer reason codes for why a driver counts as distracted."""
  NOT_DISTRACTED, BAD_POSE, BAD_BLINK = 0, 1, 2
def face_orientation_from_net(angles_desc, pos_desc, rpy_calib, is_rhd):
  """Convert net outputs (device-frame angles plus face position in the crop)
  into calibrated roll/pitch/yaw from the driver's perspective, where pitch
  is up and yaw is right."""
  pitch_net, yaw_net, roll_net = angles_desc
  # Map the face position (relative to crop center) back into the full frame
  # before computing the focal-plane correction angles.
  face_px = (pos_desc[0] + .5) * W - W + FULL_W
  face_py = (pos_desc[1] + .5) * H
  yaw_focal_angle = atan2(face_px - FULL_W // 2, RESIZED_FOCAL)
  pitch_focal_angle = atan2(face_py - H // 2, RESIZED_FOCAL)
  pitch = pitch_net + pitch_focal_angle
  yaw = -yaw_net + yaw_focal_angle
  # Apply calibration; roll is left uncalibrated.
  pitch -= rpy_calib[1]
  yaw -= rpy_calib[2] * (1 - 2 * int(is_rhd))  # lhd -> -=, rhd -> +=
  return roll_net, pitch, yaw
class DriverPose():
  """Latest head-pose estimate plus running calibration of the driver's
  natural pitch/yaw offsets."""
  def __init__(self, max_trackable):
    self.pitch_offseter = RunningStatFilter(max_trackable=max_trackable)
    self.yaw_offseter = RunningStatFilter(max_trackable=max_trackable)
    self.low_std = True
    self.cfactor = 1.
    self.yaw = 0.
    self.pitch = 0.
    self.roll = 0.
    self.yaw_std = 0.
    self.pitch_std = 0.
    self.roll_std = 0.
class DriverBlink():
  """Per-frame blink probabilities for each eye plus a policy scale factor."""
  def __init__(self):
    self.cfactor = 1.
    self.left_blink = 0.
    self.right_blink = 0.
class DriverStatus():
def __init__(self, rhd=False, settings=DRIVER_MONITOR_SETTINGS()):
# init policy settings
self.settings = settings
# init driver status
self.is_rhd_region = rhd
self.pose = DriverPose(self.settings._POSE_OFFSET_MAX_COUNT)
self.pose_calibrated = False
self.blink = DriverBlink()
self.awareness = 1.
self.awareness_active = 1.
self.awareness_passive = 1.
self.driver_distracted = False
self.driver_distraction_filter = FirstOrderFilter(0., self.settings._DISTRACTED_FILTER_TS, self.settings._DT_DMON)
self.face_detected = False
self.face_partial = False
self.terminal_alert_cnt = 0
self.terminal_time = 0
self.step_change = 0.
self.active_monitoring_mode = True
self.is_model_uncertain = False
self.hi_stds = 0
self.threshold_pre = self.settings._DISTRACTED_PRE_TIME_TILL_TERMINAL / self.settings._DISTRACTED_TIME
self.threshold_prompt = self.settings._DISTRACTED_PROMPT_TIME_TILL_TERMINAL / self.settings._DISTRACTED_TIME
self._set_timers(active_monitoring=True)
def _set_timers(self, active_monitoring):
if self.active_monitoring_mode and self.awareness <= self.threshold_prompt:
if active_monitoring:
self.step_change = self.settings._DT_DMON / self.settings._DISTRACTED_TIME
else:
self.step_change = 0.
return # no exploit after orange alert
elif self.awareness <= 0.:
return
if active_monitoring:
# when falling back from passive mode to active mode, reset awareness to avoid false alert
if not self.active_monitoring_mode:
self.awareness_passive = self.awareness
self.awareness = self.awareness_active
self.threshold_pre = self.settings._DISTRACTED_PRE_TIME_TILL_TERMINAL / self.settings._DISTRACTED_TIME
self.threshold_prompt = self.settings._DISTRACTED_PROMPT_TIME_TILL_TERMINAL / self.settings._DISTRACTED_TIME
self.step_change = self.settings._DT_DMON / self.settings._DISTRACTED_TIME
self.active_monitoring_mode = True
else:
if self.active_monitoring_mode:
self.awareness_active = self.awareness
self.awareness = self.awareness_passive
self.threshold_pre = self.settings._AWARENESS_PRE_TIME_TILL_TERMINAL / self.settings._AWARENESS_TIME
self.threshold_prompt = self.settings._AWARENESS_PROMPT_TIME_TILL_TERMINAL / self.settings._AWARENESS_TIME
self.step_change = self.settings._DT_DMON / self.settings._AWARENESS_TIME
self.active_monitoring_mode = False
def _is_driver_distracted(self, pose, blink):
if not self.pose_calibrated:
pitch_error = pose.pitch - self.settings._PITCH_NATURAL_OFFSET
yaw_error = pose.yaw - self.settings._YAW_NATURAL_OFFSET
else:
pitch_error = pose.pitch - self.pose.pitch_offseter.filtered_stat.mean()
yaw_error = pose.yaw - self.pose.yaw_offseter.filtered_stat.mean()
# positive pitch allowance
if pitch_error > 0.:
pitch_error = max(pitch_error - self.settings._PITCH_POS_ALLOWANCE, 0.)
pitch_error *= self.settings._PITCH_WEIGHT
pose_metric = sqrt(yaw_error**2 + pitch_error**2)
if pose_metric > self.settings._METRIC_THRESHOLD*pose.cfactor:
return DistractedType.BAD_POSE
elif (blink.left_blink + blink.right_blink)*0.5 > self.settings._BLINK_THRESHOLD*blink.cfactor:
return DistractedType.BAD_BLINK
else:
return DistractedType.NOT_DISTRACTED
def set_policy(self, model_data):
ep = min(model_data.meta.engagedProb, 0.8) / 0.8
self.pose.cfactor = interp(ep, [0, 0.5, 1],
[self.settings._METRIC_THRESHOLD_STRICT,
self.settings. _METRIC_THRESHOLD,
self.settings._METRIC_THRESHOLD_SLACK]) / self.settings._METRIC_THRESHOLD
self.blink.cfactor = interp(ep, [0, 0.5, 1],
[self.settings._BLINK_THRESHOLD_STRICT,
self.settings._BLINK_THRESHOLD,
self.settings._BLINK_THRESHOLD_SLACK]) / self.settings._BLINK_THRESHOLD
def get_pose(self, driver_state, cal_rpy, car_speed, op_engaged):
if not all(len(x) > 0 for x in [driver_state.faceOrientation, driver_state.facePosition,
driver_state.faceOrientationStd, driver_state.facePositionStd]):
return
self.face_partial = driver_state.partialFace > self.settings._PARTIAL_FACE_THRESHOLD
self.face_detected = driver_state.faceProb > self.settings._FACE_THRESHOLD or self.face_partial
self.pose.roll, self.pose.pitch, self.pose.yaw = face_orientation_from_net(driver_state.faceOrientation, driver_state.facePosition, cal_rpy, self.is_rhd_region)
self.pose.pitch_std = driver_state.faceOrientationStd[0]
self.pose.yaw_std = driver_state.faceOrientationStd[1]
# self.pose.roll_std = driver_state.faceOrientationStd[2]
model_std_max = max(self.pose.pitch_std, self.pose.yaw_std)
self.pose.low_std = model_std_max < self.settings._POSESTD_THRESHOLD and not self.face_partial
self.blink.left_blink = driver_state.leftBlinkProb * (driver_state.leftEyeProb > self.settings._EYE_THRESHOLD) * (driver_state.sunglassesProb < self.settings._SG_THRESHOLD)
self.blink.right_blink = driver_state.rightBlinkProb * (driver_state.rightEyeProb > self.settings._EYE_THRESHOLD) * (driver_state.sunglassesProb < self.settings._SG_THRESHOLD)
distracted_normal = self._is_driver_distracted(self.pose, self.blink) > 0 and \
driver_state.faceProb > self.settings._FACE_THRESHOLD and self.pose.low_std
distracted_E2E = (driver_state.distractedPose > self.settings._E2E_POSE_THRESHOLD or driver_state.distractedEyes > self.settings._E2E_EYES_THRESHOLD) and \
(self.face_detected and not self.face_partial)
self.driver_distracted = distracted_normal or distracted_E2E
self.driver_distraction_filter.update(self.driver_distracted)
# update offseter
# only update when driver is actively driving the car above a certain speed
if self.face_detected and car_speed > self.settings._POSE_CALIB_MIN_SPEED and self.pose.low_std and (not op_engaged or not self.driver_distracted):
self.pose.pitch_offseter.push_and_update(self.pose.pitch)
self.pose.yaw_offseter.push_and_update(self.pose.yaw)
self.pose_calibrated = self.pose.pitch_offseter.filtered_stat.n > self.settings._POSE_OFFSET_MIN_COUNT and \
self.pose.yaw_offseter.filtered_stat.n > self.settings._POSE_OFFSET_MIN_COUNT
self.is_model_uncertain = self.hi_stds > self.settings._HI_STD_FALLBACK_TIME
self._set_timers(self.face_detected and not self.is_model_uncertain)
if self.face_detected and not self.pose.low_std and not self.driver_distracted:
self.hi_stds += 1
elif self.face_detected and self.pose.low_std:
self.hi_stds = 0
def update(self, events, driver_engaged, ctrl_active, standstill):
    """Advance the driver-awareness state machine by one frame.

    Decays/recovers `self.awareness` based on the distraction filter and
    face detection, and appends the appropriate distraction alert to
    *events*. Thresholds and factors come from `self.settings`.
    NOTE(review): statement order matters here (awareness is read before
    being decayed); do not reorder.
    """
    if (driver_engaged and self.awareness > 0) or not ctrl_active:
        # reset only when on disengagement if red reached
        self.awareness = 1.
        self.awareness_active = 1.
        self.awareness_passive = 1.
        return
    driver_attentive = self.driver_distraction_filter.x < 0.37
    awareness_prev = self.awareness
    if (driver_attentive and self.face_detected and self.pose.low_std and self.awareness > 0):
        # only restore awareness when paying attention and alert is not red
        self.awareness = min(self.awareness + ((self.settings._RECOVERY_FACTOR_MAX-self.settings._RECOVERY_FACTOR_MIN)*(1.-self.awareness)+self.settings._RECOVERY_FACTOR_MIN)*self.step_change, 1.)
        if self.awareness == 1.:
            self.awareness_passive = min(self.awareness_passive + self.step_change, 1.)
        # don't display alert banner when awareness is recovering and has cleared orange
        if self.awareness > self.threshold_prompt:
            return
    # At standstill, stop decaying once the prompt threshold would be crossed.
    standstill_exemption = standstill and self.awareness - self.step_change <= self.threshold_prompt
    certainly_distracted = self.driver_distraction_filter.x > 0.63 and self.driver_distracted and self.face_detected
    maybe_distracted = self.hi_stds > self.settings._HI_STD_FALLBACK_TIME or not self.face_detected
    if certainly_distracted or maybe_distracted:
        if not standstill_exemption:
            # Allow slight undershoot below 0 so the terminal alert latches.
            self.awareness = max(self.awareness - self.step_change, -0.1)
    alert = None
    if self.awareness <= 0.:
        # Terminal (red) alert; count transitions from positive awareness.
        alert = EventName.driverDistracted if self.active_monitoring_mode else EventName.driverUnresponsive
        self.terminal_time += 1
        if awareness_prev > 0.:
            self.terminal_alert_cnt += 1
    elif self.awareness <= self.threshold_prompt:
        alert = EventName.promptDriverDistracted if self.active_monitoring_mode else EventName.promptDriverUnresponsive
    elif self.awareness <= self.threshold_pre:
        alert = EventName.preDriverDistracted if self.active_monitoring_mode else EventName.preDriverUnresponsive
    if alert is not None:
        events.add(alert)
| true | true |
f731d2d82ea0d55223333c1605aa04d4f407f8b3 | 2,426 | py | Python | vivid/endpoint.py | haikuginger/vivid | 1ddfe293f0b5a95f3d52f5e737e85d98be1eaab8 | [
"MIT"
] | 4 | 2017-11-19T03:22:46.000Z | 2019-02-28T08:32:43.000Z | vivid/endpoint.py | haikuginger/vivid | 1ddfe293f0b5a95f3d52f5e737e85d98be1eaab8 | [
"MIT"
] | null | null | null | vivid/endpoint.py | haikuginger/vivid | 1ddfe293f0b5a95f3d52f5e737e85d98be1eaab8 | [
"MIT"
] | null | null | null | from vivid.common import ParametersMixin
class Endpoint(ParametersMixin, object):
    """Descriptor describing an attribute that performs an HTTP request.

    Accessing the attribute on a client instance yields a BoundEndpoint
    that ties this endpoint definition to that client.
    """

    def __init__(self, method, path, *parameters):
        """Record the HTTP method, the URL path component, and any
        endpoint-specific parameter definitions.
        """
        self.method, self.path, self.parameters = method, path, parameters
        super(Endpoint, self).__init__()

    def __get__(self, instance, _owner):
        """Descriptor protocol hook: bind this endpoint to *instance*."""
        return BoundEndpoint(self, instance)
class BoundEndpoint(object):
    """An Endpoint bound to a concrete API client instance.

    Instances behave like functions: calling one assembles a request
    dict from the endpoint definition plus per-call keyword arguments
    and hands it to the client's ``request`` method.
    """

    def __init__(self, endpoint, api_instance):
        """Store links to both the unbound endpoint and the API client
        instance we're bound to.
        """
        self.endpoint = endpoint
        self.api_instance = api_instance

    def __call__(self, **kwargs):
        """Handle being called like a function: build and send a request."""
        request = {
            'method': self.endpoint.method,
            'url': self.full_url
        }
        request_kwargs = self.get_base_kwargs()
        request_kwargs.update(kwargs)
        self.apply_parameters(request, request_kwargs)
        return self.api_instance.request(request)

    def apply_parameters(self, request, request_kwargs):
        """Apply client-level then endpoint-level parameters to the
        request, then run any follow-up work they registered.
        """
        self.api_instance.apply_parameters(request, request_kwargs)
        self.endpoint.apply_parameters(request, request_kwargs)
        self.finalize(request)

    def get_base_kwargs(self):
        """Return a copy of the client's base keyword arguments."""
        return self.api_instance.base_kwargs.copy()

    @property
    def full_url(self):
        """Complete URL for the bound endpoint: client root + path."""
        return self.api_instance.root + self.endpoint.path

    def finalize(self, request):
        """Pop and invoke any 'followup' callables queued on the request."""
        followup_work = request.pop('followup', {})
        for item in followup_work.values():
            item(request)
class Endpoint(ParametersMixin, object):
    """Descriptor describing an attribute that performs an HTTP request;
    accessing it on a client instance returns a BoundEndpoint.
    """
    def __init__(self, method, path, *parameters):
        # HTTP method, URL path component, and endpoint-specific parameters.
        self.method = method
        self.path = path
        self.parameters = parameters
        super(Endpoint, self).__init__()
    def __get__(self, instance, _owner):
        """Descriptor hook: bind this endpoint to the accessing instance."""
        return BoundEndpoint(self, instance)
class BoundEndpoint(object):
    """An Endpoint bound to a concrete API client instance; calling it
    builds a request dict and dispatches it via the client.
    """

    def __init__(self, endpoint, api_instance):
        """Keep references to the unbound endpoint and the bound client."""
        self.endpoint = endpoint
        self.api_instance = api_instance

    def __call__(self, **kwargs):
        """Assemble the request from base kwargs plus call kwargs and send it."""
        request = {
            'method': self.endpoint.method,
            'url': self.full_url
        }
        request_kwargs = self.get_base_kwargs()
        request_kwargs.update(kwargs)
        self.apply_parameters(request, request_kwargs)
        return self.api_instance.request(request)

    def apply_parameters(self, request, request_kwargs):
        """Apply client-level then endpoint-level parameters, then finalize."""
        self.api_instance.apply_parameters(request, request_kwargs)
        self.endpoint.apply_parameters(request, request_kwargs)
        self.finalize(request)

    def get_base_kwargs(self):
        """Return a copy of the client's base keyword arguments."""
        return self.api_instance.base_kwargs.copy()

    @property
    def full_url(self):
        """Complete URL for the bound endpoint: client root + path."""
        return self.api_instance.root + self.endpoint.path

    def finalize(self, request):
        """Pop and invoke any 'followup' callables queued on the request."""
        followup_work = request.pop('followup', {})
        for item in followup_work.values():
            item(request)
f731d330a352677e2aec29c1fc65fdf8568da45c | 1,747 | py | Python | neuralmonkey/processors/alignment.py | Simon-Will/neuralmonkey | b686a9d302cb10eda5fca991e1d7ee6b9e84b75a | [
"BSD-3-Clause"
] | 5 | 2017-04-24T21:10:03.000Z | 2019-05-22T13:19:35.000Z | neuralmonkey/processors/alignment.py | Simon-Will/neuralmonkey | b686a9d302cb10eda5fca991e1d7ee6b9e84b75a | [
"BSD-3-Clause"
] | null | null | null | neuralmonkey/processors/alignment.py | Simon-Will/neuralmonkey | b686a9d302cb10eda5fca991e1d7ee6b9e84b75a | [
"BSD-3-Clause"
] | 5 | 2017-04-25T01:36:44.000Z | 2019-12-13T15:04:03.000Z | import re
from typing import List
import numpy as np
# pylint: disable=too-few-public-methods
# Accepts either "-" or ":" between the source and target indices.
ID_SEP = re.compile(r"[-:]")


class WordAlignmentPreprocessor(object):
    """A preprocessor for word alignments in a text format.

    One of the following token formats is expected::

        s1-t1 s2-t2 ...
        s1:t1/w1 s2:t2/w2 ...

    where each `s` and `t` is the index of a word in the source and
    target sentence, respectively, and `w` is the corresponding weight.
    If the weight is not given, it is assumed to be 1. The separators
    `-` and `:` are interchangeable.

    The output of the preprocessor is an alignment matrix of the fixed
    shape (target_len, source_len) for each sentence.
    """

    def __init__(self, source_len, target_len, dtype=np.float32,
                 normalize=True, zero_based=True):
        self._source_len = source_len
        self._target_len = target_len
        self._dtype = dtype
        self._normalize = normalize
        self._zero_based = zero_based

    def __call__(self, sentence: List[str]):
        """Convert one sentence's alignment tokens to a weight matrix."""
        matrix = np.zeros((self._target_len, self._source_len), self._dtype)

        for token in sentence:
            pair, _, weight_str = token.partition("/")
            src, tgt = (int(part) for part in ID_SEP.split(pair))
            weight = float(weight_str) if weight_str else 1.
            if not self._zero_based:
                src -= 1
                tgt -= 1
            # Alignments outside the fixed matrix are silently dropped.
            if src < self._source_len and tgt < self._target_len:
                matrix[tgt][src] = weight

        if self._normalize:
            # Rows with no alignment divide 0/0 -> NaN; zero them out.
            with np.errstate(divide="ignore", invalid="ignore"):
                matrix /= matrix.sum(axis=1, keepdims=True)
                matrix[np.isnan(matrix)] = 0

        return matrix
| 30.12069 | 78 | 0.613051 | import re
from typing import List
import numpy as np
# Accepts either "-" or ":" between the source and target indices.
ID_SEP = re.compile(r"[-:]")
class WordAlignmentPreprocessor(object):
    """Preprocessor turning textual word-alignment tokens ("s-t" or
    "s:t/w") into a fixed-shape (target_len, source_len) weight matrix.
    Weights default to 1; row-normalization is optional.
    """
    def __init__(self, source_len, target_len, dtype=np.float32,
                 normalize=True, zero_based=True):
        self._source_len = source_len
        self._target_len = target_len
        self._dtype = dtype
        self._normalize = normalize
        self._zero_based = zero_based
    def __call__(self, sentence: List[str]):
        """Build the alignment matrix for one sentence's tokens."""
        result = np.zeros((self._target_len, self._source_len), self._dtype)
        for ali in sentence:
            ids, _, str_weight = ali.partition("/")
            i, j = [int(id_str) for id_str in ID_SEP.split(ids)]
            weight = float(str_weight) if str_weight else 1.
            if not self._zero_based:
                i -= 1
                j -= 1
            # Silently ignore alignments that fall outside the matrix.
            if i < self._source_len and j < self._target_len:
                result[j][i] = weight
        if self._normalize:
            with np.errstate(divide="ignore", invalid="ignore"):
                result /= result.sum(axis=1, keepdims=True)
                # Rows with no alignment divide 0/0 -> NaN; zero them out.
                result[np.isnan(result)] = 0
        return result
| true | true |
f731d397e48ee7a09bfdd1a865be4a4cff1b7f89 | 5,899 | py | Python | archive/script_check_quote.py | THS-on/keylime | bb904fc98d9674832e630542d211e71102873b4d | [
"Apache-2.0"
] | 192 | 2019-05-08T14:43:50.000Z | 2022-03-28T20:21:28.000Z | archive/script_check_quote.py | THS-on/keylime | bb904fc98d9674832e630542d211e71102873b4d | [
"Apache-2.0"
] | 694 | 2019-04-18T14:08:36.000Z | 2022-03-31T13:55:37.000Z | archive/script_check_quote.py | THS-on/keylime | bb904fc98d9674832e630542d211e71102873b4d | [
"Apache-2.0"
] | 97 | 2019-04-17T19:04:00.000Z | 2022-03-20T18:19:28.000Z | #!/usr/bin/env python
'''
DISTRIBUTION STATEMENT A. Approved for public release: distribution unlimited.
This material is based upon work supported by the Assistant Secretary of Defense for
Research and Engineering under Air Force Contract No. FA8721-05-C-0002 and/or
FA8702-15-D-0001. Any opinions, findings, conclusions or recommendations expressed in this
material are those of the author(s) and do not necessarily reflect the views of the
Assistant Secretary of Defense for Research and Engineering.
Copyright 2015 Massachusetts Institute of Technology.
The software/firmware is provided to you on an As-Is basis
Delivered to the US Government with Unlimited Rights, as defined in DFARS Part
252.227-7013 or 7014 (Feb 2014). Notwithstanding any copyright notice, U.S. Government
rights in this work are defined by DFARS 252.227-7013 or DFARS 252.227-7014 as detailed
above. Use of this work other than as specifically authorized by the U.S. Government may
violate any copyrights that exist in this work.
'''
import keylime.config
keylime.config.USE_CLIME=True
from keylime.tpm_quote import check_deep_quote, check_quote
from timeit import timeit
from timeit import default_timer as timer
import logging
import sys
import os
import tempfile
import subprocess
import base64
# Log to stdout; WARN level keeps the benchmark output readable.
logging.basicConfig(stream=sys.stdout, level=logging.WARN,format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger('test_check_quote')
# Number of quote-check iterations per benchmark run.
runs = 250
# Also benchmark the cLime-backed implementation.
test_clime=True
# PCR policy used when validating the quote (PCR index -> expected value).
tpm_policy = {'22':'ffffffffffffffffffffffffffffffffffffffff','16':'0000000000000000000000000000000000000000'}
# Canned test quote and AIK from keylime's config.
quote = keylime.config.TEST_QUOTE
aik=keylime.config.TEST_AIK
# now do it raw
# Pre-initialize so the finally-block cleanup cannot raise NameError when an
# early mkstemp()/open() call fails before these names are assigned (the
# original referenced them in `finally` without initialization).
quoteFile = None
aikFile = None
try:
    # write out quote
    qfd, qtemp = tempfile.mkstemp()
    quoteFile = open(qtemp,"wb")
    # NOTE(review): str.decode("zlib") is a Python 2 idiom; this script
    # appears to predate Python 3 support.
    quoteFile.write(base64.b64decode(quote).decode("zlib"))
    quoteFile.close()
    os.close(qfd)
    afd, atemp = tempfile.mkstemp()
    aikFile = open(atemp,"w")
    aikFile.write(aik)
    aikFile.close()
    os.close(afd)
    print('Checking quote raw %d times ... '%(runs), end='')
    # Shell loop so per-process spawn cost of `checkquote` is what gets timed.
    cmd = "for i in `seq 1 %d`; do checkquote -aik %s -quote %s -nonce %s > /dev/null; done"%(runs,aikFile.name, quoteFile.name, keylime.config.TEST_NONCE)
    start = timer()
    proc = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.STDOUT)
    proc.wait()
    end = timer()
    c = end - start
    print("DONE")
    print("check_quote(raw): %d runs, total time %f, avg %f ms per run" % (runs,c,c/runs*1000))
except Exception as e:
    logger.exception(e)
finally:
    # Best-effort removal of the temporary AIK/quote files.
    if aikFile is not None:
        os.remove(aikFile.name)
    if quoteFile is not None:
        os.remove(quoteFile.name)
# Benchmark the in-process check_quote() path with the TPM stubbed out.
print('Checking quote %s times ... '%(runs), end='')
keylime.config.STUB_TPM=True
keylime.config.USE_CLIME=False
setup = 'from __main__ import quote,aik,logger,tpm_policy, check_quote'
c = timeit('check_quote(None, None, quote,aik,logger,tpm_policy)', number=runs, setup=setup)
print('DONE')
print("check_quote: %d runs, total time %f, avg %f ms per run" % (runs,c,c/runs*1000))
# Optionally repeat with the cLime-accelerated implementation enabled.
if test_clime:
    keylime.config.USE_CLIME=True
    print('Checking quote %s times with cLime... '%(runs), end='')
    setup = 'from __main__ import quote,aik,logger,tpm_policy, check_quote'
    c = timeit('check_quote(None, None, quote,aik,logger,tpm_policy)', number=runs, setup=setup)
    print('DONE')
    print("check_quote(cLime): %d runs, total time %f, avg %f ms per run" % (runs,c,c/runs*1000))
print("\n================================\n\n")
# Deep-quote (vTPM) benchmark inputs: host and virtual PCR policies plus
# the virtual/host AIKs from keylime's canned test data.
keylime.config.USE_CLIME=True
tpm_policy = {'22':'ffffffffffffffffffffffffffffffffffffffff','16':'0000000000000000000000000000000000000000'}
vtpm_policy = {'23':'0000000000000000000000000000000000000000','16':'0000000000000000000000000000000000000000'}
quote = keylime.config.TEST_DQ
vaik=keylime.config.TEST_VAIK
haik=keylime.config.TEST_HAIK
# now do it raw
# Pre-initialize so the finally-block cleanup cannot raise NameError when an
# early mkstemp()/open() call fails before these names are assigned.
quoteFile = None
vAIKFile = None
hAIKFile = None
try:
    # write out quote
    qfd, qtemp = tempfile.mkstemp()
    quoteFile = open(qtemp,"wb")
    # NOTE(review): str.decode("zlib") is a Python 2 idiom.
    quoteFile.write(base64.b64decode(quote).decode("zlib"))
    quoteFile.close()
    os.close(qfd)
    afd, atemp = tempfile.mkstemp()
    vAIKFile = open(atemp,"w")
    vAIKFile.write(vaik)
    vAIKFile.close()
    os.close(afd)
    afd, atemp = tempfile.mkstemp()
    hAIKFile = open(atemp,"w")
    hAIKFile.write(haik)
    hAIKFile.close()
    os.close(afd)
    print('Checking deep quote raw %d times ... '%(runs), end='')
    # Shell loop so per-process spawn cost of `checkdeepquote` is timed.
    cmd = "for i in `seq 1 %d`; do checkdeepquote -aik %s -deepquote %s -nonce %s -vaik %s > /dev/null ; done"%(runs, hAIKFile.name, quoteFile.name, keylime.config.TEST_DQ_NONCE, vAIKFile.name)
    start = timer()
    proc = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.STDOUT)
    proc.wait()
    end = timer()
    c = end - start
    print("DONE")
    print("check_deep_quote (raw): %d runs, total time %f, avg %f ms per run" % (runs,c,c/runs*1000))
except Exception as e:
    logger.exception(e)
finally:
    # Best-effort removal of the temporary key/quote files.
    if vAIKFile is not None:
        os.remove(vAIKFile.name)
    if hAIKFile is not None:
        os.remove(hAIKFile.name)
    if quoteFile is not None:
        os.remove(quoteFile.name)
# Benchmark the in-process check_deep_quote() path with the TPM stubbed out.
print('Checking deep quote %s times ... '%(runs), end='')
keylime.config.STUB_TPM=True
setup = 'from __main__ import quote,vaik,haik,logger,vtpm_policy,tpm_policy, check_deep_quote'
c = timeit('check_deep_quote(None, None, quote,vaik,haik,logger,vtpm_policy,tpm_policy)', number=runs, setup=setup)
print('DONE')
print("check_deep_quote: %d runs, total time %f, avg %f ms per run" % (runs,c,c/runs*1000))
print("\n================================\n\n")
| 33.902299 | 193 | 0.689778 |
import keylime.config
keylime.config.USE_CLIME=True
from keylime.tpm_quote import check_deep_quote, check_quote
from timeit import timeit
from timeit import default_timer as timer
import logging
import sys
import os
import tempfile
import subprocess
import base64
# Log to stdout; WARN level keeps the benchmark output readable.
logging.basicConfig(stream=sys.stdout, level=logging.WARN,format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger('test_check_quote')
# Iterations per benchmark; also benchmark the cLime implementation.
runs = 250
test_clime=True
# PCR policy used when validating the quote (PCR index -> expected value).
tpm_policy = {'22':'ffffffffffffffffffffffffffffffffffffffff','16':'0000000000000000000000000000000000000000'}
quote = keylime.config.TEST_QUOTE
aik=keylime.config.TEST_AIK
# Pre-initialize so the finally-block cleanup cannot raise NameError when an
# early mkstemp()/open() call fails before these names are assigned.
quoteFile = None
aikFile = None
try:
    qfd, qtemp = tempfile.mkstemp()
    quoteFile = open(qtemp,"wb")
    # NOTE(review): str.decode("zlib") is a Python 2 idiom.
    quoteFile.write(base64.b64decode(quote).decode("zlib"))
    quoteFile.close()
    os.close(qfd)
    afd, atemp = tempfile.mkstemp()
    aikFile = open(atemp,"w")
    aikFile.write(aik)
    aikFile.close()
    os.close(afd)
    print('Checking quote raw %d times ... '%(runs), end='')
    cmd = "for i in `seq 1 %d`; do checkquote -aik %s -quote %s -nonce %s > /dev/null; done"%(runs,aikFile.name, quoteFile.name, keylime.config.TEST_NONCE)
    start = timer()
    proc = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.STDOUT)
    proc.wait()
    end = timer()
    c = end - start
    print("DONE")
    print("check_quote(raw): %d runs, total time %f, avg %f ms per run" % (runs,c,c/runs*1000))
except Exception as e:
    logger.exception(e)
finally:
    # Best-effort removal of the temporary AIK/quote files.
    if aikFile is not None:
        os.remove(aikFile.name)
    if quoteFile is not None:
        os.remove(quoteFile.name)
# Benchmark the in-process check_quote() path with the TPM stubbed out.
print('Checking quote %s times ... '%(runs), end='')
keylime.config.STUB_TPM=True
keylime.config.USE_CLIME=False
setup = 'from __main__ import quote,aik,logger,tpm_policy, check_quote'
c = timeit('check_quote(None, None, quote,aik,logger,tpm_policy)', number=runs, setup=setup)
print('DONE')
print("check_quote: %d runs, total time %f, avg %f ms per run" % (runs,c,c/runs*1000))
# Optionally repeat with the cLime-accelerated implementation.
if test_clime:
    keylime.config.USE_CLIME=True
    print('Checking quote %s times with cLime... '%(runs), end='')
    setup = 'from __main__ import quote,aik,logger,tpm_policy, check_quote'
    c = timeit('check_quote(None, None, quote,aik,logger,tpm_policy)', number=runs, setup=setup)
    print('DONE')
    print("check_quote(cLime): %d runs, total time %f, avg %f ms per run" % (runs,c,c/runs*1000))
print("\n================================\n\n")
# Deep-quote (vTPM) benchmark inputs: host/virtual PCR policies and AIKs.
keylime.config.USE_CLIME=True
tpm_policy = {'22':'ffffffffffffffffffffffffffffffffffffffff','16':'0000000000000000000000000000000000000000'}
vtpm_policy = {'23':'0000000000000000000000000000000000000000','16':'0000000000000000000000000000000000000000'}
quote = keylime.config.TEST_DQ
vaik=keylime.config.TEST_VAIK
haik=keylime.config.TEST_HAIK
# Pre-initialize so the finally-block cleanup cannot raise NameError when an
# early mkstemp()/open() call fails before these names are assigned.
quoteFile = None
vAIKFile = None
hAIKFile = None
try:
    qfd, qtemp = tempfile.mkstemp()
    quoteFile = open(qtemp,"wb")
    # NOTE(review): str.decode("zlib") is a Python 2 idiom.
    quoteFile.write(base64.b64decode(quote).decode("zlib"))
    quoteFile.close()
    os.close(qfd)
    afd, atemp = tempfile.mkstemp()
    vAIKFile = open(atemp,"w")
    vAIKFile.write(vaik)
    vAIKFile.close()
    os.close(afd)
    afd, atemp = tempfile.mkstemp()
    hAIKFile = open(atemp,"w")
    hAIKFile.write(haik)
    hAIKFile.close()
    os.close(afd)
    print('Checking deep quote raw %d times ... '%(runs), end='')
    cmd = "for i in `seq 1 %d`; do checkdeepquote -aik %s -deepquote %s -nonce %s -vaik %s > /dev/null ; done"%(runs, hAIKFile.name, quoteFile.name, keylime.config.TEST_DQ_NONCE, vAIKFile.name)
    start = timer()
    proc = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.STDOUT)
    proc.wait()
    end = timer()
    c = end - start
    print("DONE")
    print("check_deep_quote (raw): %d runs, total time %f, avg %f ms per run" % (runs,c,c/runs*1000))
except Exception as e:
    logger.exception(e)
finally:
    # Best-effort removal of the temporary key/quote files.
    if vAIKFile is not None:
        os.remove(vAIKFile.name)
    if hAIKFile is not None:
        os.remove(hAIKFile.name)
    if quoteFile is not None:
        os.remove(quoteFile.name)
# Benchmark the in-process check_deep_quote() path with the TPM stubbed out.
print('Checking deep quote %s times ... '%(runs), end='')
keylime.config.STUB_TPM=True
setup = 'from __main__ import quote,vaik,haik,logger,vtpm_policy,tpm_policy, check_deep_quote'
c = timeit('check_deep_quote(None, None, quote,vaik,haik,logger,vtpm_policy,tpm_policy)', number=runs, setup=setup)
print('DONE')
print("check_deep_quote: %d runs, total time %f, avg %f ms per run" % (runs,c,c/runs*1000))
print("\n================================\n\n")
| true | true |
f731d3eb4aae5e78db2bfaf2ea2d5f1e9832f7d4 | 9,108 | py | Python | repos/system_upgrade/el7toel8/actors/systemfacts/libraries/systemfacts.py | adka1408/leapp-repository | be5a9603b57f86c65d395ba6a02b860cacae0fb6 | [
"Apache-2.0"
] | null | null | null | repos/system_upgrade/el7toel8/actors/systemfacts/libraries/systemfacts.py | adka1408/leapp-repository | be5a9603b57f86c65d395ba6a02b860cacae0fb6 | [
"Apache-2.0"
] | null | null | null | repos/system_upgrade/el7toel8/actors/systemfacts/libraries/systemfacts.py | adka1408/leapp-repository | be5a9603b57f86c65d395ba6a02b860cacae0fb6 | [
"Apache-2.0"
] | null | null | null | import errno
import functools
import grp
import json
import logging
import os
import pwd
import re
from six.moves import configparser
import six
from leapp.libraries.stdlib import CalledProcessError, api, run
from leapp.models import SysctlVariablesFacts, SysctlVariable, ActiveKernelModulesFacts, ActiveKernelModule, \
KernelModuleParameter, UsersFacts, User, GroupsFacts, Group, RepositoriesFacts, RepositoryFile, RepositoryData, \
SELinuxFacts, fields, FirewallStatus, FirewallsFacts
def aslist(f):
    """Decorator that materializes a generator function's output as a list."""
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        return list(f(*args, **kwargs))
    return wrapper
def anyendswith(value, ends):
    ''' Check if `value` ends with one of the possible `ends` '''
    # str.endswith accepts a tuple of suffixes and checks them natively.
    return value.endswith(tuple(ends))
def anyhasprefix(value, prefixes):
    ''' Check if `value` starts with one of the possible `prefixes` '''
    # str.startswith accepts a tuple of prefixes and checks them natively.
    return value.startswith(tuple(prefixes))
@aslist
def _get_system_users():
    """Yield a User record for every entry in the system passwd database."""
    for entry in pwd.getpwall():
        yield User(
            name=entry.pw_name,
            uid=entry.pw_uid,
            gid=entry.pw_gid,
            home=entry.pw_dir
        )
def get_system_users_status():
    ''' Get a list of users from `/etc/passwd` '''
    # Wrap the generator output in the UsersFacts model for the message bus.
    return UsersFacts(users=_get_system_users())
@aslist
def _get_system_groups():
    """Yield a Group record for every entry in the system group database."""
    for entry in grp.getgrall():
        yield Group(
            name=entry.gr_name,
            gid=entry.gr_gid,
            members=entry.gr_mem
        )
def get_system_groups_status():
    ''' Get a list of groups from `/etc/groups` '''
    # Wrap the generator output in the GroupsFacts model for the message bus.
    return GroupsFacts(groups=_get_system_groups())
@aslist
def _get_active_kernel_modules(logger):
    """Yield an ActiveKernelModule for every module listed by `lsmod`,
    including its /sys-exposed parameters and (if available) its
    whitespace-stripped modinfo signature. Unreadable (write-only)
    parameters are logged via *logger* and skipped.
    """
    lines = run(['lsmod'], split=True)['stdout']
    # Skip the lsmod header row; first column of each row is the module name.
    for l in lines[1:]:
        name = l.split(' ')[0]
        # Read parameters of the given module as exposed by the
        # `/sys` VFS, if there are no parameters exposed we just
        # take the name of the module
        base_path = '/sys/module/{module}'.format(module=name)
        parameters_path = os.path.join(base_path, 'parameters')
        if not os.path.exists(parameters_path):
            yield ActiveKernelModule(filename=name, parameters=[])
            continue
        # Use `modinfo` to probe for signature information
        parameter_dict = {}
        try:
            signature = run(['modinfo', '-F', 'signature', name], split=False)['stdout']
        except CalledProcessError:
            signature = None
        signature_string = None
        if signature:
            # Remove whitespace from the signature string
            signature_string = re.sub(r"\s+", "", signature, flags=re.UNICODE)
        # Since we're using the `/sys` VFS we need to use `os.listdir()` to get
        # all the property names and then just read from all the listed paths
        parameters = sorted(os.listdir(parameters_path))
        for param in parameters:
            try:
                with open(os.path.join(parameters_path, param), mode='r') as fp:
                    parameter_dict[param] = fp.read().strip()
            except IOError as exc:
                # Some parameters are write-only, in that case we just log the name of parameter
                # and the module and continue
                if exc.errno in (errno.EACCES, errno.EPERM):
                    msg = 'Unable to read parameter "{param}" of kernel module "{name}"'
                    logger.warning(msg.format(param=param, name=name))
                else:
                    raise exc
        # Project the dictionary as a list of key values
        items = [
            KernelModuleParameter(name=k, value=v)
            for (k, v) in six.iteritems(parameter_dict)
        ]
        yield ActiveKernelModule(
            filename=name,
            parameters=items,
            signature=signature_string
        )
def get_active_kernel_modules_status(logger):
    ''' Get a list of active kernel modules '''
    # Wrap the generator output in the facts model for the message bus.
    return ActiveKernelModulesFacts(kernel_modules=_get_active_kernel_modules(logger))
@aslist
def _get_sysctls():
    """Yield SysctlVariable records for all stable `sysctl -a` variables.

    Variables whose names carry a known-unstable prefix (values that
    change between reads) are skipped so the output is diffable.
    """
    unstable = ('fs.dentry-state', 'fs.file-nr', 'fs.inode-nr',
                'fs.inode-state', 'kernel.random.uuid', 'kernel.random.entropy_avail',
                'kernel.ns_last_pid', 'net.netfilter.nf_conntrack_count',
                'net.netfilter.nf_conntrack_events', 'kernel.sched_domain.',
                'dev.cdrom.info', 'kernel.pty.nr')

    variables = []
    for sc in run(['sysctl', '-a'], split=True)['stdout']:
        name = sc.split(' ', 1)[0]
        # if the sysctl name has an unstable prefix, we skip
        if anyhasprefix(name, unstable):
            continue
        variables.append(sc)

    # sort our variables so they can be diffed directly when needed
    for var in sorted(variables):
        # Split on the first '=' only: sysctl *values* may themselves
        # contain '=' characters, which previously raised ValueError
        # during the two-value unpacking.
        name, value = (part.strip() for part in var.split('=', 1))
        yield SysctlVariable(
            name=name,
            value=value
        )
def get_sysctls_status():
    r''' Get a list of stable `sysctls` variables
    Note that some variables are inherently unstable and we need to blacklist
    them:
    diff -u <(sysctl -a 2>/dev/null | sort) <(sysctl -a 2>/dev/null | sort)\
    | grep -E '^\+[a-z]'\
    | cut -d' ' -f1\
    | cut -d+ -f2
    '''
    # Wrap the generator output in the facts model for the message bus.
    return SysctlVariablesFacts(sysctl_variables=_get_sysctls())
@aslist
def _get_repositories():
    """Yield a RepositoryFile (path + parsed repo sections) for every
    *.repo file under /etc/yum.repos.d/.
    """
    def asbool(x):
        # .repo files encode booleans as "0"/"1".
        return x == '1'
    @aslist
    def _parse(r):
        # Parse one .repo file; every INI section is one repository.
        with open(r, mode='r') as fp:
            cp = configparser.ConfigParser()
            # NOTE(review): readfp() is deprecated in favor of read_file().
            cp.readfp(fp)
            for section in cp.sections():
                prepared = {'repoid': section, 'additional_fields': {}}
                data = dict(cp.items(section))
                for key in data.keys():
                    if key in RepositoryData.fields:
                        # Coerce boolean-typed model fields from "0"/"1".
                        if isinstance(RepositoryData.fields[key], fields.Boolean):
                            data[key] = asbool(data[key])
                        prepared[key] = data[key]
                    else:
                        # Keys unknown to the model are kept as a JSON blob.
                        prepared['additional_fields'][key] = data[key]
                prepared['additional_fields'] = json.dumps(prepared['additional_fields'])
                yield RepositoryData(**prepared)
    repos = run(
        ['find', '/etc/yum.repos.d/', '-type', 'f', '-name', '*.repo'],
        split=True
    )['stdout']
    for repo in repos:
        yield RepositoryFile(file=repo, data=_parse(repo))
def get_repositories_status():
    ''' Get a basic information about YUM repositories installed in the system '''
    # Wrap the generator output in the facts model for the message bus.
    return RepositoriesFacts(repositories=_get_repositories())
def get_selinux_status():
    ''' Get SELinux status information '''
    # will be None if something went wrong or contain SELinuxFacts otherwise
    res = None
    try:
        # Imported lazily: libselinux-python may not be installed.
        import selinux
    except ImportError:
        api.report_error("SELinux Import Error", details="libselinux-python package must be installed.")
        return res
    outdata = dict({'enabled': selinux.is_selinux_enabled() == 1})
    outdata['mls_enabled'] = selinux.is_selinux_mls_enabled() == 1
    try:
        outdata['runtime_mode'] = "enforcing" if selinux.security_getenforce() == 1 else "permissive"
        # FIXME: check selinux_getenforcemode[0] (that should be return value of a underneath function)
        enforce_mode = selinux.selinux_getenforcemode()[1]
        if enforce_mode >= 0:
            outdata['static_mode'] = "enforcing" if enforce_mode == 1 else "permissive"
        else:
            outdata['static_mode'] = "disabled"
        outdata['policy'] = selinux.selinux_getpolicytype()[1]
    except OSError:
        # This happens when SELinux is disabled
        # [Errno 2] No such file or directory
        outdata['runtime_mode'] = 'permissive'
        outdata['static_mode'] = 'disabled'
        outdata['policy'] = 'targeted'
    res = SELinuxFacts(**outdata)
    return res
def get_firewalls_status():
    ''' Get firewalld status information '''
    logger = logging.getLogger('get_firewalld_status')
    def _get_firewall_status(service_name):
        # Probe systemd for whether the unit is currently active/enabled;
        # a non-zero exit from systemctl simply means "no".
        try:
            ret_list = run(['systemctl', 'is-active', service_name], split=True)['stdout']
            active = ret_list[0] == 'active'
        except CalledProcessError:
            active = False
            logger.debug('The %s service is likely not active', service_name)
        try:
            ret_list = run(['systemctl', 'is-enabled', service_name], split=True)['stdout']
            enabled = ret_list[0] == 'enabled'
        except CalledProcessError:
            enabled = False
            logger.debug('The %s service is likely not enabled nor running', service_name)
        return FirewallStatus(
            active=active,
            enabled=enabled,
        )
    return FirewallsFacts(
        firewalld=_get_firewall_status('firewalld'),
        iptables=_get_firewall_status('iptables'),
        ip6tables=_get_firewall_status('ip6tables'),
    )
| 33.485294 | 117 | 0.60639 | import errno
import functools
import grp
import json
import logging
import os
import pwd
import re
from six.moves import configparser
import six
from leapp.libraries.stdlib import CalledProcessError, api, run
from leapp.models import SysctlVariablesFacts, SysctlVariable, ActiveKernelModulesFacts, ActiveKernelModule, \
KernelModuleParameter, UsersFacts, User, GroupsFacts, Group, RepositoriesFacts, RepositoryFile, RepositoryData, \
SELinuxFacts, fields, FirewallStatus, FirewallsFacts
def aslist(f):
    """Decorator converting a generator function into one returning a list."""
    @functools.wraps(f)
    def inner(*args, **kwargs):
        return list(f(*args, **kwargs))
    return inner
def anyendswith(value, ends):
    """Return True if `value` ends with any of the suffixes in `ends`."""
    # str.endswith accepts a tuple of suffixes and checks them natively.
    return value.endswith(tuple(ends))
def anyhasprefix(value, prefixes):
    """Return True if `value` starts with any of the prefixes in `prefixes`."""
    # str.startswith accepts a tuple of prefixes and checks them natively.
    return value.startswith(tuple(prefixes))
@aslist
def _get_system_users():
    """Yield a User record for every entry in the system passwd database."""
    for p in pwd.getpwall():
        yield User(
            name=p.pw_name,
            uid=p.pw_uid,
            gid=p.pw_gid,
            home=p.pw_dir
        )
def get_system_users_status():
    """Collect all system users into a UsersFacts message."""
    return UsersFacts(users=_get_system_users())
@aslist
def _get_system_groups():
    """Yield a Group record for every entry in the system group database."""
    for g in grp.getgrall():
        yield Group(
            name=g.gr_name,
            gid=g.gr_gid,
            members=g.gr_mem
        )
def get_system_groups_status():
    """Collect all system groups into a GroupsFacts message."""
    return GroupsFacts(groups=_get_system_groups())
@aslist
def _get_active_kernel_modules(logger):
    """Yield an ActiveKernelModule for every module listed by `lsmod`,
    with its /sys-exposed parameters and (if available) its
    whitespace-stripped modinfo signature. Unreadable (write-only)
    parameters are logged via *logger* and skipped.
    """
    lines = run(['lsmod'], split=True)['stdout']
    # Skip the lsmod header row; first column of each row is the module name.
    for l in lines[1:]:
        name = l.split(' ')[0]
        base_path = '/sys/module/{module}'.format(module=name)
        parameters_path = os.path.join(base_path, 'parameters')
        if not os.path.exists(parameters_path):
            # Module exposes no parameters at all.
            yield ActiveKernelModule(filename=name, parameters=[])
            continue
        parameter_dict = {}
        try:
            signature = run(['modinfo', '-F', 'signature', name], split=False)['stdout']
        except CalledProcessError:
            signature = None
        signature_string = None
        if signature:
            # Strip all whitespace from the reported signature.
            signature_string = re.sub(r"\s+", "", signature, flags=re.UNICODE)
        # all the property names and then just read from all the listed paths
        parameters = sorted(os.listdir(parameters_path))
        for param in parameters:
            try:
                with open(os.path.join(parameters_path, param), mode='r') as fp:
                    parameter_dict[param] = fp.read().strip()
            except IOError as exc:
                # Some parameters are write-only, in that case we just log the name of parameter
                # and the module and continue
                if exc.errno in (errno.EACCES, errno.EPERM):
                    msg = 'Unable to read parameter "{param}" of kernel module "{name}"'
                    logger.warning(msg.format(param=param, name=name))
                else:
                    raise exc
        # Project the dictionary as a list of key values
        items = [
            KernelModuleParameter(name=k, value=v)
            for (k, v) in six.iteritems(parameter_dict)
        ]
        yield ActiveKernelModule(
            filename=name,
            parameters=items,
            signature=signature_string
        )
def get_active_kernel_modules_status(logger):
    """Collect loaded kernel modules into an ActiveKernelModulesFacts message."""
    return ActiveKernelModulesFacts(kernel_modules=_get_active_kernel_modules(logger))
@aslist
def _get_sysctls():
    """Yield SysctlVariable records for all stable `sysctl -a` variables.

    Variables whose names carry a known-unstable prefix (values that
    change between reads) are skipped so the output is diffable.
    """
    unstable = ('fs.dentry-state', 'fs.file-nr', 'fs.inode-nr',
                'fs.inode-state', 'kernel.random.uuid', 'kernel.random.entropy_avail',
                'kernel.ns_last_pid', 'net.netfilter.nf_conntrack_count',
                'net.netfilter.nf_conntrack_events', 'kernel.sched_domain.',
                'dev.cdrom.info', 'kernel.pty.nr')
    variables = []
    for sc in run(['sysctl', '-a'], split=True)['stdout']:
        name = sc.split(' ', 1)[0]
        # if the sysctl name has an unstable prefix, we skip
        if anyhasprefix(name, unstable):
            continue
        variables.append(sc)
    # sort our variables so they can be diffed directly when needed
    for var in sorted(variables):
        # Split on the first '=' only: sysctl *values* may themselves
        # contain '=' characters, which previously raised ValueError
        # during the two-value unpacking.
        name, value = (part.strip() for part in var.split('=', 1))
        yield SysctlVariable(
            name=name,
            value=value
        )
def get_sysctls_status():
    """Collect stable sysctl variables into a SysctlVariablesFacts message."""
    return SysctlVariablesFacts(sysctl_variables=_get_sysctls())
@aslist
def _get_repositories():
    """Yield a RepositoryFile (path + parsed repo sections) for every
    *.repo file under /etc/yum.repos.d/.
    """
    def asbool(x):
        # .repo files encode booleans as "0"/"1".
        return x == '1'
    @aslist
    def _parse(r):
        # Parse one .repo file; every INI section is one repository.
        with open(r, mode='r') as fp:
            cp = configparser.ConfigParser()
            # NOTE(review): readfp() is deprecated in favor of read_file().
            cp.readfp(fp)
            for section in cp.sections():
                prepared = {'repoid': section, 'additional_fields': {}}
                data = dict(cp.items(section))
                for key in data.keys():
                    if key in RepositoryData.fields:
                        # Coerce boolean-typed model fields from "0"/"1".
                        if isinstance(RepositoryData.fields[key], fields.Boolean):
                            data[key] = asbool(data[key])
                        prepared[key] = data[key]
                    else:
                        # Keys unknown to the model are kept as a JSON blob.
                        prepared['additional_fields'][key] = data[key]
                prepared['additional_fields'] = json.dumps(prepared['additional_fields'])
                yield RepositoryData(**prepared)
    repos = run(
        ['find', '/etc/yum.repos.d/', '-type', 'f', '-name', '*.repo'],
        split=True
    )['stdout']
    for repo in repos:
        yield RepositoryFile(file=repo, data=_parse(repo))
def get_repositories_status():
    """Collect YUM repository definitions into a RepositoriesFacts message."""
    return RepositoriesFacts(repositories=_get_repositories())
def get_selinux_status():
    """Return a SELinuxFacts message, or None if libselinux is missing."""
    # will be None if something went wrong or contain SELinuxFacts otherwise
    res = None
    try:
        # Imported lazily: libselinux-python may not be installed.
        import selinux
    except ImportError:
        api.report_error("SELinux Import Error", details="libselinux-python package must be installed.")
        return res
    outdata = dict({'enabled': selinux.is_selinux_enabled() == 1})
    outdata['mls_enabled'] = selinux.is_selinux_mls_enabled() == 1
    try:
        outdata['runtime_mode'] = "enforcing" if selinux.security_getenforce() == 1 else "permissive"
        # FIXME: check selinux_getenforcemode[0] (that should be return value of a underneath function)
        enforce_mode = selinux.selinux_getenforcemode()[1]
        if enforce_mode >= 0:
            outdata['static_mode'] = "enforcing" if enforce_mode == 1 else "permissive"
        else:
            outdata['static_mode'] = "disabled"
        outdata['policy'] = selinux.selinux_getpolicytype()[1]
    except OSError:
        # This happens when SELinux is disabled
        # [Errno 2] No such file or directory
        outdata['runtime_mode'] = 'permissive'
        outdata['static_mode'] = 'disabled'
        outdata['policy'] = 'targeted'
    res = SELinuxFacts(**outdata)
    return res
def get_firewalls_status():
    """Collect the active/enabled state of firewalld, iptables and ip6tables."""
    log = logging.getLogger('get_firewalld_status')

    def _probe(unit, verb, expected, message):
        # Run `systemctl <verb> <unit>` and compare the first output line with
        # the expected state; a non-zero exit status means "no".
        try:
            out = run(['systemctl', verb, unit], split=True)['stdout']
            return out[0] == expected
        except CalledProcessError:
            log.debug(message, unit)
            return False

    def _status_of(unit):
        return FirewallStatus(
            active=_probe(unit, 'is-active', 'active',
                          'The %s service is likely not active'),
            enabled=_probe(unit, 'is-enabled', 'enabled',
                           'The %s service is likely not enabled nor running'),
        )

    return FirewallsFacts(
        firewalld=_status_of('firewalld'),
        iptables=_status_of('iptables'),
        ip6tables=_status_of('ip6tables'),
    )
| true | true |
f731d41dd8efc8ab1fafc24f2e3c8da6336f6369 | 2,557 | py | Python | rusty/src/odom_pub.py | siddarth09/Rusty | 8d4c818869843ddd4d37f4e82f1203cd5566f8b7 | [
"MIT"
] | 3 | 2022-01-09T08:45:31.000Z | 2022-02-03T04:22:55.000Z | rusty/src/odom_pub.py | siddarth09/Rusty | 8d4c818869843ddd4d37f4e82f1203cd5566f8b7 | [
"MIT"
] | 2 | 2022-01-19T07:23:59.000Z | 2022-02-21T10:17:39.000Z | rusty/src/odom_pub.py | siddarth09/Rusty | 8d4c818869843ddd4d37f4e82f1203cd5566f8b7 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# This scripts takes the odom from TF published by Cartographer and publishes it as an individual topic. Only required when used with real robot.
import rospy
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Pose, Twist
import tf
#Node to handle calculating and publishing odometry
class odometry_publisher():
    """Re-publish the odom->base_link TF (e.g. from Cartographer) as a
    nav_msgs/Odometry message on the 'odom' topic.

    Velocities are estimated by finite differences between consecutive
    transform lookups taken at the loop rate.
    """
    def __init__(self):
        # initialize the node
        rospy.init_node('odom_publisher')
        # Transform listener
        self.listener = tf.TransformListener()
        # Odometry publisher
        self.odom_publisher = rospy.Publisher('odom', Odometry, queue_size=1)
        # Loop frequency in Hz; the nominal time step between samples is
        # 1.0 / sleep_freq seconds.
        self.sleep_freq = 10.0
        self.rate = rospy.Rate(self.sleep_freq)
        # Last observed pose, used for the finite-difference velocity estimate
        self.old_pose = self.get_init_pose()
        print(self.old_pose)
    def get_init_pose(self):
        """Block until the odom->base_link transform becomes available."""
        while True:
            try:
                old_pose = self.listener.lookupTransform('/odom', '/base_link', rospy.Time(0))
                break
            except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
                continue
        return old_pose
    def calc_velocity(self, new_pose):
        """Build an Odometry message from the previous and current pose.

        `new_pose` is the ((x, y, z), (qx, qy, qz, qw)) tuple returned by
        tf.TransformListener.lookupTransform.
        """
        o = Odometry()
        o.header.stamp = rospy.Time.now()
        o.header.frame_id = "odom"
        o.child_frame_id = "base_link"
        # Bug fix: velocity is displacement divided by the elapsed time
        # (1/sleep_freq seconds).  The original divided by the frequency
        # itself, making every velocity too small by a factor of freq^2.
        dt = 1.0 / self.sleep_freq
        # NOTE(review): the sign flip on y looks like a deliberate frame
        # conversion -- confirm against the consumer of this topic.
        o.twist.twist.linear.x = (new_pose[0][0] - self.old_pose[0][0]) / dt
        o.twist.twist.linear.y = -(new_pose[0][1] - self.old_pose[0][1]) / dt
        o.twist.twist.linear.z = (new_pose[0][2] - self.old_pose[0][2]) / dt
        # NOTE(review): differencing raw quaternion components is only a crude
        # approximation of angular velocity; kept from the original code.
        o.twist.twist.angular.x = (new_pose[1][0] - self.old_pose[1][0]) / dt
        o.twist.twist.angular.y = (new_pose[1][1] - self.old_pose[1][1]) / dt
        o.twist.twist.angular.z = (new_pose[1][2] - self.old_pose[1][2]) / dt
        o.pose.pose.position.x = new_pose[0][0]
        o.pose.pose.position.y = new_pose[0][1]
        o.pose.pose.position.z = new_pose[0][2]
        o.pose.pose.orientation.x = new_pose[1][0]
        o.pose.pose.orientation.y = new_pose[1][1]
        o.pose.pose.orientation.z = new_pose[1][2]
        # Bug fix: the w component was dropped, so the published pose carried
        # an invalid (w == 0) quaternion.
        o.pose.pose.orientation.w = new_pose[1][3]
        self.old_pose = new_pose
        return o
    def run(self):
        """Main loop: look up the transform, convert it, publish, sleep."""
        while not rospy.is_shutdown():
            try:
                # Get transform between frames
                new_pose = self.listener.lookupTransform('/odom', '/base_link', rospy.Time(0))
                # Process the transform obtained
                odom_data = self.calc_velocity(new_pose)
                # Publish odometry
                self.odom_publisher.publish(odom_data)
            except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
                continue
            self.rate.sleep()
# Script entry point: construct the node and spin until ROS shutdown.
if __name__ == '__main__':
    odometry_publisher().run()
| 30.440476 | 145 | 0.70043 |
import rospy
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Pose, Twist
import tf
class odometry_publisher():
def __init__(self):
rospy.init_node('odom_publisher')
self.listener = tf.TransformListener()
self.odom_publisher = rospy.Publisher('odom',Odometry,queue_size=1)
self.sleep_freq = 10.0
self.rate=rospy.Rate(self.sleep_freq)
self.old_pose = self.get_init_pose()
print(self.old_pose)
def get_init_pose(self):
while True:
try:
old_pose = self.listener.lookupTransform('/odom', '/base_link', rospy.Time(0))
break
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
continue
return old_pose
def calc_velocity(self,new_pose):
o = Odometry()
o.header.frame_id="odom"
o.child_frame_id="base_link"
o.twist.twist.linear.x = (new_pose[0][0] - self.old_pose[0][0])/self.sleep_freq
o.twist.twist.linear.y = -(new_pose[0][1] - self.old_pose[0][1])/self.sleep_freq
o.twist.twist.linear.z = (new_pose[0][2] - self.old_pose[0][2])/self.sleep_freq
o.twist.twist.angular.x = (new_pose[1][0] - self.old_pose[1][0])/self.sleep_freq
o.twist.twist.angular.y = (new_pose[1][1] - self.old_pose[1][1])/self.sleep_freq
o.twist.twist.angular.z = (new_pose[1][2] - self.old_pose[1][2])/self.sleep_freq
o.pose.pose.position.x = new_pose[0][0]
o.pose.pose.position.y = new_pose[0][1]
o.pose.pose.position.z = new_pose[0][2]
o.pose.pose.orientation.x = new_pose[1][0]
o.pose.pose.orientation.y = new_pose[1][1]
o.pose.pose.orientation.z = new_pose[1][2]
self.old_pose = new_pose
return o
def run(self):
while not rospy.is_shutdown():
try:
new_pose = self.listener.lookupTransform('/odom', '/base_link', rospy.Time(0))
odom_data = self.calc_velocity(new_pose)
self.odom_publisher.publish(odom_data)
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
continue
self.rate.sleep()
if __name__ == '__main__':
odometry_publisher().run()
| true | true |
f731d4c0ca2482c5e81b997cb3a82b35636c343b | 18,733 | py | Python | google/cloud/datastore_admin_v1/services/datastore_admin/transports/grpc_asyncio.py | freelancing-solutions/python-datastore | 160f7751db6a2a27cdf2a5232eadc538de8a8268 | [
"Apache-2.0"
] | null | null | null | google/cloud/datastore_admin_v1/services/datastore_admin/transports/grpc_asyncio.py | freelancing-solutions/python-datastore | 160f7751db6a2a27cdf2a5232eadc538de8a8268 | [
"Apache-2.0"
] | 1 | 2020-04-01T09:22:47.000Z | 2020-04-01T12:02:43.000Z | google/cloud/datastore_admin_v1/services/datastore_admin/transports/grpc_asyncio.py | freelancing-solutions/python-datastore | 160f7751db6a2a27cdf2a5232eadc538de8a8268 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple
from google.api_core import gapic_v1 # type: ignore
from google.api_core import grpc_helpers_async # type: ignore
from google.api_core import operations_v1 # type: ignore
from google import auth # type: ignore
from google.auth import credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.datastore_admin_v1.types import datastore_admin
from google.cloud.datastore_admin_v1.types import index
from google.longrunning import operations_pb2 as operations # type: ignore
from .base import DatastoreAdminTransport, DEFAULT_CLIENT_INFO
from .grpc import DatastoreAdminGrpcTransport
class DatastoreAdminGrpcAsyncIOTransport(DatastoreAdminTransport):
    """gRPC AsyncIO backend transport for DatastoreAdmin.
    Google Cloud Datastore Admin API
    The Datastore Admin API provides several admin services for
    Cloud Datastore.
    -----------------------------------------------------------------------------
    ## Concepts
    Project, namespace, kind, and entity as defined in the Google
    Cloud Datastore API.
    Operation: An Operation represents work being performed in the
    background.
    EntityFilter: Allows specifying a subset of entities in a
    project. This is specified as a combination of kinds and
    namespaces (either or both of which may be all).
    -----------------------------------------------------------------------------
    ## Services
    # Export/Import
    The Export/Import service provides the ability to copy all or a
    subset of entities to/from Google Cloud Storage.
    Exported data may be imported into Cloud Datastore for any
    Google Cloud Platform project. It is not restricted to the
    export source project. It is possible to export from one project
    and then import into another.
    Exported data can also be loaded into Google BigQuery for
    analysis.
    Exports and imports are performed asynchronously. An Operation
    resource is created for each export/import. The state (including
    any errors encountered) of the export/import may be queried via
    the Operation resource.
    # Index
    The index service manages Cloud Datastore composite indexes.
    Index creation and deletion are performed asynchronously. An
    Operation resource is created for each such asynchronous
    operation. The state of the operation (including any errors
    encountered) may be queried via the Operation resource.
    # Operation
    The Operations collection provides a record of actions performed
    for the specified project (including any operations in
    progress). Operations are not created directly but through calls
    on other collections or resources.
    An operation that is not yet done may be cancelled. The request
    to cancel is asynchronous and the operation may continue to run
    for some time after the request to cancel is made.
    An operation that is done may be deleted so that it is no longer
    listed as part of the Operation collection.
    ListOperations returns all pending operations, but not completed
    operations.
    Operations are created by service DatastoreAdmin,
    but are accessed via service google.longrunning.Operations.
    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.
    It sends protocol buffers over the wire using gRPC (which is built on
    top of HTTP/2); the ``grpcio`` package must be installed.
    """
    _grpc_channel: aio.Channel
    _stubs: Dict[str, Callable] = {}
    @classmethod
    def create_channel(
        cls,
        host: str = "datastore.googleapis.com",
        credentials: credentials.Credentials = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        **kwargs,
    ) -> aio.Channel:
        """Create and return a gRPC AsyncIO channel object.
        Args:
            address (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.
        Returns:
            aio.Channel: A gRPC AsyncIO channel object.
        """
        scopes = scopes or cls.AUTH_SCOPES
        return grpc_helpers_async.create_channel(
            host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            **kwargs,
        )
    def __init__(
        self,
        *,
        host: str = "datastore.googleapis.com",
        credentials: credentials.Credentials = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        channel: aio.Channel = None,
        api_mtls_endpoint: str = None,
        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
        ssl_channel_credentials: grpc.ChannelCredentials = None,
        quota_project_id=None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]): The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            channel (Optional[aio.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for grpc channel. It is ignored if ``channel`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
        Raises:
            google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
                creation failed for any reason.
            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
                and ``credentials_file`` are passed.
        """
        self._ssl_channel_credentials = ssl_channel_credentials
        if channel:
            # Sanity check: Ensure that channel and credentials are not both
            # provided.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None
        elif api_mtls_endpoint:
            warnings.warn(
                "api_mtls_endpoint and client_cert_source are deprecated",
                DeprecationWarning,
            )
            host = (
                api_mtls_endpoint
                if ":" in api_mtls_endpoint
                else api_mtls_endpoint + ":443"
            )
            if credentials is None:
                credentials, _ = auth.default(
                    scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
                )
            # Create SSL credentials with client_cert_source or application
            # default SSL credentials.
            if client_cert_source:
                cert, key = client_cert_source()
                ssl_credentials = grpc.ssl_channel_credentials(
                    certificate_chain=cert, private_key=key
                )
            else:
                ssl_credentials = SslCredentials().ssl_credentials
            # create a new channel. The provided one is ignored.
            self._grpc_channel = type(self).create_channel(
                host,
                credentials=credentials,
                credentials_file=credentials_file,
                ssl_credentials=ssl_credentials,
                scopes=scopes or self.AUTH_SCOPES,
                quota_project_id=quota_project_id,
            )
            self._ssl_channel_credentials = ssl_credentials
        else:
            host = host if ":" in host else host + ":443"
            if credentials is None:
                credentials, _ = auth.default(
                    scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
                )
            # create a new channel. The provided one is ignored.
            self._grpc_channel = type(self).create_channel(
                host,
                credentials=credentials,
                credentials_file=credentials_file,
                ssl_credentials=ssl_channel_credentials,
                scopes=scopes or self.AUTH_SCOPES,
                quota_project_id=quota_project_id,
            )
        # Run the base constructor.
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes or self.AUTH_SCOPES,
            quota_project_id=quota_project_id,
            client_info=client_info,
        )
        self._stubs = {}
    @property
    def grpc_channel(self) -> aio.Channel:
        """Create the channel designed to connect to this service.
        This property caches on the instance; repeated calls return
        the same channel.
        """
        # Return the channel from cache.
        return self._grpc_channel
    @property
    def operations_client(self) -> operations_v1.OperationsAsyncClient:
        """Create the client designed to process long-running operations.
        This property caches on the instance; repeated calls return the same
        client.
        """
        # Sanity check: Only create a new client if we do not already have one.
        if "operations_client" not in self.__dict__:
            self.__dict__["operations_client"] = operations_v1.OperationsAsyncClient(
                self.grpc_channel
            )
        # Return the client from cache.
        return self.__dict__["operations_client"]
    @property
    def export_entities(
        self,
    ) -> Callable[
        [datastore_admin.ExportEntitiesRequest], Awaitable[operations.Operation]
    ]:
        r"""Return a callable for the export entities method over gRPC.
        Exports a copy of all or a subset of entities from
        Google Cloud Datastore to another storage system, such
        as Google Cloud Storage. Recent updates to entities may
        not be reflected in the export. The export occurs in the
        background and its progress can be monitored and managed
        via the Operation resource that is created. The output
        of an export may only be used once the associated
        operation is done. If an export operation is cancelled
        before completion it may leave partial data behind in
        Google Cloud Storage.
        Returns:
            Callable[[~.ExportEntitiesRequest],
                    Awaitable[~.Operation]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "export_entities" not in self._stubs:
            self._stubs["export_entities"] = self.grpc_channel.unary_unary(
                "/google.datastore.admin.v1.DatastoreAdmin/ExportEntities",
                request_serializer=datastore_admin.ExportEntitiesRequest.serialize,
                response_deserializer=operations.Operation.FromString,
            )
        return self._stubs["export_entities"]
    @property
    def import_entities(
        self,
    ) -> Callable[
        [datastore_admin.ImportEntitiesRequest], Awaitable[operations.Operation]
    ]:
        r"""Return a callable for the import entities method over gRPC.
        Imports entities into Google Cloud Datastore.
        Existing entities with the same key are overwritten. The
        import occurs in the background and its progress can be
        monitored and managed via the Operation resource that is
        created. If an ImportEntities operation is cancelled, it
        is possible that a subset of the data has already been
        imported to Cloud Datastore.
        Returns:
            Callable[[~.ImportEntitiesRequest],
                    Awaitable[~.Operation]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "import_entities" not in self._stubs:
            self._stubs["import_entities"] = self.grpc_channel.unary_unary(
                "/google.datastore.admin.v1.DatastoreAdmin/ImportEntities",
                request_serializer=datastore_admin.ImportEntitiesRequest.serialize,
                response_deserializer=operations.Operation.FromString,
            )
        return self._stubs["import_entities"]
    @property
    def get_index(
        self,
    ) -> Callable[[datastore_admin.GetIndexRequest], Awaitable[index.Index]]:
        r"""Return a callable for the get index method over gRPC.
        Gets an index.
        Returns:
            Callable[[~.GetIndexRequest],
                    Awaitable[~.Index]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "get_index" not in self._stubs:
            self._stubs["get_index"] = self.grpc_channel.unary_unary(
                "/google.datastore.admin.v1.DatastoreAdmin/GetIndex",
                request_serializer=datastore_admin.GetIndexRequest.serialize,
                response_deserializer=index.Index.deserialize,
            )
        return self._stubs["get_index"]
    @property
    def list_indexes(
        self,
    ) -> Callable[
        [datastore_admin.ListIndexesRequest],
        Awaitable[datastore_admin.ListIndexesResponse],
    ]:
        r"""Return a callable for the list indexes method over gRPC.
        Lists the indexes that match the specified filters.
        Datastore uses an eventually consistent query to fetch
        the list of indexes and may occasionally return stale
        results.
        Returns:
            Callable[[~.ListIndexesRequest],
                    Awaitable[~.ListIndexesResponse]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "list_indexes" not in self._stubs:
            self._stubs["list_indexes"] = self.grpc_channel.unary_unary(
                "/google.datastore.admin.v1.DatastoreAdmin/ListIndexes",
                request_serializer=datastore_admin.ListIndexesRequest.serialize,
                response_deserializer=datastore_admin.ListIndexesResponse.deserialize,
            )
        return self._stubs["list_indexes"]
# Public re-export surface of this module.
__all__ = ("DatastoreAdminGrpcAsyncIOTransport",)
| 42.286682 | 87 | 0.644745 |
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple
from google.api_core import gapic_v1
from google.api_core import grpc_helpers_async
from google.api_core import operations_v1
from google import auth
from google.auth import credentials
from google.auth.transport.grpc import SslCredentials
import grpc
from grpc.experimental import aio
from google.cloud.datastore_admin_v1.types import datastore_admin
from google.cloud.datastore_admin_v1.types import index
from google.longrunning import operations_pb2 as operations
from .base import DatastoreAdminTransport, DEFAULT_CLIENT_INFO
from .grpc import DatastoreAdminGrpcTransport
class DatastoreAdminGrpcAsyncIOTransport(DatastoreAdminTransport):
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "datastore.googleapis.com",
credentials: credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
scopes = scopes or cls.AUTH_SCOPES
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
**kwargs,
)
def __init__(
self,
*,
host: str = "datastore.googleapis.com",
credentials: credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
self._ssl_channel_credentials = ssl_channel_credentials
if channel:
credentials = False
self._grpc_channel = channel
self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn(
"api_mtls_endpoint and client_cert_source are deprecated",
DeprecationWarning,
)
host = (
api_mtls_endpoint
if ":" in api_mtls_endpoint
else api_mtls_endpoint + ":443"
)
if credentials is None:
credentials, _ = auth.default(
scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
)
if client_cert_source:
cert, key = client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
ssl_credentials = SslCredentials().ssl_credentials
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
)
self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
if credentials is None:
credentials, _ = auth.default(
scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
)
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_channel_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
)
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
client_info=client_info,
)
self._stubs = {}
@property
def grpc_channel(self) -> aio.Channel:
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsAsyncClient:
if "operations_client" not in self.__dict__:
self.__dict__["operations_client"] = operations_v1.OperationsAsyncClient(
self.grpc_channel
)
return self.__dict__["operations_client"]
@property
def export_entities(
self,
) -> Callable[
[datastore_admin.ExportEntitiesRequest], Awaitable[operations.Operation]
]:
if "export_entities" not in self._stubs:
self._stubs["export_entities"] = self.grpc_channel.unary_unary(
"/google.datastore.admin.v1.DatastoreAdmin/ExportEntities",
request_serializer=datastore_admin.ExportEntitiesRequest.serialize,
response_deserializer=operations.Operation.FromString,
)
return self._stubs["export_entities"]
@property
def import_entities(
self,
) -> Callable[
[datastore_admin.ImportEntitiesRequest], Awaitable[operations.Operation]
]:
if "import_entities" not in self._stubs:
self._stubs["import_entities"] = self.grpc_channel.unary_unary(
"/google.datastore.admin.v1.DatastoreAdmin/ImportEntities",
request_serializer=datastore_admin.ImportEntitiesRequest.serialize,
response_deserializer=operations.Operation.FromString,
)
return self._stubs["import_entities"]
@property
def get_index(
self,
) -> Callable[[datastore_admin.GetIndexRequest], Awaitable[index.Index]]:
if "get_index" not in self._stubs:
self._stubs["get_index"] = self.grpc_channel.unary_unary(
"/google.datastore.admin.v1.DatastoreAdmin/GetIndex",
request_serializer=datastore_admin.GetIndexRequest.serialize,
response_deserializer=index.Index.deserialize,
)
return self._stubs["get_index"]
@property
def list_indexes(
self,
) -> Callable[
[datastore_admin.ListIndexesRequest],
Awaitable[datastore_admin.ListIndexesResponse],
]:
if "list_indexes" not in self._stubs:
self._stubs["list_indexes"] = self.grpc_channel.unary_unary(
"/google.datastore.admin.v1.DatastoreAdmin/ListIndexes",
request_serializer=datastore_admin.ListIndexesRequest.serialize,
response_deserializer=datastore_admin.ListIndexesResponse.deserialize,
)
return self._stubs["list_indexes"]
__all__ = ("DatastoreAdminGrpcAsyncIOTransport",)
| true | true |
f731d7163a949e08b78c987d882201df17fa56fc | 6,194 | py | Python | keras/feature_column/dense_features_v2.py | itsraina/keras | 5e9376b5b94b6fb445dd52dbfafbc4e95bff5e35 | [
"Apache-2.0"
] | null | null | null | keras/feature_column/dense_features_v2.py | itsraina/keras | 5e9376b5b94b6fb445dd52dbfafbc4e95bff5e35 | [
"Apache-2.0"
] | null | null | null | keras/feature_column/dense_features_v2.py | itsraina/keras | 5e9376b5b94b6fb445dd52dbfafbc4e95bff5e35 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A layer that produces a dense `Tensor` based on given `feature_columns`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from keras.feature_column import base_feature_layer as kfc
from keras.feature_column import dense_features
from keras.utils import tf_contextlib
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.layers.DenseFeatures", v1=[])
class DenseFeatures(dense_features.DenseFeatures):
    """A layer that produces a dense `Tensor` based on given `feature_columns`.
    Generally a single example in training data is described with
    FeatureColumns. At the first layer of the model, this column oriented data
    should be converted to a single `Tensor`.
    This layer can be called multiple times with different features.
    This is the V2 version of this layer that uses name_scopes to create
    variables instead of variable_scopes. But this approach currently lacks
    support for partitioned variables. In that case, use the V1 version instead.
    Example:
    ```python
    price = tf.feature_column.numeric_column('price')
    keywords_embedded = tf.feature_column.embedding_column(
        tf.feature_column.categorical_column_with_hash_bucket("keywords",
                                                              10000),
        dimensions=16)
    columns = [price, keywords_embedded, ...]
    feature_layer = tf.keras.layers.DenseFeatures(columns)
    features = tf.io.parse_example(
        ..., features=tf.feature_column.make_parse_example_spec(columns))
    dense_tensor = feature_layer(features)
    for units in [128, 64, 32]:
        dense_tensor = tf.keras.layers.Dense(units, activation='relu')(
            dense_tensor)
    prediction = tf.keras.layers.Dense(1)(dense_tensor)
    ```
    """
    def __init__(self, feature_columns, trainable=True, name=None, **kwargs):
        """Creates a DenseFeatures object.
        Args:
          feature_columns: An iterable containing the FeatureColumns to use as
            inputs to your model. All items should be instances of classes
            derived from `DenseColumn` such as `numeric_column`,
            `embedding_column`, `bucketized_column`, `indicator_column`. If you
            have categorical features, you can wrap them with an
            `embedding_column` or `indicator_column`.
          trainable: Boolean, whether the layer's variables will be updated via
            gradient descent during training.
          name: Name to give to the DenseFeatures.
          **kwargs: Keyword arguments to construct a layer.
        Raises:
          ValueError: if an item in `feature_columns` is not a `DenseColumn`.
        """
        super().__init__(
            feature_columns=feature_columns,
            trainable=trainable,
            name=name,
            **kwargs
        )
        # V2-specific state manager that creates variables under name_scopes.
        self._state_manager = _StateManagerImplV2(self, self.trainable)
    def build(self, _):
        # Create per-column state (e.g. embedding variables) inside a
        # name_scope rather than a variable_scope (the V2 behavior).
        for column in self._feature_columns:
            with tf.name_scope(column.name):
                column.create_state(self._state_manager)
        # We would like to call Layer.build and not _DenseFeaturesHelper.build.
        super(kfc._BaseFeaturesLayer, self).build(None)
class _StateManagerImplV2(tf.__internal__.feature_column.StateManager):
    """Manages the state of DenseFeatures."""
    def create_variable(
        self,
        feature_column,
        name,
        shape,
        dtype=None,
        trainable=True,
        use_resource=True,
        initializer=None,
    ):
        """Creates a variable for `feature_column` and tracks it on the layer.
        Raises:
          ValueError: if a variable with the same `name` was already created
            for this feature column.
        """
        if name in self._cols_to_vars_map[feature_column]:
            raise ValueError("Variable already exists.")
        # We explicitly track these variables since `name` is not guaranteed to
        # be unique and disable manual tracking that the add_weight call does.
        with no_manual_dependency_tracking_scope(self._layer):
            var = self._layer.add_weight(
                name=name,
                shape=shape,
                dtype=dtype,
                initializer=initializer,
                # A variable is only trainable if both the layer and the
                # individual column request it.
                trainable=self._trainable and trainable,
                use_resource=use_resource,
            )
        if isinstance(var, tf.__internal__.tracking.Trackable):
            # Track under a column-qualified name to avoid collisions.
            self._layer._track_trackable(var, feature_column.name + "/" + name)
        self._cols_to_vars_map[feature_column][name] = var
        return var
@tf_contextlib.contextmanager
def no_manual_dependency_tracking_scope(obj):
    """Temporarily disable manual dependency tracking on `obj`.

    Some library calls (e.g. `add_weight`) track the objects they create on
    their own.  Entering this scope suppresses that automatic tracking so the
    caller can register the object under a name of its choosing, e.g.:

        with no_manual_dependency_tracking_scope(self):
            var = self.add_weight("name1")  # created but not tracked
        self._track_trackable("name2", var)  # tracked under our name

    Args:
      obj: A trackable object.

    Yields:
      A scope in which `obj` does not track dependencies manually.
    """
    saved_flag = getattr(obj, "_manual_tracking", True)
    obj._manual_tracking = False
    try:
        yield
    finally:
        # Restore whatever tracking mode was in effect before entering.
        obj._manual_tracking = saved_flag
| 37.539394 | 80 | 0.674524 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from keras.feature_column import base_feature_layer as kfc
from keras.feature_column import dense_features
from keras.utils import tf_contextlib
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.layers.DenseFeatures", v1=[])
class DenseFeatures(dense_features.DenseFeatures):
def __init__(self, feature_columns, trainable=True, name=None, **kwargs):
super().__init__(
feature_columns=feature_columns,
trainable=trainable,
name=name,
**kwargs
)
self._state_manager = _StateManagerImplV2(self, self.trainable)
def build(self, _):
for column in self._feature_columns:
with tf.name_scope(column.name):
column.create_state(self._state_manager)
super(kfc._BaseFeaturesLayer, self).build(None)
class _StateManagerImplV2(tf.__internal__.feature_column.StateManager):
def create_variable(
self,
feature_column,
name,
shape,
dtype=None,
trainable=True,
use_resource=True,
initializer=None,
):
if name in self._cols_to_vars_map[feature_column]:
raise ValueError("Variable already exists.")
with no_manual_dependency_tracking_scope(self._layer):
var = self._layer.add_weight(
name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
trainable=self._trainable and trainable,
use_resource=use_resource,
)
if isinstance(var, tf.__internal__.tracking.Trackable):
self._layer._track_trackable(var, feature_column.name + "/" + name)
self._cols_to_vars_map[feature_column][name] = var
return var
@tf_contextlib.contextmanager
def no_manual_dependency_tracking_scope(obj):
previous_value = getattr(obj, "_manual_tracking", True)
obj._manual_tracking = False
try:
yield
finally:
obj._manual_tracking = previous_value
| true | true |
f731d83cb3bd2b8583121ddf5531aba8604bc008 | 14,005 | py | Python | main_old.py | umidjon-userbot/tgvcv | 78f461bf2e68902d6a9ab4b5d06aca6840c1844f | [
"MIT"
] | null | null | null | main_old.py | umidjon-userbot/tgvcv | 78f461bf2e68902d6a9ab4b5d06aca6840c1844f | [
"MIT"
] | null | null | null | main_old.py | umidjon-userbot/tgvcv | 78f461bf2e68902d6a9ab4b5d06aca6840c1844f | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
import os
import asyncio
import subprocess
import youtube_dl
from Python_ARQ import ARQ
from pytgcalls import GroupCall
from sys import version as pyver
from pyrogram import Client, filters
from misc import HELP_TEXT, START_TEXT, REPO_TEXT
from functions import (
transcode,
download_and_transcode_song,
convert_seconds,
time_to_seconds,
generate_cover,
generate_cover_square,
)
# TODO Make it look less messed up
is_config = os.path.exists("config.py")
if is_config:
from config import (
API_ID, API_HASH,
SUDO_CHAT_ID,
SUDOERS, ARQ_API, HEROKU
)
elif not is_config:
from sample_config import (
API_ID, API_HASH,
SUDO_CHAT_ID,
SUDOERS, ARQ_API, HEROKU
)
if HEROKU:
if is_config:
from config import SESSION_STRING
elif not is_config:
from sample_config import SESSION_STRING
queue = [] # This is where the whole song queue is stored
playing = False # Tells if something is playing or not
# Pyrogram Client
if not HEROKU:
app = Client("tgvc", api_id=API_ID, api_hash=API_HASH)
else:
app = Client(SESSION_STRING, api_id=API_ID, api_hash=API_HASH)
# Pytgcalls Client
vc = GroupCall(
client=app,
input_filename="input.raw",
play_on_repeat=True,
enable_logs_to_console=False,
)
# Arq Client
arq = ARQ(ARQ_API)
async def delete(message):
await asyncio.sleep(10)
await message.delete()
@app.on_message(filters.command("start") & filters.user(SUDOERS))
async def start(_, message):
await send(START_TEXT)
@app.on_message(filters.command("help") & filters.user(SUDOERS))
async def help(_, message):
await send(HELP_TEXT)
@app.on_message(filters.command("repo") & filters.user(SUDOERS))
async def repo(_, message):
await send(REPO_TEXT)
@app.on_message(filters.command("joinvc") & filters.user(SUDOERS))
async def joinvc(_, message):
try:
if vc.is_connected:
await send("__**Allaqachon ovozli chatdaman.**__")
return
chat_id = message.chat.id
await vc.start(chat_id)
await send("__**Ovozli chatga qo'shildim.**__")
except Exception as e:
print(str(e))
await send(str(e))
@app.on_message(filters.command("rejoinvc") & filters.user(SUDOERS))
async def joinvc(_, message):
try:
if vc.is_connected:
await send("__**Allaqachon ovozli chatdaman.**__")
return
chat_id = message.chat.id
await vc.reconnect()
await send("__**Ovozli chatga qo'shildim.**__")
except Exception as e:
print(str(e))
await send(str(e))
@app.on_message(filters.command("leavevc") & filters.user(SUDOERS))
async def leavevc(_, message):
if not vc.is_connected:
await send("__**Ovozli chatdan allaqachon chiqib ketganman.**__")
return
await vc.leave_current_group_call()
await vc.stop()
await send("__**Ovozli chatni tark etdim , yangilanish....**__")
os.execvp(
f"python{str(pyver.split(' ')[0])[:3]}",
[f"python{str(pyver.split(' ')[0])[:3]}", "main.py"],
)
@app.on_message(filters.command("update") & filters.user(SUDOERS))
async def update_restart(_, message):
await send(
f'```{subprocess.check_output(["git", "pull"]).decode("UTF-8")}```'
)
os.execvp(
f"python{str(pyver.split(' ')[0])[:3]}",
[f"python{str(pyver.split(' ')[0])[:3]}", "main.py"],
)
@app.on_message(filters.command("pause") & filters.user(SUDOERS))
async def pause_song(_, message):
vc.pause_playout()
await send("**To'xtatildi, davom ettirish uchun /resume buyrug'ini bering.**")
@app.on_message(filters.command("resume") & filters.chat(SUDO_CHAT_ID))
async def resume_song(_, message):
vc.resume_playout()
await send("**Davom etmoqda, to'xtatish uchun /pause buyrug'ini bering.**")
@app.on_message(filters.command("volume") & filters.user(SUDOERS))
async def volume_bot(_, message):
usage = "**Ishlatish uchun:**\n/volume [1-200] yozing"
if len(message.command) != 2:
await send(usage)
return
volume = int(message.text.split(None, 1)[1])
if (volume < 1) or (volume > 200):
await send(usage)
return
try:
await vc.set_my_volume(volume=volume)
except ValueError:
await send(usage)
return
await send(f"**Volume Set To {volume}**")
@app.on_message(filters.command("play") & filters.chat(SUDO_CHAT_ID))
async def queuer(_, message):
usage = "**Usage:**\n__**/play youtube Qo'shiq_Nomi**__"
if len(message.command) < 3:
await send(usage)
return
text = message.text.split(None, 2)[1:]
service = text[0].lower()
song_name = text[1]
requested_by = message.from_user.first_name
services = ["youtube", "deezer", "saavn"]
if service not in services:
await send(usage)
return
if len(queue) > 0:
await message.delete()
await send("__**Navbatga qo'shdim.__**")
queue.append(
{
"service": service,
"song": song_name,
"requested_by": requested_by,
}
)
await play()
return
await message.delete()
queue.append(
{
"service": service,
"song": song_name,
"requested_by": requested_by,
}
)
await play()
@app.on_message(
filters.command("skip") & filters.user(SUDOERS) & ~filters.edited
)
async def skip(_, message):
global playing
if len(queue) == 0:
await send("__**Navbat bo'm-bo'sh.**__")
return
playing = False
await send("__**Keyingisiga o'tkazildi!**__")
await play()
@app.on_message(filters.command("queue") & filters.chat(SUDO_CHAT_ID))
async def queue_list(_, message):
if len(queue) != 0:
i = 1
text = ""
for song in queue:
text += f"**{i}. Platforma:** __**{song['service']}**__ " \
+ f"| **Musiqa:** __**{song['song']}**__\n"
i += 1
m = await send(text)
await delete(message)
await m.delete()
else:
m = await send("__**Navbatda musiqa yo'q.**__")
await delete(message)
await m.delete()
# Queue handler
async def play():
global queue, playing
while not playing:
await asyncio.sleep(2)
if len(queue) != 0:
service = queue[0]["service"]
song = queue[0]["song"]
requested_by = queue[0]["requested_by"]
if service == "youtube":
playing = True
del queue[0]
try:
await ytplay(requested_by, song)
except Exception as e:
print(str(e))
await send(str(e))
playing = False
pass
elif service == "saavn":
playing = True
del queue[0]
try:
await jiosaavn(requested_by, song)
except Exception as e:
print(str(e))
await send(str(e))
playing = False
pass
elif service == "deezer":
playing = True
del queue[0]
try:
await deezer(requested_by, song)
except Exception as e:
print(str(e))
await send(str(e))
playing = False
pass
# Deezer----------------------------------------------------------------------------------------
async def deezer(requested_by, query):
global playing
m = await send(f"__**Searching for {query} on Deezer.**__")
try:
songs = await arq.deezer(query, 1)
title = songs[0].title
duration = convert_seconds(int(songs[0].duration))
thumbnail = songs[0].thumbnail
artist = songs[0].artist
url = songs[0].url
except Exception:
await m.edit("__**Found No Song Matching Your Query.**__")
playing = False
return
await m.edit("__**Generating Thumbnail.**__")
await generate_cover_square(
requested_by, title, artist, duration, thumbnail
)
await m.edit("__**Downloading And Transcoding.**__")
await download_and_transcode_song(url)
await m.delete()
caption = f"🏷 **Name:** [{title[:35]}]({url})\n⏳ **Duration:** {duration}\n" \
+ f"🎧 **Requested By:** {requested_by}\n📡 **Platform:** Deezer"
m = await app.send_photo(
chat_id=SUDO_CHAT_ID,
photo="final.png",
caption=caption,
)
os.remove("final.png")
await asyncio.sleep(int(songs[0]["duration"]))
await m.delete()
playing = False
# Jiosaavn--------------------------------------------------------------------------------------
async def jiosaavn(requested_by, query):
global playing
m = await send(f"__**Searching for {query} on JioSaavn.**__")
try:
songs = await arq.saavn(query)
sname = songs[0].song
slink = songs[0].media_url
ssingers = songs[0].singers
sthumb = songs[0].image
sduration = songs[0].duration
sduration_converted = convert_seconds(int(sduration))
except Exception as e:
await m.edit("__**Found No Song Matching Your Query.**__")
print(str(e))
playing = False
return
await m.edit("__**Processing Thumbnail.**__")
await generate_cover_square(
requested_by, sname, ssingers, sduration_converted, sthumb
)
await m.edit("__**Downloading And Transcoding.**__")
await download_and_transcode_song(slink)
await m.delete()
caption = f"🏷 **Name:** {sname[:35]}\n⏳ **Duration:** {sduration_converted}\n" \
+ f"🎧 **Requested By:** {requested_by}\n📡 **Platform:** JioSaavn"
m = await app.send_photo(
chat_id=SUDO_CHAT_ID,
caption=caption,
photo="final.png",
)
os.remove("final.png")
await asyncio.sleep(int(sduration))
await m.delete()
playing = False
# Youtube Play-----------------------------------------------------
async def ytplay(requested_by, query):
global playing
ydl_opts = {"format": "bestaudio"}
m = await send(f"__**{query} YouTubedan izlanmoqda.**__")
try:
results = await arq.youtube(query)
link = f"https://youtube.com{results[0].url_suffix}"
title = results[0].title
thumbnail = results[0].thumbnails[0]
duration = results[0].duration
views = results[0].views
await app.update_profile(first_name=f"🔉{title} ",bio = f"{title} ijro etilmoqda")
if time_to_seconds(duration) >= 1800:
await m.edit("__**Yo'q, faqat 30 daqiqadan oshmagan musiqalar mumkin.**__")
playing = False
return
except Exception as e:
await m.edit("__**Siz izlagan musiqa topilmadi.**__")
playing = False
print(str(e))
return
await m.edit("__**1 soniya.**__")
await generate_cover(requested_by, title, views, duration, thumbnail)
await m.edit("__**yuklanmoqda ....**__")
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
info_dict = ydl.extract_info(link, download=False)
audio_file = ydl.prepare_filename(info_dict)
ydl.process_info(info_dict)
await m.edit("__**1 soniya.**__")
os.rename(audio_file, "audio.webm")
transcode("audio.webm")
await m.delete()
caption = f"🏷 **Nomi:** [{title[:35]}]({link})\n⏳ **Davomiyligi:** {duration}\n" \
+ f"🎧 {requested_by} **tomonidan ijro etildi**\n📡 **Platforma:** YouTube"
await app.set_profile_photo(photo="final.png")
m = await app.send_photo(
chat_id=SUDO_CHAT_ID,
caption=caption,
photo="final.png",
)
os.remove("final.png")
await asyncio.sleep(int(time_to_seconds(duration)))
playing = False
await m.delete()
# Telegram Audio------------------------------------
@app.on_message(
filters.command("telegram") & filters.user(SUDOERS) & ~filters.edited
)
async def tgplay(_, message):
global playing
if len(queue) != 0:
await send("__**You Can Only Play Telegram Files After The Queue Gets "
+ "Finished.**__")
return
if not message.reply_to_message:
await send("__**Reply to an audio.**__")
return
if message.reply_to_message.audio:
if int(message.reply_to_message.audio.file_size) >= 104857600:
await send("__**Bruh! Only songs within 100 MB.**__")
playing = False
return
duration = message.reply_to_message.audio.duration
if not duration:
await send("__**Only Songs With Duration Are Supported.**__")
return
m = await send("__**Downloading.**__")
song = await message.reply_to_message.download()
await m.edit("__**Transcoding.**__")
transcode(song)
await m.edit(f"**Playing** __**{message.reply_to_message.link}.**__")
await asyncio.sleep(duration)
playing = False
return
await send("__**Only Audio Files (Not Document) Are Supported.**__")
async def send(text):
m = await app.send_message(
SUDO_CHAT_ID, text=text, disable_web_page_preview=True
)
return m
print(
"\nBot Starting..."
)
app.run()
| 30.645514 | 97 | 0.565084 | from __future__ import unicode_literals
import os
import asyncio
import subprocess
import youtube_dl
from Python_ARQ import ARQ
from pytgcalls import GroupCall
from sys import version as pyver
from pyrogram import Client, filters
from misc import HELP_TEXT, START_TEXT, REPO_TEXT
from functions import (
transcode,
download_and_transcode_song,
convert_seconds,
time_to_seconds,
generate_cover,
generate_cover_square,
)
is_config = os.path.exists("config.py")
if is_config:
from config import (
API_ID, API_HASH,
SUDO_CHAT_ID,
SUDOERS, ARQ_API, HEROKU
)
elif not is_config:
from sample_config import (
API_ID, API_HASH,
SUDO_CHAT_ID,
SUDOERS, ARQ_API, HEROKU
)
if HEROKU:
if is_config:
from config import SESSION_STRING
elif not is_config:
from sample_config import SESSION_STRING
queue = []
playing = False
if not HEROKU:
app = Client("tgvc", api_id=API_ID, api_hash=API_HASH)
else:
app = Client(SESSION_STRING, api_id=API_ID, api_hash=API_HASH)
vc = GroupCall(
client=app,
input_filename="input.raw",
play_on_repeat=True,
enable_logs_to_console=False,
)
arq = ARQ(ARQ_API)
async def delete(message):
await asyncio.sleep(10)
await message.delete()
@app.on_message(filters.command("start") & filters.user(SUDOERS))
async def start(_, message):
await send(START_TEXT)
@app.on_message(filters.command("help") & filters.user(SUDOERS))
async def help(_, message):
await send(HELP_TEXT)
@app.on_message(filters.command("repo") & filters.user(SUDOERS))
async def repo(_, message):
await send(REPO_TEXT)
@app.on_message(filters.command("joinvc") & filters.user(SUDOERS))
async def joinvc(_, message):
try:
if vc.is_connected:
await send("__**Allaqachon ovozli chatdaman.**__")
return
chat_id = message.chat.id
await vc.start(chat_id)
await send("__**Ovozli chatga qo'shildim.**__")
except Exception as e:
print(str(e))
await send(str(e))
@app.on_message(filters.command("rejoinvc") & filters.user(SUDOERS))
async def joinvc(_, message):
try:
if vc.is_connected:
await send("__**Allaqachon ovozli chatdaman.**__")
return
chat_id = message.chat.id
await vc.reconnect()
await send("__**Ovozli chatga qo'shildim.**__")
except Exception as e:
print(str(e))
await send(str(e))
@app.on_message(filters.command("leavevc") & filters.user(SUDOERS))
async def leavevc(_, message):
if not vc.is_connected:
await send("__**Ovozli chatdan allaqachon chiqib ketganman.**__")
return
await vc.leave_current_group_call()
await vc.stop()
await send("__**Ovozli chatni tark etdim , yangilanish....**__")
os.execvp(
f"python{str(pyver.split(' ')[0])[:3]}",
[f"python{str(pyver.split(' ')[0])[:3]}", "main.py"],
)
@app.on_message(filters.command("update") & filters.user(SUDOERS))
async def update_restart(_, message):
await send(
f'```{subprocess.check_output(["git", "pull"]).decode("UTF-8")}```'
)
os.execvp(
f"python{str(pyver.split(' ')[0])[:3]}",
[f"python{str(pyver.split(' ')[0])[:3]}", "main.py"],
)
@app.on_message(filters.command("pause") & filters.user(SUDOERS))
async def pause_song(_, message):
vc.pause_playout()
await send("**To'xtatildi, davom ettirish uchun /resume buyrug'ini bering.**")
@app.on_message(filters.command("resume") & filters.chat(SUDO_CHAT_ID))
async def resume_song(_, message):
vc.resume_playout()
await send("**Davom etmoqda, to'xtatish uchun /pause buyrug'ini bering.**")
@app.on_message(filters.command("volume") & filters.user(SUDOERS))
async def volume_bot(_, message):
usage = "**Ishlatish uchun:**\n/volume [1-200] yozing"
if len(message.command) != 2:
await send(usage)
return
volume = int(message.text.split(None, 1)[1])
if (volume < 1) or (volume > 200):
await send(usage)
return
try:
await vc.set_my_volume(volume=volume)
except ValueError:
await send(usage)
return
await send(f"**Volume Set To {volume}**")
@app.on_message(filters.command("play") & filters.chat(SUDO_CHAT_ID))
async def queuer(_, message):
usage = "**Usage:**\n__**/play youtube Qo'shiq_Nomi**__"
if len(message.command) < 3:
await send(usage)
return
text = message.text.split(None, 2)[1:]
service = text[0].lower()
song_name = text[1]
requested_by = message.from_user.first_name
services = ["youtube", "deezer", "saavn"]
if service not in services:
await send(usage)
return
if len(queue) > 0:
await message.delete()
await send("__**Navbatga qo'shdim.__**")
queue.append(
{
"service": service,
"song": song_name,
"requested_by": requested_by,
}
)
await play()
return
await message.delete()
queue.append(
{
"service": service,
"song": song_name,
"requested_by": requested_by,
}
)
await play()
@app.on_message(
filters.command("skip") & filters.user(SUDOERS) & ~filters.edited
)
async def skip(_, message):
global playing
if len(queue) == 0:
await send("__**Navbat bo'm-bo'sh.**__")
return
playing = False
await send("__**Keyingisiga o'tkazildi!**__")
await play()
@app.on_message(filters.command("queue") & filters.chat(SUDO_CHAT_ID))
async def queue_list(_, message):
if len(queue) != 0:
i = 1
text = ""
for song in queue:
text += f"**{i}. Platforma:** __**{song['service']}**__ " \
+ f"| **Musiqa:** __**{song['song']}**__\n"
i += 1
m = await send(text)
await delete(message)
await m.delete()
else:
m = await send("__**Navbatda musiqa yo'q.**__")
await delete(message)
await m.delete()
async def play():
global queue, playing
while not playing:
await asyncio.sleep(2)
if len(queue) != 0:
service = queue[0]["service"]
song = queue[0]["song"]
requested_by = queue[0]["requested_by"]
if service == "youtube":
playing = True
del queue[0]
try:
await ytplay(requested_by, song)
except Exception as e:
print(str(e))
await send(str(e))
playing = False
pass
elif service == "saavn":
playing = True
del queue[0]
try:
await jiosaavn(requested_by, song)
except Exception as e:
print(str(e))
await send(str(e))
playing = False
pass
elif service == "deezer":
playing = True
del queue[0]
try:
await deezer(requested_by, song)
except Exception as e:
print(str(e))
await send(str(e))
playing = False
pass
async def deezer(requested_by, query):
global playing
m = await send(f"__**Searching for {query} on Deezer.**__")
try:
songs = await arq.deezer(query, 1)
title = songs[0].title
duration = convert_seconds(int(songs[0].duration))
thumbnail = songs[0].thumbnail
artist = songs[0].artist
url = songs[0].url
except Exception:
await m.edit("__**Found No Song Matching Your Query.**__")
playing = False
return
await m.edit("__**Generating Thumbnail.**__")
await generate_cover_square(
requested_by, title, artist, duration, thumbnail
)
await m.edit("__**Downloading And Transcoding.**__")
await download_and_transcode_song(url)
await m.delete()
caption = f"🏷 **Name:** [{title[:35]}]({url})\n⏳ **Duration:** {duration}\n" \
+ f"🎧 **Requested By:** {requested_by}\n📡 **Platform:** Deezer"
m = await app.send_photo(
chat_id=SUDO_CHAT_ID,
photo="final.png",
caption=caption,
)
os.remove("final.png")
await asyncio.sleep(int(songs[0]["duration"]))
await m.delete()
playing = False
async def jiosaavn(requested_by, query):
global playing
m = await send(f"__**Searching for {query} on JioSaavn.**__")
try:
songs = await arq.saavn(query)
sname = songs[0].song
slink = songs[0].media_url
ssingers = songs[0].singers
sthumb = songs[0].image
sduration = songs[0].duration
sduration_converted = convert_seconds(int(sduration))
except Exception as e:
await m.edit("__**Found No Song Matching Your Query.**__")
print(str(e))
playing = False
return
await m.edit("__**Processing Thumbnail.**__")
await generate_cover_square(
requested_by, sname, ssingers, sduration_converted, sthumb
)
await m.edit("__**Downloading And Transcoding.**__")
await download_and_transcode_song(slink)
await m.delete()
caption = f"🏷 **Name:** {sname[:35]}\n⏳ **Duration:** {sduration_converted}\n" \
+ f"🎧 **Requested By:** {requested_by}\n📡 **Platform:** JioSaavn"
m = await app.send_photo(
chat_id=SUDO_CHAT_ID,
caption=caption,
photo="final.png",
)
os.remove("final.png")
await asyncio.sleep(int(sduration))
await m.delete()
playing = False
async def ytplay(requested_by, query):
global playing
ydl_opts = {"format": "bestaudio"}
m = await send(f"__**{query} YouTubedan izlanmoqda.**__")
try:
results = await arq.youtube(query)
link = f"https://youtube.com{results[0].url_suffix}"
title = results[0].title
thumbnail = results[0].thumbnails[0]
duration = results[0].duration
views = results[0].views
await app.update_profile(first_name=f"🔉{title} ",bio = f"{title} ijro etilmoqda")
if time_to_seconds(duration) >= 1800:
await m.edit("__**Yo'q, faqat 30 daqiqadan oshmagan musiqalar mumkin.**__")
playing = False
return
except Exception as e:
await m.edit("__**Siz izlagan musiqa topilmadi.**__")
playing = False
print(str(e))
return
await m.edit("__**1 soniya.**__")
await generate_cover(requested_by, title, views, duration, thumbnail)
await m.edit("__**yuklanmoqda ....**__")
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
info_dict = ydl.extract_info(link, download=False)
audio_file = ydl.prepare_filename(info_dict)
ydl.process_info(info_dict)
await m.edit("__**1 soniya.**__")
os.rename(audio_file, "audio.webm")
transcode("audio.webm")
await m.delete()
caption = f"🏷 **Nomi:** [{title[:35]}]({link})\n⏳ **Davomiyligi:** {duration}\n" \
+ f"🎧 {requested_by} **tomonidan ijro etildi**\n📡 **Platforma:** YouTube"
await app.set_profile_photo(photo="final.png")
m = await app.send_photo(
chat_id=SUDO_CHAT_ID,
caption=caption,
photo="final.png",
)
os.remove("final.png")
await asyncio.sleep(int(time_to_seconds(duration)))
playing = False
await m.delete()
# Telegram Audio------------------------------------
@app.on_message(
filters.command("telegram") & filters.user(SUDOERS) & ~filters.edited
)
async def tgplay(_, message):
global playing
if len(queue) != 0:
await send("__**You Can Only Play Telegram Files After The Queue Gets "
+ "Finished.**__")
return
if not message.reply_to_message:
await send("__**Reply to an audio.**__")
return
if message.reply_to_message.audio:
if int(message.reply_to_message.audio.file_size) >= 104857600:
await send("__**Bruh! Only songs within 100 MB.**__")
playing = False
return
duration = message.reply_to_message.audio.duration
if not duration:
await send("__**Only Songs With Duration Are Supported.**__")
return
m = await send("__**Downloading.**__")
song = await message.reply_to_message.download()
await m.edit("__**Transcoding.**__")
transcode(song)
await m.edit(f"**Playing** __**{message.reply_to_message.link}.**__")
await asyncio.sleep(duration)
playing = False
return
await send("__**Only Audio Files (Not Document) Are Supported.**__")
async def send(text):
m = await app.send_message(
SUDO_CHAT_ID, text=text, disable_web_page_preview=True
)
return m
print(
"\nBot Starting..."
)
app.run()
| true | true |
f731d84f286ed80e65155cc5debd5fca99760dfb | 186 | py | Python | 2_line_notify/src/config_sample.py | tenpa1105/m5camera_mpy_samples | a47d6eca951fc6298794226f51c20408d7c2d64c | [
"MIT"
] | null | null | null | 2_line_notify/src/config_sample.py | tenpa1105/m5camera_mpy_samples | a47d6eca951fc6298794226f51c20408d7c2d64c | [
"MIT"
] | null | null | null | 2_line_notify/src/config_sample.py | tenpa1105/m5camera_mpy_samples | a47d6eca951fc6298794226f51c20408d7c2d64c | [
"MIT"
] | null | null | null | WIFI_SSID = "please enter your wifi ssid"
WIFI_PASS = "please enter your wifi password"
LINE_TOKEN= "please enter your line notify token"
MOVING_DET_DIFF = 10
MOVING_DET_LIGHT_MAX = 200 | 37.2 | 50 | 0.795699 | WIFI_SSID = "please enter your wifi ssid"
WIFI_PASS = "please enter your wifi password"
LINE_TOKEN= "please enter your line notify token"
MOVING_DET_DIFF = 10
MOVING_DET_LIGHT_MAX = 200 | true | true |
f731d88c245e18608de7478443566fc03ff583a3 | 23,559 | py | Python | sdk/synapse/azure-synapse-artifacts/azure/synapse/artifacts/operations/_pipeline_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | 8 | 2021-01-13T23:44:08.000Z | 2021-03-17T10:13:36.000Z | sdk/synapse/azure-synapse-artifacts/azure/synapse/artifacts/operations/_pipeline_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | null | null | null | sdk/synapse/azure-synapse-artifacts/azure/synapse/artifacts/operations/_pipeline_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.polling.base_polling import LROBasePolling
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class PipelineOperations(object):
"""PipelineOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.synapse.artifacts.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get_pipelines_by_workspace(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["models.PipelineListResponse"]
"""Lists pipelines.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PipelineListResponse or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.synapse.artifacts.models.PipelineListResponse]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.PipelineListResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.get_pipelines_by_workspace.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('PipelineListResponse', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.CloudError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
get_pipelines_by_workspace.metadata = {'url': '/pipelines'} # type: ignore
def _create_or_update_pipeline_initial(
self,
pipeline_name, # type: str
pipeline, # type: "models.PipelineResource"
if_match=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Optional["models.PipelineResource"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["models.PipelineResource"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_pipeline_initial.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'pipelineName': self._serialize.url("pipeline_name", pipeline_name, 'str', max_length=260, min_length=1, pattern=r'^[A-Za-z0-9_][^<>*#.%&:\\+?/]*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(pipeline, 'PipelineResource')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.CloudError, response)
raise HttpResponseError(response=response, model=error)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PipelineResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_pipeline_initial.metadata = {'url': '/pipelines/{pipelineName}'} # type: ignore
def begin_create_or_update_pipeline(
    self,
    pipeline_name,  # type: str
    pipeline,  # type: "models.PipelineResource"
    if_match=None,  # type: Optional[str]
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller["models.PipelineResource"]
    """Creates or updates a pipeline.

    :param pipeline_name: The pipeline name.
    :type pipeline_name: str
    :param pipeline: Pipeline resource definition.
    :type pipeline: ~azure.synapse.artifacts.models.PipelineResource
    :param if_match: ETag of the pipeline entity. Should only be specified for update, for which
     it should match existing entity or can be * for unconditional update.
    :type if_match: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either PipelineResource or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~azure.synapse.artifacts.models.PipelineResource]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', False)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["models.PipelineResource"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # Only issue the initial PUT when not resuming from a saved poller state.
    if cont_token is None:
        raw_result = self._create_or_update_pipeline_initial(
            pipeline_name=pipeline_name,
            pipeline=pipeline,
            if_match=if_match,
            cls=lambda x,y,z: x,
            **kwargs
        )

    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialization of the final response once the LRO completes.
        deserialized = self._deserialize('PipelineResource', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    if polling is True: polling_method = LROBasePolling(lro_delay, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update_pipeline.metadata = {'url': '/pipelines/{pipelineName}'}  # type: ignore
def get_pipeline(
    self,
    pipeline_name,  # type: str
    if_none_match=None,  # type: Optional[str]
    **kwargs  # type: Any
):
    # type: (...) -> Optional["models.PipelineResource"]
    """Gets a pipeline.

    :param pipeline_name: The pipeline name.
    :type pipeline_name: str
    :param if_none_match: ETag of the pipeline entity. Should only be specified for get. If the
     ETag matches the existing entity tag, or if * was provided, then no content will be returned.
    :type if_none_match: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: PipelineResource, or the result of cls(response)
    :rtype: ~azure.synapse.artifacts.models.PipelineResource or None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[Optional["models.PipelineResource"]]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-06-01-preview"
    accept = "application/json"

    # Construct URL
    url = self.get_pipeline.metadata['url']  # type: ignore
    path_format_arguments = {
        'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        'pipelineName': self._serialize.url("pipeline_name", pipeline_name, 'str', max_length=260, min_length=1, pattern=r'^[A-Za-z0-9_][^<>*#.%&:\\+?/]*$'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    if if_none_match is not None:
        header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 304 (Not Modified) is accepted for the conditional GET; body only parsed on 200.
    if response.status_code not in [200, 304]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.CloudError, response)
        raise HttpResponseError(response=response, model=error)

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('PipelineResource', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
get_pipeline.metadata = {'url': '/pipelines/{pipelineName}'}  # type: ignore
def _delete_pipeline_initial(
    self,
    pipeline_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Initial DELETE request of the delete-pipeline long-running operation.

    Internal helper used by ``begin_delete_pipeline``; callers should use that
    public method instead.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-06-01-preview"
    accept = "application/json"

    # Construct URL
    url = self._delete_pipeline_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        'pipelineName': self._serialize.url("pipeline_name", pipeline_name, 'str', max_length=260, min_length=1, pattern=r'^[A-Za-z0-9_][^<>*#.%&:\\+?/]*$'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 200/202/204 are all valid: done, accepted (async), or already gone.
    if response.status_code not in [200, 202, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.CloudError, response)
        raise HttpResponseError(response=response, model=error)

    if cls:
        return cls(pipeline_response, None, {})
_delete_pipeline_initial.metadata = {'url': '/pipelines/{pipelineName}'}  # type: ignore
def begin_delete_pipeline(
    self,
    pipeline_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller[None]
    """Deletes a pipeline.

    :param pipeline_name: The pipeline name.
    :type pipeline_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', False)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # Only send the initial DELETE when not resuming from a saved poller state.
    if cont_token is None:
        raw_result = self._delete_pipeline_initial(
            pipeline_name=pipeline_name,
            cls=lambda x,y,z: x,
            **kwargs
        )

    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Delete has no body to deserialize; only honor a custom cls hook.
        if cls:
            return cls(pipeline_response, None, {})

    if polling is True: polling_method = LROBasePolling(lro_delay, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete_pipeline.metadata = {'url': '/pipelines/{pipelineName}'}  # type: ignore
def create_pipeline_run(
    self,
    pipeline_name,  # type: str
    reference_pipeline_run_id=None,  # type: Optional[str]
    is_recovery=None,  # type: Optional[bool]
    start_activity_name=None,  # type: Optional[str]
    parameters=None,  # type: Optional[Dict[str, object]]
    **kwargs  # type: Any
):
    # type: (...) -> "models.CreateRunResponse"
    """Creates a run of a pipeline.

    :param pipeline_name: The pipeline name.
    :type pipeline_name: str
    :param reference_pipeline_run_id: The pipeline run identifier. If run ID is specified the
     parameters of the specified run will be used to create a new run.
    :type reference_pipeline_run_id: str
    :param is_recovery: Recovery mode flag. If recovery mode is set to true, the specified
     referenced pipeline run and the new run will be grouped under the same groupId.
    :type is_recovery: bool
    :param start_activity_name: In recovery mode, the rerun will start from this activity. If not
     specified, all activities will run.
    :type start_activity_name: str
    :param parameters: Parameters of the pipeline run. These parameters will be used only if the
     runId is not specified.
    :type parameters: dict[str, object]
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: CreateRunResponse, or the result of cls(response)
    :rtype: ~azure.synapse.artifacts.models.CreateRunResponse
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.CreateRunResponse"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-06-01-preview"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self.create_pipeline_run.metadata['url']  # type: ignore
    path_format_arguments = {
        'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        'pipelineName': self._serialize.url("pipeline_name", pipeline_name, 'str', max_length=260, min_length=1, pattern=r'^[A-Za-z0-9_][^<>*#.%&:\\+?/]*$'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters (optional query args are only added when provided)
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    if reference_pipeline_run_id is not None:
        query_parameters['referencePipelineRunId'] = self._serialize.query("reference_pipeline_run_id", reference_pipeline_run_id, 'str')
    if is_recovery is not None:
        query_parameters['isRecovery'] = self._serialize.query("is_recovery", is_recovery, 'bool')
    if start_activity_name is not None:
        query_parameters['startActivityName'] = self._serialize.query("start_activity_name", start_activity_name, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    body_content_kwargs = {}  # type: Dict[str, Any]
    if parameters is not None:
        body_content = self._serialize.body(parameters, '{object}')
    else:
        body_content = None
    body_content_kwargs['content'] = body_content
    request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # Service acknowledges the new run with 202 Accepted only.
    if response.status_code not in [202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.CloudError, response)
        raise HttpResponseError(response=response, model=error)

    deserialized = self._deserialize('CreateRunResponse', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
create_pipeline_run.metadata = {'url': '/pipelines/{pipelineName}/createRun'}  # type: ignore
| 47.690283 | 161 | 0.651259 |
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.polling.base_polling import LROBasePolling
from .. import models
if TYPE_CHECKING:
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class PipelineOperations(object):
    """Operations over the service's ``/pipelines`` endpoints."""

    # Alias so callers can reach the generated models module through the class.
    models = models

    def __init__(self, client, config, serializer, deserializer):
        # HTTP pipeline client used to build and send the requests.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def get_pipelines_by_workspace(
        self,
        **kwargs
    ):
        """List the pipelines in the workspace as a paged iterable.

        :keyword callable cls: optional hook that post-processes each page's item list.
        :return: an ItemPaged over PipelineResource items.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-06-01-preview"
        accept = "application/json"

        def prepare_request(next_link=None):
            # First page hits the operation URL; subsequent pages follow next_link verbatim.
            header_parameters = {}
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                url = self.get_pipelines_by_workspace.metadata['url']
                path_format_arguments = {
                    'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
                }
                url = self._client.format_url(url, **path_format_arguments)
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}
                path_format_arguments = {
                    'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
                }
                url = self._client.format_url(url, **path_format_arguments)
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            # Returns (continuation token, iterator of this page's items).
            deserialized = self._deserialize('PipelineListResponse', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                error = self._deserialize(models.CloudError, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error)

            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    get_pipelines_by_workspace.metadata = {'url': '/pipelines'}
    def _create_or_update_pipeline_initial(
        self,
        pipeline_name,
        pipeline,
        if_match=None,
        **kwargs
    ):
        """Initial PUT request of the create-or-update long-running operation.

        Internal helper used by ``begin_create_or_update_pipeline``.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-06-01-preview"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self._create_or_update_pipeline_initial.metadata['url']
        path_format_arguments = {
            'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            'pipelineName': self._serialize.url("pipeline_name", pipeline_name, 'str', max_length=260, min_length=1, pattern=r'^[A-Za-z0-9_][^<>*#.%&:\\+?/]*$'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}
        if if_match is not None:
            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}
        body_content = self._serialize.body(pipeline, 'PipelineResource')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200 carries the resource body; 202 means the operation is still running.
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.CloudError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('PipelineResource', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _create_or_update_pipeline_initial.metadata = {'url': '/pipelines/{pipelineName}'}
    def begin_create_or_update_pipeline(
        self,
        pipeline_name,
        pipeline,
        if_match=None,
        **kwargs
    ):
        """Begin the long-running create-or-update of a pipeline.

        Returns an LROPoller whose result is the deserialized PipelineResource
        (or the result of the optional ``cls`` hook).
        """
        polling = kwargs.pop('polling', False)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        # Only issue the initial PUT when not resuming from a continuation token.
        if cont_token is None:
            raw_result = self._create_or_update_pipeline_initial(
                pipeline_name=pipeline_name,
                pipeline=pipeline,
                if_match=if_match,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Deserialization of the final response once the LRO completes.
            deserialized = self._deserialize('PipelineResource', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        if polling is True: polling_method = LROBasePolling(lro_delay, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update_pipeline.metadata = {'url': '/pipelines/{pipelineName}'}
    def get_pipeline(
        self,
        pipeline_name,
        if_none_match=None,
        **kwargs
    ):
        """GET a pipeline by name; supports conditional requests via If-None-Match.

        Returns the deserialized PipelineResource, or None on 304 Not Modified.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-06-01-preview"
        accept = "application/json"

        # Construct URL
        url = self.get_pipeline.metadata['url']
        path_format_arguments = {
            'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            'pipelineName': self._serialize.url("pipeline_name", pipeline_name, 'str', max_length=260, min_length=1, pattern=r'^[A-Za-z0-9_][^<>*#.%&:\\+?/]*$'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}
        if if_none_match is not None:
            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Body is only parsed on 200; 304 is a valid empty answer.
        if response.status_code not in [200, 304]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.CloudError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('PipelineResource', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_pipeline.metadata = {'url': '/pipelines/{pipelineName}'}
    def _delete_pipeline_initial(
        self,
        pipeline_name,
        **kwargs
    ):
        """Initial DELETE request of the delete-pipeline long-running operation.

        Internal helper used by ``begin_delete_pipeline``.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-06-01-preview"
        accept = "application/json"

        # Construct URL
        url = self._delete_pipeline_initial.metadata['url']
        path_format_arguments = {
            'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            'pipelineName': self._serialize.url("pipeline_name", pipeline_name, 'str', max_length=260, min_length=1, pattern=r'^[A-Za-z0-9_][^<>*#.%&:\\+?/]*$'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200/202/204 are all valid: done, accepted (async), or no content.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.CloudError, response)
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})
    _delete_pipeline_initial.metadata = {'url': '/pipelines/{pipelineName}'}
    def begin_delete_pipeline(
        self,
        pipeline_name,
        **kwargs
    ):
        """Begin the long-running deletion of a pipeline; returns an LROPoller[None]."""
        polling = kwargs.pop('polling', False)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        # Only send the initial DELETE when not resuming from a continuation token.
        if cont_token is None:
            raw_result = self._delete_pipeline_initial(
                pipeline_name=pipeline_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Delete has no body to deserialize; only honor a custom cls hook.
            if cls:
                return cls(pipeline_response, None, {})

        if polling is True: polling_method = LROBasePolling(lro_delay, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete_pipeline.metadata = {'url': '/pipelines/{pipelineName}'}
    def create_pipeline_run(
        self,
        pipeline_name,
        reference_pipeline_run_id=None,
        is_recovery=None,
        start_activity_name=None,
        parameters=None,
        **kwargs
    ):
        """POST a new run of the named pipeline and return the CreateRunResponse."""
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-06-01-preview"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.create_pipeline_run.metadata['url']
        path_format_arguments = {
            'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            'pipelineName': self._serialize.url("pipeline_name", pipeline_name, 'str', max_length=260, min_length=1, pattern=r'^[A-Za-z0-9_][^<>*#.%&:\\+?/]*$'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters (optional query args only added when provided)
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if reference_pipeline_run_id is not None:
            query_parameters['referencePipelineRunId'] = self._serialize.query("reference_pipeline_run_id", reference_pipeline_run_id, 'str')
        if is_recovery is not None:
            query_parameters['isRecovery'] = self._serialize.query("is_recovery", is_recovery, 'bool')
        if start_activity_name is not None:
            query_parameters['startActivityName'] = self._serialize.query("start_activity_name", start_activity_name, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}
        if parameters is not None:
            body_content = self._serialize.body(parameters, '{object}')
        else:
            body_content = None
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Service acknowledges the new run with 202 Accepted only.
        if response.status_code not in [202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.CloudError, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('CreateRunResponse', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    create_pipeline_run.metadata = {'url': '/pipelines/{pipelineName}/createRun'}
| true | true |
f731d8ca027353b729bc9ed640d9ad7a07e8266f | 1,587 | py | Python | CIM16/IEC61970/Dynamics/SynchronousMachineDynamics.py | MaximeBaudette/PyCIM | d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14 | [
"MIT"
] | null | null | null | CIM16/IEC61970/Dynamics/SynchronousMachineDynamics.py | MaximeBaudette/PyCIM | d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14 | [
"MIT"
] | null | null | null | CIM16/IEC61970/Dynamics/SynchronousMachineDynamics.py | MaximeBaudette/PyCIM | d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14 | [
"MIT"
] | 1 | 2021-04-02T18:04:49.000Z | 2021-04-02T18:04:49.000Z | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# Modified by Gustav Holm (guholm@kth.se) & Francis J. Gomez (fragom@kth.se)
# Modified date: 05/06/2017
from CIM16.IEC61970.Dynamics.RotatingMachineDynamics import RotatingMachineDynamics
class SynchronousMachineDynamics(RotatingMachineDynamics):
    """CIM dynamics class for a synchronous machine, specialising
    RotatingMachineDynamics. It declares no attributes, enums or references
    of its own (see the empty metadata lists below)."""

    def __init__(self, *args, **kw_args):
        """Initialise a 'SynchronousMachineDynamics' instance, forwarding all
        arguments to the RotatingMachineDynamics base class."""
        super(SynchronousMachineDynamics, self).__init__(*args, **kw_args)

    # CIM metadata: no local attributes or associations are defined here.
    _attrs = []
    _attr_types = {}
    _defaults = {}
    _enums = {}
    _refs = []
    _many_refs = []
| 39.675 | 83 | 0.750473 |
from CIM16.IEC61970.Dynamics.RotatingMachineDynamics import RotatingMachineDynamics
class SynchronousMachineDynamics(RotatingMachineDynamics):
    """CIM dynamics class for a synchronous machine, specialising
    RotatingMachineDynamics. It declares no attributes, enums or references
    of its own (see the empty metadata lists below)."""

    def __init__(self, *args, **kw_args):
        """Initialise a 'SynchronousMachineDynamics' instance, forwarding all
        arguments to the RotatingMachineDynamics base class."""
        super(SynchronousMachineDynamics, self).__init__(*args, **kw_args)

    # CIM metadata: no local attributes or associations are defined here.
    _attrs = []
    _attr_types = {}
    _defaults = {}
    _enums = {}
    _refs = []
    _many_refs = []
| true | true |
f731d8f40c75040a81052514bb8660d957209d22 | 9,955 | py | Python | conans/test/functional/layout/test_in_cache.py | thorbenk/conan | 916cb10b99ed92d319cd0719462708ee0501ecd4 | [
"MIT"
] | null | null | null | conans/test/functional/layout/test_in_cache.py | thorbenk/conan | 916cb10b99ed92d319cd0719462708ee0501ecd4 | [
"MIT"
] | 1 | 2019-06-07T03:02:02.000Z | 2019-06-07T03:02:02.000Z | conans/test/functional/layout/test_in_cache.py | thorbenk/conan | 916cb10b99ed92d319cd0719462708ee0501ecd4 | [
"MIT"
] | 1 | 2021-08-20T19:47:51.000Z | 2021-08-20T19:47:51.000Z | import os
import re
import textwrap
import pytest
from conans import load
from conans.model.ref import ConanFileReference, PackageReference
from conans.test.assets.genconanfile import GenConanfile
from conans.test.utils.tools import TestClient
@pytest.fixture
def conanfile():
    """Fixture: recipe text for a package requiring base/1.0 that declares a
    custom layout() with "my_sources"/"my_build" folders, and saves marker
    files in source(), build() and package() so tests can assert where each
    step really ran."""
    conan_file = str(GenConanfile().with_import("from conans import tools").with_import("import os").
                     with_require("base/1.0"))

    conan_file += """
    no_copy_sources = True

    def layout(self):
        self.folders.source = "my_sources"
        self.folders.build = "my_build"

    def source(self):
        self.output.warn("Source folder: {}".format(self.source_folder))
        # The layout describes where the sources are, not force them to be there
        tools.save("my_sources/source.h", "foo")

    def build(self):
        self.output.warn("Build folder: {}".format(self.build_folder))
        tools.save("build.lib", "bar")

    def package(self):
        self.output.warn("Package folder: {}".format(self.package_folder))
        tools.save(os.path.join(self.package_folder, "LICENSE"), "bar")
        self.copy("*.h", dst="include")
        self.copy("*.lib", dst="lib")

    def package_info(self):
        # This will be easier when the layout declares also the includedirs etc
        self.cpp_info.includedirs = ["include"]
        self.cpp_info.libdirs = ["lib"]
    """
    return conan_file
def test_create_test_package_no_layout(conanfile):
    """A test_package recipe using the new generators builds and tests fine
    when no layout() is declared: the generated files land in the build
    folder, where build() can find conan_toolchain.cmake directly."""
    test_recipe = textwrap.dedent("""
        import os
        from conans import ConanFile, tools

        class HelloTestConan(ConanFile):
            settings = "os", "compiler", "build_type", "arch"
            generators = "CMakeDeps", "CMakeToolchain"

            def build(self):
                assert os.path.exists("conan_toolchain.cmake")
                self.output.warn("hey! building")
                self.output.warn(os.getcwd())

            def test(self):
                self.output.warn("hey! testing")
        """)
    tc = TestClient()
    tc.save({"conanfile.py": GenConanfile(),
             "test_package/conanfile.py": test_recipe})
    tc.run("create . lib/1.0@")
    for expected in ("hey! building", "hey! testing"):
        assert expected in tc.out
def test_create_test_package_with_layout(conanfile):
    """A test_package recipe that declares a layout() with a custom
    generators folder finds its generated files there during build()
    and runs its test() step correctly."""
    test_recipe = textwrap.dedent("""
        import os
        from conans import ConanFile, tools
        from conan.tools.cmake import CMakeToolchain, CMake, CMakeDeps

        class HelloTestConan(ConanFile):
            settings = "os", "compiler", "build_type", "arch"

            def generate(self):
                deps = CMakeDeps(self)
                deps.generate()
                tc = CMakeToolchain(self)
                tc.generate()

            def layout(self):
                self.folders.generators = "my_generators"

            def build(self):
                assert os.path.exists("my_generators/conan_toolchain.cmake")
                self.output.warn("hey! building")
                self.output.warn(os.getcwd())

            def test(self):
                self.output.warn("hey! testing")
        """)
    tc = TestClient()
    tc.save({"conanfile.py": GenConanfile(),
             "test_package/conanfile.py": test_recipe})
    tc.run("create . lib/1.0@")
    for expected in ("hey! building", "hey! testing"):
        assert expected in tc.out
def test_cache_in_layout(conanfile):
    """The layout in the cache is used too, always relative to the "base" folders that the cache
    requires. But by the default, the "package" is not followed
    """
    client = TestClient()
    client.save({"conanfile.py": GenConanfile()})
    client.run("create . base/1.0@")
    client.save({"conanfile.py": conanfile})
    client.run("create . lib/1.0@")
    # Recover the binary package_id from the `create` command output
    package_id = re.search(r"lib/1.0:(\S+)", str(client.out)).group(1)
    ref = ConanFileReference.loads("lib/1.0@")
    pref = PackageReference(ref, package_id)
    # Base source/build/package folders assigned by the cache for this ref
    sf = client.cache.package_layout(ref).source()
    bf = client.cache.package_layout(ref).build(pref)
    pf = client.cache.package_layout(ref).package(pref)
    # The layout() subfolders ("my_sources"/"my_build") hang from the base ones
    source_folder = os.path.join(sf, "my_sources")
    build_folder = os.path.join(bf, "my_build")
    # Check folders match with the declared by the layout
    assert "Source folder: {}".format(source_folder) in client.out
    assert "Build folder: {}".format(build_folder) in client.out
    # Check the source folder
    assert os.path.exists(os.path.join(source_folder, "source.h"))
    # Check the build folder
    assert os.path.exists(os.path.join(build_folder, "build.lib"))
    # Check the conaninfo
    assert os.path.exists(os.path.join(pf, "conaninfo.txt"))
    # Search the package in the cache
    client.run("search lib/1.0@")
    assert "Package_ID: {}".format(package_id) in client.out
    # Install the package and check the build info
    client.run("install lib/1.0@ -g txt")
    binfopath = os.path.join(client.current_folder, "conanbuildinfo.txt")
    # Normalize Windows line endings before matching multi-line fragments
    content = load(binfopath).replace("\r\n", "\n")
    assert "[includedirs]\n{}".format(os.path.join(pf, "include")
                                      .replace("\\", "/")) in content
    assert "[libdirs]\n{}".format(os.path.join(pf, "lib")
                                  .replace("\\", "/")) in content
def test_same_conanfile_local(conanfile):
    """Local flow (source/build/package with -if) follows the layout() folders
    for source and build; 'conan package' keeps the classic ./package output."""
    client = TestClient()
    client.save({"conanfile.py": GenConanfile()})
    client.run("create . base/1.0@")
    client.save({"conanfile.py": conanfile})
    expected_source = os.path.join(client.current_folder, "my_sources")
    expected_build = os.path.join(client.current_folder, "my_build")
    client.run("install . lib/1.0@ -if=install")
    client.run("source . -if=install")
    assert "Source folder: {}".format(expected_source) in client.out
    assert os.path.exists(os.path.join(expected_source, "source.h"))
    client.run("build . -if=install")
    assert "Build folder: {}".format(expected_build) in client.out
    assert os.path.exists(os.path.join(expected_build, "build.lib"))
    client.run("package . -if=install")
    # By default, the "package" folder is still used (not breaking)
    package_folder = os.path.join(client.current_folder, "package")
    assert "Package folder: {}".format(package_folder) in client.out
    assert os.path.exists(os.path.join(package_folder, "LICENSE"))
def test_imports():
    """The 'conan imports' follows the layout: self.folders.imports controls
    where imported files (the DLL from the dependency) are copied."""
    client = TestClient()
    # Hello to be reused
    conan_file = str(GenConanfile().with_import("from conans import tools"))
    conan_file += """
    no_copy_source = True
    def build(self):
        tools.save("library.dll", "bar")
        tools.save("generated.h", "bar")
    def package(self):
        self.copy("*.h")
        self.copy("*.dll")
    """
    client.save({"conanfile.py": conan_file})
    client.run("create . hello/1.0@")
    # Consumer of the hello importing the shared
    conan_file = str(GenConanfile().with_import("from conans import tools").with_import("import os"))
    conan_file += """
    no_copy_source = True
    requires = "hello/1.0"
    settings = "build_type"
    def layout(self):
        self.folders.build = "cmake-build-{}".format(str(self.settings.build_type).lower())
        self.folders.imports = os.path.join(self.folders.build, "my_imports")
    def imports(self):
        self.output.warn("Imports folder: {}".format(self.imports_folder))
        self.copy("*.dll")
    def build(self):
        assert self.build_folder != self.imports_folder
        assert "cmake-build-release" in self.build_folder
        assert os.path.exists(os.path.join(self.imports_folder, "library.dll"))
        assert os.path.exists(os.path.join(self.build_folder, "my_imports", "library.dll"))
        self.output.warn("Built and imported!")
    """
    client.save({"conanfile.py": conan_file})
    client.run("create . consumer/1.0@ ")
    # The in-recipe asserts above all passed if build() reached its last line
    assert "Built and imported!" in client.out
def test_cpp_package():
    """The cpp.package information declared in layout() is propagated to
    consumers, both through self.dependencies cpp_info and the CMakeDeps
    generated data file."""
    client = TestClient()
    conan_hello = textwrap.dedent("""
        import os
        from conans import ConanFile
        from conan.tools.files import save
        class Pkg(ConanFile):
            def package(self):
                save(self, os.path.join(self.package_folder, "foo/include/foo.h"), "")
                save(self, os.path.join(self.package_folder,"foo/libs/foo.lib"), "")
            def layout(self):
                self.cpp.package.includedirs = ["foo/include"]
                self.cpp.package.libdirs = ["foo/libs"]
                self.cpp.package.libs = ["foo"]
        """)
    client.save({"conanfile.py": conan_hello})
    client.run("create . hello/1.0@")
    conan_consumer = textwrap.dedent("""
        from conans import ConanFile
        class HelloTestConan(ConanFile):
            settings = "os", "compiler", "build_type", "arch"
            requires = "hello/1.0"
            generators = "CMakeDeps"
            def generate(self):
                info = self.dependencies["hello"].cpp_info
                self.output.warn("**includedirs:{}**".format(info.includedirs))
                self.output.warn("**libdirs:{}**".format(info.libdirs))
                self.output.warn("**libs:{}**".format(info.libs))
        """)
    client.save({"conanfile.py": conan_consumer})
    client.run("install .")
    # The consumer sees exactly the dirs/libs declared in layout()
    assert "**includedirs:['foo/include']**" in client.out
    assert "**libdirs:['foo/libs']**" in client.out
    assert "**libs:['foo']**" in client.out
    # The CMakeDeps data file uses the same paths, relative to the package folder
    cmake = client.load("hello-release-x86_64-data.cmake")
    assert 'set(hello_INCLUDE_DIRS_RELEASE "${hello_PACKAGE_FOLDER_RELEASE}/foo/include")' in cmake
    assert 'set(hello_LIB_DIRS_RELEASE "${hello_PACKAGE_FOLDER_RELEASE}/foo/libs")' in cmake
    assert 'set(hello_LIBS_RELEASE foo)' in cmake
| 36.465201 | 101 | 0.631241 | import os
import re
import textwrap
import pytest
from conans import load
from conans.model.ref import ConanFileReference, PackageReference
from conans.test.assets.genconanfile import GenConanfile
from conans.test.utils.tools import TestClient
@pytest.fixture
def conanfile():
conan_file = str(GenConanfile().with_import("from conans import tools").with_import("import os").
with_require("base/1.0"))
conan_file += """
no_copy_sources = True
def layout(self):
self.folders.source = "my_sources"
self.folders.build = "my_build"
def source(self):
self.output.warn("Source folder: {}".format(self.source_folder))
# The layout describes where the sources are, not force them to be there
tools.save("my_sources/source.h", "foo")
def build(self):
self.output.warn("Build folder: {}".format(self.build_folder))
tools.save("build.lib", "bar")
def package(self):
self.output.warn("Package folder: {}".format(self.package_folder))
tools.save(os.path.join(self.package_folder, "LICENSE"), "bar")
self.copy("*.h", dst="include")
self.copy("*.lib", dst="lib")
def package_info(self):
# This will be easier when the layout declares also the includedirs etc
self.cpp_info.includedirs = ["include"]
self.cpp_info.libdirs = ["lib"]
"""
return conan_file
def test_create_test_package_no_layout(conanfile):
client = TestClient()
conanfile_test = textwrap.dedent("""
import os
from conans import ConanFile, tools
class HelloTestConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
generators = "CMakeDeps", "CMakeToolchain"
def build(self):
assert os.path.exists("conan_toolchain.cmake")
self.output.warn("hey! building")
self.output.warn(os.getcwd())
def test(self):
self.output.warn("hey! testing")
""")
client.save({"conanfile.py": GenConanfile(), "test_package/conanfile.py": conanfile_test})
client.run("create . lib/1.0@")
assert "hey! building" in client.out
assert "hey! testing" in client.out
def test_create_test_package_with_layout(conanfile):
client = TestClient()
conanfile_test = textwrap.dedent("""
import os
from conans import ConanFile, tools
from conan.tools.cmake import CMakeToolchain, CMake, CMakeDeps
class HelloTestConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
def generate(self):
deps = CMakeDeps(self)
deps.generate()
tc = CMakeToolchain(self)
tc.generate()
def layout(self):
self.folders.generators = "my_generators"
def build(self):
assert os.path.exists("my_generators/conan_toolchain.cmake")
self.output.warn("hey! building")
self.output.warn(os.getcwd())
def test(self):
self.output.warn("hey! testing")
""")
client.save({"conanfile.py": GenConanfile(), "test_package/conanfile.py": conanfile_test})
client.run("create . lib/1.0@")
assert "hey! building" in client.out
assert "hey! testing" in client.out
def test_cache_in_layout(conanfile):
client = TestClient()
client.save({"conanfile.py": GenConanfile()})
client.run("create . base/1.0@")
client.save({"conanfile.py": conanfile})
client.run("create . lib/1.0@")
package_id = re.search(r"lib/1.0:(\S+)", str(client.out)).group(1)
ref = ConanFileReference.loads("lib/1.0@")
pref = PackageReference(ref, package_id)
sf = client.cache.package_layout(ref).source()
bf = client.cache.package_layout(ref).build(pref)
pf = client.cache.package_layout(ref).package(pref)
source_folder = os.path.join(sf, "my_sources")
build_folder = os.path.join(bf, "my_build")
assert "Source folder: {}".format(source_folder) in client.out
assert "Build folder: {}".format(build_folder) in client.out
assert os.path.exists(os.path.join(source_folder, "source.h"))
assert os.path.exists(os.path.join(build_folder, "build.lib"))
assert os.path.exists(os.path.join(pf, "conaninfo.txt"))
client.run("search lib/1.0@")
assert "Package_ID: {}".format(package_id) in client.out
client.run("install lib/1.0@ -g txt")
binfopath = os.path.join(client.current_folder, "conanbuildinfo.txt")
content = load(binfopath).replace("\r\n", "\n")
assert "[includedirs]\n{}".format(os.path.join(pf, "include")
.replace("\\", "/")) in content
assert "[libdirs]\n{}".format(os.path.join(pf, "lib")
.replace("\\", "/")) in content
def test_same_conanfile_local(conanfile):
client = TestClient()
client.save({"conanfile.py": GenConanfile()})
client.run("create . base/1.0@")
client.save({"conanfile.py": conanfile})
source_folder = os.path.join(client.current_folder, "my_sources")
build_folder = os.path.join(client.current_folder, "my_build")
client.run("install . lib/1.0@ -if=install")
client.run("source . -if=install")
assert "Source folder: {}".format(source_folder) in client.out
assert os.path.exists(os.path.join(source_folder, "source.h"))
client.run("build . -if=install")
assert "Build folder: {}".format(build_folder) in client.out
assert os.path.exists(os.path.join(build_folder, "build.lib"))
client.run("package . -if=install")
pf = os.path.join(client.current_folder, "package")
assert "Package folder: {}".format(pf) in client.out
assert os.path.exists(os.path.join(pf, "LICENSE"))
def test_imports():
client = TestClient()
conan_file = str(GenConanfile().with_import("from conans import tools"))
conan_file += """
no_copy_source = True
def build(self):
tools.save("library.dll", "bar")
tools.save("generated.h", "bar")
def package(self):
self.copy("*.h")
self.copy("*.dll")
"""
client.save({"conanfile.py": conan_file})
client.run("create . hello/1.0@")
conan_file = str(GenConanfile().with_import("from conans import tools").with_import("import os"))
conan_file += """
no_copy_source = True
requires = "hello/1.0"
settings = "build_type"
def layout(self):
self.folders.build = "cmake-build-{}".format(str(self.settings.build_type).lower())
self.folders.imports = os.path.join(self.folders.build, "my_imports")
def imports(self):
self.output.warn("Imports folder: {}".format(self.imports_folder))
self.copy("*.dll")
def build(self):
assert self.build_folder != self.imports_folder
assert "cmake-build-release" in self.build_folder
assert os.path.exists(os.path.join(self.imports_folder, "library.dll"))
assert os.path.exists(os.path.join(self.build_folder, "my_imports", "library.dll"))
self.output.warn("Built and imported!")
"""
client.save({"conanfile.py": conan_file})
client.run("create . consumer/1.0@ ")
assert "Built and imported!" in client.out
def test_cpp_package():
client = TestClient()
conan_hello = textwrap.dedent("""
import os
from conans import ConanFile
from conan.tools.files import save
class Pkg(ConanFile):
def package(self):
save(self, os.path.join(self.package_folder, "foo/include/foo.h"), "")
save(self, os.path.join(self.package_folder,"foo/libs/foo.lib"), "")
def layout(self):
self.cpp.package.includedirs = ["foo/include"]
self.cpp.package.libdirs = ["foo/libs"]
self.cpp.package.libs = ["foo"]
""")
client.save({"conanfile.py": conan_hello})
client.run("create . hello/1.0@")
conan_consumer = textwrap.dedent("""
from conans import ConanFile
class HelloTestConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
requires = "hello/1.0"
generators = "CMakeDeps"
def generate(self):
info = self.dependencies["hello"].cpp_info
self.output.warn("**includedirs:{}**".format(info.includedirs))
self.output.warn("**libdirs:{}**".format(info.libdirs))
self.output.warn("**libs:{}**".format(info.libs))
""")
client.save({"conanfile.py": conan_consumer})
client.run("install .")
assert "**includedirs:['foo/include']**" in client.out
assert "**libdirs:['foo/libs']**" in client.out
assert "**libs:['foo']**" in client.out
cmake = client.load("hello-release-x86_64-data.cmake")
assert 'set(hello_INCLUDE_DIRS_RELEASE "${hello_PACKAGE_FOLDER_RELEASE}/foo/include")' in cmake
assert 'set(hello_LIB_DIRS_RELEASE "${hello_PACKAGE_FOLDER_RELEASE}/foo/libs")' in cmake
assert 'set(hello_LIBS_RELEASE foo)' in cmake
| true | true |
f731d92f11113ea1707e78d82712af11992ee782 | 1,834 | py | Python | plugins/digitalocean/komand_digitalocean/actions/shutdown_droplet/action.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 46 | 2019-06-05T20:47:58.000Z | 2022-03-29T10:18:01.000Z | plugins/digitalocean/komand_digitalocean/actions/shutdown_droplet/action.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 386 | 2019-06-07T20:20:39.000Z | 2022-03-30T17:35:01.000Z | plugins/digitalocean/komand_digitalocean/actions/shutdown_droplet/action.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 43 | 2019-07-09T14:13:58.000Z | 2022-03-28T12:04:46.000Z | import komand
import json
import requests
from .schema import ShutdownDropletInput, ShutdownDropletOutput
class ShutdownDroplet(komand.Action):
    """Komand action that shuts down a DigitalOcean droplet via the v2 API."""

    def __init__(self):
        super(self.__class__, self).__init__(
            name="shutdown_droplet",
            description="Shuts down the droplet from a specified image",
            input=ShutdownDropletInput(),
            output=ShutdownDropletOutput(),
        )

    def run(self, params={}):
        """Post a 'shutdown' action for the droplet given in ``params``.

        Returns {"success": True} on HTTP 201; raises on any other status
        code or on a transport-level request failure.
        """
        url = "https://api.digitalocean.com/v2/droplets/{droplet_id}/actions"
        droplet_id = str(params["droplet_id"])
        payload = {"type": "shutdown"}

        try:
            response = requests.post(
                headers=self.connection.headers,
                url=url.format(droplet_id=droplet_id),
                data=json.dumps(payload),
            )
            if response.status_code == 201:
                return {"success": True}
            self.logger.error("Status code: %s, message: %s", response.status_code, response.json()["message"])
            # Bug fix: the Exception was previously constructed but never
            # raised, so API failures silently returned None.
            raise Exception("Non-201 status code received")
        except requests.exceptions.RequestException:
            self.logger.error("An unexpected error occurred during the API request")
            raise

    def test(self):
        """Connectivity test: validate credentials against /v2/account."""
        url = "https://api.digitalocean.com/v2/account"
        try:
            response = requests.get(headers=self.connection.headers, url=url)
            if response.status_code == 200:
                return {}
            self.logger.error("Status code: %s, message: %s", response.status_code, response.json()["message"])
            # Bug fix: raise the exception (it was previously discarded).
            raise Exception("Non-200 status code received")
        except requests.exceptions.RequestException:
            # NOTE(review): transport errors are logged but not re-raised here,
            # matching the original behavior (test() returned None) — confirm
            # whether the plugin framework expects a raise instead.
            self.logger.error("An unexpected error occurred during the API request")
import json
import requests
from .schema import ShutdownDropletInput, ShutdownDropletOutput
class ShutdownDroplet(komand.Action):
def __init__(self):
super(self.__class__, self).__init__(
name="shutdown_droplet",
description="Shuts down the droplet from a specified image",
input=ShutdownDropletInput(),
output=ShutdownDropletOutput(),
)
def run(self, params={}):
url = "https://api.digitalocean.com/v2/droplets/{droplet_id}/actions"
droplet_id = str(params["droplet_id"])
payload = {"type": "shutdown"}
try:
response = requests.post(
headers=self.connection.headers,
url=url.format(droplet_id=droplet_id),
data=json.dumps(payload),
)
if response.status_code == 201:
return {"success": True}
else:
self.logger.error("Status code: %s, message: %s", response.status_code, response.json()["message"])
Exception("Non-201 status code received")
except requests.exceptions.RequestException:
self.logger.error("An unexpected error occurred during the API request")
raise
def test(self):
url = "https://api.digitalocean.com/v2/account"
try:
response = requests.get(headers=self.connection.headers, url=url)
if response.status_code == 200:
return {}
else:
self.logger.error("Status code: %s, message: %s", response.status_code, response.json()["message"])
Exception("Non-200 status code received")
except requests.exceptions.RequestException:
self.logger.error("An unexpected error occurred during the API request")
| true | true |
f731d9abc87872b2a6859052c5d9bddd62dc2bbf | 7,144 | py | Python | statsmodels/regression/feasible_gls.py | haribharadwaj/statsmodels | 8675b890607fe6f116b1186dcba4c387c5e3778a | [
"BSD-3-Clause"
] | 15 | 2015-03-03T09:47:42.000Z | 2022-01-05T18:28:31.000Z | statsmodels/regression/feasible_gls.py | haribharadwaj/statsmodels | 8675b890607fe6f116b1186dcba4c387c5e3778a | [
"BSD-3-Clause"
] | 7 | 2015-11-20T08:33:04.000Z | 2020-07-24T19:34:39.000Z | statsmodels/regression/feasible_gls.py | haribharadwaj/statsmodels | 8675b890607fe6f116b1186dcba4c387c5e3778a | [
"BSD-3-Clause"
] | 14 | 2015-01-06T22:08:34.000Z | 2021-01-01T16:33:23.000Z | # -*- coding: utf-8 -*-
"""
Created on Tue Dec 20 20:24:20 2011
Author: Josef Perktold
License: BSD-3
"""
from statsmodels.compat.python import range
import numpy as np
import statsmodels.base.model as base
from statsmodels.regression.linear_model import OLS, GLS, WLS, RegressionResults
def atleast_2dcols(x):
    """Return ``x`` as an ndarray with at least two dimensions.

    A 1d input becomes a single column of shape (n, 1); inputs that are
    already 2d (or higher) are returned unchanged (as an ndarray view).
    """
    arr = np.asarray(x)
    return arr[:, None] if arr.ndim == 1 else arr
class GLSHet2(GLS):
    '''WLS with heteroscedasticity that depends on explanatory variables

    note: mixing GLS sigma and weights for heteroscedasticity might not make
    sense

    I think rewriting following the pattern of GLSAR is better
    stopping criteria: improve in GLSAR also, e.g. change in rho
    '''

    def __init__(self, endog, exog, exog_var, sigma=None):
        # exog_var: regressors that explain the error variance, forced to 2d columns
        self.exog_var = atleast_2dcols(exog_var)
        super(self.__class__, self).__init__(endog, exog, sigma=sigma)

    def fit(self, lambd=1.):
        # `lambd` is currently unused; kept for interface compatibility
        #maybe iterate
        #preliminary estimate
        res_gls = GLS(self.endog, self.exog, sigma=self.sigma).fit()
        # estimate the variance by regressing squared GLS residuals on exog_var
        res_resid = OLS(res_gls.resid**2, self.exog_var).fit()
        #or log-link
        #res_resid = OLS(np.log(res_gls.resid**2), self.exog_var).fit()

        #here I could use whiten and current instance instead of delegating
        #but this is easier
        #see pattern of GLSAR, calls self.initialize and self.fit
        # second step: WLS weighted by the inverse of the fitted variances
        res_wls = WLS(self.endog, self.exog, weights=1./res_resid.fittedvalues).fit()

        # attach the auxiliary variance regression for inspection by callers
        res_wls._results.results_residual_regression = res_resid
        return res_wls
class GLSHet(WLS):
    """
    A regression model with an estimated heteroscedasticity.

    A subclass of WLS, that additionally estimates the weight matrix as a
    function of additional explanatory variables.

    Parameters
    ----------
    endog : array_like
    exog : array_like
    exog_var : array_like, 1d or 2d
        regressors, explanatory variables for the variance
    weights : array_like or None
        If weights are given, then they are used in the first step estimation.
    link : link function or None
        If None, then the variance is assumed to be a linear combination of
        the exog_var. If given, then ... not tested yet

    *extra attributes*

    history : dict
        contains the parameter estimates in both regression for each iteration

    result instance has

    results_residual_regression : OLS result instance
        result of heteroscedasticity estimation

    except for fit_iterative all methods are inherited from WLS.

    Notes
    -----
    GLSHet is considered to be experimental.

    `fit` is just standard WLS fit for fixed weights
    `fit_iterative` updates the estimate for weights, see its docstring

    The two alternative for handling heteroscedasticity in the data are to
    use heteroscedasticity robust standard errors or estimating the
    heteroscedasticity
    Estimating heteroscedasticity and using weighted least squares produces
    smaller confidence intervals for the estimated parameters then the
    heteroscedasticity robust standard errors if the heteroscedasticity is
    correctly specified. If the heteroscedasticity is incorrectly specified
    then the estimated covariance is inconsistent.

    Stock and Watson for example argue in favor of using OLS with
    heteroscedasticity robust standard errors instead of GLSHet sind we are
    seldom sure enough about the correct specification (in economics).

    GLSHet has asymptotically the same distribution as WLS if the true
    weights are know. In both cases the asymptotic distribution of the
    parameter estimates is the normal distribution.

    The assumption of the model:

    y = X*beta + u,
    with E(u) = 0, E(X*u)=0, var(u_i) = z_i*gamma
    or for vector of all observations Sigma = diag(Z*gamma)

    where
    y : endog (nobs)
    X : exog (nobs, k_vars)
    Z : exog_var (nobs, k_vars2)
    beta, gamma estimated parameters

    If a link is specified, then the heteroscedasticity is

    var(u_i) = link.inverse(z_i*gamma), or
    link(var(u_i)) = z_i*gamma

    for example for log-linkg
    var(u_i) = exp(z_i*gamma)

    Usage : see example ....

    TODO: test link option
    """

    def __init__(self, endog, exog, exog_var=None, weights=None, link=None):
        # regressors explaining the error variance, forced to 2d columns
        self.exog_var = atleast_2dcols(exog_var)
        if weights is None:
            # default: homoscedastic first-step estimation
            weights = np.ones(endog.shape)
        if link is not None:
            self.link = link
            self.linkinv = link.inverse   #as defined in families.links
        else:
            self.link = lambda x: x #no transformation
            self.linkinv = lambda x: x

        super(self.__class__, self).__init__(endog, exog, weights=weights)

    def iterative_fit(self, maxiter=3):
        """
        Perform an iterative two-step procedure to estimate a WLS model.

        The model is assumed to have heteroscedastic errors.
        The variance is estimated by OLS regression of the link transformed
        squared residuals on Z, i.e.::

           link(sigma_i) = x_i*gamma.

        Parameters
        ----------
        maxiter : integer, optional
            the number of iterations

        Notes
        -----
        maxiter=1: returns the estimated based on given weights
        maxiter=2: performs a second estimation with the updated weights,
                   this is 2-step estimation
        maxiter>2: iteratively estimate and update the weights

        TODO: possible extension stop iteration if change in parameter
            estimates is smaller than x_tol

        Repeated calls to fit_iterative, will do one redundant pinv_wexog
        calculation. Calling fit_iterative(maxiter) ones does not do any
        redundant recalculations (whitening or calculating pinv_wexog).
        """
        import collections
        self.history = collections.defaultdict(list) #not really necessary
        res_resid = None  #if maxiter < 2 no updating
        for i in range(maxiter):
            #pinv_wexog is cached, delete it to force recomputation with new weights
            if hasattr(self, 'pinv_wexog'):
                del self.pinv_wexog
            #self.initialize()
            #print 'wls self',
            results = self.fit()
            self.history['self_params'].append(results.params)
            if not i == maxiter-1:  #skip for last iteration, could break instead
                #print 'ols',
                self.results_old = results #for debugging
                #estimate heteroscedasticity from the (link-transformed) squared residuals
                res_resid = OLS(self.link(results.resid**2), self.exog_var).fit()
                self.history['ols_params'].append(res_resid.params)
                #update weights as the inverse of the fitted variances
                self.weights = 1./self.linkinv(res_resid.fittedvalues)
                self.weights /= self.weights.max()  #not required
                self.weights[self.weights < 1e-14] = 1e-14  #clip to avoid division issues
                #print 'in iter', i, self.weights.var() #debug, do weights change
                self.initialize()

        #note results is the wrapper, results._results is the results instance
        results._results.results_residual_regression = res_resid
        return results
| 34.181818 | 85 | 0.663074 |
from statsmodels.compat.python import range
import numpy as np
import statsmodels.base.model as base
from statsmodels.regression.linear_model import OLS, GLS, WLS, RegressionResults
def atleast_2dcols(x):
x = np.asarray(x)
if x.ndim == 1:
x = x[:,None]
return x
class GLSHet2(GLS):
def __init__(self, endog, exog, exog_var, sigma=None):
self.exog_var = atleast_2dcols(exog_var)
super(self.__class__, self).__init__(endog, exog, sigma=sigma)
def fit(self, lambd=1.):
res_gls = GLS(self.endog, self.exog, sigma=self.sigma).fit()
res_resid = OLS(res_gls.resid**2, self.exog_var).fit()
res_wls = WLS(self.endog, self.exog, weights=1./res_resid.fittedvalues).fit()
res_wls._results.results_residual_regression = res_resid
return res_wls
class GLSHet(WLS):
    """WLS with heteroscedasticity estimated from additional regressors.

    The weights are estimated iteratively by regressing the link-transformed
    squared residuals on `exog_var`; see `iterative_fit`.
    """

    def __init__(self, endog, exog, exog_var=None, weights=None, link=None):
        # regressors explaining the error variance, forced to 2d columns
        self.exog_var = atleast_2dcols(exog_var)
        if weights is None:
            # default: homoscedastic first-step estimation
            weights = np.ones(endog.shape)
        if link is not None:
            self.link = link
            self.linkinv = link.inverse
        else:
            self.link = lambda x: x
            self.linkinv = lambda x: x
        super(self.__class__, self).__init__(endog, exog, weights=weights)

    def iterative_fit(self, maxiter=3):
        """Two-step/iterative WLS: re-estimate weights from the residual
        variance regression on each iteration (last iteration fits only).

        Parameters
        ----------
        maxiter : int, optional
            Number of fit/update iterations.
        """
        import collections
        self.history = collections.defaultdict(list)
        res_resid = None
        for i in range(maxiter):
            # drop the cached pseudo-inverse so fit() recomputes with new weights
            if hasattr(self, 'pinv_wexog'):
                del self.pinv_wexog
            results = self.fit()
            self.history['self_params'].append(results.params)
            if not i == maxiter-1:
                self.results_old = results
                # estimate heteroscedasticity from link-transformed squared residuals
                res_resid = OLS(self.link(results.resid**2), self.exog_var).fit()
                self.history['ols_params'].append(res_resid.params)
                self.weights = 1./self.linkinv(res_resid.fittedvalues)
                self.weights /= self.weights.max()
                self.weights[self.weights < 1e-14] = 1e-14
                # Bug fix: this line was garbled as "tialize()" in the source;
                # restored to self.initialize() per the primary copy of this class.
                self.initialize()
        results._results.results_residual_regression = res_resid
        return results
| true | true |
f731da5a81c641e31e53c884e6c9639b29b76505 | 9,408 | py | Python | openmixup/models/heads/mim_head.py | Westlake-AI/openmixup | ea81250819e740dd823e30cb7ce382d14a3c1b91 | [
"Apache-2.0"
] | 10 | 2021-12-30T10:22:27.000Z | 2022-03-30T02:31:38.000Z | openmixup/models/heads/mim_head.py | Westlake-AI/openmixup | ea81250819e740dd823e30cb7ce382d14a3c1b91 | [
"Apache-2.0"
] | 3 | 2022-01-20T21:02:48.000Z | 2022-03-19T13:49:45.000Z | openmixup/models/heads/mim_head.py | Westlake-AI/openmixup | ea81250819e740dd823e30cb7ce382d14a3c1b91 | [
"Apache-2.0"
] | null | null | null | import torch
import torch.nn as nn
from mmcv.runner import BaseModule
from torch.nn import functional as F
from mmcv.cnn.utils.weight_init import trunc_normal_init
from ..builder import build_loss
from ..registry import HEADS
from .cls_head import ClsHead
from openmixup.utils import print_log
@HEADS.register_module
class MAEPretrainHead(BaseModule):
    """Pre-training head for MAE.

    Args:
        norm_pix (bool): Whether to normalize the reconstruction target
            per patch. Defaults to False.
        patch_size (int): Patch size. Defaults to 16.
    """

    def __init__(self, norm_pix=False, patch_size=16):
        super(MAEPretrainHead, self).__init__()
        self.norm_pix = norm_pix
        self.patch_size = patch_size

    def patchify(self, imgs):
        """Split square (N, 3, H, W) images into (N, L, p*p*3) patch tokens."""
        p = self.patch_size
        # requires square images with side divisible by the patch size
        assert imgs.shape[2] == imgs.shape[3] and imgs.shape[2] % p == 0
        grid = imgs.shape[2] // p
        patches = imgs.reshape(shape=(imgs.shape[0], 3, grid, p, grid, p))
        patches = torch.einsum('nchpwq->nhwpqc', patches)
        return patches.reshape(shape=(imgs.shape[0], grid * grid, p**2 * 3))

    def forward(self, x, x_rec, mask):
        """Masked-patch MSE between the prediction and the patchified target."""
        target = self.patchify(x)
        if self.norm_pix:
            # normalize each target patch to zero mean / unit variance
            mu = target.mean(dim=-1, keepdim=True)
            var = target.var(dim=-1, keepdim=True)
            target = (target - mu) / (var + 1.e-6)**.5

        per_patch = ((x_rec - target)**2).mean(dim=-1)
        # average the loss over masked patches only
        masked_loss = (per_patch * mask).sum() / mask.sum()
        return dict(loss=masked_loss)
@HEADS.register_module()
class MAEFinetuneHead(ClsHead):
    """Fine-tuning head for MAE.

    Args:
        embed_dim (int): The dim of the feature before the classifier head.
        num_classes (int): The total classes. Defaults to 1000.
    """

    def __init__(self, **kwargs):
        super(MAEFinetuneHead, self).__init__(**kwargs)

    def init_weights(self):
        # truncated-normal init with a very small std for all linear layers
        for module in self.modules():
            if isinstance(module, nn.Linear):
                trunc_normal_init(module, std=2e-5, bias=0)

    def forward(self, x):
        """Return the classification logits for a single-element feature list."""
        assert isinstance(x, (tuple, list)) and len(x) == 1
        return [self.fc(x[0])]
@HEADS.register_module()
class MAELinprobeHead(ClsHead):
    """Linear probing head for MAE.

    Args:
        in_channels (int): The dim of the feature before the classifier head.
            NOTE(review): the default 786 looks like a typo for 768 (ViT-Base
            embed dim) — confirm before relying on the default.
        num_classes (int): The total classes. Defaults to 1000.
    """

    def __init__(self, in_channels=786, **kwargs):
        super(MAELinprobeHead, self).__init__(in_channels=in_channels, **kwargs)
        # affine-free BatchNorm applied to features before the classifier
        self.bn = nn.BatchNorm1d(in_channels, affine=False, eps=1e-6)

    def init_weights(self):
        for module in self.modules():
            if isinstance(module, nn.Linear):
                trunc_normal_init(module, std=0.01, bias=0)

    def forward(self, x):
        """Return the classification logits for a single-element feature list."""
        assert isinstance(x, (tuple, list)) and len(x) == 1
        return [self.fc(self.bn(x[0]))]
@HEADS.register_module
class SimMIMHead(BaseModule):
    """Pretrain Head for SimMIM.

    Args:
        encoder_in_channels (int): Number of input channels for encoder.
    """

    def __init__(self, encoder_in_channels=3):
        super(SimMIMHead, self).__init__()
        self.encoder_in_channels = encoder_in_channels

    def forward(self, x, x_rec, mask):
        """Masked L1 reconstruction loss between target `x` and `x_rec`."""
        # bring the patch-level mask up to pixel resolution
        scale_h = x.size(2) / mask.size(1)
        scale_w = x.size(3) / mask.size(2)
        if scale_h > 1:
            mask = mask.repeat_interleave(int(scale_h), 1).repeat_interleave(
                int(scale_w), 2).unsqueeze(1).contiguous()
        else:
            mask = F.interpolate(mask.type_as(x).unsqueeze(1),
                                 scale_factor=(scale_h, scale_w), mode="nearest")

        pixel_loss = F.l1_loss(x_rec, x, reduction='none')
        # average over masked pixels, normalized by the channel count
        masked_loss = (pixel_loss * mask).sum() / (mask.sum() +
                                                   1e-5) / self.encoder_in_channels
        return dict(loss=masked_loss)
@HEADS.register_module
class MIMHead(BaseModule):
"""Head for A2MIM training.
Args:
loss (dict): Config of regression loss.
encoder_in_channels (int): Number of input channels for encoder.
unmask_weight (float): Loss weight for unmasked patches.
fft_weight (float): Loss weight for the fft prediction loss. Default to 0.
fft_reweight (bool): Whether to use the fft reweight loss. Default to False.
fft_focal (bool): Whether to adopt the focal fft loss. Default to False.
fft_unmask_replace (str): Mode to replace (detach) unmask patches for the fft
loss, in {None, 'target', 'prediction', 'mean', 'mixed',}.
fft_unmask_weight (float): Loss weight to caculate the fft loss on unmask
tokens. Default to 0.
"""
def __init__(self,
loss=dict(
type='RegressionLoss', loss_weight=1.0, mode="l1_loss"),
encoder_in_channels=3,
unmask_weight=0,
fft_weight=0,
fft_reweight=False,
fft_focal=False,
fft_unmask_replace=None,
fft_unmask_weight=0,
**kwargs,
):
super(MIMHead, self).__init__()
self.encoder_in_channels = encoder_in_channels
self.unmask_weight = unmask_weight
self.fft_weight = fft_weight
self.fft_reweight = fft_reweight
self.fft_focal = fft_focal
self.fft_unmask_weight = fft_unmask_weight
self.fft_unmask_replace = fft_unmask_replace
assert fft_unmask_replace in [None, 'target', 'prediction', 'mean', 'mixed',]
assert 0 <= unmask_weight <= 1 and 0 <= fft_unmask_weight <= 1
if self.unmask_weight < 1:
if fft_unmask_replace is None and fft_weight > 0:
self.fft_unmask_replace = 'target'
print_log("When using the fft loss, `fft_unmask_replace` should " + \
"not be None. Reset as `fft_unmask_replace='target'`.")
# spatial loss
assert loss is None or isinstance(loss, dict)
if loss is None:
loss = dict(
type='RegressionLoss', loss_weight=1.0, mode="l1_loss")
self.criterion = build_loss(loss)
# fft loss
if fft_focal:
fft_loss = dict(
type='FocalFrequencyLoss', loss_weight=1.0, alpha=1.0,
ave_spectrum=True, log_matrix=True, batch_matrix=True)
else:
fft_loss = loss
if loss["mode"] not in ["l1_loss", "mse_loss", "focal_l1_loss", "focal_mse_loss",]:
fft_loss['mode'] = "l1_loss"
self.fft_loss = build_loss(fft_loss)
def forward(self, x, x_rec, mask):
    """Compute the masked reconstruction loss (spatial + optional FFT).

    Args:
        x: original images; 4D tensor indexed as (N, C, H, W) by the
            `size(2)`/`size(3)` calls below.
        x_rec: reconstructed images, same spatial shape as ``x``.
        mask: patch-level binary mask, shape (N, h, w); upsampled to pixel
            resolution here. By the weighting below, 1 marks patches whose
            loss is fully counted (masked patches — per MIM convention;
            TODO confirm against the data pipeline).

    Returns:
        dict: {'loss': scalar combined reconstruction loss}.
    """
    # upsampling mask: integer upscale via repeat_interleave, otherwise
    # nearest-neighbor interpolation (handles downscale/fractional scale).
    scale_h, scale_w = x.size(2) / mask.size(1), x.size(3) / mask.size(2)
    if scale_h > 1:
        mask = mask.repeat_interleave(int(scale_h), 1).repeat_interleave(
            int(scale_w), 2).unsqueeze(1).contiguous()
    else:
        mask = F.interpolate(mask.type_as(x).unsqueeze(1),
                             scale_factor=(scale_h, scale_w), mode="nearest")
    # spatial loss
    if self.unmask_weight > 0.:
        # reweight unmasked patches: unmasked positions get `unmask_weight`
        # instead of 0, masked positions stay at 1.
        mask_s = mask.clone()
        mask_s = mask_s + (1. - mask_s) * self.unmask_weight
    else:
        mask_s = mask
    loss_rec = self.criterion(x_rec, target=x, reduction_override='none')
    # Normalize by total mask weight (+1e-5 avoids div-by-zero) and channels.
    loss_rec = (loss_rec * mask_s).sum() / (mask_s.sum() + 1e-5) / self.encoder_in_channels
    # fourier domain loss
    if self.fft_weight > 0:
        # replace unmask patches (with detach) so gradients only flow
        # through the masked part of the prediction.
        x_replace = None
        if self.fft_unmask_replace is not None:
            if self.fft_unmask_replace == 'target':
                x_replace = x.clone()
            elif self.fft_unmask_replace == 'prediction':
                x_replace = x_rec.clone().detach()
            elif self.fft_unmask_replace == 'mean':
                x_replace = x.mean(dim=[2, 3], keepdim=True).expand(x.size())
            elif self.fft_unmask_replace == 'mixed':
                x_replace = 0.5 * x_rec.clone().detach() + 0.5 * x.clone()
        if self.fft_unmask_weight < 1:
            mask_f = mask.clone()
            mask_f = mask_f + (1. - mask_f) * self.fft_unmask_weight
            # NOTE(review): if `fft_unmask_replace` stayed None (possible when
            # unmask_weight == 1 in __init__), x_replace is None here and this
            # line raises a TypeError — confirm whether that config is legal.
            x_rec = (x_rec * mask_f) + (x_replace * (1. - mask_f))  # replace unmask tokens
        # apply fft loss
        if self.fft_focal:
            loss_fft = self.fft_loss(x_rec, x)
        else:
            # 2D FFT over the spatial dims; 'ortho' keeps scales comparable.
            f_x = torch.fft.fftn(x, dim=(2, 3), norm='ortho')
            f_x_rec = torch.fft.fftn(x_rec, dim=(2, 3), norm='ortho')
            if self.fft_reweight:
                # Reweight each frequency by its own (detached) error magnitude.
                loss_fft = self.fft_loss(f_x_rec, target=f_x, reduction_override='none')
                fft_weight = loss_fft.clone().detach()
                loss_fft = (fft_weight * loss_fft).mean()
            else:
                loss_fft = self.fft_loss(f_x_rec, target=f_x, reduction_override='mean')
        loss_rec += self.fft_weight * loss_fft
    losses = dict()
    losses['loss'] = loss_rec
    return losses
| 37.333333 | 95 | 0.573236 | import torch
import torch.nn as nn
from mmcv.runner import BaseModule
from torch.nn import functional as F
from mmcv.cnn.utils.weight_init import trunc_normal_init
from ..builder import build_loss
from ..registry import HEADS
from .cls_head import ClsHead
from openmixup.utils import print_log
@HEADS.register_module
class MAEPretrainHead(BaseModule):
    """MAE pre-training head: per-patch pixel reconstruction loss on masked patches.

    Args:
        norm_pix (bool): Normalize target pixels per patch (zero mean, unit
            variance) before the loss, as in the MAE paper.
        patch_size (int): Side length (pixels) of a square patch.
    """
    def __init__(self, norm_pix=False, patch_size=16):
        super(MAEPretrainHead, self).__init__()
        self.norm_pix = norm_pix
        self.patch_size = patch_size

    def patchify(self, imgs):
        """Split images (N, 3, H, W) into flattened patches (N, h*w, p*p*3)."""
        p = self.patch_size
        # Images must be square with side divisible by the patch size.
        assert imgs.shape[2] == imgs.shape[3] and imgs.shape[2] % p == 0
        h = w = imgs.shape[2] // p
        x = imgs.reshape(shape=(imgs.shape[0], 3, h, p, w, p))
        x = torch.einsum('nchpwq->nhwpqc', x)
        x = x.reshape(shape=(imgs.shape[0], h * w, p**2 * 3))
        return x

    def forward(self, x, x_rec, mask):
        """Return {'loss': mean squared error averaged over masked patches}.

        Args:
            x: original images (N, 3, H, W).
            x_rec: reconstructed patches, same layout as ``patchify(x)``.
            mask: per-patch weights; only patches with nonzero mask
                contribute (1 = masked patch — per MAE convention;
                TODO confirm against caller).
        """
        losses = dict()
        target = self.patchify(x)
        if self.norm_pix:
            # Per-patch normalization of the regression target.
            mean = target.mean(dim=-1, keepdim=True)
            var = target.var(dim=-1, keepdim=True)
            target = (target - mean) / (var + 1.e-6)**.5
        loss = (x_rec - target)**2
        loss = loss.mean(dim=-1)
        # Average only over the masked patches.
        loss = (loss * mask).sum() / mask.sum()
        losses['loss'] = loss
        return losses
@HEADS.register_module()
class MAEFinetuneHead(ClsHead):
    """Classification head for MAE end-to-end fine-tuning (linear fc from ClsHead)."""
    def __init__(self, **kwargs):
        super(MAEFinetuneHead, self).__init__(**kwargs)

    def init_weights(self):
        # Very small init std for the final linear layer (MAE fine-tune recipe).
        for m in self.modules():
            if isinstance(m, nn.Linear):
                trunc_normal_init(m, std=2e-5, bias=0)

    def forward(self, x):
        """Classify a single feature tensor wrapped in a 1-element tuple/list."""
        assert isinstance(x, (tuple, list)) and len(x) == 1
        x = x[0]
        return [self.fc(x)]
@HEADS.register_module()
class MAELinprobeHead(ClsHead):
    """Linear-probing head for MAE: frozen-feature BatchNorm + linear classifier.

    Args:
        in_channels (int): Feature dimension fed to the BN/fc layers.
            NOTE(review): the default 786 looks like a typo for 768
            (ViT-Base embedding dim) — confirm before relying on it.
    """
    def __init__(self, in_channels=786, **kwargs):
        super(MAELinprobeHead, self).__init__(in_channels=in_channels, **kwargs)
        # affine=False: BN only whitens the frozen features, no learned scale/shift.
        self.bn = nn.BatchNorm1d(in_channels, affine=False, eps=1e-6)

    def init_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Linear):
                trunc_normal_init(m, std=0.01, bias=0)

    def forward(self, x):
        """Normalize then classify a single feature tensor in a 1-element tuple/list."""
        assert isinstance(x, (tuple, list)) and len(x) == 1
        x = self.bn(x[0])
        return [self.fc(x)]
@HEADS.register_module
class SimMIMHead(BaseModule):
    """SimMIM pre-training head: masked L1 pixel-reconstruction loss.

    Args:
        encoder_in_channels (int): Number of encoder input channels, used to
            normalize the loss.
    """
    def __init__(self, encoder_in_channels=3):
        super(SimMIMHead, self).__init__()
        self.encoder_in_channels = encoder_in_channels

    def forward(self, x, x_rec, mask):
        """Return {'loss': L1 loss over masked pixels, channel-normalized}.

        ``mask`` is patch-level (N, h, w) and is upsampled to pixel
        resolution before weighting the per-pixel L1 error.
        """
        # Integer upscale via repeat_interleave; otherwise nearest interpolation.
        scale_h, scale_w = x.size(2) / mask.size(1), x.size(3) / mask.size(2)
        if scale_h > 1:
            mask = mask.repeat_interleave(int(scale_h), 1).repeat_interleave(
                int(scale_w), 2).unsqueeze(1).contiguous()
        else:
            mask = F.interpolate(mask.type_as(x).unsqueeze(1),
                                 scale_factor=(scale_h, scale_w), mode="nearest")
        loss_rec = F.l1_loss(x_rec, x, reduction='none')
        # Average over masked pixels; +1e-5 guards against an all-zero mask.
        loss = (loss_rec * mask).sum() / (mask.sum() +
            1e-5) / self.encoder_in_channels
        losses = dict()
        losses['loss'] = loss
        return losses
@HEADS.register_module
class MIMHead(BaseModule):
    """Masked image modeling (MIM) head with spatial and optional FFT losses.

    Args:
        loss (dict | None): Config of the spatial regression loss. ``None``
            (the default) builds a plain L1 ``RegressionLoss``. A ``None``
            default avoids a shared mutable default dict.
        encoder_in_channels (int): Number of encoder input channels, used to
            normalize the reconstruction loss.
        unmask_weight (float): Loss weight for unmasked patches, in [0, 1].
        fft_weight (float): Weight of the FFT prediction loss; 0 disables it.
        fft_reweight (bool): Reweight the FFT loss by its detached magnitude.
        fft_focal (bool): Use the focal frequency loss in the FFT branch.
        fft_unmask_replace (str | None): How to replace (detach) unmasked
            patches before the FFT loss, one of
            {None, 'target', 'prediction', 'mean', 'mixed'}.
        fft_unmask_weight (float): Loss weight of unmasked tokens in the FFT
            loss, in [0, 1].
    """

    def __init__(self,
                 loss=None,
                 encoder_in_channels=3,
                 unmask_weight=0,
                 fft_weight=0,
                 fft_reweight=False,
                 fft_focal=False,
                 fft_unmask_replace=None,
                 fft_unmask_weight=0,
                 **kwargs,
                ):
        super(MIMHead, self).__init__()
        self.encoder_in_channels = encoder_in_channels
        self.unmask_weight = unmask_weight
        self.fft_weight = fft_weight
        self.fft_reweight = fft_reweight
        self.fft_focal = fft_focal
        self.fft_unmask_weight = fft_unmask_weight
        self.fft_unmask_replace = fft_unmask_replace
        assert fft_unmask_replace in [None, 'target', 'prediction', 'mean', 'mixed',]
        assert 0 <= unmask_weight <= 1 and 0 <= fft_unmask_weight <= 1
        if self.unmask_weight < 1:
            if fft_unmask_replace is None and fft_weight > 0:
                # The FFT loss needs a replacement mode; fall back to 'target'.
                self.fft_unmask_replace = 'target'
                print_log("When using the fft loss, `fft_unmask_replace` should " + \
                          "not be None. Reset as `fft_unmask_replace='target'`.")
        # spatial loss (default: plain L1 regression)
        assert loss is None or isinstance(loss, dict)
        if loss is None:
            loss = dict(
                type='RegressionLoss', loss_weight=1.0, mode="l1_loss")
        self.criterion = build_loss(loss)
        # fft loss
        if fft_focal:
            fft_loss = dict(
                type='FocalFrequencyLoss', loss_weight=1.0, alpha=1.0,
                ave_spectrum=True, log_matrix=True, batch_matrix=True)
        else:
            # Copy before overriding `mode`: the previous code aliased the
            # caller's `loss` dict and mutated it in place (and with a dict
            # default, mutated the shared default across instantiations).
            fft_loss = dict(loss)
            if fft_loss["mode"] not in ["l1_loss", "mse_loss", "focal_l1_loss", "focal_mse_loss",]:
                fft_loss['mode'] = "l1_loss"
        self.fft_loss = build_loss(fft_loss)

    def forward(self, x, x_rec, mask):
        """Compute the masked reconstruction loss (spatial + optional FFT).

        Args:
            x: original images, 4D (N, C, H, W).
            x_rec: reconstructed images, same spatial shape as ``x``.
            mask: patch-level binary mask (N, h, w); upsampled to pixel
                resolution here.

        Returns:
            dict: {'loss': scalar combined loss}.
        """
        # Upsample mask: integer upscale via repeat_interleave, otherwise
        # nearest-neighbor interpolation.
        scale_h, scale_w = x.size(2) / mask.size(1), x.size(3) / mask.size(2)
        if scale_h > 1:
            mask = mask.repeat_interleave(int(scale_h), 1).repeat_interleave(
                int(scale_w), 2).unsqueeze(1).contiguous()
        else:
            mask = F.interpolate(mask.type_as(x).unsqueeze(1),
                                 scale_factor=(scale_h, scale_w), mode="nearest")
        # Spatial loss: optionally also count unmasked patches at a reduced weight.
        if self.unmask_weight > 0.:
            mask_s = mask.clone()
            mask_s = mask_s + (1. - mask_s) * self.unmask_weight
        else:
            mask_s = mask
        loss_rec = self.criterion(x_rec, target=x, reduction_override='none')
        loss_rec = (loss_rec * mask_s).sum() / (mask_s.sum() + 1e-5) / self.encoder_in_channels
        # Fourier-domain loss.
        if self.fft_weight > 0:
            # Replace (detached) unmask patches so gradients only flow through
            # the masked part of the prediction.
            x_replace = None
            if self.fft_unmask_replace is not None:
                if self.fft_unmask_replace == 'target':
                    x_replace = x.clone()
                elif self.fft_unmask_replace == 'prediction':
                    x_replace = x_rec.clone().detach()
                elif self.fft_unmask_replace == 'mean':
                    x_replace = x.mean(dim=[2, 3], keepdim=True).expand(x.size())
                elif self.fft_unmask_replace == 'mixed':
                    x_replace = 0.5 * x_rec.clone().detach() + 0.5 * x.clone()
            # Guard on x_replace: previously a None x_replace (possible when
            # unmask_weight == 1 leaves fft_unmask_replace unset) crashed here.
            if x_replace is not None and self.fft_unmask_weight < 1:
                mask_f = mask.clone()
                mask_f = mask_f + (1. - mask_f) * self.fft_unmask_weight
                x_rec = (x_rec * mask_f) + (x_replace * (1. - mask_f))
            # Apply the FFT loss.
            if self.fft_focal:
                loss_fft = self.fft_loss(x_rec, x)
            else:
                # 2D FFT over spatial dims; 'ortho' keeps scales comparable.
                f_x = torch.fft.fftn(x, dim=(2, 3), norm='ortho')
                f_x_rec = torch.fft.fftn(x_rec, dim=(2, 3), norm='ortho')
                if self.fft_reweight:
                    loss_fft = self.fft_loss(f_x_rec, target=f_x, reduction_override='none')
                    fft_weight = loss_fft.clone().detach()
                    loss_fft = (fft_weight * loss_fft).mean()
                else:
                    loss_fft = self.fft_loss(f_x_rec, target=f_x, reduction_override='mean')
            loss_rec += self.fft_weight * loss_fft
        losses = dict()
        losses['loss'] = loss_rec
        return losses
| true | true |
f731dac69125b54c276e48cad62937bf921dd204 | 4,008 | py | Python | tests/packets_ipv4_dns.py | mattjhayes/nmeta | 55cc27e81defc42775ff563bfbef31800e089b14 | [
"Apache-2.0"
] | 18 | 2015-02-18T22:53:32.000Z | 2021-11-16T11:36:50.000Z | tests/packets_ipv4_dns.py | mattjhayes/nmeta | 55cc27e81defc42775ff563bfbef31800e089b14 | [
"Apache-2.0"
] | 67 | 2015-01-05T07:27:22.000Z | 2017-07-21T11:38:14.000Z | tests/packets_ipv4_dns.py | mattjhayes/nmeta | 55cc27e81defc42775ff563bfbef31800e089b14 | [
"Apache-2.0"
] | 8 | 2015-05-01T18:35:03.000Z | 2019-10-02T13:54:53.000Z | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Packets with metadata to use in testing of nmeta suite
This file is a set of DNS packets
Note: no testing of max_interpacket_interval and
min_interpacket_interval as they become imprecise due
to floating point and when tried using decimal module
found that would not serialise into Pymongo db.
To create test packet data, capture packet in Wireshark and:
For the packet summary:
Right-click packet in top pane, Copy -> Summary (text).
Edit pasted text as appropriate
For the packet hex:
Right-click packet in top pane, Copy -> Bytes -> Hex Stream
For the packet timestamp:
Expand 'Frame' in the middle pane,
right-click 'Epoch Time' Copy -> Value
Packet capture file is 'packets_ipv4_DNS.pcap'
"""
import binascii
#*** Per-packet metadata lists. Index i in every list refers to packet i;
#*** all lists are populated together from the _PACKETS records below so
#*** existing consumers (RAW[i], LEN[i], ...) keep working unchanged.
RAW = []
LEN = []
ETH_SRC = []
ETH_DST = []
ETH_TYPE = []
IP_SRC = []
IP_DST = []
PROTO = []
TP_SRC = []
TP_DST = []
TP_SEQ_SRC = []
TP_SEQ_DST = []
TCP_SYN = []
TCP_FIN = []
TCP_RST = []
TCP_PSH = []
TCP_ACK = []
PAYLOAD = []
DIRECTION = []
DNS_NAME = []
DNS_CNAME = []
DNS_IP = []

#*** One record per captured packet, in capture order:
_PACKETS = [
    #*** Packet 0 - DNS Standard query 0x24e8 A www.facebook.com
    {
        'raw': "5254001235020800278308f008004500003ea14c40004011e0940a00020fd043dc7b1ad20035002a10dd24e801000001000000000000037777770866616365626f6f6b03636f6d0000010001",
        'len': 76,
        'eth_src': '08:00:27:83:08:f0',
        'eth_dst': '52:54:00:12:35:02',
        'ip_src': '10.0.2.15',
        'ip_dst': '208.67.220.123',
        'tp_src': 6866,
        'tp_dst': 53,
        'payload': "24e801000001000000000000037777770866616365626f6f6b03636f6d0000010001",
        'dns_name': "www.facebook.com",
        'dns_cname': "",
        'dns_ip': "",
    },
    #*** Packet 1 - DNS Standard query response 0x24e8 (CNAME + A record)
    {
        'raw': "0800278308f052540012350208004500006bcd1a00004011f499d043dc7b0a00020f00351ad2005777cf24e881800001000200000000037777770866616365626f6f6b03636f6d0000010001c00c0005000100000ab0001109737461722d6d696e690463313072c010c02e00010001000000330004b33cc124",
        'len': 121,
        'eth_src': '52:54:00:12:35:02',
        'eth_dst': '08:00:27:83:08:f0',
        'ip_src': '208.67.220.123',
        'ip_dst': '10.0.2.15',
        'tp_src': 53,
        'tp_dst': 6866,
        'payload': "24e881800001000200000000037777770866616365626f6f6b03636f6d0000010001c00c0005000100000ab0001109737461722d6d696e690463313072c010c02e00010001000000330004b33cc124",
        'dns_name': "www.facebook.com",
        'dns_cname': "star-mini.c10r.facebook.com",
        'dns_ip': "179.60.193.36",
    },
]

for _pkt in _PACKETS:
    RAW.append(binascii.unhexlify(_pkt['raw']))
    LEN.append(_pkt['len'])
    ETH_SRC.append(_pkt['eth_src'])
    ETH_DST.append(_pkt['eth_dst'])
    ETH_TYPE.append(2048)
    IP_SRC.append(_pkt['ip_src'])
    IP_DST.append(_pkt['ip_dst'])
    PROTO.append(17)
    TP_SRC.append(_pkt['tp_src'])
    TP_DST.append(_pkt['tp_dst'])
    #*** UDP/DNS packets carry no sequence numbers or TCP flags:
    TP_SEQ_SRC.append(0)
    TP_SEQ_DST.append(0)
    TCP_SYN.append(0)
    TCP_FIN.append(0)
    TCP_RST.append(0)
    TCP_PSH.append(0)
    TCP_ACK.append(0)
    PAYLOAD.append(_pkt['payload'])
    DIRECTION.append("")
    DNS_NAME.append(_pkt['dns_name'])
    DNS_CNAME.append(_pkt['dns_cname'])
    DNS_IP.append(_pkt['dns_ip'])

#*** Metadata for whole flow:
FLOW_IP_CLIENT = ''
FLOW_IP_SERVER = ''
| 30.830769 | 276 | 0.755739 |
import binascii
# Per-packet metadata lists. Index i in every list refers to packet i; all
# lists are populated together from the _PACKETS records below so existing
# consumers (RAW[i], LEN[i], ...) keep working unchanged.
RAW = []
LEN = []
ETH_SRC = []
ETH_DST = []
ETH_TYPE = []
IP_SRC = []
IP_DST = []
PROTO = []
TP_SRC = []
TP_DST = []
TP_SEQ_SRC = []
TP_SEQ_DST = []
TCP_SYN = []
TCP_FIN = []
TCP_RST = []
TCP_PSH = []
TCP_ACK = []
PAYLOAD = []
DIRECTION = []
DNS_NAME = []
DNS_CNAME = []
DNS_IP = []

# One record per captured packet, in capture order:
_PACKETS = [
    # Packet 0 - DNS Standard query 0x24e8 A www.facebook.com
    {
        'raw': "5254001235020800278308f008004500003ea14c40004011e0940a00020fd043dc7b1ad20035002a10dd24e801000001000000000000037777770866616365626f6f6b03636f6d0000010001",
        'len': 76,
        'eth_src': '08:00:27:83:08:f0',
        'eth_dst': '52:54:00:12:35:02',
        'ip_src': '10.0.2.15',
        'ip_dst': '208.67.220.123',
        'tp_src': 6866,
        'tp_dst': 53,
        'payload': "24e801000001000000000000037777770866616365626f6f6b03636f6d0000010001",
        'dns_name': "www.facebook.com",
        'dns_cname': "",
        'dns_ip': "",
    },
    # Packet 1 - DNS Standard query response 0x24e8 (CNAME + A record)
    {
        'raw': "0800278308f052540012350208004500006bcd1a00004011f499d043dc7b0a00020f00351ad2005777cf24e881800001000200000000037777770866616365626f6f6b03636f6d0000010001c00c0005000100000ab0001109737461722d6d696e690463313072c010c02e00010001000000330004b33cc124",
        'len': 121,
        'eth_src': '52:54:00:12:35:02',
        'eth_dst': '08:00:27:83:08:f0',
        'ip_src': '208.67.220.123',
        'ip_dst': '10.0.2.15',
        'tp_src': 53,
        'tp_dst': 6866,
        'payload': "24e881800001000200000000037777770866616365626f6f6b03636f6d0000010001c00c0005000100000ab0001109737461722d6d696e690463313072c010c02e00010001000000330004b33cc124",
        'dns_name': "www.facebook.com",
        'dns_cname': "star-mini.c10r.facebook.com",
        'dns_ip': "179.60.193.36",
    },
]

for _pkt in _PACKETS:
    RAW.append(binascii.unhexlify(_pkt['raw']))
    LEN.append(_pkt['len'])
    ETH_SRC.append(_pkt['eth_src'])
    ETH_DST.append(_pkt['eth_dst'])
    ETH_TYPE.append(2048)
    IP_SRC.append(_pkt['ip_src'])
    IP_DST.append(_pkt['ip_dst'])
    PROTO.append(17)
    TP_SRC.append(_pkt['tp_src'])
    TP_DST.append(_pkt['tp_dst'])
    # UDP/DNS packets carry no sequence numbers or TCP flags:
    TP_SEQ_SRC.append(0)
    TP_SEQ_DST.append(0)
    TCP_SYN.append(0)
    TCP_FIN.append(0)
    TCP_RST.append(0)
    TCP_PSH.append(0)
    TCP_ACK.append(0)
    PAYLOAD.append(_pkt['payload'])
    DIRECTION.append("")
    DNS_NAME.append(_pkt['dns_name'])
    DNS_CNAME.append(_pkt['dns_cname'])
    DNS_IP.append(_pkt['dns_ip'])

# Metadata for the whole flow:
FLOW_IP_CLIENT = ''
FLOW_IP_SERVER = ''
| true | true |
f731db288f3af2b9725dd5bcb99c2c03c2bd44fa | 399 | py | Python | clubChinois/asgi.py | LonelVino/club-chinois-home | 3e2ecc6728f0b7349adfe10e515e3f5908d09c9d | [
"MIT"
] | null | null | null | clubChinois/asgi.py | LonelVino/club-chinois-home | 3e2ecc6728f0b7349adfe10e515e3f5908d09c9d | [
"MIT"
] | null | null | null | clubChinois/asgi.py | LonelVino/club-chinois-home | 3e2ecc6728f0b7349adfe10e515e3f5908d09c9d | [
"MIT"
] | null | null | null | """
ASGI config for clubChinois project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os

from django.core.asgi import get_asgi_application

# Point Django at this project's settings before the application is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'clubChinois.settings')

# Module-level ASGI callable that ASGI servers (uvicorn, daphne, ...) import.
application = get_asgi_application()
| 23.470588 | 78 | 0.789474 |
import os

from django.core.asgi import get_asgi_application

# Point Django at this project's settings before the application is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'clubChinois.settings')

# Module-level ASGI callable that ASGI servers (uvicorn, daphne, ...) import.
application = get_asgi_application()
| true | true |
f731db2b22f21eabf1ae55d4e5b092cecb7772ad | 3,106 | py | Python | hooks/webkitpy/common/thread/messagepump_unittest.py | nizovn/luna-sysmgr | 48b7e2546e81d6ad1604353f2e5ab797a7d1667c | [
"Apache-2.0"
] | 15 | 2015-05-25T19:28:05.000Z | 2021-05-03T09:21:22.000Z | WebKit/Tools/Scripts/webkitpy/common/thread/messagepump_unittest.py | JavaScriptTesting/LJS | 9818dbdb421036569fff93124ac2385d45d01c3a | [
"Apache-2.0"
] | 7 | 2022-03-15T13:25:39.000Z | 2022-03-15T13:25:44.000Z | WebKit/Tools/Scripts/webkitpy/common/thread/messagepump_unittest.py | JavaScriptTesting/LJS | 9818dbdb421036569fff93124ac2385d45d01c3a | [
"Apache-2.0"
] | 7 | 2019-01-30T08:56:04.000Z | 2021-11-19T16:14:54.000Z | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from webkitpy.common.thread.messagepump import MessagePump, MessagePumpDelegate
from webkitpy.common.thread.threadedmessagequeue import ThreadedMessageQueue
class TestDelegate(MessagePumpDelegate):
    """MessagePumpDelegate stub that records every delegate callback in self.log."""
    def __init__(self):
        self.log = []

    def schedule(self, interval, callback):
        # Remember the callback so the test can drive the pump manually.
        self.callback = callback
        self.log.append("schedule")

    def message_available(self, message):
        self.log.append("message_available: %s" % message)

    def final_message_delivered(self):
        self.log.append("final_message_delivered")
class MessagePumpTest(unittest.TestCase):
    def test_basic(self):
        """Messages are delivered in posted order and the final delivery is flagged."""
        queue = ThreadedMessageQueue()
        delegate = TestDelegate()
        pump = MessagePump(delegate, queue)

        # Constructing the pump schedules the first poll.
        self.assertEqual(delegate.log, [
            'schedule'
        ])
        delegate.callback()
        queue.post("Hello")
        queue.post("There")
        delegate.callback()
        # Posted messages are delivered on the next poll, then a new poll is scheduled.
        self.assertEqual(delegate.log, [
            'schedule',
            'schedule',
            'message_available: Hello',
            'message_available: There',
            'schedule'
        ])
        queue.post("More")
        queue.post("Messages")
        queue.stop()
        delegate.callback()
        # After stop(), the remaining messages flush and the final-delivery hook fires.
        self.assertEqual(delegate.log, [
            'schedule',
            'schedule',
            'message_available: Hello',
            'message_available: There',
            'schedule',
            'message_available: More',
            'message_available: Messages',
            'final_message_delivered'
        ])
| 36.97619 | 79 | 0.691887 |
import unittest
from webkitpy.common.thread.messagepump import MessagePump, MessagePumpDelegate
from webkitpy.common.thread.threadedmessagequeue import ThreadedMessageQueue
class TestDelegate(MessagePumpDelegate):
    """MessagePumpDelegate stub that records every delegate callback in self.log."""
    def __init__(self):
        self.log = []

    def schedule(self, interval, callback):
        # Remember the callback so the test can drive the pump manually.
        self.callback = callback
        self.log.append("schedule")

    def message_available(self, message):
        self.log.append("message_available: %s" % message)

    def final_message_delivered(self):
        self.log.append("final_message_delivered")
class MessagePumpTest(unittest.TestCase):
    def test_basic(self):
        """Messages are delivered in posted order and the final delivery is flagged."""
        queue = ThreadedMessageQueue()
        delegate = TestDelegate()
        pump = MessagePump(delegate, queue)

        # Constructing the pump schedules the first poll.
        self.assertEqual(delegate.log, [
            'schedule'
        ])
        delegate.callback()
        queue.post("Hello")
        queue.post("There")
        delegate.callback()
        # Posted messages are delivered on the next poll, then a new poll is scheduled.
        self.assertEqual(delegate.log, [
            'schedule',
            'schedule',
            'message_available: Hello',
            'message_available: There',
            'schedule'
        ])
        queue.post("More")
        queue.post("Messages")
        queue.stop()
        delegate.callback()
        # After stop(), the remaining messages flush and the final-delivery hook fires.
        self.assertEqual(delegate.log, [
            'schedule',
            'schedule',
            'message_available: Hello',
            'message_available: There',
            'schedule',
            'message_available: More',
            'message_available: Messages',
            'final_message_delivered'
        ])
| true | true |
f731dcea61a92d9d646629353eb1706f5a38b82e | 8,251 | py | Python | pwndbg/commands/nearpc.py | jmc1283/pwndbg | df165f0788948b9b12e6a80fa91d647c13e6eb0a | [
"MIT"
] | 21 | 2018-01-01T13:28:56.000Z | 2019-11-06T15:30:56.000Z | pwndbg/commands/nearpc.py | jmc1283/pwndbg | df165f0788948b9b12e6a80fa91d647c13e6eb0a | [
"MIT"
] | null | null | null | pwndbg/commands/nearpc.py | jmc1283/pwndbg | df165f0788948b9b12e6a80fa91d647c13e6eb0a | [
"MIT"
] | 5 | 2018-01-02T01:30:41.000Z | 2020-01-04T05:55:57.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import codecs
import gdb
from capstone import *
import pwndbg.arguments
import pwndbg.color
import pwndbg.color.context as C
import pwndbg.color.disasm as D
import pwndbg.color.nearpc as N
import pwndbg.color.theme
import pwndbg.config
import pwndbg.disasm
import pwndbg.functions
import pwndbg.ida
import pwndbg.regs
import pwndbg.strings
import pwndbg.symbol
import pwndbg.ui
import pwndbg.vmmap
from pwndbg.color import message
def ljust_padding(lst):
    """Right-pad each string in *lst* with spaces to the width of the longest entry."""
    if not lst:
        return []
    width = max(len(entry) for entry in lst)
    return [entry.ljust(width) for entry in lst]
# User-themable markers and tunables for the nearpc/emulate/pdisass commands.
nearpc_branch_marker = pwndbg.color.theme.Parameter('nearpc-branch-marker', ' ↓', 'branch marker line for nearpc command')
nearpc_branch_marker_contiguous = pwndbg.color.theme.Parameter('nearpc-branch-marker-contiguous', ' ', 'contiguous branch marker line for nearpc command')
pwndbg.color.theme.Parameter('highlight-pc', True, 'whether to highlight the current instruction')
pwndbg.color.theme.Parameter('nearpc-prefix', '►', 'prefix marker for nearpc command')
pwndbg.config.Parameter('left-pad-disasm', True, 'whether to left-pad disassembly')
nearpc_lines = pwndbg.config.Parameter('nearpc-lines', 10, 'number of additional lines to print for the nearpc command')
show_args = pwndbg.config.Parameter('nearpc-show-args', True, 'show call arguments below instruction')

# Argument parser consumed by the `nearpc` command registration below.
parser = argparse.ArgumentParser(description='''Disassemble near a specified address.''')
parser.add_argument("pc", type=int, nargs="?", default=None, help="Address to dissassemble near.")
parser.add_argument("lines", type=int, nargs="?", default=None, help="Number of lines to show on either side of the address.")
#parser.add_argument("to_string", type=bool, nargs="?", default=False, help="Whether to print it or not.") #TODO make sure this should not be exposed
parser.add_argument("emulate", type=bool, nargs="?", default=False, help="Whether to emulate instructions to find the next ones or just linearly disassemble.")
@pwndbg.commands.ArgparsedCommand(parser)
@pwndbg.commands.OnlyWhenRunning
def nearpc(pc=None, lines=None, to_string=False, emulate=False):
    """
    Disassemble near a specified address.

    Args:
        pc: address to disassemble around; defaults to the current $pc.
        lines: instructions to show on either side; defaults to nearpc-lines/2.
        to_string: when True, return the rendered lines without printing.
        emulate: when True, emulate instructions to follow real control flow
            instead of disassembling linearly.

    Returns:
        list of rendered (colorized) output lines.
    """
    # Repeating nearpc (pressing enter) makes it show next addresses
    # (writing nearpc explicitly again will reset its state)
    if nearpc.repeat:
        pc = nearpc.next_pc

    result = []

    # Fix the case where we only have one argument, and
    # it's a small value.
    if lines is None and (pc is None or int(pc) < 0x100):
        lines = pc
        pc = None

    if pc is None:
        pc = pwndbg.regs.pc

    if lines is None:
        lines = nearpc_lines // 2

    pc = int(pc)
    lines = int(lines)

    # Check whether we can even read this address
    if not pwndbg.memory.peek(pc):
        result.append(message.error('Invalid address %#x' % pc))

    # # Load source data if it's available
    # pc_to_linenos = collections.defaultdict(lambda: [])
    # lineno_to_src = {}
    # frame = gdb.selected_frame()
    # if frame:
    #     sal = frame.find_sal()
    #     if sal:
    #         symtab = sal.symtab
    #         objfile = symtab.objfile
    #         sourcefilename = symtab.filename
    #         with open(sourcefilename, 'r') as sourcefile:
    #             lineno_to_src = {i:l for i,l in enumerate(sourcefile.readlines())}
    #         for line in symtab.linetable():
    #             pc_to_linenos[line.pc].append(line.line)

    instructions = pwndbg.disasm.near(pc, lines, emulate=emulate, show_prev_insns=not nearpc.repeat)

    if pwndbg.memory.peek(pc) and not instructions:
        result.append(message.error('Invalid instructions at %#x' % pc))

    # In case $pc is in a new map we don't know about,
    # this will trigger an exploratory search.
    pwndbg.vmmap.find(pc)

    # Gather all addresses and symbols for each instruction
    symbols = [pwndbg.symbol.get(i.address) for i in instructions]
    addresses = ['%#x' % i.address for i in instructions]

    # Remember where to continue from when the command is repeated.
    nearpc.next_pc = instructions[-1].address + instructions[-1].size if instructions else 0

    # Format the symbol name for each instruction
    symbols = ['<%s> ' % sym if sym else '' for sym in symbols]

    # Pad out all of the symbols and addresses
    if pwndbg.config.left_pad_disasm and not nearpc.repeat:
        symbols = ljust_padding(symbols)
        addresses = ljust_padding(addresses)

    prev = None

    # Print out each instruction
    for address_str, symbol, instr in zip(addresses, symbols, instructions):
        asm = D.instruction(instr)
        prefix_sign = pwndbg.config.nearpc_prefix

        # Show prefix only on the specified address and don't show it while in repeat-mode
        show_prefix = instr.address == pc and not nearpc.repeat
        prefix = ' %s' % (prefix_sign if show_prefix else ' ' * len(prefix_sign))
        prefix = N.prefix(prefix)

        pre = pwndbg.ida.Anterior(instr.address)
        if pre:
            result.append(N.ida_anterior(pre))

        # Colorize address and symbol if not highlighted
        # symbol is fetched from gdb and it can be e.g. '<main+8>'
        if instr.address != pc or not pwndbg.config.highlight_pc or nearpc.repeat:
            address_str = N.address(address_str)
            symbol = N.symbol(symbol)
        elif pwndbg.config.highlight_pc:
            prefix = C.highlight(prefix)
            address_str = C.highlight(address_str)
            symbol = C.highlight(symbol)

        line = ' '.join((prefix, address_str, symbol, asm))

        # If there was a branch before this instruction which was not
        # contiguous, put in some ellipses.
        if prev and prev.address + prev.size != instr.address:
            result.append(N.branch_marker('%s' % nearpc_branch_marker))

        # Otherwise if it's a branch and it *is* contiguous, just put
        # and empty line.
        elif prev and any(g in prev.groups for g in (CS_GRP_CALL, CS_GRP_JUMP, CS_GRP_RET)):
            if len('%s' % nearpc_branch_marker_contiguous) > 0:
                result.append('%s' % nearpc_branch_marker_contiguous)

        # For syscall instructions, put the name on the side
        if instr.address == pc:
            syscall_name = pwndbg.arguments.get_syscall_name(instr)
            if syscall_name:
                line += ' <%s>' % N.syscall_name(syscall_name)

        result.append(line)

        # For call instructions, attempt to resolve the target and
        # determine the number of arguments.
        if show_args:
            result.extend(['%8s%s' % ('', arg) for arg in pwndbg.arguments.format_args(instruction=instr)])

        prev = instr

    if not to_string:
        print('\n'.join(result))

    return result
parser = argparse.ArgumentParser(description='''Like nearpc, but will emulate instructions from the current $PC forward.''')
parser.add_argument("pc", type=int, nargs="?", default=None, help="Address to emulate near.")
parser.add_argument("lines", type=int, nargs="?", default=None, help="Number of lines to show on either side of the address.")
@pwndbg.commands.ArgparsedCommand(parser)
@pwndbg.commands.OnlyWhenRunning
def emulate(pc=None, lines=None, to_string=False, emulate=True):
    """
    Like nearpc, but will emulate instructions from the current $PC forward.
    """
    # Propagate repeat state so pressing enter continues from the next address.
    nearpc.repeat = emulate_command.repeat
    return nearpc(pc, lines, to_string, emulate)

# Keep a module-level alias: the `emulate` parameter shadows the function
# name inside its own body, so the body refers to `emulate_command`.
emulate_command = emulate
parser = argparse.ArgumentParser(description='''Compatibility layer for PEDA's pdisass command.''')
parser.add_argument("pc", type=int, nargs="?", default=None, help="Address to disassemble near.")
parser.add_argument("lines", type=int, nargs="?", default=None, help="Number of lines to show on either side of the address.")
@pwndbg.commands.ArgparsedCommand(parser)
@pwndbg.commands.OnlyWhenRunning
def pdisass(pc=None, lines=None, to_string=False):
    """
    Compatibility layer for PEDA's pdisass command
    """
    # Propagate repeat state; always disassemble linearly (emulate=False).
    nearpc.repeat = pdisass.repeat
    return nearpc(pc, lines, to_string, False)

# Initialize the repeat-mode state used by nearpc().
nearpc.next_pc = 0
| 39.290476 | 159 | 0.688038 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import codecs
import gdb
from capstone import *
import pwndbg.arguments
import pwndbg.color
import pwndbg.color.context as C
import pwndbg.color.disasm as D
import pwndbg.color.nearpc as N
import pwndbg.color.theme
import pwndbg.config
import pwndbg.disasm
import pwndbg.functions
import pwndbg.ida
import pwndbg.regs
import pwndbg.strings
import pwndbg.symbol
import pwndbg.ui
import pwndbg.vmmap
from pwndbg.color import message
def ljust_padding(lst):
    """Right-pad each string in *lst* with spaces to the width of the longest entry."""
    longest_len = max(map(len, lst)) if lst else 0
    return [s.ljust(longest_len) for s in lst]
nearpc_branch_marker = pwndbg.color.theme.Parameter('nearpc-branch-marker', ' ↓', 'branch marker line for nearpc command')
nearpc_branch_marker_contiguous = pwndbg.color.theme.Parameter('nearpc-branch-marker-contiguous', ' ', 'contiguous branch marker line for nearpc command')
pwndbg.color.theme.Parameter('highlight-pc', True, 'whether to highlight the current instruction')
pwndbg.color.theme.Parameter('nearpc-prefix', '►', 'prefix marker for nearpc command')
pwndbg.config.Parameter('left-pad-disasm', True, 'whether to left-pad disassembly')
nearpc_lines = pwndbg.config.Parameter('nearpc-lines', 10, 'number of additional lines to print for the nearpc command')
show_args = pwndbg.config.Parameter('nearpc-show-args', True, 'show call arguments below instruction')
parser = argparse.ArgumentParser(description='''Disassemble near a specified address.''')
parser.add_argument("pc", type=int, nargs="?", default=None, help="Address to dissassemble near.")
parser.add_argument("lines", type=int, nargs="?", default=None, help="Number of lines to show on either side of the address.")
nargs="?", default=False, help="Whether to emulate instructions to find the next ones or just linearly disassemble.")
@pwndbg.commands.ArgparsedCommand(parser)
@pwndbg.commands.OnlyWhenRunning
def nearpc(pc=None, lines=None, to_string=False, emulate=False):
    """
    Disassemble near a specified address.

    Args:
        pc: address to disassemble around; defaults to the current $pc.
        lines: instructions to show on either side; defaults to nearpc-lines/2.
        to_string: when True, return the rendered lines without printing.
        emulate: when True, emulate instructions to follow real control flow.

    Returns:
        list of rendered (colorized) output lines.

    Note: this copy had string literals containing '#' (the '%#x' format
    specifiers) truncated by a comment stripper, leaving unterminated
    strings; they are restored here.
    """
    # Repeat-mode (pressing enter) continues from the next address.
    if nearpc.repeat:
        pc = nearpc.next_pc
    result = []
    # Single small argument means "lines", not "pc".
    if lines is None and (pc is None or int(pc) < 0x100):
        lines = pc
        pc = None
    if pc is None:
        pc = pwndbg.regs.pc
    if lines is None:
        lines = nearpc_lines // 2
    pc = int(pc)
    lines = int(lines)
    # Check whether we can even read this address.
    if not pwndbg.memory.peek(pc):
        result.append(message.error('Invalid address %#x' % pc))
    instructions = pwndbg.disasm.near(pc, lines, emulate=emulate, show_prev_insns=not nearpc.repeat)
    if pwndbg.memory.peek(pc) and not instructions:
        result.append(message.error('Invalid instructions at %#x' % pc))
    # In case $pc is in a new map we don't know about, trigger a search.
    pwndbg.vmmap.find(pc)
    # Gather all addresses and symbols for each instruction.
    symbols = [pwndbg.symbol.get(i.address) for i in instructions]
    addresses = ['%#x' % i.address for i in instructions]
    # Remember where to continue from when the command is repeated.
    nearpc.next_pc = instructions[-1].address + instructions[-1].size if instructions else 0
    # Format the symbol name for each instruction.
    symbols = ['<%s> ' % sym if sym else '' for sym in symbols]
    # Pad out all of the symbols and addresses.
    if pwndbg.config.left_pad_disasm and not nearpc.repeat:
        symbols = ljust_padding(symbols)
        addresses = ljust_padding(addresses)
    prev = None
    # Render each instruction.
    for address_str, symbol, instr in zip(addresses, symbols, instructions):
        asm = D.instruction(instr)
        prefix_sign = pwndbg.config.nearpc_prefix
        # Show the prefix only on the requested address, never in repeat mode.
        show_prefix = instr.address == pc and not nearpc.repeat
        prefix = ' %s' % (prefix_sign if show_prefix else ' ' * len(prefix_sign))
        prefix = N.prefix(prefix)
        pre = pwndbg.ida.Anterior(instr.address)
        if pre:
            result.append(N.ida_anterior(pre))
        # Colorize address/symbol unless this is the highlighted $pc line.
        if instr.address != pc or not pwndbg.config.highlight_pc or nearpc.repeat:
            address_str = N.address(address_str)
            symbol = N.symbol(symbol)
        elif pwndbg.config.highlight_pc:
            prefix = C.highlight(prefix)
            address_str = C.highlight(address_str)
            symbol = C.highlight(symbol)
        line = ' '.join((prefix, address_str, symbol, asm))
        # Non-contiguous jump: show the branch marker line.
        if prev and prev.address + prev.size != instr.address:
            result.append(N.branch_marker('%s' % nearpc_branch_marker))
        # Contiguous branch: show the (possibly empty) contiguous marker.
        elif prev and any(g in prev.groups for g in (CS_GRP_CALL, CS_GRP_JUMP, CS_GRP_RET)):
            if len('%s' % nearpc_branch_marker_contiguous) > 0:
                result.append('%s' % nearpc_branch_marker_contiguous)
        # For syscall instructions, put the name on the side.
        if instr.address == pc:
            syscall_name = pwndbg.arguments.get_syscall_name(instr)
            if syscall_name:
                line += ' <%s>' % N.syscall_name(syscall_name)
        result.append(line)
        # For call instructions, resolve the target and show the arguments.
        if show_args:
            result.extend(['%8s%s' % ('', arg) for arg in pwndbg.arguments.format_args(instruction=instr)])
        prev = instr
    if not to_string:
        print('\n'.join(result))
    return result
parser = argparse.ArgumentParser(description='''Like nearpc, but will emulate instructions from the current $PC forward.''')
parser.add_argument("pc", type=int, nargs="?", default=None, help="Address to emulate near.")
parser.add_argument("lines", type=int, nargs="?", default=None, help="Number of lines to show on either side of the address.")
@pwndbg.commands.ArgparsedCommand(parser)
@pwndbg.commands.OnlyWhenRunning
def emulate(pc=None, lines=None, to_string=False, emulate=True):
    """Like nearpc, but emulates instructions from the current $PC forward.

    Thin wrapper around nearpc() with emulation enabled.

    :param pc: address to emulate near (defaults to the current $PC).
    :param lines: number of lines to show on either side of the address.
    :param to_string: if True, return the lines instead of printing them.
    :param emulate: whether to emulate (parameter shadows the function name
        on purpose so it can be forwarded verbatim to nearpc()).
    """
    # Propagate the repeat flag (set by the command framework on the wrapper
    # object bound below as `emulate_command`) so nearpc suppresses the
    # $PC prefix marker on repeated invocations.
    nearpc.repeat = emulate_command.repeat
    return nearpc(pc, lines, to_string, emulate)
emulate_command = emulate
parser = argparse.ArgumentParser(description='''Compatibility layer for PEDA's pdisass command.''')
parser.add_argument("pc", type=int, nargs="?", default=None, help="Address to disassemble near.")
parser.add_argument("lines", type=int, nargs="?", default=None, help="Number of lines to show on either side of the address.")
@pwndbg.commands.ArgparsedCommand(parser)
@pwndbg.commands.OnlyWhenRunning
def pdisass(pc=None, lines=None, to_string=False):
    """Compatibility layer for PEDA's pdisass command.

    Same as nearpc() but with emulation explicitly disabled.

    :param pc: address to disassemble near (defaults to the current $PC).
    :param lines: number of lines to show on either side of the address.
    :param to_string: if True, return the lines instead of printing them.
    """
    # Forward the repeat flag so the $PC prefix marker is suppressed when
    # the command is repeated (e.g. by pressing Enter in GDB).
    nearpc.repeat = pdisass.repeat
    return nearpc(pc, lines, to_string, False)
nearpc.next_pc = 0
| true | true |
f731df9decd333bae50c1e2a514b5c83b2a8f9b3 | 5,684 | py | Python | scripts/fig3_locking_across_frequencies.py | fabiansinz/locker | 9ca397d0a9aa747552bc43188b07056b87c6e9f0 | [
"MIT"
] | null | null | null | scripts/fig3_locking_across_frequencies.py | fabiansinz/locker | 9ca397d0a9aa747552bc43188b07056b87c6e9f0 | [
"MIT"
] | null | null | null | scripts/fig3_locking_across_frequencies.py | fabiansinz/locker | 9ca397d0a9aa747552bc43188b07056b87c6e9f0 | [
"MIT"
] | null | null | null | import matplotlib
matplotlib.use('Agg')
from matplotlib.collections import PolyCollection
from numpy.fft import fft, fftfreq, fftshift
from locker import mkdir
from locker.analysis import *
from locker.data import *
from scripts.config import params as plot_params, FormatedFigure
def generate_filename(cell, contrast):
    """Build (and create the directory for) the output path of a cell's figure.

    :param cell: mapping with at least the keys 'cell_type' and 'cell_id'
    :param contrast: stimulus contrast (percent), embedded in the file name
    :return: path of the PDF file for this cell/contrast combination
    """
    # Renamed from `dir` to avoid shadowing the builtin dir().
    out_dir = 'figures/figure_locking_across_frequencies/%s/' % (cell['cell_type'],)
    mkdir(out_dir)  # project helper: creates the directory if it is missing
    return out_dir + '%s_contrast%.2f.pdf' % (cell['cell_id'], contrast)
def gauss(t, m, v):
    """Unnormalized Gaussian bump with mean *m* and variance *v*, evaluated at *t*."""
    squared_deviation = (t - m) ** 2
    return np.exp(-squared_deviation / (2 * v))
class FigureMechanisms(FormatedFigure):
    """Two-panel figure: stacked locking spectra (panel A) and phase violins (panel B)."""
    def prepare(self):
        """Create the figure and the two axes ('spectrum', 'violin') on a 3x4 grid."""
        sns.set_context('paper')
        sns.set_style('ticks')
        with plt.rc_context(plot_params):
            self.fig = plt.figure(figsize=(7, 5), dpi=400)
            gs = plt.GridSpec(3, 4)
            self.ax = {}
            # right-most column for the violins, remaining 3 columns for the spectra
            self.ax['violin'] = self.fig.add_subplot(gs[:3, 3])
            self.ax['spectrum'] = self.fig.add_subplot(gs[:3, :3])
        self.gs = gs
    @staticmethod
    def format_spectrum(ax):
        """Format panel A: frequency axis 0-1500 Hz, no y ticks, 'A' label."""
        ax.set_xlim((0, 1500))
        ax.set_xticks(np.linspace(0, 1500, 7))
        ax.legend(bbox_to_anchor=(1.05, 1), bbox_transform=ax.transAxes, ncol=3, frameon=False)
        sns.despine(ax=ax, left=True, trim=True, offset=0)
        ax.set_yticks([])
        ax.set_ylim((-.5, 9.5))
        ax.set_xlabel('frequency [Hz]')
        ax.text(-0.01, 0.99, 'A', transform=ax.transAxes, fontweight='bold')
    @staticmethod
    def format_violin(ax):
        """Format panel B: phase axis labeled in multiples of pi, 'B' label."""
        ax.set_xlim((0, 2 * np.pi))
        ax.set_xticks(np.linspace(0, 2 * np.pi, 5))
        ax.set_xticklabels([r'$0$', r'$\frac{\pi}{2}$', r'$\pi$', r'$\frac{3\pi}{4}$', r'$2\pi$'])
        ax.set_ylabel(r'$\Delta f$ [Hz]')
        ax.set_xlabel('phase')
        # remove the outlines seaborn draws around the violin bodies
        for art in ax.get_children():
            if isinstance(art, PolyCollection):
                art.set_edgecolor(None)
        leg = ax.legend(ncol=1, title='PSTH per cycle of', bbox_to_anchor=(1, 0.97), frameon=False)
        plt.setp(leg.get_title(), fontsize=leg.get_texts()[0].get_fontsize())
        ax.text(-0.15, 1.01, 'B', transform=ax.transAxes, fontweight='bold', va='top', ha='right')
        sns.despine(ax=ax, trim=True, offset=0)
    def format_figure(self):
        """Final pass: align panel y-limits (undo the 0.8 row spacing) and tighten layout."""
        self.ax['violin'].set_ylim([e / .8 for e in self.ax['spectrum'].get_ylim()])
        for a in self.ax.values():
            a.tick_params(length=3, width=1)
            a.spines['bottom'].set_linewidth(1)
            a.spines['left'].set_linewidth(1)
        self.gs.tight_layout(self.fig)
if __name__ == "__main__":
    # --- figure / dataset selection parameters
    f_max = 2000 # Hz
    N = 10
    delta_f = 200
    # drop runs with delta_f in [-381, -319] Hz
    frequency_restriction = '(delta_f > -319) or (delta_f < -381)'
    runs = Runs()
    # restrict to one example p-unit cell
    for cell in (Cells() & dict(cell_type='p-unit', cell_id="2014-12-03-aj")).fetch(as_dict=True):
        # for cell in (Cells() & dict(cell_type='p-unit')).fetch.as_dict:
        unit = cell['cell_type']
        print('Processing', cell['cell_id'])
        # for contrast in [5, 10, 20]:
        for contrast in [20]:
            print("contrast: %.2f%%" % (contrast,))
            target_trials = SecondOrderSpikeSpectra() * runs & cell & \
                            dict(contrast=contrast, am=0, n_harmonics=0) & frequency_restriction
            if target_trials:
                with FigureMechanisms(filename=generate_filename(cell, contrast=contrast)) as (fig, ax):
                    # --- plot spectra, one stacked row per delta_f (rows spaced by 0.8)
                    y = [0]
                    stim_freq, eod_freq, deltaf_freq = [], [], []
                    done = []
                    for i, spec in enumerate(sorted(target_trials.fetch(as_dict=True), key=lambda x: x['delta_f'])):
                        # each delta_f is plotted only once
                        if spec['delta_f'] in done:
                            continue
                        else:
                            done.append(spec['delta_f'])
                        print(u"\t\t\u0394 f=%.2f" % spec['delta_f'])
                        f, v = spec['frequencies'], spec['vector_strengths']
                        idx = (f >= 0) & (f <= f_max) & ~np.isnan(v)
                        ax['spectrum'].fill_between(f[idx], y[-1] + 0 * f[idx], y[-1] + v[idx], lw=0,
                                                    color='k')
                        if i == 0:
                            # scale bar: 0.5 vector strength in axis units
                            ax['spectrum'].plot([20, 20], [8., 8.5], '-', color='k', lw=2,
                                                solid_capstyle='butt')
                            ax['spectrum'].text(40, 8.15, '0.5 vector strength', fontsize=6)
                        y.append(y[-1] + .8)
                        stim_freq.append(spec['eod'] + spec['delta_f'])
                        deltaf_freq.append(spec['delta_f'])
                        eod_freq.append(spec['eod'])
                    # overlay guide lines marking EODf, stimulus and |delta_f| positions
                    ax['spectrum'].plot(eod_freq, y[:-1], '-', alpha=.25, zorder=-10, lw=4, color=colordict['eod'],
                                        label='EODf')
                    ax['spectrum'].plot(stim_freq, y[:-1], '-', alpha=.25, zorder=-10, lw=4,
                                        color=colordict['stimulus'],
                                        label='stimulus')
                    ax['spectrum'].plot(np.abs(deltaf_freq), y[:-1], '-', alpha=.25, zorder=-10, lw=4,
                                        color=colordict['delta_f'],
                                        label=r'$|\Delta f|$')
                    # --- plot locking
                    PhaseLockingHistogram().violin_plot(ax['violin'], restrictions=target_trials.proj(),
                                                        palette=[colordict['eod'], colordict['stimulus']])
                    ax['violin'].legend().set_visible(False)
| 43.389313 | 116 | 0.508797 | import matplotlib
matplotlib.use('Agg')
from matplotlib.collections import PolyCollection
from numpy.fft import fft, fftfreq, fftshift
from locker import mkdir
from locker.analysis import *
from locker.data import *
from scripts.config import params as plot_params, FormatedFigure
def generate_filename(cell, contrast):
dir = 'figures/figure_locking_across_frequencies/%s/' % (cell['cell_type'],)
mkdir(dir)
return dir + '%s_contrast%.2f.pdf' % (cell['cell_id'], contrast)
def gauss(t, m, v):
return np.exp(-(t - m) ** 2 / 2 / v)
class FigureMechanisms(FormatedFigure):
def prepare(self):
sns.set_context('paper')
sns.set_style('ticks')
with plt.rc_context(plot_params):
self.fig = plt.figure(figsize=(7, 5), dpi=400)
gs = plt.GridSpec(3, 4)
self.ax = {}
self.ax['violin'] = self.fig.add_subplot(gs[:3, 3])
self.ax['spectrum'] = self.fig.add_subplot(gs[:3, :3])
self.gs = gs
@staticmethod
def format_spectrum(ax):
ax.set_xlim((0, 1500))
ax.set_xticks(np.linspace(0, 1500, 7))
ax.legend(bbox_to_anchor=(1.05, 1), bbox_transform=ax.transAxes, ncol=3, frameon=False)
sns.despine(ax=ax, left=True, trim=True, offset=0)
ax.set_yticks([])
ax.set_ylim((-.5, 9.5))
ax.set_xlabel('frequency [Hz]')
ax.text(-0.01, 0.99, 'A', transform=ax.transAxes, fontweight='bold')
@staticmethod
def format_violin(ax):
ax.set_xlim((0, 2 * np.pi))
ax.set_xticks(np.linspace(0, 2 * np.pi, 5))
ax.set_xticklabels([r'$0$', r'$\frac{\pi}{2}$', r'$\pi$', r'$\frac{3\pi}{4}$', r'$2\pi$'])
ax.set_ylabel(r'$\Delta f$ [Hz]')
ax.set_xlabel('phase')
for art in ax.get_children():
if isinstance(art, PolyCollection):
art.set_edgecolor(None)
leg = ax.legend(ncol=1, title='PSTH per cycle of', bbox_to_anchor=(1, 0.97), frameon=False)
plt.setp(leg.get_title(), fontsize=leg.get_texts()[0].get_fontsize())
ax.text(-0.15, 1.01, 'B', transform=ax.transAxes, fontweight='bold', va='top', ha='right')
sns.despine(ax=ax, trim=True, offset=0)
def format_figure(self):
self.ax['violin'].set_ylim([e / .8 for e in self.ax['spectrum'].get_ylim()])
for a in self.ax.values():
a.tick_params(length=3, width=1)
a.spines['bottom'].set_linewidth(1)
a.spines['left'].set_linewidth(1)
self.gs.tight_layout(self.fig)
if __name__ == "__main__":
f_max = 2000
N = 10
delta_f = 200
frequency_restriction = '(delta_f > -319) or (delta_f < -381)'
runs = Runs()
for cell in (Cells() & dict(cell_type='p-unit', cell_id="2014-12-03-aj")).fetch(as_dict=True):
unit = cell['cell_type']
print('Processing', cell['cell_id'])
for contrast in [20]:
print("contrast: %.2f%%" % (contrast,))
target_trials = SecondOrderSpikeSpectra() * runs & cell & \
dict(contrast=contrast, am=0, n_harmonics=0) & frequency_restriction
if target_trials:
with FigureMechanisms(filename=generate_filename(cell, contrast=contrast)) as (fig, ax):
y = [0]
stim_freq, eod_freq, deltaf_freq = [], [], []
done = []
for i, spec in enumerate(sorted(target_trials.fetch(as_dict=True), key=lambda x: x['delta_f'])):
if spec['delta_f'] in done:
continue
else:
done.append(spec['delta_f'])
print(u"\t\t\u0394 f=%.2f" % spec['delta_f'])
f, v = spec['frequencies'], spec['vector_strengths']
idx = (f >= 0) & (f <= f_max) & ~np.isnan(v)
ax['spectrum'].fill_between(f[idx], y[-1] + 0 * f[idx], y[-1] + v[idx], lw=0,
color='k')
if i == 0:
ax['spectrum'].plot([20, 20], [8., 8.5], '-', color='k', lw=2,
solid_capstyle='butt')
ax['spectrum'].text(40, 8.15, '0.5 vector strength', fontsize=6)
y.append(y[-1] + .8)
stim_freq.append(spec['eod'] + spec['delta_f'])
deltaf_freq.append(spec['delta_f'])
eod_freq.append(spec['eod'])
ax['spectrum'].plot(eod_freq, y[:-1], '-', alpha=.25, zorder=-10, lw=4, color=colordict['eod'],
label='EODf')
ax['spectrum'].plot(stim_freq, y[:-1], '-', alpha=.25, zorder=-10, lw=4,
color=colordict['stimulus'],
label='stimulus')
ax['spectrum'].plot(np.abs(deltaf_freq), y[:-1], '-', alpha=.25, zorder=-10, lw=4,
color=colordict['delta_f'],
label=r'$|\Delta f|$')
PhaseLockingHistogram().violin_plot(ax['violin'], restrictions=target_trials.proj(),
palette=[colordict['eod'], colordict['stimulus']])
ax['violin'].legend().set_visible(False)
| true | true |
f731e03ae1b3564391dc273fed1062bf7757db8f | 3,023 | py | Python | src/caravaggio_rest_api/users/admin.py | intellstartup/django-caravaggio-rest-api | 98a530f7c0c38860561fac2f57476cf40f03e3d7 | [
"MIT"
] | null | null | null | src/caravaggio_rest_api/users/admin.py | intellstartup/django-caravaggio-rest-api | 98a530f7c0c38860561fac2f57476cf40f03e3d7 | [
"MIT"
] | null | null | null | src/caravaggio_rest_api/users/admin.py | intellstartup/django-caravaggio-rest-api | 98a530f7c0c38860561fac2f57476cf40f03e3d7 | [
"MIT"
] | 1 | 2020-03-03T22:48:51.000Z | 2020-03-03T22:48:51.000Z | # -*- coding: utf-8 -*
# Copyright (c) 2019 BuildGroup Data Services, Inc.
# All rights reserved.
# This software is proprietary and confidential and may not under
# any circumstances be used, copied, or distributed.
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from caravaggio_rest_api.users.models import CaravaggioOrganization, CaravaggioClient, CaravaggioUser
from caravaggio_rest_api.users.forms import CaravaggioUserCreationForm, CaravaggioUserChangeForm
from django.utils.translation import gettext_lazy as _
class CaravaggioClientAdmin(admin.ModelAdmin):
    """Django admin configuration for CaravaggioClient records."""
    model = CaravaggioClient
    fieldsets = (
        (None, {"fields": ("id", "email")}),
        (_("Client info"), {"fields": ("name",)}),
        (_("Permissions"), {"fields": ("is_active",),}),
        (_("Important dates"), {"fields": ("date_joined", "date_deactivated")}),
    )
    # Minimal set of fields shown on the "add client" form.
    add_fieldsets = ((None, {"classes": ("wide",), "fields": ("id", "email"),}),)
    list_display = ("id", "email", "name", "is_active")
    # NOTE(review): "is_active" is a boolean flag; including it in
    # search_fields (text search) is likely a no-op -- confirm intent.
    search_fields = ("id", "email", "name", "is_active")
    ordering = ("id", "email", "name")
class CaravaggioOrganizationAdmin(admin.ModelAdmin):
    """Django admin configuration for CaravaggioOrganization records."""
    model = CaravaggioOrganization
    fieldsets = (
        (None, {"fields": ("id", "email")}),
        (_("Organization info"), {"fields": ("name",)}),
        (_("Users"), {"fields": ("owner", "administrators", "members", "restricted_members"),}),
        (_("Permissions"), {"fields": ("is_active",),}),
        (_("Important dates"), {"fields": ("created", "updated")}),
    )
    # Minimal set of fields shown on the "add organization" form.
    add_fieldsets = ((None, {"classes": ("wide",), "fields": ("id", "email"),}),)
    list_display = ("id", "email", "name", "owner", "is_active")
    # NOTE(review): "owner" looks like a relation; search_fields on a related
    # field usually needs an explicit lookup (e.g. "owner__email"), and
    # "is_active" is a boolean -- verify these entries actually work.
    search_fields = ("id", "email", "name", "owner", "is_active", "created", "updated")
    ordering = ("id", "email", "name", "created", "updated")
class CaravaggioUserAdmin(UserAdmin):
    """Django admin configuration for CaravaggioUser, with custom creation/change forms."""
    add_form = CaravaggioUserCreationForm
    form = CaravaggioUserChangeForm
    model = CaravaggioUser
    # Disabled organization section (kept for reference):
    # (_('Organizations'), {
    # 'fields': ('owner_of', 'administrator_of', 'member_of',
    # 'restricted_member_of'),
    # }),
    fieldsets = (
        (None, {"fields": ("client", "email", "password")}),
        (_("Personal info"), {"fields": ("first_name", "last_name")}),
        (_("Permissions"), {"fields": ("is_active", "is_staff", "is_superuser", "groups", "user_permissions"),}),
        (_("Important dates"), {"fields": ("last_login", "date_joined")}),
    )
    # Minimal set of fields shown on the "add user" form.
    add_fieldsets = ((None, {"classes": ("wide",), "fields": ("client", "email", "password1", "password2"),}),)
    list_display = ("client", "email", "first_name", "last_name", "is_staff")
    search_fields = ("client__id", "email", "first_name", "last_name")
    ordering = (
        "client__id",
        "email",
    )
admin.site.register(CaravaggioUser, CaravaggioUserAdmin)
admin.site.register(CaravaggioClient, CaravaggioClientAdmin)
admin.site.register(CaravaggioOrganization, CaravaggioOrganizationAdmin)
| 38.75641 | 113 | 0.629176 |
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from caravaggio_rest_api.users.models import CaravaggioOrganization, CaravaggioClient, CaravaggioUser
from caravaggio_rest_api.users.forms import CaravaggioUserCreationForm, CaravaggioUserChangeForm
from django.utils.translation import gettext_lazy as _
class CaravaggioClientAdmin(admin.ModelAdmin):
model = CaravaggioClient
fieldsets = (
(None, {"fields": ("id", "email")}),
(_("Client info"), {"fields": ("name",)}),
(_("Permissions"), {"fields": ("is_active",),}),
(_("Important dates"), {"fields": ("date_joined", "date_deactivated")}),
)
add_fieldsets = ((None, {"classes": ("wide",), "fields": ("id", "email"),}),)
list_display = ("id", "email", "name", "is_active")
search_fields = ("id", "email", "name", "is_active")
ordering = ("id", "email", "name")
class CaravaggioOrganizationAdmin(admin.ModelAdmin):
model = CaravaggioOrganization
fieldsets = (
(None, {"fields": ("id", "email")}),
(_("Organization info"), {"fields": ("name",)}),
(_("Users"), {"fields": ("owner", "administrators", "members", "restricted_members"),}),
(_("Permissions"), {"fields": ("is_active",),}),
(_("Important dates"), {"fields": ("created", "updated")}),
)
add_fieldsets = ((None, {"classes": ("wide",), "fields": ("id", "email"),}),)
list_display = ("id", "email", "name", "owner", "is_active")
search_fields = ("id", "email", "name", "owner", "is_active", "created", "updated")
ordering = ("id", "email", "name", "created", "updated")
class CaravaggioUserAdmin(UserAdmin):
add_form = CaravaggioUserCreationForm
form = CaravaggioUserChangeForm
model = CaravaggioUser
fieldsets = (
(None, {"fields": ("client", "email", "password")}),
(_("Personal info"), {"fields": ("first_name", "last_name")}),
(_("Permissions"), {"fields": ("is_active", "is_staff", "is_superuser", "groups", "user_permissions"),}),
(_("Important dates"), {"fields": ("last_login", "date_joined")}),
)
add_fieldsets = ((None, {"classes": ("wide",), "fields": ("client", "email", "password1", "password2"),}),)
list_display = ("client", "email", "first_name", "last_name", "is_staff")
search_fields = ("client__id", "email", "first_name", "last_name")
ordering = (
"client__id",
"email",
)
admin.site.register(CaravaggioUser, CaravaggioUserAdmin)
admin.site.register(CaravaggioClient, CaravaggioClientAdmin)
admin.site.register(CaravaggioOrganization, CaravaggioOrganizationAdmin)
| true | true |
f731e044c42d70929ca1dde381c15c75b32e29b6 | 4,826 | py | Python | packages/fetchai/skills/thermometer/strategy.py | cyenyxe/agents-aea | c2aec9127028ae13def3f69fbc80d35400de1565 | [
"Apache-2.0"
] | null | null | null | packages/fetchai/skills/thermometer/strategy.py | cyenyxe/agents-aea | c2aec9127028ae13def3f69fbc80d35400de1565 | [
"Apache-2.0"
] | 1 | 2020-02-21T14:28:13.000Z | 2020-03-05T14:53:53.000Z | packages/fetchai/skills/thermometer/strategy.py | cyenyxe/agents-aea | c2aec9127028ae13def3f69fbc80d35400de1565 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the strategy class."""
import logging
from random import randrange
from typing import Any, Dict, Tuple
from temper import Temper
from aea.helpers.search.models import Description, Query
from aea.mail.base import Address
from aea.skills.base import Model
from packages.fetchai.skills.thermometer.thermometer_data_model import (
SCHEME,
Thermometer_Datamodel,
)
DEFAULT_PRICE_PER_ROW = 1
DEFAULT_SELLER_TX_FEE = 0
DEFAULT_CURRENCY_PBK = "FET"
DEFAULT_LEDGER_ID = "fetchai"
DEFAULT_IS_LEDGER_TX = True
DEFAULT_HAS_SENSOR = True
logger = logging.getLogger(__name__)
class Strategy(Model):
    """This class defines a strategy for the agent."""

    def __init__(self, **kwargs) -> None:
        """
        Initialize the strategy of the agent.

        Optional keyword arguments (all fall back to module-level defaults):
        price_per_row, seller_tx_fee, currency_id, ledger_id,
        is_ledger_tx, has_sensor.

        :return: None
        """
        self._price_per_row = kwargs.pop("price_per_row", DEFAULT_PRICE_PER_ROW)
        self._seller_tx_fee = kwargs.pop("seller_tx_fee", DEFAULT_SELLER_TX_FEE)
        self._currency_id = kwargs.pop("currency_id", DEFAULT_CURRENCY_PBK)
        self._ledger_id = kwargs.pop("ledger_id", DEFAULT_LEDGER_ID)
        self.is_ledger_tx = kwargs.pop("is_ledger_tx", DEFAULT_IS_LEDGER_TX)
        self._has_sensor = kwargs.pop("has_sensor", DEFAULT_HAS_SENSOR)
        super().__init__(**kwargs)
        self._oef_msg_id = 0  # monotonically increasing OEF message counter

    def get_next_oef_msg_id(self) -> int:
        """
        Get the next oef msg id.

        :return: the next oef msg id
        """
        self._oef_msg_id += 1
        return self._oef_msg_id

    def get_service_description(self) -> Description:
        """
        Get the service description.

        :return: a description of the offered services
        """
        desc = Description(SCHEME, data_model=Thermometer_Datamodel())
        return desc

    def is_matching_supply(self, query: Query) -> bool:
        """
        Check if the query matches the supply.

        :param query: the query
        :return: bool indicating whether it matches or not
        """
        # TODO, this is a stub: every query is accepted for now.
        return True

    def generate_proposal_and_data(
        self, query: Query, counterparty: Address
    ) -> Tuple[Description, Dict[str, Any]]:
        """
        Generate a proposal matching the query.

        :param query: the query (unused while is_matching_supply is a stub)
        :param counterparty: the counterparty of the proposal.
        :return: a tuple of proposal and the temperature data
        """
        tx_nonce = self.context.ledger_apis.generate_tx_nonce(
            identifier=self._ledger_id,
            seller=self.context.agent_addresses[self._ledger_id],
            client=counterparty,
        )
        temp_data = self._build_data_payload()
        total_price = self._price_per_row
        # Guard against configurations where the fee would exceed the price.
        assert (
            total_price - self._seller_tx_fee > 0
        ), "This sale would generate a loss, change the configs!"
        proposal = Description(
            {
                "price": total_price,
                "seller_tx_fee": self._seller_tx_fee,
                "currency_id": self._currency_id,
                "ledger_id": self._ledger_id,
                "tx_nonce": tx_nonce,
            }
        )
        return proposal, temp_data

    def _build_data_payload(self) -> Dict[str, Any]:
        """
        Build the data payload.

        :return: a dict with the single key 'thermometer_data'
        """
        if self._has_sensor:
            temper = Temper()
            while True:
                results = temper.read()
                if "internal temperature" in results[0]:
                    degrees = {"thermometer_data": results}
                    # BUGFIX: without this break the loop never terminated,
                    # even after a successful sensor read.
                    break
                logger.debug("Couldn't read the sensor I am re-trying.")
        else:
            # No hardware sensor available: fabricate a plausible reading.
            degrees = {"thermometer_data": randrange(10, 25)}  # nosec
        logger.info(degrees)
        return degrees
| 32.829932 | 92 | 0.61666 |
import logging
from random import randrange
from typing import Any, Dict, Tuple
from temper import Temper
from aea.helpers.search.models import Description, Query
from aea.mail.base import Address
from aea.skills.base import Model
from packages.fetchai.skills.thermometer.thermometer_data_model import (
SCHEME,
Thermometer_Datamodel,
)
DEFAULT_PRICE_PER_ROW = 1
DEFAULT_SELLER_TX_FEE = 0
DEFAULT_CURRENCY_PBK = "FET"
DEFAULT_LEDGER_ID = "fetchai"
DEFAULT_IS_LEDGER_TX = True
DEFAULT_HAS_SENSOR = True
logger = logging.getLogger(__name__)
class Strategy(Model):
def __init__(self, **kwargs) -> None:
self._price_per_row = kwargs.pop("price_per_row", DEFAULT_PRICE_PER_ROW)
self._seller_tx_fee = kwargs.pop("seller_tx_fee", DEFAULT_SELLER_TX_FEE)
self._currency_id = kwargs.pop("currency_id", DEFAULT_CURRENCY_PBK)
self._ledger_id = kwargs.pop("ledger_id", DEFAULT_LEDGER_ID)
self.is_ledger_tx = kwargs.pop("is_ledger_tx", DEFAULT_IS_LEDGER_TX)
self._has_sensor = kwargs.pop("has_sensor", DEFAULT_HAS_SENSOR)
super().__init__(**kwargs)
self._oef_msg_id = 0
def get_next_oef_msg_id(self) -> int:
self._oef_msg_id += 1
return self._oef_msg_id
def get_service_description(self) -> Description:
desc = Description(SCHEME, data_model=Thermometer_Datamodel())
return desc
def is_matching_supply(self, query: Query) -> bool:
return True
def generate_proposal_and_data(
self, query: Query, counterparty: Address
) -> Tuple[Description, Dict[str, Any]]:
tx_nonce = self.context.ledger_apis.generate_tx_nonce(
identifier=self._ledger_id,
seller=self.context.agent_addresses[self._ledger_id],
client=counterparty,
)
temp_data = self._build_data_payload()
total_price = self._price_per_row
assert (
total_price - self._seller_tx_fee > 0
), "This sale would generate a loss, change the configs!"
proposal = Description(
{
"price": total_price,
"seller_tx_fee": self._seller_tx_fee,
"currency_id": self._currency_id,
"ledger_id": self._ledger_id,
"tx_nonce": tx_nonce,
}
)
return proposal, temp_data
def _build_data_payload(self) -> Dict[str, Any]:
if self._has_sensor:
temper = Temper()
while True:
results = temper.read()
if "internal temperature" in results[0].keys():
degrees = {"thermometer_data": results}
else:
logger.debug("Couldn't read the sensor I am re-trying.")
else:
degrees = {"thermometer_data": randrange(10, 25)} # nosec
logger.info(degrees)
return degrees
| true | true |
f731e2cb91bc067b57e8218188dc62edb4b4ad1a | 3,754 | py | Python | utils/text_fallback.py | laura-ham/INTEX | 07bcf26ca17092ecf4fc41b85ea9d764a0caa8c9 | [
"MIT"
] | null | null | null | utils/text_fallback.py | laura-ham/INTEX | 07bcf26ca17092ecf4fc41b85ea9d764a0caa8c9 | [
"MIT"
] | null | null | null | utils/text_fallback.py | laura-ham/INTEX | 07bcf26ca17092ecf4fc41b85ea9d764a0caa8c9 | [
"MIT"
] | null | null | null | import nltk
import pandas as pd
try:
import libvoikko
except ModuleNotFoundError:
from voikko import libvoikko
import logging
from nltk.corpus import stopwords
logger = logging.getLogger(__name__)
nltk.download('stopwords')
EMAIL_REGEX = (
r"(?:[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)"
r"*|\"(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21\x23-\x5b\x5d-\x7f]|\\"
r"[\x01-\x09\x0b\x0c\x0e-\x7f])*\")@(?:(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])"
r"?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])"
r"?|\[(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.)"
r"{3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?|[a-z0-9-]*[a-z0-9]"
r":(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21-\x5a\x53-\x7f]"
r"|\\[\x01-\x09\x0b\x0c\x0e-\x7f])+)\])")
URL_REGEX = (r"(https?:\/\/(?:www\.|(?!www))"
r"[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]"
r"\.[^\s]{2,}|www\.[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]"
r"\.[^\s]{2,}|https?:\/\/(?:www\.|(?!www))"
r"[a-zA-Z0-9]\.[^\s]{2,}|www\.[a-zA-Z0-9]\.[^\s]{2,})")
URL_EMAIL_REGEX = '|'.join([EMAIL_REGEX, URL_REGEX])
FIN_STOPS = set(stopwords.words('finnish'))
FIN_STOPS.update([
'hei', 'moi', 'moikka', 'moro', 'tervehdys', 'terve', 'terveisin', 'siis',
'myös', 'kiitos', 'yle', 'uutisikkuna', 'kiitoksia', 'kiitosta', 'ok',
'eli', 'okei', 'no', 'sitten', 'jo', 'vielä', 'aina', 'jotta'
])
DEL_FROM_FINSTOPS = ['en', 'et', 'ei', 'emme', 'ette', 'eivät']
for word in DEL_FROM_FINSTOPS:
FIN_STOPS.remove(word)
SWE_STOPS = set(stopwords.words('swedish'))
SWE_STOPS.remove('min')
SWE_STOPS.update(['swe', 'svenska', 'dag', 'buu', 'klubben', 'fråga', 'veckans', 'jag'])
EN_STOPS = set(stopwords.words('english'))
EN_STOPS.update(['of', 'and', 'you', 'for', 'what', 'have', 'can'])
DEL_FROM_ENSTOPS = ['on', 'as', 'a', 'd', 'm', 'o', 's', 't', 'me', 'no', 'y']
for word in DEL_FROM_ENSTOPS:
EN_STOPS.remove(word)
OTHER_STOPS = set([
'mailto', 'subject', 'from', 'to', 'vs', 'message', 'original', 'date',
're', 'terv', 'sent', 'from', 'kello', 'fin', 'swe', 'uutisikkuna'
])
FINAL_STOPS = FIN_STOPS | OTHER_STOPS
voikko = libvoikko.Voikko('fi')
voikko.setIgnoreDot(True)
def _fin_lemmatize_word(string):
    """Return the Voikko base form of *string*, or the input unchanged if none exists."""
    analyses = voikko.analyze(string)
    if len(analyses) > 0:
        base_form = analyses[0].get('BASEFORM')
        if base_form is not None:
            return base_form
    return string
def _finnish_detector(text):
    """Heuristically decide whether *text* is Finnish by counting stop-word hits."""
    tokens = set(text.split())
    fi_hits = len(tokens & FIN_STOPS)
    swe_hits = len(tokens & SWE_STOPS)
    en_hits = len(tokens & EN_STOPS)
    # Finnish must beat both English and Swedish hit counts.
    return (fi_hits > en_hits) & (fi_hits > swe_hits)
def process_and_filter(words):
    """Split *words*, drop stop words and single-char tokens, lemmatize the rest."""
    surviving = (token for token in words.split()
                 if len(token) > 1 and token not in FINAL_STOPS)
    return [_fin_lemmatize_word(token) for token in surviving]
def _process_text(text_series):
    """Clean a pandas Series of raw texts.

    Removes e-mail addresses and URLs, lowercases, strips everything except
    letters (incl. Finnish ÄÖÅ), then drops stop words and lemmatizes via
    process_and_filter.

    :param text_series: pandas Series of str
    :return: Series of cleaned, space-joined, lowercased strings
    """
    # Remove emails and URLs, then lowercase.
    # BUGFIX: the original called .lower() on the Series object itself
    # (AttributeError) instead of the element-wise .str.lower().
    text_series = text_series.str.replace(URL_EMAIL_REGEX, ' ', regex=True).str.lower()
    # Remove all except letters and whitespace.
    # BUGFIX: the original chained Series.replace() (whole-value match)
    # where the element-wise regex .str.replace() was intended; the explicit
    # ':' removal is subsumed by the non-letter character class.
    text_series = text_series.str.replace('[^A-Za-zÄÖÅäöå]', ' ', regex=True)
    # Remove stop words and 1 char tokens, then lemmatize with Voikko.
    # Return as lowercased string as Voikko uppercases some words.
    return text_series.apply(process_and_filter).str.join(' ').str.lower()
def process_single_text(text):
    """Clean a single raw text string (single-document mirror of _process_text).

    :param text: raw input string
    :return: cleaned, space-joined, lowercased string
    """
    import re  # local import: the module header does not import re
    # BUGFIX: str.replace() treats the pattern as a literal substring, so the
    # URL/e-mail regex never matched anything; re.sub applies it as a regex.
    text = re.sub(URL_EMAIL_REGEX, ' ', text)
    # Keep only alphabetic characters, everything else becomes whitespace.
    text = ''.join([c if c.isalpha() else ' ' for c in text.lower()])
    return ' '.join(process_and_filter(text)).lower()
def process_text_df(df, text_col):
    """Filter and preprocess the text column of *df*.

    Drops very short and non-Finnish documents, then cleans the remaining
    texts with _process_text.

    :param df: pandas DataFrame containing a text column
    :param text_col: name of the text column
    :return: the filtered and cleaned DataFrame
    """
    # Remove very short texts
    df = df[df[text_col].str.len() > 5]
    # Remove non-Finnish documents
    df = df[df[text_col].apply(_finnish_detector)]
    # Work on an explicit copy: the boolean-mask slices above may be views,
    # and assigning into them triggers SettingWithCopy warnings.
    df = df.copy()
    # Preprocess text
    df[text_col] = _process_text(df[text_col])
    # BUGFIX: the original never returned the result; all assignments above
    # rebind the local name only, so callers saw no effect at all.
    return df
| 33.81982 | 88 | 0.600693 | import nltk
import pandas as pd
try:
import libvoikko
except ModuleNotFoundError:
from voikko import libvoikko
import logging
from nltk.corpus import stopwords
logger = logging.getLogger(__name__)
nltk.download('stopwords')
EMAIL_REGEX = (
r"(?:[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)"
r"*|\"(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21\x23-\x5b\x5d-\x7f]|\\"
r"[\x01-\x09\x0b\x0c\x0e-\x7f])*\")@(?:(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])"
r"?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])"
r"?|\[(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.)"
r"{3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?|[a-z0-9-]*[a-z0-9]"
r":(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21-\x5a\x53-\x7f]"
r"|\\[\x01-\x09\x0b\x0c\x0e-\x7f])+)\])")
URL_REGEX = (r"(https?:\/\/(?:www\.|(?!www))"
r"[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]"
r"\.[^\s]{2,}|www\.[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]"
r"\.[^\s]{2,}|https?:\/\/(?:www\.|(?!www))"
r"[a-zA-Z0-9]\.[^\s]{2,}|www\.[a-zA-Z0-9]\.[^\s]{2,})")
URL_EMAIL_REGEX = '|'.join([EMAIL_REGEX, URL_REGEX])
FIN_STOPS = set(stopwords.words('finnish'))
FIN_STOPS.update([
'hei', 'moi', 'moikka', 'moro', 'tervehdys', 'terve', 'terveisin', 'siis',
'myös', 'kiitos', 'yle', 'uutisikkuna', 'kiitoksia', 'kiitosta', 'ok',
'eli', 'okei', 'no', 'sitten', 'jo', 'vielä', 'aina', 'jotta'
])
DEL_FROM_FINSTOPS = ['en', 'et', 'ei', 'emme', 'ette', 'eivät']
for word in DEL_FROM_FINSTOPS:
FIN_STOPS.remove(word)
SWE_STOPS = set(stopwords.words('swedish'))
SWE_STOPS.remove('min')
SWE_STOPS.update(['swe', 'svenska', 'dag', 'buu', 'klubben', 'fråga', 'veckans', 'jag'])
EN_STOPS = set(stopwords.words('english'))
EN_STOPS.update(['of', 'and', 'you', 'for', 'what', 'have', 'can'])
DEL_FROM_ENSTOPS = ['on', 'as', 'a', 'd', 'm', 'o', 's', 't', 'me', 'no', 'y']
for word in DEL_FROM_ENSTOPS:
EN_STOPS.remove(word)
OTHER_STOPS = set([
'mailto', 'subject', 'from', 'to', 'vs', 'message', 'original', 'date',
're', 'terv', 'sent', 'from', 'kello', 'fin', 'swe', 'uutisikkuna'
])
FINAL_STOPS = FIN_STOPS | OTHER_STOPS
voikko = libvoikko.Voikko('fi')
voikko.setIgnoreDot(True)
def _fin_lemmatize_word(string):
voikkofied = voikko.analyze(string)
if len(voikkofied) > 0 and voikkofied[0].get('BASEFORM') is not None:
return voikkofied[0]['BASEFORM']
else:
return string
def _finnish_detector(text):
token_set = set(text.split())
n_fi = len(token_set.intersection(FIN_STOPS))
n_swe = len(token_set.intersection(SWE_STOPS))
n_en = len(token_set.intersection(EN_STOPS))
return (n_fi > n_en) & (n_fi > n_swe)
def process_and_filter(words):
return [_fin_lemmatize_word(word) for word in words.split()
if len(word) > 1 and word not in FINAL_STOPS]
def _process_text(text_series):
text_series = text_series.str.replace(URL_EMAIL_REGEX, ' ').lower()
text_series = text_series.str.replace(':', '').replace('[^A-Za-zÄÖÅäöå]', ' ')
return text_series.apply(process_and_filter).str.join(' ').str.lower()
def process_single_text(text):
text = text.replace(URL_EMAIL_REGEX, ' ')
text = ''.join([c if c.isalpha() else ' ' for c in text.lower()])
return ' '.join(process_and_filter(text)).lower()
def process_text_df(df, text_col):
df = df[df[text_col].str.len() > 5]
df = df[df[text_col].apply(_finnish_detector)]
df[text_col] = _process_text(df[text_col])
| true | true |
f731e3087e60474a011882b5a3c220186114ae19 | 979 | py | Python | tests/tests_helpers.py | thsis/NIS18 | 1f2a7be1ab209fa7c0a25cb8eace744336b07c1f | [
"MIT"
] | null | null | null | tests/tests_helpers.py | thsis/NIS18 | 1f2a7be1ab209fa7c0a25cb8eace744336b07c1f | [
"MIT"
] | null | null | null | tests/tests_helpers.py | thsis/NIS18 | 1f2a7be1ab209fa7c0a25cb8eace744336b07c1f | [
"MIT"
] | null | null | null | import numpy as np
from algorithms import helpers
def test_QR(Ntests):
    """Randomized check that helpers.qr_factorize reconstructs its input.

    Factorizes *Ntests* random square matrices (sizes 2x2 .. 10x10) and
    verifies Q.dot(R) == X elementwise (within np.isclose tolerance).
    Prints a summary and returns True only if every test passed.
    """
    passed = 0
    critical = 0  # count of unexpected (non-assertion) errors
    for _ in range(Ntests):
        try:
            n = np.random.randint(2, 11)
            X = np.random.uniform(low=0.0,
                                  high=100.0,
                                  size=(n, n))
            Q, R = helpers.qr_factorize(X)
            # the reconstruction must match the input in every entry
            assert all(np.isclose(Q.dot(R), X).flatten())
            passed += 1
        except AssertionError:
            # reconstruction mismatch: report the offending matrix and go on
            print("AssertionError with:")
            print(X)
            continue
        except Exception:
            # anything else (e.g. an exception inside qr_factorize) is critical
            print("Other Error with:")
            print(X)
            critical += 1
    print("Test Results:")
    print("Passed {} of {} Tests.".format(passed, Ntests))
    print("Failed {} tests.".format(Ntests-passed-critical))
    print("{} tests failed critically".format(critical))
    if passed == Ntests:
        return True
    else:
        return False
| 26.459459 | 60 | 0.514811 | import numpy as np
from algorithms import helpers
def test_QR(Ntests):
passed = 0
critical = 0
for _ in range(Ntests):
try:
n = np.random.randint(2, 11)
X = np.random.uniform(low=0.0,
high=100.0,
size=(n, n))
Q, R = helpers.qr_factorize(X)
assert all(np.isclose(Q.dot(R), X).flatten())
passed += 1
except AssertionError:
print("AssertionError with:")
print(X)
continue
except Exception:
print("Other Error with:")
print(X)
critical += 1
print("Test Results:")
print("Passed {} of {} Tests.".format(passed, Ntests))
print("Failed {} tests.".format(Ntests-passed-critical))
print("{} tests failed critically".format(critical))
if passed == Ntests:
return True
else:
return False
assert test_QR(1000)
| true | true |
f731e5102507b94eeb51094b340932c5317bf095 | 46 | py | Python | tf1.6/hello_world.py | sgeos/tensorflow_playground | c4c10b74b1eebb63f0bf8de7a9a976c11f025618 | [
"CC0-1.0"
] | null | null | null | tf1.6/hello_world.py | sgeos/tensorflow_playground | c4c10b74b1eebb63f0bf8de7a9a976c11f025618 | [
"CC0-1.0"
] | null | null | null | tf1.6/hello_world.py | sgeos/tensorflow_playground | c4c10b74b1eebb63f0bf8de7a9a976c11f025618 | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python
print("Hello World!")
| 9.2 | 21 | 0.652174 |
print("Hello World!")
| true | true |
f731e51078bcba5985a20831c62f2df1e230b71b | 9,066 | py | Python | Layers/LSTM.py | starasteh/DeepLearning_from_scratch | 6ed4685e4da57ad5ea51edf84010f2cc9725a2ba | [
"MIT"
] | 6 | 2020-09-13T17:03:26.000Z | 2021-05-16T14:03:59.000Z | Layers/LSTM.py | starasteh/DeepLearning_from_scratch | 6ed4685e4da57ad5ea51edf84010f2cc9725a2ba | [
"MIT"
] | null | null | null | Layers/LSTM.py | starasteh/DeepLearning_from_scratch | 6ed4685e4da57ad5ea51edf84010f2cc9725a2ba | [
"MIT"
] | 2 | 2021-03-17T02:20:42.000Z | 2021-04-24T16:28:45.000Z | '''
Created on January 2020.
@author: Soroosh Tayebi Arasteh <soroosh.arasteh@fau.de>
https://github.com/tayebiarasteh/
'''
from Layers.Base import *
import numpy as np
import pdb
from Layers import Sigmoid, FullyConnected, TanH
import copy
class LSTM(base_layer):
def __init__(self, input_size, hidden_size, output_size):
'''
:input_size: denotes the dimension of the input vector
:hidden_size: denotes the dimension of the hidden state.
'''
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.output_size = output_size
self.hidden_state = np.zeros((self.hidden_size))
self.cell_state = np.zeros((self.hidden_size))
# Sets the boolean state representing whether the RNN
# regards subsequent sequences as a belonging to the same long sequence.
self._memorize = False
self._optimizer = None
self._gradient_weights = 0
# The weights are defined as the weights which are involved in calculating the
# hidden state as a stacked tensor. E.g. if the hidden state is computed with
# a single Fully Connected layer, which receives a stack of the hidden state
# and the input tensor, the weights of this particular Fully Connected Layer,
# are the weights considered to be weights for the whole class.
self._weights = None
self.sigmoid1 = Sigmoid.Sigmoid()
self.sigmoid2 = Sigmoid.Sigmoid()
self.sigmoid3 = Sigmoid.Sigmoid()
self.sigmoid4 = Sigmoid.Sigmoid()
self.tanh1 = TanH.TanH()
self.tanh2 = TanH.TanH()
self.fully_middle = FullyConnected.FullyConnected(input_size=input_size + hidden_size ,
output_size=4 * hidden_size)
self.fully_out = FullyConnected.FullyConnected(input_size=hidden_size, output_size=output_size)
def forward(self, input_tensor):
output_tensor = np.zeros((input_tensor.shape[0], self.output_size))
# activations and inputs for the backward
self.o = []
self.i = []
self.f = []
self.C_tilda = []
self.cell_state_b = []
self.hidden_state_b = []
self.tanh2_out = []
self.fully_middle_input = []
if self._memorize == False:
self.hidden_state = np.zeros((self.hidden_size))
self.cell_state = np.zeros((self.hidden_size))
self.cell_state_b.append(self.cell_state)
self.hidden_state_b.append(np.zeros((self.hidden_size + 1)))
# giving inputs sequentially
for t, batch in enumerate(input_tensor):
# Concatenation of input and previous hidden state
X_tilda = np.concatenate([self.hidden_state, batch])
# first fully connected layer
fully_middle_out = self.fully_middle.forward(X_tilda)
self.fully_middle_input.append((self.fully_middle.input_tensor))
'''deconcatenating to 4 vectors'''
# Calculate forget gate
f = self.sigmoid1.forward(fully_middle_out[:fully_middle_out.shape[0]//4])
self.f.append(f)
# Calculate input gate
i = self.sigmoid2.forward(fully_middle_out[fully_middle_out.shape[0]//4:fully_middle_out.shape[0]//2])
self.i.append(i)
# Calculate candidate
C_tilda = self.tanh1.forward(fully_middle_out[fully_middle_out.shape[0]//2: 3*fully_middle_out.shape[0]//4])
self.C_tilda.append(C_tilda)
# Calculate memory state
self.cell_state = f * self.cell_state + i * C_tilda
self.cell_state_b.append(self.cell_state)
# Calculate output gate
o = self.sigmoid3.forward(fully_middle_out[3*fully_middle_out.shape[0]//4:])
self.o.append(o)
# tanh2 output
tanh2_out = self.tanh2.forward(self.cell_state)
self.tanh2_out.append(tanh2_out)
# Calculate hidden state
self.hidden_state = o * tanh2_out
# Calculate logits
y = self.fully_out.forward(self.hidden_state)
self.hidden_state_b.append(self.fully_out.input_tensor)
y = self.sigmoid4.forward(y)
output_tensor[t] = y
self.output_tensor = output_tensor
return output_tensor
def backward(self, error_tensor):
gradient_input = np.zeros((error_tensor.shape[0], self.input_size))
# initializing the hidden and cell state gradients
gradient_hidden = np.zeros((error_tensor.shape[0] + 1, self.hidden_size))
gradient_cell = np.zeros((error_tensor.shape[0] + 1, self.hidden_size))
gradient_weights_out = 0
gradient_weights_middle = 0
# giving inputs sequentially
for t in reversed(range(len(error_tensor))):
# gradient of output w.r.t input
self.sigmoid4.activation = self.output_tensor[t]
gradient_out_wrt_in = self.sigmoid4.backward(np.copy(error_tensor)[t])
self.fully_out.input_tensor = self.hidden_state_b[t]
gradient_out_wrt_in = self.fully_out.backward(gradient_out_wrt_in)
gradient_weights_out += self.fully_out.gradient_weights
# gradient summing
out_hidden = gradient_hidden[t] + gradient_out_wrt_in
# gradient output gate
o_gradient = np.copy(out_hidden) * self.tanh2_out[t]
self.sigmoid3.activation = self.o[t]
o_gradient = self.sigmoid3.backward(o_gradient)
# gradient tanh2
gradient_out_wrt_in_cell = np.copy(out_hidden) * self.o[t]
self.tanh2.activation = self.tanh2_out[t]
gradient_out_wrt_in_cell = self.tanh2.backward(gradient_out_wrt_in_cell)
# gradient summing
out_cell = gradient_out_wrt_in_cell + gradient_cell[t + 1]
'''gradient of the summation'''
# gradient candidate
C_tilda_gradient = np.copy(out_cell) * self.i[t]
self.tanh1.activation = self.C_tilda[t]
C_tilda_gradient = self.tanh1.backward(C_tilda_gradient)
# gradient input gate
i_gradient = np.copy(out_cell) * self.C_tilda[t]
self.sigmoid2.activation = self.i[t]
i_gradient = self.sigmoid2.backward(i_gradient)
# gradient cell
gradient_cell[t] = np.copy(out_cell) * self.f[t]
# gradient forget gate
f_gradient = np.copy(out_cell) * self.cell_state_b[t]
self.sigmoid1.activation = self.f[t]
f_gradient = self.sigmoid1.backward(f_gradient)
# concatenation for the fully connected
self.fully_middle.input_tensor = self.fully_middle_input[t]
y = self.fully_middle.backward(np.concatenate([f_gradient, i_gradient, C_tilda_gradient, o_gradient]))
gradient_weights_middle += self.fully_middle.gradient_weights
gradient_hidden[t - 1] = y[:self.hidden_size]
gradient_input[t] = y[self.hidden_size:]
if self._optimizer:
self.fully_out.weights = self._optimizer2.calculate_update(self.fully_out.weights, gradient_weights_out)
self.fully_middle.weights = self._optimizer.calculate_update(self.fully_middle.weights, gradient_weights_middle)
self.final_gradient_weights = gradient_weights_middle
return gradient_input
def initialize(self, weights_initializer, bias_initializer):
self.fully_middle.initialize(weights_initializer, bias_initializer)
self.fully_out.initialize(weights_initializer, bias_initializer)
def calculate_regularization_loss(self, layer):
r_loss = 0
if hasattr(layer, 'optimizer'):
if layer.optimizer:
if layer.optimizer.regularizer:
r_loss += layer.optimizer.regularizer.norm(layer.weights)
return r_loss
'''Properties'''
@property
def memorize(self):
return self._memorize
@memorize.setter
def memorize(self, value):
self._memorize = value
@property
def gradient_weights(self):
return self.final_gradient_weights
@gradient_weights.setter
def gradient_weights(self, value):
self.fully_middle.gradient_weights = value
@gradient_weights.deleter
def gradient_weights(self):
del self.fully_middle.gradient_weights
@property
def weights(self):
return self.fully_middle.weights
@weights.setter
def weights(self, value):
self.fully_middle.weights = value
@weights.deleter
def weights(self):
del self.fully_middle.weights
@property
def optimizer(self):
return self._optimizer
@optimizer.setter
def optimizer(self, value):
self._optimizer = value
self._optimizer2 = copy.deepcopy(self._optimizer)
@optimizer.deleter
def optimizer(self):
del self._optimizer | 37.155738 | 124 | 0.64604 |
from Layers.Base import *
import numpy as np
import pdb
from Layers import Sigmoid, FullyConnected, TanH
import copy
class LSTM(base_layer):
    """Long Short-Term Memory recurrent layer.

    All four gate pre-activations (forget, input, candidate, output) are
    produced in one shot by a single fully connected layer applied to the
    concatenation of the previous hidden state and the current input, and
    then split into four equal slices.  A second fully connected layer
    followed by a sigmoid maps each hidden state to the layer output.
    """
    def __init__(self, input_size, hidden_size, output_size):
        """Build the sub-layers and zero-initialise hidden and cell state.

        :param input_size: dimension of the input vector at each time step
        :param hidden_size: dimension of the hidden state and cell state
        :param output_size: dimension of the output vector at each time step
        """
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.hidden_state = np.zeros((self.hidden_size))
        self.cell_state = np.zeros((self.hidden_size))
        # When True, hidden/cell state carry over between forward() calls
        # (subsequent batches are treated as one long sequence).
        self._memorize = False
        self._optimizer = None
        self._gradient_weights = 0
        self._weights = None
        # Separate activation instances per gate: backward() restores each
        # one's cached .activation per time step before calling backward.
        self.sigmoid1 = Sigmoid.Sigmoid()
        self.sigmoid2 = Sigmoid.Sigmoid()
        self.sigmoid3 = Sigmoid.Sigmoid()
        self.sigmoid4 = Sigmoid.Sigmoid()
        self.tanh1 = TanH.TanH()
        self.tanh2 = TanH.TanH()
        # One FC layer computes all four gate pre-activations at once,
        # hence the 4 * hidden_size output width.
        self.fully_middle = FullyConnected.FullyConnected(input_size=input_size + hidden_size ,
                                                          output_size=4 * hidden_size)
        # Output projection from hidden state to layer output.
        self.fully_out = FullyConnected.FullyConnected(input_size=hidden_size, output_size=output_size)
    def forward(self, input_tensor):
        """Run the sequence through the cell, one time step per row.

        :param input_tensor: array treated as (time_steps, input_size);
            each row is the input of one time step.
        :return: array of shape (time_steps, output_size).

        Per-step gate activations and layer inputs are cached on ``self``
        because backward() replays them in reverse order.
        """
        output_tensor = np.zeros((input_tensor.shape[0], self.output_size))
        # Per-time-step caches consumed by backward().
        self.o = []
        self.i = []
        self.f = []
        self.C_tilda = []
        self.cell_state_b = []
        self.hidden_state_b = []
        self.tanh2_out = []
        self.fully_middle_input = []
        if self._memorize == False:
            # Fresh sequence: reset the recurrent state.
            self.hidden_state = np.zeros((self.hidden_size))
            self.cell_state = np.zeros((self.hidden_size))
        self.cell_state_b.append(self.cell_state)
        # The +1 presumably leaves room for the bias column FullyConnected
        # appends to its stored input -- TODO confirm against FullyConnected.
        self.hidden_state_b.append(np.zeros((self.hidden_size + 1)))
        for t, batch in enumerate(input_tensor):
            # Stack previous hidden state and current input.
            X_tilda = np.concatenate([self.hidden_state, batch])
            # One FC pass yields all four gate pre-activations, stacked.
            fully_middle_out = self.fully_middle.forward(X_tilda)
            self.fully_middle_input.append((self.fully_middle.input_tensor))
            # Forget gate: first quarter of the stacked pre-activations.
            f = self.sigmoid1.forward(fully_middle_out[:fully_middle_out.shape[0]//4])
            self.f.append(f)
            # Input gate: second quarter.
            i = self.sigmoid2.forward(fully_middle_out[fully_middle_out.shape[0]//4:fully_middle_out.shape[0]//2])
            self.i.append(i)
            # Candidate cell state: third quarter.
            C_tilda = self.tanh1.forward(fully_middle_out[fully_middle_out.shape[0]//2: 3*fully_middle_out.shape[0]//4])
            self.C_tilda.append(C_tilda)
            # Cell update: keep what the forget gate allows, add gated input.
            self.cell_state = f * self.cell_state + i * C_tilda
            self.cell_state_b.append(self.cell_state)
            # Output gate: last quarter.
            o = self.sigmoid3.forward(fully_middle_out[3*fully_middle_out.shape[0]//4:])
            self.o.append(o)
            tanh2_out = self.tanh2.forward(self.cell_state)
            self.tanh2_out.append(tanh2_out)
            # Hidden state: gated, squashed cell state.
            self.hidden_state = o * tanh2_out
            # Project hidden state to the layer output.
            y = self.fully_out.forward(self.hidden_state)
            self.hidden_state_b.append(self.fully_out.input_tensor)
            y = self.sigmoid4.forward(y)
            output_tensor[t] = y
        self.output_tensor = output_tensor
        return output_tensor
    def backward(self, error_tensor):
        """Backpropagation through time over the cached sequence.

        :param error_tensor: gradient w.r.t. the layer output, one row per
            time step (time_steps, output_size).
        :return: gradient w.r.t. the layer input (time_steps, input_size).

        Weight gradients of both FC sub-layers are accumulated over all
        time steps; if an optimizer is set, both layers are updated here.
        """
        gradient_input = np.zeros((error_tensor.shape[0], self.input_size))
        # The extra row lets step t read the (zero) gradient arriving from
        # step t+1 without special-casing the last time step.
        gradient_hidden = np.zeros((error_tensor.shape[0] + 1, self.hidden_size))
        gradient_cell = np.zeros((error_tensor.shape[0] + 1, self.hidden_size))
        gradient_weights_out = 0
        gradient_weights_middle = 0
        for t in reversed(range(len(error_tensor))):
            # Undo output sigmoid + output projection; each cached
            # activation/input of step t is restored before backward().
            self.sigmoid4.activation = self.output_tensor[t]
            gradient_out_wrt_in = self.sigmoid4.backward(np.copy(error_tensor)[t])
            self.fully_out.input_tensor = self.hidden_state_b[t]
            gradient_out_wrt_in = self.fully_out.backward(gradient_out_wrt_in)
            gradient_weights_out += self.fully_out.gradient_weights
            # Total gradient at the hidden state: recurrent + output paths.
            out_hidden = gradient_hidden[t] + gradient_out_wrt_in
            # Output-gate branch of h = o * tanh(c).
            o_gradient = np.copy(out_hidden) * self.tanh2_out[t]
            self.sigmoid3.activation = self.o[t]
            o_gradient = self.sigmoid3.backward(o_gradient)
            # Cell branch through tanh2.
            gradient_out_wrt_in_cell = np.copy(out_hidden) * self.o[t]
            self.tanh2.activation = self.tanh2_out[t]
            gradient_out_wrt_in_cell = self.tanh2.backward(gradient_out_wrt_in_cell)
            # Total gradient at the cell state: local + carry from step t+1.
            out_cell = gradient_out_wrt_in_cell + gradient_cell[t + 1]
            # Candidate branch of c = f*c_prev + i*C_tilda.
            C_tilda_gradient = np.copy(out_cell) * self.i[t]
            self.tanh1.activation = self.C_tilda[t]
            C_tilda_gradient = self.tanh1.backward(C_tilda_gradient)
            # Input-gate branch.
            i_gradient = np.copy(out_cell) * self.C_tilda[t]
            self.sigmoid2.activation = self.i[t]
            i_gradient = self.sigmoid2.backward(i_gradient)
            # Gradient flowing into the cell state of step t-1.
            gradient_cell[t] = np.copy(out_cell) * self.f[t]
            # Forget-gate branch (uses the cell state from *before* step t).
            f_gradient = np.copy(out_cell) * self.cell_state_b[t]
            self.sigmoid1.activation = self.f[t]
            f_gradient = self.sigmoid1.backward(f_gradient)
            # Back through the joint gate FC layer; its input gradient comes
            # back as one vector over [hidden_state, input].
            self.fully_middle.input_tensor = self.fully_middle_input[t]
            y = self.fully_middle.backward(np.concatenate([f_gradient, i_gradient, C_tilda_gradient, o_gradient]))
            gradient_weights_middle += self.fully_middle.gradient_weights
            # At t == 0 this writes the scratch row (index -1), which is
            # never read again afterwards.
            gradient_hidden[t - 1] = y[:self.hidden_size]
            gradient_input[t] = y[self.hidden_size:]
        if self._optimizer:
            # fully_out uses its own deep-copied optimizer instance so
            # optimizer-internal state is not shared between the layers.
            self.fully_out.weights = self._optimizer2.calculate_update(self.fully_out.weights, gradient_weights_out)
            self.fully_middle.weights = self._optimizer.calculate_update(self.fully_middle.weights, gradient_weights_middle)
        self.final_gradient_weights = gradient_weights_middle
        return gradient_input
    def initialize(self, weights_initializer, bias_initializer):
        """Initialise the weights of both fully connected sub-layers."""
        self.fully_middle.initialize(weights_initializer, bias_initializer)
        self.fully_out.initialize(weights_initializer, bias_initializer)
    def calculate_regularization_loss(self, layer):
        """Return the regularization penalty of ``layer``'s weights.

        Yields 0 when the layer has no optimizer or no regularizer.
        """
        r_loss = 0
        if hasattr(layer, 'optimizer'):
            if layer.optimizer:
                if layer.optimizer.regularizer:
                    r_loss += layer.optimizer.regularizer.norm(layer.weights)
        return r_loss
    # ----- properties -------------------------------------------------
    @property
    def memorize(self):
        # Whether recurrent state carries across forward() calls.
        return self._memorize
    @memorize.setter
    def memorize(self, value):
        self._memorize = value
    @property
    def gradient_weights(self):
        # Accumulated gate-layer weight gradient from the last backward().
        return self.final_gradient_weights
    @gradient_weights.setter
    def gradient_weights(self, value):
        self.fully_middle.gradient_weights = value
    @gradient_weights.deleter
    def gradient_weights(self):
        del self.fully_middle.gradient_weights
    @property
    def weights(self):
        # The "weights" of the whole cell are those of the joint gate layer.
        return self.fully_middle.weights
    @weights.setter
    def weights(self, value):
        self.fully_middle.weights = value
    @weights.deleter
    def weights(self):
        del self.fully_middle.weights
    @property
    def optimizer(self):
        return self._optimizer
    @optimizer.setter
    def optimizer(self, value):
        # Keep an independent copy for fully_out so the two FC layers do
        # not share one optimizer object.
        self._optimizer = value
        self._optimizer2 = copy.deepcopy(self._optimizer)
    @optimizer.deleter
    def optimizer(self):
        del self._optimizer
f731e59f967dc833ca0efbb9f27ea0cceff52d86 | 285 | py | Python | tp_epi/tp_epi/doctype/epi_catalog_listing/test_epi_catalog_listing.py | mntechnique/tp_epi | 078a2590b50c759555880f143841ef74071b83bc | [
"MIT"
] | null | null | null | tp_epi/tp_epi/doctype/epi_catalog_listing/test_epi_catalog_listing.py | mntechnique/tp_epi | 078a2590b50c759555880f143841ef74071b83bc | [
"MIT"
] | null | null | null | tp_epi/tp_epi/doctype/epi_catalog_listing/test_epi_catalog_listing.py | mntechnique/tp_epi | 078a2590b50c759555880f143841ef74071b83bc | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2015, MN Technique and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
# test_records = frappe.get_test_records('EPI Catalog Listing')
class TestEPICatalogListing(unittest.TestCase):
pass
| 21.923077 | 63 | 0.778947 |
from __future__ import unicode_literals
import frappe
import unittest
class TestEPICatalogListing(unittest.TestCase):
    """Placeholder test case for the 'EPI Catalog Listing' doctype.

    No assertions yet; the class exists so the test runner discovers this
    doctype's test module by naming convention.
    """
    pass
| true | true |
f731e5a5be517d62ed376f4bef3cd377ea2b9167 | 438 | py | Python | nodux_stock_one/config/desktop.py | tatiqm25/nodux_stock_one | 78bcfd33b8ec89e2f7399dff55bb597cc52be2c0 | [
"MIT"
] | null | null | null | nodux_stock_one/config/desktop.py | tatiqm25/nodux_stock_one | 78bcfd33b8ec89e2f7399dff55bb597cc52be2c0 | [
"MIT"
] | null | null | null | nodux_stock_one/config/desktop.py | tatiqm25/nodux_stock_one | 78bcfd33b8ec89e2f7399dff55bb597cc52be2c0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from frappe import _
def get_data():
    """Return the desk icon/shortcut configuration for this app.

    The first entry registers the (hidden) module itself; the second adds
    a visible shortcut that links to the "Stock One" doctype list.
    """
    module_card = {
        "module_name": "Nodux Stock One",
        "color": "darkgrey",
        "icon": "octicon octicon-file-directory",
        "type": "module",
        "hidden": 1,
    }
    stock_shortcut = {
        "module_name": "Stock",
        "_doctype": "Stock One",
        "color": "#f39c12",
        "icon": "octicon octicon-package",
        "type": "link",
        "link": "List/Stock One",
    }
    return [module_card, stock_shortcut]
| 18.25 | 44 | 0.586758 |
from __future__ import unicode_literals
from frappe import _
def get_data():
return [
{
"module_name": "Nodux Stock One",
"color": "darkgrey",
"icon": "octicon octicon-file-directory",
"type": "module",
"hidden": 1
},
{
"module_name": "Stock",
"_doctype": "Stock One",
"color": "#f39c12",
"icon": "octicon octicon-package",
"type": "link",
"link": "List/Stock One"
}
]
| true | true |
f731e64c211030f3004ca41b06d5ae447a8e7b28 | 3,626 | py | Python | configs/fcos/fcos_r50_caffe_fpn_gn_1x_4gpu.py | wangxu19920419/mmdetection | b7890e243babd7d916086ce288f12350d816bd15 | [
"Apache-2.0"
] | null | null | null | configs/fcos/fcos_r50_caffe_fpn_gn_1x_4gpu.py | wangxu19920419/mmdetection | b7890e243babd7d916086ce288f12350d816bd15 | [
"Apache-2.0"
] | null | null | null | configs/fcos/fcos_r50_caffe_fpn_gn_1x_4gpu.py | wangxu19920419/mmdetection | b7890e243babd7d916086ce288f12350d816bd15 | [
"Apache-2.0"
] | null | null | null | # model settings
model = dict(
type='FCOS',
pretrained='open-mmlab://resnet50_caffe',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
style='caffe'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs=True,
extra_convs_on_inputs=False, # use P5
num_outs=5,
relu_before_extra_convs=True),
bbox_head=dict(
type='FCOSHead',
num_classes=81,
in_channels=256,
stacked_convs=4,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='IoULoss', loss_weight=1.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)))
# training and testing settings
train_cfg = dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False)
test_cfg = dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_thr=0.5),
max_per_img=100)
# dataset settings
dataset_type = 'CocoDataset'
data_root = '/mnt/cephfs_wj/cv/common/coco/COCO2017/'
img_norm_cfg = dict(
mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False)
data = dict(
imgs_per_gpu=1,
workers_per_gpu=4,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017_2parts.json',
img_prefix=data_root,
img_scale=(1333, 800),
img_norm_cfg=img_norm_cfg,
size_divisor=32,
flip_ratio=0.5,
with_mask=False,
with_crowd=False,
with_label=True),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
img_scale=(1333, 800),
img_norm_cfg=img_norm_cfg,
size_divisor=32,
flip_ratio=0,
with_mask=False,
with_crowd=False,
with_label=True),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
img_scale=(1333, 800),
img_norm_cfg=img_norm_cfg,
size_divisor=32,
flip_ratio=0,
with_mask=False,
with_crowd=False,
with_label=False,
test_mode=True))
# optimizer
optimizer = dict(
type='SGD',
lr=0.01,
momentum=0.9,
weight_decay=0.0001,
paramwise_options=dict(bias_lr_mult=2., bias_decay_mult=0.))
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='constant',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[8, 11])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 12
device_ids = range(4)
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = '/mnt/cephfs_wj/cv/wangxu.ailab/ideas_experiments/mmdetection/work_dirs/fcos_r50_caffe_fpn_gn_1x_4gpu'
load_from = None
resume_from = None
workflow = [('train', 1)]
| 27.892308 | 113 | 0.620243 |
model = dict(
type='FCOS',
pretrained='open-mmlab://resnet50_caffe',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
style='caffe'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs=True,
extra_convs_on_inputs=False,
num_outs=5,
relu_before_extra_convs=True),
bbox_head=dict(
type='FCOSHead',
num_classes=81,
in_channels=256,
stacked_convs=4,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='IoULoss', loss_weight=1.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)))
train_cfg = dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False)
test_cfg = dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_thr=0.5),
max_per_img=100)
dataset_type = 'CocoDataset'
data_root = '/mnt/cephfs_wj/cv/common/coco/COCO2017/'
img_norm_cfg = dict(
mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False)
data = dict(
imgs_per_gpu=1,
workers_per_gpu=4,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017_2parts.json',
img_prefix=data_root,
img_scale=(1333, 800),
img_norm_cfg=img_norm_cfg,
size_divisor=32,
flip_ratio=0.5,
with_mask=False,
with_crowd=False,
with_label=True),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
img_scale=(1333, 800),
img_norm_cfg=img_norm_cfg,
size_divisor=32,
flip_ratio=0,
with_mask=False,
with_crowd=False,
with_label=True),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
img_scale=(1333, 800),
img_norm_cfg=img_norm_cfg,
size_divisor=32,
flip_ratio=0,
with_mask=False,
with_crowd=False,
with_label=False,
test_mode=True))
optimizer = dict(
type='SGD',
lr=0.01,
momentum=0.9,
weight_decay=0.0001,
paramwise_options=dict(bias_lr_mult=2., bias_decay_mult=0.))
optimizer_config = dict(grad_clip=None)
lr_config = dict(
policy='step',
warmup='constant',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[8, 11])
checkpoint_config = dict(interval=1)
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
])
total_epochs = 12
device_ids = range(4)
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = '/mnt/cephfs_wj/cv/wangxu.ailab/ideas_experiments/mmdetection/work_dirs/fcos_r50_caffe_fpn_gn_1x_4gpu'
load_from = None
resume_from = None
workflow = [('train', 1)]
| true | true |
f731e6dbf70d7c3cf15954de216636afb6bd4249 | 3,337 | py | Python | main.py | LarsChrWiik/Comparing-Machine-Learning-Models | 050b1bdb40c1d2e9c15f927e9eb257b4b7aaacbe | [
"MIT"
] | null | null | null | main.py | LarsChrWiik/Comparing-Machine-Learning-Models | 050b1bdb40c1d2e9c15f927e9eb257b4b7aaacbe | [
"MIT"
] | null | null | null | main.py | LarsChrWiik/Comparing-Machine-Learning-Models | 050b1bdb40c1d2e9c15f927e9eb257b4b7aaacbe | [
"MIT"
] | null | null | null |
from scipy.io import arff
from sklearn.pipeline import Pipeline
from sklearn.utils import shuffle
from ModelScorer import ModelScorer
import pandas as pd
from Plotter import *
import warnings
#warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.filterwarnings("ignore")
pd.set_option('display.expand_frame_repr', False)
# Machine Learning Classifiers.
from sklearn.dummy import DummyClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import MultinomialNB, BernoulliNB
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC, LinearSVC
from sklearn.svm import OneClassSVM
from xgboost import XGBClassifier
Y_col = 'C'
def read_arff(name):
data = arff.loadarff(name)
df = pd.DataFrame(data[0])
# Convert target strings to bits.
df[Y_col] = df[Y_col].map(lambda x: 1 if str(x)[2:-1]=='True' else 0)
return df
def score_models():
df = read_arff('dataset.arff')
# Normalize.
df = df.apply(lambda x: (x - x.min()) /(x.max() - x.min()), axis=0)
# Unsupervised Learning.
X = df.drop(Y_col, axis=1)
ocsvm = OneClassSVM()
ocsvm.fit(X)
df['Category'] = ocsvm.predict(X)
# Model Scorer.
scores = []
model_scorer = ModelScorer(df=df, Y_col=Y_col)
scores.append(model_scorer.score_model(clf=DummyClassifier()))
scores.append(model_scorer.score_model(clf=DecisionTreeClassifier()))
scores.append(model_scorer.score_model(clf=RandomForestClassifier(n_estimators=100)))
scores.append(model_scorer.score_model(clf=GradientBoostingClassifier(n_estimators=100)))
scores.append(model_scorer.score_model(clf=XGBClassifier(n_estimators=100)))
scores.append(model_scorer.score_model(clf=SGDClassifier()))
scores.append(model_scorer.score_model(clf=LogisticRegression()))
scores.append(model_scorer.score_model(clf=GaussianNB()))
scores.append(model_scorer.score_model(clf=KNeighborsClassifier()))
scores.append(model_scorer.score_model(clf=BernoulliNB()))
scores.append(model_scorer.score_model(clf=SVC(kernel='linear', degree=5)))
scores.append(model_scorer.score_model(clf = MLPClassifier()))
scores.append(model_scorer.score_model(
clf = MLPClassifier(
activation = 'tanh',
solver = 'lbfgs',
hidden_layer_sizes = 100,
learning_rate_init = 0.001,
max_iter = 100000
),
name='Tuned MLPClassifier')
)
df_result = pd.concat(scores).reset_index(drop=True)
df_result = df_result.sort_values(["accuracy"], ascending=False)
print(df_result)
def show_feature_importances():
df = read_arff('dataset.arff')
# Normalize.
df = df.apply(lambda x: (x - x.min()) /(x.max() - x.min()), axis=0)
X = df.drop(Y_col, axis=1)
Y = df[Y_col]
plot_feature_importance(X, Y)
if __name__ == "__main__":
#score_models()
show_feature_importances()
| 33.707071 | 94 | 0.709919 |
from scipy.io import arff
from sklearn.pipeline import Pipeline
from sklearn.utils import shuffle
from ModelScorer import ModelScorer
import pandas as pd
from Plotter import *
import warnings
warnings.filterwarnings("ignore")
pd.set_option('display.expand_frame_repr', False)
from sklearn.dummy import DummyClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import MultinomialNB, BernoulliNB
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC, LinearSVC
from sklearn.svm import OneClassSVM
from xgboost import XGBClassifier
Y_col = 'C'  # name of the binary target column in the ARFF dataset
def read_arff(name):
    """Load an ARFF file into a DataFrame and binarise the target column.

    scipy's ARFF loader returns nominal attribute values as byte strings
    (e.g. b'True').  The target column ``Y_col`` is mapped to 1 for
    'True' and 0 for anything else.

    :param name: path to the .arff file.
    :return: pandas.DataFrame with ``Y_col`` converted to 0/1.
    """
    data = arff.loadarff(name)
    df = pd.DataFrame(data[0])
    # Decode bytes explicitly instead of the former str(x)[2:-1] hack,
    # which silently mapped already-decoded 'True' strings to 0.
    df[Y_col] = df[Y_col].map(
        lambda x: 1 if (x.decode() if isinstance(x, bytes) else str(x)) == 'True' else 0)
    return df
def score_models():
    """Benchmark a suite of classifiers on the normalised ARFF dataset.

    Adds a OneClassSVM novelty flag as an extra feature, scores every
    candidate model via ModelScorer, and prints the results sorted by
    accuracy (best first).
    """
    frame = read_arff('dataset.arff')
    # Min-max normalise every column to [0, 1].
    frame = frame.apply(lambda col: (col - col.min()) /(col.max() - col.min()), axis=0)
    features = frame.drop(Y_col, axis=1)
    # Unsupervised step: one-class SVM prediction becomes a new feature.
    outlier_model = OneClassSVM()
    outlier_model.fit(features)
    frame['Category'] = outlier_model.predict(features)
    scorer = ModelScorer(df=frame, Y_col=Y_col)
    candidates = [
        DummyClassifier(),
        DecisionTreeClassifier(),
        RandomForestClassifier(n_estimators=100),
        GradientBoostingClassifier(n_estimators=100),
        XGBClassifier(n_estimators=100),
        SGDClassifier(),
        LogisticRegression(),
        GaussianNB(),
        KNeighborsClassifier(),
        BernoulliNB(),
        SVC(kernel='linear', degree=5),
        MLPClassifier(),
    ]
    scores = [scorer.score_model(clf=model) for model in candidates]
    tuned_mlp = MLPClassifier(
        activation='tanh',
        solver='lbfgs',
        hidden_layer_sizes=100,
        learning_rate_init=0.001,
        max_iter=100000,
    )
    scores.append(scorer.score_model(clf=tuned_mlp, name='Tuned MLPClassifier'))
    ranking = pd.concat(scores).reset_index(drop=True)
    ranking = ranking.sort_values(["accuracy"], ascending=False)
    print(ranking)
def show_feature_importances():
    """Plot feature importances for the min-max-normalised dataset."""
    frame = read_arff('dataset.arff')
    frame = frame.apply(lambda col: (col - col.min()) /(col.max() - col.min()), axis=0)
    target = frame[Y_col]
    plot_feature_importance(frame.drop(Y_col, axis=1), target)
# Script entry point: only the feature-importance plot runs by default;
# score_models() is defined above but not invoked here.
if __name__ == "__main__":
    show_feature_importances()
| true | true |
f731e81692857af543cde6e9c07ecfe8dccf96ac | 841 | py | Python | test_seq_features.py | daviortega/protfeature | 76f0141f72d0c34a29704598cde5c15e68c6b818 | [
"MIT"
] | null | null | null | test_seq_features.py | daviortega/protfeature | 76f0141f72d0c34a29704598cde5c15e68c6b818 | [
"MIT"
] | null | null | null | test_seq_features.py | daviortega/protfeature | 76f0141f72d0c34a29704598cde5c15e68c6b818 | [
"MIT"
] | null | null | null | import pytest
from seq_features import *
def test_n_neg_for_single_E_or_D():
"""Perform unit tests on n_neg."""
assert n_neg('E') == 1
assert n_neg('D') == 1
def test_n_neg_for_empty_sequence():
assert n_neg('') == 0
def test_n_neg_for_longer_sequences():
assert n_neg('ACKLWTTAE') == 1
assert n_neg('DDDDEEEE') == 8
def test_n_neg_for_lower_case_sequences():
assert n_neg('acklwttae') == 1
def test_n_neg_for_invalid_aminoacid():
with pytest.raises(RuntimeError) as excinfo:
n_neg('Z')
excinfo.match("Z is not a valid amino acid")
with pytest.raises(RuntimeError) as excinfo:
n_neg('z')
excinfo.match("Z is not a valid amino acid")
with pytest.raises(RuntimeError) as excinfo:
n_neg('KAACABAYABADDLKPPSD')
excinfo.match("B is not a valid amino acid")
| 25.484848 | 48 | 0.681332 | import pytest
from seq_features import *
def test_n_neg_for_single_E_or_D():
    """Each acidic residue on its own counts as exactly one negative."""
    for residue in ('E', 'D'):
        assert n_neg(residue) == 1
def test_n_neg_for_empty_sequence():
    """An empty sequence contains no negative residues."""
    assert n_neg('') == 0
def test_n_neg_for_longer_sequences():
    """Multi-residue sequences: count of D/E residues."""
    cases = {'ACKLWTTAE': 1, 'DDDDEEEE': 8}
    for sequence, expected in cases.items():
        assert n_neg(sequence) == expected
def test_n_neg_for_lower_case_sequences():
    """Counting also works for lower-case input."""
    assert n_neg('acklwttae') == 1
def test_n_neg_for_invalid_aminoacid():
    """n_neg must raise RuntimeError naming the offending residue."""
    # pytest.raises(..., match=...) asserts the message inside the context
    # manager itself, instead of a separate excinfo.match() call.
    with pytest.raises(RuntimeError, match="Z is not a valid amino acid"):
        n_neg('Z')
    # NOTE(review): lower-case input — the message presumably reports the
    # upper-cased residue; confirm against seq_features.n_neg.
    with pytest.raises(RuntimeError, match="Z is not a valid amino acid"):
        n_neg('z')
    with pytest.raises(RuntimeError, match="B is not a valid amino acid"):
        n_neg('KAACABAYABADDLKPPSD')
| true | true |
f731e81f7cb78dfca687516d372c3f03d4e3eccb | 56 | py | Python | inclearn/convnet/__init__.py | romilbhardwaj/incremental_learning.pytorch | 77097ef4dd4fc6b6c35d13ef66856d6f8a15598d | [
"MIT"
] | 3 | 2019-07-01T14:43:05.000Z | 2019-12-27T13:26:52.000Z | inclearn/convnet/__init__.py | rahulvigneswaran/incremental_learning.pytorch | 786ecda7dbce5977894737d61cd5e3a30f61aac6 | [
"MIT"
] | null | null | null | inclearn/convnet/__init__.py | rahulvigneswaran/incremental_learning.pytorch | 786ecda7dbce5977894737d61cd5e3a30f61aac6 | [
"MIT"
] | null | null | null | from . import cifar_resnet, densenet, my_resnet, resnet
| 28 | 55 | 0.803571 | from . import cifar_resnet, densenet, my_resnet, resnet
| true | true |
f731e8d74b347c0259534af53c389d1a5bf2b3bc | 13,125 | py | Python | devel/lib/python2.7/dist-packages/hector_uav_msgs/msg/_LandingActionGoal.py | pplankton/MRSLAM | 0a16489a2cbd0c2d1511b506c540446cc670bde8 | [
"MIT"
] | 1 | 2021-05-17T11:13:01.000Z | 2021-05-17T11:13:01.000Z | devel/lib/python2.7/dist-packages/hector_uav_msgs/msg/_LandingActionGoal.py | pplankton/MRSLAM | 0a16489a2cbd0c2d1511b506c540446cc670bde8 | [
"MIT"
] | null | null | null | devel/lib/python2.7/dist-packages/hector_uav_msgs/msg/_LandingActionGoal.py | pplankton/MRSLAM | 0a16489a2cbd0c2d1511b506c540446cc670bde8 | [
"MIT"
] | null | null | null | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from hector_uav_msgs/LandingActionGoal.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import geometry_msgs.msg
import hector_uav_msgs.msg
import genpy
import actionlib_msgs.msg
import std_msgs.msg
class LandingActionGoal(genpy.Message):
  """Auto-generated ROS message class for hector_uav_msgs/LandingActionGoal.

  Produced by genpy from the Landing action definition; do not edit by
  hand -- regenerate from the .msg file instead.  An instance bundles a
  std_msgs/Header, an actionlib_msgs/GoalID and the LandingGoal payload,
  and (de)serializes them in the standard ROS wire format: little-endian
  fixed-width fields plus uint32-length-prefixed UTF-8 strings.
  """
  _md5sum = "f5e95feb07d8f5f21d989eb34d7c3243"
  _type = "hector_uav_msgs/LandingActionGoal"
  _has_header = True #flag to mark the presence of a Header object
  _full_text = """# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
Header header
actionlib_msgs/GoalID goal_id
LandingGoal goal
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
================================================================================
MSG: actionlib_msgs/GoalID
# The stamp should store the time at which this goal was requested.
# It is used by an action server when it tries to preempt all
# goals that were requested before a certain time
time stamp
# The id provides a way to associate feedback and
# result message with specific goal requests. The id
# specified must be unique.
string id
================================================================================
MSG: hector_uav_msgs/LandingGoal
# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
# Landing pose, pose.z is ignored.
# If no stamp is provided, landing_zone is assumed to be empty and
# robot will land directly below
geometry_msgs/PoseStamped landing_zone
================================================================================
MSG: geometry_msgs/PoseStamped
# A Pose with reference coordinate frame and timestamp
Header header
Pose pose
================================================================================
MSG: geometry_msgs/Pose
# A representation of pose in free space, composed of position and orientation.
Point position
Quaternion orientation
================================================================================
MSG: geometry_msgs/Point
# This contains the position of a point in free space
float64 x
float64 y
float64 z
================================================================================
MSG: geometry_msgs/Quaternion
# This represents an orientation in free space in quaternion form.
float64 x
float64 y
float64 z
float64 w
"""
  __slots__ = ['header','goal_id','goal']
  _slot_types = ['std_msgs/Header','actionlib_msgs/GoalID','hector_uav_msgs/LandingGoal']
  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommend
    use is keyword arguments as this is more robust to future message
    changes. You cannot mix in-order arguments and keyword arguments.
    The available fields are:
       header,goal_id,goal
    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    if args or kwds:
      super(LandingActionGoal, self).__init__(*args, **kwds)
      #message fields cannot be None, assign default values for those that are
      if self.header is None:
        self.header = std_msgs.msg.Header()
      if self.goal_id is None:
        self.goal_id = actionlib_msgs.msg.GoalID()
      if self.goal is None:
        self.goal = hector_uav_msgs.msg.LandingGoal()
    else:
      # no explicit values given: default-construct every field
      self.header = std_msgs.msg.Header()
      self.goal_id = actionlib_msgs.msg.GoalID()
      self.goal = hector_uav_msgs.msg.LandingGoal()
  def _get_types(self):
    """
    internal API method
    """
    return self._slot_types
  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    try:
      _x = self
      # header: seq, stamp.secs, stamp.nsecs as three little-endian uint32
      buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
      _x = self.header.frame_id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      _x = self
      buff.write(_get_struct_2I().pack(_x.goal_id.stamp.secs, _x.goal_id.stamp.nsecs))
      _x = self.goal_id.id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      _x = self
      buff.write(_get_struct_3I().pack(_x.goal.landing_zone.header.seq, _x.goal.landing_zone.header.stamp.secs, _x.goal.landing_zone.header.stamp.nsecs))
      _x = self.goal.landing_zone.header.frame_id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      _x = self
      # pose payload: 7 float64 (position x/y/z + orientation quaternion x/y/z/w)
      buff.write(_get_struct_7d().pack(_x.goal.landing_zone.pose.position.x, _x.goal.landing_zone.pose.position.y, _x.goal.landing_zone.pose.position.z, _x.goal.landing_zone.pose.orientation.x, _x.goal.landing_zone.pose.orientation.y, _x.goal.landing_zone.pose.orientation.z, _x.goal.landing_zone.pose.orientation.w))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
    """
    try:
      if self.header is None:
        self.header = std_msgs.msg.Header()
      if self.goal_id is None:
        self.goal_id = actionlib_msgs.msg.GoalID()
      if self.goal is None:
        self.goal = hector_uav_msgs.msg.LandingGoal()
      end = 0
      _x = self
      start = end
      end += 12
      (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.header.frame_id = str[start:end].decode('utf-8')
      else:
        self.header.frame_id = str[start:end]
      _x = self
      start = end
      end += 8
      (_x.goal_id.stamp.secs, _x.goal_id.stamp.nsecs,) = _get_struct_2I().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.goal_id.id = str[start:end].decode('utf-8')
      else:
        self.goal_id.id = str[start:end]
      _x = self
      start = end
      end += 12
      (_x.goal.landing_zone.header.seq, _x.goal.landing_zone.header.stamp.secs, _x.goal.landing_zone.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.goal.landing_zone.header.frame_id = str[start:end].decode('utf-8')
      else:
        self.goal.landing_zone.header.frame_id = str[start:end]
      _x = self
      start = end
      # 56 bytes = 7 x float64: position x/y/z + orientation quaternion x/y/z/w
      end += 56
      (_x.goal.landing_zone.pose.position.x, _x.goal.landing_zone.pose.position.y, _x.goal.landing_zone.pose.position.z, _x.goal.landing_zone.pose.orientation.x, _x.goal.landing_zone.pose.orientation.y, _x.goal.landing_zone.pose.orientation.z, _x.goal.landing_zone.pose.orientation.w,) = _get_struct_7d().unpack(str[start:end])
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    try:
      _x = self
      buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
      _x = self.header.frame_id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      _x = self
      buff.write(_get_struct_2I().pack(_x.goal_id.stamp.secs, _x.goal_id.stamp.nsecs))
      _x = self.goal_id.id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      _x = self
      buff.write(_get_struct_3I().pack(_x.goal.landing_zone.header.seq, _x.goal.landing_zone.header.stamp.secs, _x.goal.landing_zone.header.stamp.nsecs))
      _x = self.goal.landing_zone.header.frame_id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      _x = self
      buff.write(_get_struct_7d().pack(_x.goal.landing_zone.pose.position.x, _x.goal.landing_zone.pose.position.y, _x.goal.landing_zone.pose.position.z, _x.goal.landing_zone.pose.orientation.x, _x.goal.landing_zone.pose.orientation.y, _x.goal.landing_zone.pose.orientation.z, _x.goal.landing_zone.pose.orientation.w))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    try:
      if self.header is None:
        self.header = std_msgs.msg.Header()
      if self.goal_id is None:
        self.goal_id = actionlib_msgs.msg.GoalID()
      if self.goal is None:
        self.goal = hector_uav_msgs.msg.LandingGoal()
      end = 0
      _x = self
      start = end
      end += 12
      (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.header.frame_id = str[start:end].decode('utf-8')
      else:
        self.header.frame_id = str[start:end]
      _x = self
      start = end
      end += 8
      (_x.goal_id.stamp.secs, _x.goal_id.stamp.nsecs,) = _get_struct_2I().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.goal_id.id = str[start:end].decode('utf-8')
      else:
        self.goal_id.id = str[start:end]
      _x = self
      start = end
      end += 12
      (_x.goal.landing_zone.header.seq, _x.goal.landing_zone.header.stamp.secs, _x.goal.landing_zone.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.goal.landing_zone.header.frame_id = str[start:end].decode('utf-8')
      else:
        self.goal.landing_zone.header.frame_id = str[start:end]
      _x = self
      start = end
      # 56 bytes = 7 x float64: position x/y/z + orientation quaternion x/y/z/w
      end += 56
      (_x.goal.landing_zone.pose.position.x, _x.goal.landing_zone.pose.position.y, _x.goal.landing_zone.pose.position.z, _x.goal.landing_zone.pose.orientation.x, _x.goal.landing_zone.pose.orientation.y, _x.goal.landing_zone.pose.orientation.z, _x.goal.landing_zone.pose.orientation.w,) = _get_struct_7d().unpack(str[start:end])
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
    """Return the shared Struct used to (un)pack uint32 string-length prefixes."""
    # plain read of a module global -- no 'global' statement required
    return _struct_I
_struct_3I = None
def _get_struct_3I():
    """Lazily build and cache the Struct for three little-endian uint32 values."""
    global _struct_3I
    cached = _struct_3I
    if cached is None:
        cached = struct.Struct("<3I")
        _struct_3I = cached
    return cached
_struct_7d = None
def _get_struct_7d():
    """Lazily build and cache the Struct for seven little-endian float64 values."""
    global _struct_7d
    cached = _struct_7d
    if cached is None:
        cached = struct.Struct("<7d")
        _struct_7d = cached
    return cached
_struct_2I = None
def _get_struct_2I():
    """Lazily build and cache the Struct for two little-endian uint32 values."""
    global _struct_2I
    cached = _struct_2I
    if cached is None:
        cached = struct.Struct("<2I")
        _struct_2I = cached
    return cached
| 38.265306 | 327 | 0.643962 |
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import geometry_msgs.msg
import hector_uav_msgs.msg
import genpy
import actionlib_msgs.msg
import std_msgs.msg
class LandingActionGoal(genpy.Message):
_md5sum = "f5e95feb07d8f5f21d989eb34d7c3243"
_type = "hector_uav_msgs/LandingActionGoal"
_has_header = True
_full_text = """# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
Header header
actionlib_msgs/GoalID goal_id
LandingGoal goal
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
================================================================================
MSG: actionlib_msgs/GoalID
# The stamp should store the time at which this goal was requested.
# It is used by an action server when it tries to preempt all
# goals that were requested before a certain time
time stamp
# The id provides a way to associate feedback and
# result message with specific goal requests. The id
# specified must be unique.
string id
================================================================================
MSG: hector_uav_msgs/LandingGoal
# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
# Landing pose, pose.z is ignored.
# If no stamp is provided, landing_zone is assumed to be empty and
# robot will land directly below
geometry_msgs/PoseStamped landing_zone
================================================================================
MSG: geometry_msgs/PoseStamped
# A Pose with reference coordinate frame and timestamp
Header header
Pose pose
================================================================================
MSG: geometry_msgs/Pose
# A representation of pose in free space, composed of position and orientation.
Point position
Quaternion orientation
================================================================================
MSG: geometry_msgs/Point
# This contains the position of a point in free space
float64 x
float64 y
float64 z
================================================================================
MSG: geometry_msgs/Quaternion
# This represents an orientation in free space in quaternion form.
float64 x
float64 y
float64 z
float64 w
"""
__slots__ = ['header','goal_id','goal']
_slot_types = ['std_msgs/Header','actionlib_msgs/GoalID','hector_uav_msgs/LandingGoal']
def __init__(self, *args, **kwds):
if args or kwds:
super(LandingActionGoal, self).__init__(*args, **kwds)
if self.header is None:
self.header = std_msgs.msg.Header()
if self.goal_id is None:
self.goal_id = actionlib_msgs.msg.GoalID()
if self.goal is None:
self.goal = hector_uav_msgs.msg.LandingGoal()
else:
self.header = std_msgs.msg.Header()
self.goal_id = actionlib_msgs.msg.GoalID()
self.goal = hector_uav_msgs.msg.LandingGoal()
def _get_types(self):
return self._slot_types
def serialize(self, buff):
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_2I().pack(_x.goal_id.stamp.secs, _x.goal_id.stamp.nsecs))
_x = self.goal_id.id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_3I().pack(_x.goal.landing_zone.header.seq, _x.goal.landing_zone.header.stamp.secs, _x.goal.landing_zone.header.stamp.nsecs))
_x = self.goal.landing_zone.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_7d().pack(_x.goal.landing_zone.pose.position.x, _x.goal.landing_zone.pose.position.y, _x.goal.landing_zone.pose.position.z, _x.goal.landing_zone.pose.orientation.x, _x.goal.landing_zone.pose.orientation.y, _x.goal.landing_zone.pose.orientation.z, _x.goal.landing_zone.pose.orientation.w))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.goal_id is None:
self.goal_id = actionlib_msgs.msg.GoalID()
if self.goal is None:
self.goal = hector_uav_msgs.msg.LandingGoal()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 8
(_x.goal_id.stamp.secs, _x.goal_id.stamp.nsecs,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.goal_id.id = str[start:end].decode('utf-8')
else:
self.goal_id.id = str[start:end]
_x = self
start = end
end += 12
(_x.goal.landing_zone.header.seq, _x.goal.landing_zone.header.stamp.secs, _x.goal.landing_zone.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.goal.landing_zone.header.frame_id = str[start:end].decode('utf-8')
else:
self.goal.landing_zone.header.frame_id = str[start:end]
_x = self
start = end
end += 56
(_x.goal.landing_zone.pose.position.x, _x.goal.landing_zone.pose.position.y, _x.goal.landing_zone.pose.position.z, _x.goal.landing_zone.pose.orientation.x, _x.goal.landing_zone.pose.orientation.y, _x.goal.landing_zone.pose.orientation.z, _x.goal.landing_zone.pose.orientation.w,) = _get_struct_7d().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e)
def serialize_numpy(self, buff, numpy):
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_2I().pack(_x.goal_id.stamp.secs, _x.goal_id.stamp.nsecs))
_x = self.goal_id.id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_3I().pack(_x.goal.landing_zone.header.seq, _x.goal.landing_zone.header.stamp.secs, _x.goal.landing_zone.header.stamp.nsecs))
_x = self.goal.landing_zone.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_7d().pack(_x.goal.landing_zone.pose.position.x, _x.goal.landing_zone.pose.position.y, _x.goal.landing_zone.pose.position.z, _x.goal.landing_zone.pose.orientation.x, _x.goal.landing_zone.pose.orientation.y, _x.goal.landing_zone.pose.orientation.z, _x.goal.landing_zone.pose.orientation.w))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.goal_id is None:
self.goal_id = actionlib_msgs.msg.GoalID()
if self.goal is None:
self.goal = hector_uav_msgs.msg.LandingGoal()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 8
(_x.goal_id.stamp.secs, _x.goal_id.stamp.nsecs,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.goal_id.id = str[start:end].decode('utf-8')
else:
self.goal_id.id = str[start:end]
_x = self
start = end
end += 12
(_x.goal.landing_zone.header.seq, _x.goal.landing_zone.header.stamp.secs, _x.goal.landing_zone.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.goal.landing_zone.header.frame_id = str[start:end].decode('utf-8')
else:
self.goal.landing_zone.header.frame_id = str[start:end]
_x = self
start = end
end += 56
(_x.goal.landing_zone.pose.position.x, _x.goal.landing_zone.pose.position.y, _x.goal.landing_zone.pose.position.z, _x.goal.landing_zone.pose.orientation.x, _x.goal.landing_zone.pose.orientation.y, _x.goal.landing_zone.pose.orientation.z, _x.goal.landing_zone.pose.orientation.w,) = _get_struct_7d().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e)
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_3I = None
def _get_struct_3I():
global _struct_3I
if _struct_3I is None:
_struct_3I = struct.Struct("<3I")
return _struct_3I
_struct_7d = None
def _get_struct_7d():
global _struct_7d
if _struct_7d is None:
_struct_7d = struct.Struct("<7d")
return _struct_7d
_struct_2I = None
def _get_struct_2I():
global _struct_2I
if _struct_2I is None:
_struct_2I = struct.Struct("<2I")
return _struct_2I
| true | true |
f731e9a98528dc5e011f119e55fcaa7ffca1650c | 1,917 | py | Python | keanu-python/keanu/infer_type.py | rs992214/keanu | f7f9b877aaaf9c9f732604f17da238e15dfdad13 | [
"MIT"
] | 153 | 2018-04-06T13:30:31.000Z | 2022-01-31T10:05:27.000Z | keanu-python/keanu/infer_type.py | shinnlok/keanu | c75b2a00571a0da93c6b1d5e9f0cbe09aebdde4d | [
"MIT"
] | 168 | 2018-04-06T16:37:33.000Z | 2021-09-27T21:43:54.000Z | keanu-python/keanu/infer_type.py | shinnlok/keanu | c75b2a00571a0da93c6b1d5e9f0cbe09aebdde4d | [
"MIT"
] | 46 | 2018-04-10T10:46:01.000Z | 2022-02-24T02:53:50.000Z | from typing import Callable, Dict, Any, Union
import numpy as np
from keanu.vartypes import (numpy_types, tensor_arg_types, runtime_numpy_types, runtime_pandas_types,
runtime_primitive_types, runtime_bool_types, runtime_int_types, runtime_float_types,
primitive_types)
from keanu.vertex.base import Vertex
def infer_type_and_execute(value: tensor_arg_types, actions: Dict[type, Callable]) -> Any:
    """Dispatch *value* to the handler registered for its inferred element type.

    Raises KeyError if *actions* has no entry for the inferred type.
    """
    handler = actions[get_type_of_value(value)]
    return handler(value)
def get_type_of_value(t: Union[tensor_arg_types, Vertex]) -> type:
    """Infer the primitive element type (bool, int or float) carried by *t*.

    A Vertex is unwrapped to its current value first; ndarrays are classified
    by dtype, pandas containers by the dtype of their underlying values, and
    plain scalars by their Python type.
    """
    if isinstance(t, Vertex):
        # recurse on the vertex's current value rather than the vertex itself
        return get_type_of_value(t.get_value())
    if isinstance(t, runtime_numpy_types):
        return __infer_type_from_ndarray(t)
    if isinstance(t, runtime_pandas_types):
        return __infer_type_from_ndarray(t.values)
    if isinstance(t, runtime_primitive_types):
        return __infer_type_from_scalar(t)
    raise NotImplementedError(
        "Argument t must be either an ndarray or an instance of numbers.Number. Was given {} instead".format(
            type(t)))
def __infer_type_from_ndarray(ndarray: numpy_types) -> type:
    """Map an ndarray's dtype to the corresponding Python primitive type.

    Checks bool before integer before float, mirroring numpy's dtype
    hierarchy; any other dtype (e.g. object, str) is rejected.
    """
    for np_kind, py_type in ((np.bool_, bool), (np.integer, int), (np.floating, float)):
        if np.issubdtype(ndarray.dtype, np_kind):
            return py_type
    raise NotImplementedError(
        "Generic types in an ndarray are not supported. Was given {}".format(ndarray.dtype))
def __infer_type_from_scalar(scalar: primitive_types) -> type:
    """Map a scalar to bool, int or float via the runtime type groups."""
    if isinstance(scalar, runtime_bool_types):
        return bool
    if isinstance(scalar, runtime_int_types):
        return int
    if isinstance(scalar, runtime_float_types):
        return float
    # not a member of any known scalar group
    raise NotImplementedError(
        "Generic types in an ndarray are not supported. Was given {}".format(type(scalar)))
| 38.34 | 118 | 0.709442 | from typing import Callable, Dict, Any, Union
import numpy as np
from keanu.vartypes import (numpy_types, tensor_arg_types, runtime_numpy_types, runtime_pandas_types,
runtime_primitive_types, runtime_bool_types, runtime_int_types, runtime_float_types,
primitive_types)
from keanu.vertex.base import Vertex
def infer_type_and_execute(value: tensor_arg_types, actions: Dict[type, Callable]) -> Any:
return actions[get_type_of_value(value)](value)
def get_type_of_value(t: Union[tensor_arg_types, Vertex]) -> type:
if isinstance(t, Vertex):
return get_type_of_value(t.get_value())
if isinstance(t, runtime_numpy_types):
return __infer_type_from_ndarray(t)
elif isinstance(t, runtime_pandas_types):
return __infer_type_from_ndarray(t.values)
elif isinstance(t, runtime_primitive_types):
return __infer_type_from_scalar(t)
else:
raise NotImplementedError(
"Argument t must be either an ndarray or an instance of numbers.Number. Was given {} instead".format(
type(t)))
def __infer_type_from_ndarray(ndarray: numpy_types) -> type:
if np.issubdtype(ndarray.dtype, np.bool_):
return bool
elif np.issubdtype(ndarray.dtype, np.integer):
return int
elif np.issubdtype(ndarray.dtype, np.floating):
return float
else:
raise NotImplementedError("Generic types in an ndarray are not supported. Was given {}".format(ndarray.dtype))
def __infer_type_from_scalar(scalar: primitive_types) -> type:
if isinstance(scalar, runtime_bool_types):
return bool
elif isinstance(scalar, runtime_int_types):
return int
elif isinstance(scalar, runtime_float_types):
return float
else:
raise NotImplementedError("Generic types in an ndarray are not supported. Was given {}".format(type(scalar)))
| true | true |
f731e9dc46d9a42f547e7b29cb89e2dde3ec8755 | 15,925 | py | Python | tests/unit/api/resources/systems.py | tessia-project/tessia | b9ded8dc7f0b9a7a0ea00d95b5ccc4af4d2e7540 | [
"Apache-2.0"
] | 5 | 2020-06-04T10:20:33.000Z | 2020-10-26T15:09:19.000Z | tests/unit/api/resources/systems.py | tessia-project/tessia | b9ded8dc7f0b9a7a0ea00d95b5ccc4af4d2e7540 | [
"Apache-2.0"
] | null | null | null | tests/unit/api/resources/systems.py | tessia-project/tessia | b9ded8dc7f0b9a7a0ea00d95b5ccc4af4d2e7540 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016, 2017 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit test for systems resource module
"""
#
# IMPORTS
#
from tessia.server.api.resources.systems import SystemResource
from tessia.server.api.resources.systems import MSG_BAD_COMBO
from tessia.server.db import models
from tests.unit.api.resources.secure_resource import TestSecureResource
import json
#
# CONSTANTS AND DEFINITIONS
#
#
# CODE
#
class TestSystems(TestSecureResource):
"""
Validates the Systems resource
"""
# entry point for resource in api
RESOURCE_URL = '/systems'
# model associated with this resource
RESOURCE_MODEL = models.System
# api resource
RESOURCE_API = SystemResource
@classmethod
def _entry_gen(cls):
"""
Generator for producing new entries for database insertion.
"""
index = 0
while True:
data = {
'project': cls._db_entries['Project'][0]['name'],
'desc': '- System with some *markdown*',
'name': 'System {}'.format(index),
'hostname': 'system{}.domain.com'.format(index),
'hypervisor': 'cpc0',
'model': 'ZGENERIC',
'type': 'LPAR',
'state': 'AVAILABLE',
}
index += 1
yield data
# _entry_gen()
    def test_add_all_fields_many_roles(self):
        """
        Exercise the scenario where a user with permissions creates an item
        by specifying all possible fields.

        Runs the shared base-class scenario once for each login below; all
        of them are expected to succeed.
        """
        # every non-restricted role should be allowed to create a system
        logins = [
            'user_user@domain.com',
            'user_privileged@domain.com',
            'user_project_admin@domain.com',
            'user_hw_admin@domain.com',
            'user_admin@domain.com'
        ]
        self._test_add_all_fields_many_roles(logins)
    # test_add_all_fields_many_roles()
    def test_add_all_fields_no_role(self):
        """
        Exercise the scenario where a normal user without permissions tries to
        create an item and fails (http 422 expected).
        """
        logins = [
            'user_restricted@domain.com',
        ]
        self._test_add_all_fields_no_role(logins, http_code=422)
    # test_add_all_fields_no_role()
    def test_add_mandatory_fields(self):
        """
        Exercise the scenario where a user with permissions creates an item
        by specifying only the mandatory fields; the omitted optional fields
        must come back with their documented default values.
        """
        # the fields to be omitted and their expected values on response
        pop_fields = [
            ('desc', None),
            ('project', self._db_entries['Project'][0]['name']),
            ('hypervisor', None),
            ('model', 'ZGENERIC'),
            ('state', 'AVAILABLE')
        ]
        self._test_add_mandatory_fields('user_user@domain.com', pop_fields)
    # test_add_mandatory_fields()
    def test_add_mandatory_fields_as_admin(self):
        """
        Exercise the scenario where using the admin user to create an item
        makes project a mandatory field (admin has no default project to
        fall back to).
        """
        self._test_add_mandatory_fields_as_admin('user_admin@domain.com')
    # test_add_mandatory_fields_as_admin()
    def test_add_missing_field(self):
        """
        Test if api correctly reports error when a mandatory field is missing
        during creation.
        """
        # 'name' and 'type' are the two mandatory fields of this resource
        pop_fields = ['name', 'type']
        self._test_add_missing_field('user_user@domain.com', pop_fields)
    # test_add_missing_field()
    def test_add_update_conflict(self):
        """
        Test two scenarios:
        1- add an item with a system name that already exists
        2- update an item to a system name that already exists
        """
        # 'name' is the unique field being exercised
        self._test_add_update_conflict('user_user@domain.com', 'name')
    # test_add_update_conflict()
    def test_add_update_wrong_field(self):
        """
        Test if api correctly reports error when invalid values are used for
        a field during creation and update.

        After the generic wrong-type matrix it also exercises the
        system-specific validations: unknown hypervisor, unknown system type,
        type/hypervisor combinations that do not match (KVM guest directly
        under a CPC) and, finally, the valid case where the model is
        auto-filled from the hypervisor.
        """
        # specify fields with wrong types
        wrong_data = [
            ('name', ''),
            ('name', ' '),
            ('name', ' name'),
            ('name', 'name with * symbol'),
            ('name', 5),
            ('name', True),
            ('name', None),
            ('hostname', 5),
            ('hostname', True),
            ('model', 5),
            ('model', True),
            ('state', 5),
            ('type', 5),
            ('type', None),
            ('hypervisor', 5),
            ('hypervisor', True),
            ('desc', False),
            ('project', 5),
            ('project', False),
            ('owner', False),
            # read-only fields
            ('modified', 'something'),
            ('modifier', 'something'),
        ]
        self._test_add_update_wrong_field(
            'user_user@domain.com', wrong_data)
        # test special cases when guest type does not match hypervisor's
        def validate_resp(resp, msg):
            """Assert the response is a 422 carrying exactly message *msg*."""
            self.assertEqual(resp.status_code, 422)
            body = json.loads(resp.get_data(as_text=True))
            self.assertEqual(msg, body['message'])
        # validate_resp()
        # one fresh payload for the create path, one existing id for update
        create_data = next(self._get_next_entry)
        orig_hyp = create_data['hypervisor']
        update_data = self._create_many_entries(
            'user_admin@domain.com', 1)[0][0]
        update_data = {
            'id': update_data['id']
        }
        # run the same three invalid cases through both create and update
        for action, data in (
                ('create', create_data), ('update', update_data),):
            # 1- invalid hypervisor
            data['hypervisor'] = 'something_wrong'
            resp = self._do_request(action, 'user_admin@domain.com:a', data)
            msg = (
                "No associated item found with value 'something_wrong' "
                "for field '{}'".format(
                    self.RESOURCE_API.Schema.hypervisor.description)
            )
            validate_resp(resp, msg)
            # 2- invalid type
            data['hypervisor'] = orig_hyp
            data['type'] = 'something_wrong'
            msg = (
                "No associated item found with value 'something_wrong' "
                "for field '{}'".format(
                    self.RESOURCE_API.Schema.type.description)
            )
            resp = self._do_request(action, 'user_admin@domain.com:a', data)
            validate_resp(resp, msg)
            # 3- invalid combination (KVM guest of CPC)
            data['type'] = 'KVM'
            data['hypervisor'] = 'cpc0'
            resp = self._do_request(action, 'user_admin@domain.com:a', data)
            validate_resp(resp, MSG_BAD_COMBO)
        # 4- valid combination, check that model is auto set to hypervisor's
        hyp_model = self.RESOURCE_MODEL.query.filter_by(
            name='cpc0').one().model
        create_data.pop('model')
        create_data['type'] = 'LPAR'
        create_data['hypervisor'] = 'cpc0'
        resp = self._do_request(
            'create', 'user_admin@domain.com:a', create_data)
        create_data['model'] = hyp_model
        self._assert_created(resp, create_data)
    # test_add_update_wrong_field()
    def test_del_many_roles(self):
        """
        Exercise removal of entries with different role combinations.

        Each tuple pairs two logins handed to the base-class scenario
        (presumably creator and remover -- semantics defined in
        TestSecureResource); all combinations below are expected to succeed.
        """
        combos = [
            ('user_hw_admin@domain.com', 'user_hw_admin@domain.com'),
            ('user_hw_admin@domain.com', 'user_admin@domain.com'),
            ('user_admin@domain.com', 'user_admin@domain.com'),
            ('user_admin@domain.com', 'user_hw_admin@domain.com'),
            ('user_user@domain.com', 'user_user@domain.com'),
            ('user_user@domain.com', 'user_project_admin@domain.com'),
            ('user_privileged@domain.com', 'user_privileged@domain.com'),
            ('user_privileged@domain.com', 'user_project_admin@domain.com'),
            ('user_project_admin@domain.com', 'user_project_admin@domain.com'),
            ('user_project_admin@domain.com', 'user_hw_admin@domain.com'),
        ]
        self._test_del_many_roles(combos)
    # test_del_many_roles()
def test_del_has_dependent(self):
"""
Try to delete an item which has a system profile associated with it.
"""
entry = self._create_many_entries(
'user_admin@domain.com', 1)[0][0]
hyp = self.RESOURCE_MODEL.query.filter_by(
name=entry['hypervisor']).one()
self._test_del_has_dependent(
'user_admin@domain.com', hyp.id, None)
# test_del_has_dependent()
def test_del_invalid_id(self):
"""
Test if api correctly handles the case when trying to delete an
invalid id
"""
self._test_del_invalid_id()
# test_del_invalid_id()
def test_del_no_role(self):
"""
Try to remove an entry without permissions
"""
combos = [
('user_admin@domain.com', 'user_user@domain.com'),
('user_admin@domain.com', 'user_privileged@domain.com'),
]
self._test_del_no_role(combos)
# restricted user has no read access
combos = [
('user_admin@domain.com', 'user_restricted@domain.com'),
]
self._test_del_no_role(combos, http_code=404)
# test_del_no_role()
def test_list_and_read(self):
"""
Verify if listing and reading permissions are correctly handled
"""
logins = [
'user_user@domain.com',
'user_privileged@domain.com',
'user_project_admin@domain.com',
'user_hw_admin@domain.com',
'user_admin@domain.com',
]
self._test_list_and_read('user_hw_admin@domain.com', logins)
# test_list_and_read()
def test_list_and_read_restricted_no_role(self):
"""
List entries with a restricted user without role in any project
"""
self._test_list_and_read_restricted_no_role(
'user_user@domain.com', 'user_restricted@domain.com',
http_code=404)
# test_list_and_read_restricted_no_role()
def test_list_and_read_restricted_with_role(self):
"""
List entries with a restricted user who has a role in a project
"""
self._test_list_and_read_restricted_with_role(
'user_user@domain.com', 'user_restricted@domain.com')
# test_list_and_read_restricted_with_role()
def test_list_filtered(self):
"""
Test basic filtering capabilities
"""
# part_table and specs are not searchable so we don't add them
filter_values = {
'owner': 'user_project_admin@domain.com',
'modifier': 'user_project_admin@domain.com',
'project': self._db_entries['Project'][1]['name'],
'name': 'some_name_for_filter',
'hostname': 'some_hostname_for_filter',
'hypervisor': 'cpc0',
'model': 'ZGENERIC',
'type': 'KVM',
'state': 'LOCKED',
}
self._test_list_filtered('user_user@domain.com', filter_values)
# test_list_filtered()
def test_update_project(self):
"""
Exercise the update of the item's project. For that operation a user
requires permission on both projects.
"""
self._test_update_project()
# test_update_project()
    def test_update_valid_fields(self):
        """
        Exercise the update of existing objects when correct format and
        writable fields are specified.

        Runs three passes: regular role combinations, a restricted user
        updating an item they own, and a type-only update that leaves the
        hypervisor untouched.
        """
        # the created system doubles as the hypervisor for the update below
        entry = self._create_many_entries(
            'user_user@domain.com', 1)[0][0]
        update_fields = {
            'owner': 'user_project_admin@domain.com',
            'name': 'some_name',
            'hostname': 'some_hostname',
            'hypervisor': entry['name'],
            'model': 'ZEC12_H20',
            'type': 'KVM',
            'state': 'LOCKED',
        }
        # combinations owner/updater
        combos = [
            # combinations to exercise the use of the UPDATE permission in the
            # role
            ('user_hw_admin@domain.com', 'user_admin@domain.com'),
            ('user_admin@domain.com', 'user_hw_admin@domain.com'),
            ('user_user@domain.com', 'user_privileged@domain.com'),
            ('user_user@domain.com', 'user_project_admin@domain.com'),
            ('user_user@domain.com', 'user_hw_admin@domain.com'),
            ('user_user@domain.com', 'user_admin@domain.com'),
            ('user_privileged@domain.com', 'user_project_admin@domain.com'),
            ('user_privileged@domain.com', 'user_hw_admin@domain.com'),
            ('user_privileged@domain.com', 'user_admin@domain.com'),
            ('user_project_admin@domain.com', 'user_privileged@domain.com'),
            ('user_project_admin@domain.com', 'user_hw_admin@domain.com'),
            ('user_project_admin@domain.com', 'user_admin@domain.com'),
            # combinations to exercise updating an item owned by the user
            ('user_user@domain.com', 'user_user@domain.com'),
        ]
        self._test_update_valid_fields(
            'user_hw_admin@domain.com', combos, update_fields)
        # restricted user has no access to the project with this hypervisor,
        # so it cannot update hypervisor field
        update_fields.pop('hypervisor')
        update_fields.pop('type')
        # we also need to remove ownership change for user_restricted,
        # otherwise it will not be able to find the changed item
        update_fields.pop('owner')
        combos = [
            # combinations to exercise updating an item owned by the user
            ('user_restricted@domain.com', 'user_restricted@domain.com'),
        ]
        self._test_update_valid_fields(
            'user_hw_admin@domain.com', combos, update_fields)
        # perform a simple update of type without changing hypervisor to
        # reach specific if clause in resource api code
        update_fields = {
            'type': 'LPAR'
        }
        self._test_update_valid_fields(
            'user_hw_admin@domain.com',
            [('user_user@domain.com', 'user_user@domain.com')],
            update_fields)
    # test_update_valid_fields()
def test_add_update_assoc_error(self):
"""
Try creation and edit while setting a FK field to a value that has no
entry in the associated table.
"""
wrong_fields = [
('project', 'some_project'),
('owner', 'some_owner'),
('hypervisor', 'some_hypervisor'),
('state', 'some_state'),
('type', 'some_type'),
('model', 'some_model'),
]
self._test_add_update_assoc_error(
'user_project_admin@domain.com', wrong_fields)
# test_add_update_assoc_error()
def test_update_no_role(self):
"""
Try to update with a user without an appropriate role to do so.
"""
update_fields = {
'name': 'this_should_not_work',
}
logins = [
'user_user@domain.com',
]
self._test_update_no_role(
'user_hw_admin@domain.com', logins, update_fields)
# restricted users have no read access
logins = [
'user_restricted@domain.com',
]
self._test_update_no_role(
'user_hw_admin@domain.com', logins, update_fields, http_code=404)
# test_update_no_role()
# TestSystem
| 35.467706 | 79 | 0.589827 |
from tessia.server.api.resources.systems import SystemResource
from tessia.server.api.resources.systems import MSG_BAD_COMBO
from tessia.server.db import models
from tests.unit.api.resources.secure_resource import TestSecureResource
import json
class TestSystems(TestSecureResource):
RESOURCE_URL = '/systems'
RESOURCE_MODEL = models.System
RESOURCE_API = SystemResource
@classmethod
def _entry_gen(cls):
index = 0
while True:
data = {
'project': cls._db_entries['Project'][0]['name'],
'desc': '- System with some *markdown*',
'name': 'System {}'.format(index),
'hostname': 'system{}.domain.com'.format(index),
'hypervisor': 'cpc0',
'model': 'ZGENERIC',
'type': 'LPAR',
'state': 'AVAILABLE',
}
index += 1
yield data
def test_add_all_fields_many_roles(self):
logins = [
'user_user@domain.com',
'user_privileged@domain.com',
'user_project_admin@domain.com',
'user_hw_admin@domain.com',
'user_admin@domain.com'
]
self._test_add_all_fields_many_roles(logins)
def test_add_all_fields_no_role(self):
logins = [
'user_restricted@domain.com',
]
self._test_add_all_fields_no_role(logins, http_code=422)
def test_add_mandatory_fields(self):
pop_fields = [
('desc', None),
('project', self._db_entries['Project'][0]['name']),
('hypervisor', None),
('model', 'ZGENERIC'),
('state', 'AVAILABLE')
]
self._test_add_mandatory_fields('user_user@domain.com', pop_fields)
def test_add_mandatory_fields_as_admin(self):
self._test_add_mandatory_fields_as_admin('user_admin@domain.com')
def test_add_missing_field(self):
pop_fields = ['name', 'type']
self._test_add_missing_field('user_user@domain.com', pop_fields)
def test_add_update_conflict(self):
self._test_add_update_conflict('user_user@domain.com', 'name')
def test_add_update_wrong_field(self):
wrong_data = [
('name', ''),
('name', ' '),
('name', ' name'),
('name', 'name with * symbol'),
('name', 5),
('name', True),
('name', None),
('hostname', 5),
('hostname', True),
('model', 5),
('model', True),
('state', 5),
('type', 5),
('type', None),
('hypervisor', 5),
('hypervisor', True),
('desc', False),
('project', 5),
('project', False),
('owner', False),
('modified', 'something'),
('modifier', 'something'),
]
self._test_add_update_wrong_field(
'user_user@domain.com', wrong_data)
def validate_resp(resp, msg):
self.assertEqual(resp.status_code, 422)
body = json.loads(resp.get_data(as_text=True))
self.assertEqual(msg, body['message'])
# validate_resp()
create_data = next(self._get_next_entry)
orig_hyp = create_data['hypervisor']
update_data = self._create_many_entries(
'user_admin@domain.com', 1)[0][0]
update_data = {
'id': update_data['id']
}
for action, data in (
('create', create_data), ('update', update_data),):
# 1- invalid hypervisor
data['hypervisor'] = 'something_wrong'
resp = self._do_request(action, 'user_admin@domain.com:a', data)
msg = (
"No associated item found with value 'something_wrong' "
"for field '{}'".format(
self.RESOURCE_API.Schema.hypervisor.description)
)
validate_resp(resp, msg)
# 2- invalid type
data['hypervisor'] = orig_hyp
data['type'] = 'something_wrong'
msg = (
"No associated item found with value 'something_wrong' "
"for field '{}'".format(
self.RESOURCE_API.Schema.type.description)
)
resp = self._do_request(action, 'user_admin@domain.com:a', data)
validate_resp(resp, msg)
# 3- invalid combination (KVM guest of CPC)
data['type'] = 'KVM'
data['hypervisor'] = 'cpc0'
resp = self._do_request(action, 'user_admin@domain.com:a', data)
validate_resp(resp, MSG_BAD_COMBO)
# 4- valid combination, check that model is auto set to hypervisor's
hyp_model = self.RESOURCE_MODEL.query.filter_by(
name='cpc0').one().model
create_data.pop('model')
create_data['type'] = 'LPAR'
create_data['hypervisor'] = 'cpc0'
resp = self._do_request(
'create', 'user_admin@domain.com:a', create_data)
create_data['model'] = hyp_model
self._assert_created(resp, create_data)
def test_del_many_roles(self):
combos = [
('user_hw_admin@domain.com', 'user_hw_admin@domain.com'),
('user_hw_admin@domain.com', 'user_admin@domain.com'),
('user_admin@domain.com', 'user_admin@domain.com'),
('user_admin@domain.com', 'user_hw_admin@domain.com'),
('user_user@domain.com', 'user_user@domain.com'),
('user_user@domain.com', 'user_project_admin@domain.com'),
('user_privileged@domain.com', 'user_privileged@domain.com'),
('user_privileged@domain.com', 'user_project_admin@domain.com'),
('user_project_admin@domain.com', 'user_project_admin@domain.com'),
('user_project_admin@domain.com', 'user_hw_admin@domain.com'),
]
self._test_del_many_roles(combos)
def test_del_has_dependent(self):
entry = self._create_many_entries(
'user_admin@domain.com', 1)[0][0]
hyp = self.RESOURCE_MODEL.query.filter_by(
name=entry['hypervisor']).one()
self._test_del_has_dependent(
'user_admin@domain.com', hyp.id, None)
def test_del_invalid_id(self):
self._test_del_invalid_id()
def test_del_no_role(self):
combos = [
('user_admin@domain.com', 'user_user@domain.com'),
('user_admin@domain.com', 'user_privileged@domain.com'),
]
self._test_del_no_role(combos)
combos = [
('user_admin@domain.com', 'user_restricted@domain.com'),
]
self._test_del_no_role(combos, http_code=404)
def test_list_and_read(self):
logins = [
'user_user@domain.com',
'user_privileged@domain.com',
'user_project_admin@domain.com',
'user_hw_admin@domain.com',
'user_admin@domain.com',
]
self._test_list_and_read('user_hw_admin@domain.com', logins)
def test_list_and_read_restricted_no_role(self):
self._test_list_and_read_restricted_no_role(
'user_user@domain.com', 'user_restricted@domain.com',
http_code=404)
def test_list_and_read_restricted_with_role(self):
self._test_list_and_read_restricted_with_role(
'user_user@domain.com', 'user_restricted@domain.com')
def test_list_filtered(self):
filter_values = {
'owner': 'user_project_admin@domain.com',
'modifier': 'user_project_admin@domain.com',
'project': self._db_entries['Project'][1]['name'],
'name': 'some_name_for_filter',
'hostname': 'some_hostname_for_filter',
'hypervisor': 'cpc0',
'model': 'ZGENERIC',
'type': 'KVM',
'state': 'LOCKED',
}
self._test_list_filtered('user_user@domain.com', filter_values)
# test_list_filtered()
def test_update_project(self):
self._test_update_project()
# test_update_project()
def test_update_valid_fields(self):
entry = self._create_many_entries(
'user_user@domain.com', 1)[0][0]
update_fields = {
'owner': 'user_project_admin@domain.com',
'name': 'some_name',
'hostname': 'some_hostname',
'hypervisor': entry['name'],
'model': 'ZEC12_H20',
'type': 'KVM',
'state': 'LOCKED',
}
# combinations owner/updater
combos = [
# combinations to exercise the use of the UPDATE permission in the
# role
('user_hw_admin@domain.com', 'user_admin@domain.com'),
('user_admin@domain.com', 'user_hw_admin@domain.com'),
('user_user@domain.com', 'user_privileged@domain.com'),
('user_user@domain.com', 'user_project_admin@domain.com'),
('user_user@domain.com', 'user_hw_admin@domain.com'),
('user_user@domain.com', 'user_admin@domain.com'),
('user_privileged@domain.com', 'user_project_admin@domain.com'),
('user_privileged@domain.com', 'user_hw_admin@domain.com'),
('user_privileged@domain.com', 'user_admin@domain.com'),
('user_project_admin@domain.com', 'user_privileged@domain.com'),
('user_project_admin@domain.com', 'user_hw_admin@domain.com'),
('user_project_admin@domain.com', 'user_admin@domain.com'),
# combinations to exercise updating an item owned by the user
('user_user@domain.com', 'user_user@domain.com'),
]
self._test_update_valid_fields(
'user_hw_admin@domain.com', combos, update_fields)
# restricted user has no access to the project with this hypervisor,
# so it cannot update hypervisor field
update_fields.pop('hypervisor')
update_fields.pop('type')
# we also need to remove ownership change for user_restricted,
# otherwise it will not be able to find the changed item
update_fields.pop('owner')
combos = [
# combinations to exercise updating an item owned by the user
('user_restricted@domain.com', 'user_restricted@domain.com'),
]
self._test_update_valid_fields(
'user_hw_admin@domain.com', combos, update_fields)
# perform a simple update of type without changing hypervisor to
# reach specific if clause in resource api code
update_fields = {
'type': 'LPAR'
}
self._test_update_valid_fields(
'user_hw_admin@domain.com',
[('user_user@domain.com', 'user_user@domain.com')],
update_fields)
# test_update_valid_fields()
def test_add_update_assoc_error(self):
wrong_fields = [
('project', 'some_project'),
('owner', 'some_owner'),
('hypervisor', 'some_hypervisor'),
('state', 'some_state'),
('type', 'some_type'),
('model', 'some_model'),
]
self._test_add_update_assoc_error(
'user_project_admin@domain.com', wrong_fields)
# test_add_update_assoc_error()
def test_update_no_role(self):
update_fields = {
'name': 'this_should_not_work',
}
logins = [
'user_user@domain.com',
]
self._test_update_no_role(
'user_hw_admin@domain.com', logins, update_fields)
# restricted users have no read access
logins = [
'user_restricted@domain.com',
]
self._test_update_no_role(
'user_hw_admin@domain.com', logins, update_fields, http_code=404)
# test_update_no_role()
# TestSystem
| true | true |
f731e9f673edb980d0ac68138a8573de0e682d5d | 1,411 | py | Python | python/ledger/ledger.py | fruit-in/exercism-solution | 942f824e2dacd23ee9fd12f5b3e8abed0aeb9581 | [
"MIT"
] | 9 | 2020-12-12T03:29:33.000Z | 2021-08-11T13:08:06.000Z | python/ledger/ledger.py | fruit-in/exercism-solution | 942f824e2dacd23ee9fd12f5b3e8abed0aeb9581 | [
"MIT"
] | null | null | null | python/ledger/ledger.py | fruit-in/exercism-solution | 942f824e2dacd23ee9fd12f5b3e8abed0aeb9581 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
def create_entry(date, description, change):
    """Build a ledger entry: ([year, month, day], description, change_in_cents)."""
    year_month_day = [int(part) for part in date.split("-")]
    return (year_month_day, description, change)
def format_entries(currency, locale, entries):
if currency == "USD":
symbol = "$"
elif currency == "EUR":
symbol = u"€"
if locale == "en_US":
header = ("Date", "Description", "Change")
date_fmt = "{1:02}/{2:02}/{0}"
number_po_fmt = "{}{} "
number_ne_fmt = "({}{})"
thousands = ","
decimal = "."
elif locale == "nl_NL":
header = ("Datum", "Omschrijving", "Verandering")
date_fmt = "{2:02}-{1:02}-{0}"
number_po_fmt = "{} {} "
number_ne_fmt = "{} -{} "
thousands = "."
decimal = ","
ret = ["{:<11}| {:<26}| {:<13}".format(*header)]
for date, description, change in sorted(entries):
date = date_fmt.format(*date)
if len(description) > 25:
description = description[:22] + "... "
change_abs = "{:.2f}".format(abs(change) / 100).replace(".", decimal)
for i in range(len(change_abs) - 6, 0, -3):
change_abs = change_abs[:i] + thousands + change_abs[i:]
number_fmt = number_ne_fmt if change < 0 else number_po_fmt
change = number_fmt.format(symbol, change_abs)
ret.append("{:<11}| {:<26}| {:>13}".format(date, description, change))
return "\n".join(ret)
| 32.068182 | 78 | 0.530829 |
def create_entry(date, description, change):
return (list(map(int, date.split("-"))), description, change)
def format_entries(currency, locale, entries):
if currency == "USD":
symbol = "$"
elif currency == "EUR":
symbol = u"€"
if locale == "en_US":
header = ("Date", "Description", "Change")
date_fmt = "{1:02}/{2:02}/{0}"
number_po_fmt = "{}{} "
number_ne_fmt = "({}{})"
thousands = ","
decimal = "."
elif locale == "nl_NL":
header = ("Datum", "Omschrijving", "Verandering")
date_fmt = "{2:02}-{1:02}-{0}"
number_po_fmt = "{} {} "
number_ne_fmt = "{} -{} "
thousands = "."
decimal = ","
ret = ["{:<11}| {:<26}| {:<13}".format(*header)]
for date, description, change in sorted(entries):
date = date_fmt.format(*date)
if len(description) > 25:
description = description[:22] + "... "
change_abs = "{:.2f}".format(abs(change) / 100).replace(".", decimal)
for i in range(len(change_abs) - 6, 0, -3):
change_abs = change_abs[:i] + thousands + change_abs[i:]
number_fmt = number_ne_fmt if change < 0 else number_po_fmt
change = number_fmt.format(symbol, change_abs)
ret.append("{:<11}| {:<26}| {:>13}".format(date, description, change))
return "\n".join(ret)
| true | true |
f731ea57e184ee823c29cfdd9e927eb1b20e4e3c | 9,625 | py | Python | portality/api/v1/data_objects/article.py | glauberm/doaj | dc24dfcbf4a9f02ce5c9b09b611a5766ea5742f7 | [
"Apache-2.0"
] | null | null | null | portality/api/v1/data_objects/article.py | glauberm/doaj | dc24dfcbf4a9f02ce5c9b09b611a5766ea5742f7 | [
"Apache-2.0"
] | null | null | null | portality/api/v1/data_objects/article.py | glauberm/doaj | dc24dfcbf4a9f02ce5c9b09b611a5766ea5742f7 | [
"Apache-2.0"
] | null | null | null | from portality.lib import dataobj, swagger
from portality import models
from portality.util import normalise_issn
from copy import deepcopy
# Structural definition shared by the incoming and outgoing article data
# objects: field coercions, nested objects and list shapes for the article
# JSON representation used by the API.
BASE_ARTICLE_STRUCT = {
    "fields": {
        # id/created_date/last_updated are kept here for ease of use by the
        # caller, but they are ignored on conversion to the real model object
        "id": {"coerce": "unicode"},
        "created_date": {"coerce": "utcdatetime"},
        "last_updated": {"coerce": "utcdatetime"}
    },
    "objects": ["admin", "bibjson"],
    "structs": {
        # administrative metadata (DOAJ status flags and provenance ids)
        "admin": {
            "fields": {
                "in_doaj": {"coerce": "bool", "get__default": False},
                "seal": {"coerce": "bool", "get__default": False},
                "publisher_record_id": {"coerce": "unicode"},
                "upload_id": {"coerce": "unicode"}
            }
        },
        # bibliographic metadata of the article itself
        "bibjson": {
            "fields": {
                "title": {"coerce": "unicode"},
                "year": {"coerce": "unicode"},
                "month": {"coerce": "unicode"},
                "abstract": {"coerce": "unicode"}
            },
            "lists": {
                "identifier": {"contains": "object"},
                "link": {"contains": "object"},
                "author": {"contains": "object"},
                "keywords": {"coerce": "unicode", "contains": "field"},
                "subject": {"contains": "object"},
            },
            "objects": [
                "journal",
            ],
            "structs": {
                # e.g. {"type": "pissn", "id": "1234-5678"}
                "identifier": {
                    "fields": {
                        "type": {"coerce": "unicode"},
                        "id": {"coerce": "unicode"}
                    }
                },
                # fulltext links; type/content_type use custom coercions
                "link": {
                    "fields": {
                        "type": {"coerce": "link_type"},
                        "url": {"coerce": "url"},
                        "content_type": {"coerce": "link_content_type"}
                    }
                },
                "author": {
                    "fields": {
                        "name": {"coerce": "unicode"},
                        "affiliation": {"coerce": "unicode"}
                    }
                },
                # journal-level context the article was published in
                "journal": {
                    "fields": {
                        "start_page": {"coerce": "unicode"},
                        "end_page": {"coerce": "unicode"},
                        "volume": {"coerce": "unicode"},
                        "number": {"coerce": "unicode"},
                        "publisher": {"coerce": "unicode"},
                        "title": {"coerce": "unicode"},
                        "country": {"coerce": "unicode"}
                    },
                    "lists": {
                        "license": {"contains": "object"},
                        "language": {"coerce": "unicode", "contains": "field"}
                    },
                    "structs": {
                        "license": {
                            "fields": {
                                "title": {"coerce": "license"},
                                "type": {"coerce": "license"},
                                "url": {"coerce": "unicode"},
                                "version": {"coerce": "unicode"},
                                "open_access": {"coerce": "bool"},
                            }
                        }
                    }
                },
                # classification terms, e.g. LCC codes
                "subject": {
                    "fields": {
                        "scheme": {"coerce": "unicode"},
                        "term": {"coerce": "unicode"},
                        "code": {"coerce": "unicode"}
                    }
                },
            }
        }
    }
}
# Extra constraints layered on top of BASE_ARTICLE_STRUCT for data arriving
# through the API: which fields the client must supply.
INCOMING_ARTICLE_REQUIRED = {
    "required": ["bibjson"],
    "structs": {
        "bibjson": {
            "required": [
                "title",
                # "author",  # author no longer required
                "identifier"  # at least one identifier object is required
            ],
            "structs": {
                "identifier": {
                    "required": ["type", "id"]
                },
                "link": {
                    "required": ["type", "url"]
                },
                "author": {
                    "required": ["name"]
                }
            }
        }
    }
}
# Coercion map for article DOs: extend the dataobj defaults with the
# controlled vocabularies used by article links.
BASE_ARTICLE_COERCE = deepcopy(dataobj.DataObj.DEFAULT_COERCE)
BASE_ARTICLE_COERCE["link_type"] = dataobj.string_canonicalise(["fulltext"], allow_fail=False)
BASE_ARTICLE_COERCE["link_type_optional"] = dataobj.string_canonicalise(["fulltext"], allow_fail=True)
BASE_ARTICLE_COERCE["link_content_type"] = dataobj.string_canonicalise(["PDF", "HTML", "ePUB", "XML"], allow_fail=True)

# Swagger translations for the custom coerce names above.
# FIX: the previous assignments ended with a stray trailing comma, which made
# each value a 1-tuple *containing* the schema dict instead of the dict
# itself, producing malformed swagger type entries.
BASE_ARTICLE_SWAGGER_TRANS = deepcopy(swagger.SwaggerSupport.DEFAULT_SWAGGER_TRANS)
BASE_ARTICLE_SWAGGER_TRANS["link_type"] = {"type": "string", "format": "link_type"}  # TODO extend swagger-ui with support for this format and let it produce example values etc. on the front-end
BASE_ARTICLE_SWAGGER_TRANS["link_type_optional"] = {"type": "string", "format": "link_type_optional"}  # TODO extend swagger-ui with support for this format and let it produce example values etc. on the front-end
BASE_ARTICLE_SWAGGER_TRANS["link_content_type"] = {"type": "string", "format": "link_content_type"}  # TODO extend swagger-ui with support for this format and let it produce example values etc. on the front-end
class IncomingArticleDO(dataobj.DataObj, swagger.SwaggerSupport):
    """Data object for article payloads submitted through the API.

    Combines the base article structure with the incoming "required"
    constraints, validates ISSN rules and converts to the real
    ``models.Article``.
    """
    def __init__(self, raw=None):
        self._add_struct(BASE_ARTICLE_STRUCT)
        self._add_struct(INCOMING_ARTICLE_REQUIRED)
        super(IncomingArticleDO, self).__init__(raw, construct_silent_prune=True, expose_data=True, coerce_map=BASE_ARTICLE_COERCE, swagger_trans=BASE_ARTICLE_SWAGGER_TRANS)

    def custom_validate(self):
        """Apply cross-field validation beyond the structural checks.

        Raises dataobj.DataStructureException on any violation.
        """
        # only attempt to validate if this is not a blank object
        if len(self.data.keys()) == 0:
            return

        # at least one of print issn / e-issn, and they must be different
        #
        # check that there are identifiers at all
        identifiers = self.bibjson.identifier
        if identifiers is None or len(identifiers) == 0:
            raise dataobj.DataStructureException("You must specify at least one of P-ISSN or E-ISSN in bibjson.identifier")

        # extract the p/e-issn identifier objects
        pissn = None
        eissn = None
        for ident in identifiers:
            if ident.type == "pissn":
                pissn = ident
            elif ident.type == "eissn":
                eissn = ident

        # check that at least one of them appears
        if pissn is None and eissn is None:
            raise dataobj.DataStructureException("You must specify at least one of P-ISSN or E-ISSN in bibjson.identifier")

        # normalise the ids (strip/uppercase/insert hyphen) before comparing
        if pissn is not None:
            pissn.id = normalise_issn(pissn.id)
        if eissn is not None:
            eissn.id = normalise_issn(eissn.id)

        # check they are not the same
        if pissn is not None and eissn is not None:
            if pissn.id == eissn.id:
                raise dataobj.DataStructureException("P-ISSN and E-ISSN should be different")

        # check the number of keywords is no more than 6
        if len(self.bibjson.keywords) > 6:
            raise dataobj.DataStructureException("bibjson.keywords may only contain a maximum of 6 keywords")

    def to_article_model(self, existing=None):
        """Convert this DO to a ``models.Article``.

        Moves start/end page from bibjson.journal up to bibjson (the model's
        layout), strips admin fields not accepted via the API, and merges
        into ``existing`` when given instead of creating a fresh record.
        """
        dat = deepcopy(self.data)
        if "journal" in dat["bibjson"] and "start_page" in dat["bibjson"].get("journal", {}):
            dat["bibjson"]["start_page"] = dat["bibjson"]["journal"]["start_page"]
            del dat["bibjson"]["journal"]["start_page"]
        if "journal" in dat["bibjson"] and "end_page" in dat["bibjson"].get("journal", {}):
            dat["bibjson"]["end_page"] = dat["bibjson"]["journal"]["end_page"]
            del dat["bibjson"]["journal"]["end_page"]
        # clear out fields that we don't accept via the API
        if "admin" in dat and "in_doaj" in dat["admin"]:
            del dat["admin"]["in_doaj"]
        if "admin" in dat and "seal" in dat["admin"]:
            del dat["admin"]["seal"]
        if "admin" in dat and "upload_id" in dat["admin"]:
            del dat["admin"]["upload_id"]
        if existing is None:
            return models.Article(**dat)
        else:
            merged = dataobj.merge_outside_construct(self._struct, dat, existing.data)
            return models.Article(**merged)
class OutgoingArticleDO(dataobj.DataObj, swagger.SwaggerSupport):
    """Data object for articles leaving the system through the API.

    Uses the base article structure without the incoming "required"
    constraints, and knows how to build itself from a ``models.Article``.
    """
    def __init__(self, raw=None):
        self._add_struct(BASE_ARTICLE_STRUCT)
        super(OutgoingArticleDO, self).__init__(raw, construct_silent_prune=True, expose_data=True, coerce_map=BASE_ARTICLE_COERCE, swagger_trans=BASE_ARTICLE_SWAGGER_TRANS)

    @classmethod
    def from_model(cls, am):
        """Build an outgoing DO from a ``models.Article`` instance."""
        assert isinstance(am, models.Article)
        dat = deepcopy(am.data)
        # Fix some inconsistencies with the model - start and end pages should
        # be in bibjson.journal for the API representation.
        # FIX: use setdefault instead of ``.get("journal", {})`` - the latter
        # assigned into a throwaway default dict when "journal" was absent,
        # silently dropping the page number before it was deleted below.
        if "start_page" in dat["bibjson"]:
            dat["bibjson"].setdefault("journal", {})["start_page"] = dat["bibjson"]["start_page"]
            del dat["bibjson"]["start_page"]
        if "end_page" in dat["bibjson"]:
            dat["bibjson"].setdefault("journal", {})["end_page"] = dat["bibjson"]["end_page"]
            del dat["bibjson"]["end_page"]
        return cls(dat)

    @classmethod
    def from_model_by_id(cls, id_):
        """Look up the article by id and convert it to an outgoing DO."""
        a = models.Article.pull(id_)
        return cls.from_model(a)
| 40.611814 | 213 | 0.511896 | from portality.lib import dataobj, swagger
from portality import models
from portality.util import normalise_issn
from copy import deepcopy
BASE_ARTICLE_STRUCT = {
"fields": {
"id": {"coerce": "unicode"},
"created_date": {"coerce": "utcdatetime"}, # caller, but we'll need to ignore them on the conversion
"last_updated": {"coerce": "utcdatetime"}
},
"objects": ["admin", "bibjson"],
"structs": {
"admin": {
"fields": {
"in_doaj": {"coerce": "bool", "get__default": False},
"seal": {"coerce": "bool", "get__default": False},
"publisher_record_id": {"coerce": "unicode"},
"upload_id": {"coerce": "unicode"}
}
},
"bibjson": {
"fields": {
"title": {"coerce": "unicode"},
"year": {"coerce": "unicode"},
"month": {"coerce": "unicode"},
"abstract": {"coerce": "unicode"}
},
"lists": {
"identifier": {"contains": "object"},
"link": {"contains": "object"},
"author": {"contains": "object"},
"keywords": {"coerce": "unicode", "contains": "field"},
"subject": {"contains": "object"},
},
"objects": [
"journal",
],
"structs": {
"identifier": {
"fields": {
"type": {"coerce": "unicode"},
"id": {"coerce": "unicode"}
}
},
"link": {
"fields": {
"type": {"coerce": "link_type"},
"url": {"coerce": "url"},
"content_type": {"coerce": "link_content_type"}
}
},
"author": {
"fields": {
"name": {"coerce": "unicode"},
"affiliation": {"coerce": "unicode"}
}
},
"journal": {
"fields": {
"start_page": {"coerce": "unicode"},
"end_page": {"coerce": "unicode"},
"volume": {"coerce": "unicode"},
"number": {"coerce": "unicode"},
"publisher": {"coerce": "unicode"},
"title": {"coerce": "unicode"},
"country": {"coerce": "unicode"}
},
"lists": {
"license": {"contains": "object"},
"language": {"coerce": "unicode", "contains": "field"}
},
"structs": {
"license": {
"fields": {
"title": {"coerce": "license"},
"type": {"coerce": "license"},
"url": {"coerce": "unicode"},
"version": {"coerce": "unicode"},
"open_access": {"coerce": "bool"},
}
}
}
},
"subject": {
"fields": {
"scheme": {"coerce": "unicode"},
"term": {"coerce": "unicode"},
"code": {"coerce": "unicode"}
}
},
}
}
}
}
INCOMING_ARTICLE_REQUIRED = {
"required": ["bibjson"],
"structs": {
"bibjson": {
"required": [
"title",
r"
],
"structs": {
"identifier": {
"required": ["type", "id"]
},
"link": {
"required": ["type", "url"]
},
"author": {
"required": ["name"]
}
}
}
}
}
BASE_ARTICLE_COERCE = deepcopy(dataobj.DataObj.DEFAULT_COERCE)
BASE_ARTICLE_COERCE["link_type"] = dataobj.string_canonicalise(["fulltext"], allow_fail=False)
BASE_ARTICLE_COERCE["link_type_optional"] = dataobj.string_canonicalise(["fulltext"], allow_fail=True)
BASE_ARTICLE_COERCE["link_content_type"] = dataobj.string_canonicalise(["PDF", "HTML", "ePUB", "XML"], allow_fail=True)
BASE_ARTICLE_SWAGGER_TRANS = deepcopy(swagger.SwaggerSupport.DEFAULT_SWAGGER_TRANS)
BASE_ARTICLE_SWAGGER_TRANS["link_type"] = {"type": "string", "format": "link_type"},
BASE_ARTICLE_SWAGGER_TRANS["link_type_optional"] = {"type": "string", "format": "link_type_optional"},
BASE_ARTICLE_SWAGGER_TRANS["link_content_type"] = {"type": "string", "format": "link_content_type"},
class IncomingArticleDO(dataobj.DataObj, swagger.SwaggerSupport):
def __init__(self, raw=None):
self._add_struct(BASE_ARTICLE_STRUCT)
self._add_struct(INCOMING_ARTICLE_REQUIRED)
super(IncomingArticleDO, self).__init__(raw, construct_silent_prune=True, expose_data=True, coerce_map=BASE_ARTICLE_COERCE, swagger_trans=BASE_ARTICLE_SWAGGER_TRANS)
def custom_validate(self):
if len(self.data.keys()) == 0:
return
identifiers = self.bibjson.identifier
if identifiers is None or len(identifiers) == 0:
raise dataobj.DataStructureException("You must specify at least one of P-ISSN or E-ISSN in bibjson.identifier")
pissn = None
eissn = None
for ident in identifiers:
if ident.type == "pissn":
pissn = ident
elif ident.type == "eissn":
eissn = ident
if pissn is None and eissn is None:
raise dataobj.DataStructureException("You must specify at least one of P-ISSN or E-ISSN in bibjson.identifier")
if pissn is not None:
pissn.id = normalise_issn(pissn.id)
if eissn is not None:
eissn.id = normalise_issn(eissn.id)
if pissn is not None and eissn is not None:
if pissn.id == eissn.id:
raise dataobj.DataStructureException("P-ISSN and E-ISSN should be different")
if len(self.bibjson.keywords) > 6:
raise dataobj.DataStructureException("bibjson.keywords may only contain a maximum of 6 keywords")
def to_article_model(self, existing=None):
dat = deepcopy(self.data)
if "journal" in dat["bibjson"] and "start_page" in dat["bibjson"].get("journal", {}):
dat["bibjson"]["start_page"] = dat["bibjson"]["journal"]["start_page"]
del dat["bibjson"]["journal"]["start_page"]
if "journal" in dat["bibjson"] and "end_page" in dat["bibjson"].get("journal", {}):
dat["bibjson"]["end_page"] = dat["bibjson"]["journal"]["end_page"]
del dat["bibjson"]["journal"]["end_page"]
if "admin" in dat and "in_doaj" in dat["admin"]:
del dat["admin"]["in_doaj"]
if "admin" in dat and "seal" in dat["admin"]:
del dat["admin"]["seal"]
if "admin" in dat and "upload_id" in dat["admin"]:
del dat["admin"]["upload_id"]
if existing is None:
return models.Article(**dat)
else:
merged = dataobj.merge_outside_construct(self._struct, dat, existing.data)
return models.Article(**merged)
class OutgoingArticleDO(dataobj.DataObj, swagger.SwaggerSupport):
def __init__(self, raw=None):
self._add_struct(BASE_ARTICLE_STRUCT)
super(OutgoingArticleDO, self).__init__(raw, construct_silent_prune=True, expose_data=True, coerce_map=BASE_ARTICLE_COERCE, swagger_trans=BASE_ARTICLE_SWAGGER_TRANS)
@classmethod
def from_model(cls, am):
assert isinstance(am, models.Article)
dat = deepcopy(am.data)
# Fix some inconsistencies with the model - start and end pages should be in bibjson
if "start_page" in dat["bibjson"]:
dat["bibjson"].get("journal", {})["start_page"] = dat["bibjson"]["start_page"]
del dat["bibjson"]["start_page"]
if "end_page" in dat["bibjson"]:
dat["bibjson"].get("journal", {})["end_page"] = dat["bibjson"]["end_page"]
del dat["bibjson"]["end_page"]
return cls(dat)
@classmethod
def from_model_by_id(cls, id_):
a = models.Article.pull(id_)
return cls.from_model(a)
| true | true |
f731eaedc7007e19c5df89d50ff508da3dfd5341 | 1,892 | py | Python | test/benchmark/time_examples.py | jni/asv | f1ec1c157d52c77a799853062dac3468fab3e2ab | [
"BSD-3-Clause"
] | null | null | null | test/benchmark/time_examples.py | jni/asv | f1ec1c157d52c77a799853062dac3468fab3e2ab | [
"BSD-3-Clause"
] | 3 | 2018-07-26T17:56:30.000Z | 2018-07-27T20:23:27.000Z | test/benchmark/time_examples.py | jni/asv | f1ec1c157d52c77a799853062dac3468fab3e2ab | [
"BSD-3-Clause"
] | 3 | 2018-07-25T22:53:31.000Z | 2018-09-16T06:14:43.000Z | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import sys
if sys.version_info[0] == 3:
xrange = range
import warnings
class TimeSuite:
    """asv benchmark suite comparing two string-building patterns.

    The quadratic `s = s + 'x'` loop in benchmark 1 is intentional: the
    pattern itself is what is being timed, so do not "fix" it.
    """
    # Minimum wall time asv spends sampling each benchmark.
    sample_time = 0.1
    def setup(self):
        # Iteration count used by both benchmarks; re-run before each sample.
        self.n = 100
    def time_example_benchmark_1(self):
        # Repeated string concatenation (deliberately naive).
        s = ''
        for i in xrange(self.n):
            s = s + 'x'
    def time_example_benchmark_2(self):
        # Append-then-join idiom, for comparison with benchmark 1.
        s = []
        for i in xrange(self.n):
            s.append('x')
        ''.join(s)
class TimeSuiteSub(TimeSuite):
    # Inherits every benchmark unchanged; exercises discovery of subclassed suites.
    pass
def time_with_warnings():
    """Benchmark that prints, warns, then raises ZeroDivisionError.

    The `1 / 0` is deliberate: it exercises how the runner reports a failing
    benchmark, so the second warning is intentionally unreachable.
    """
    print('hi')
    warnings.warn('before')
    1 / 0
    warnings.warn('after')
time_with_warnings.sample_time = 0.1
def time_with_timeout():
    """Benchmark that never returns; exercises the runner's timeout handling."""
    while True:
        pass
# Abort the benchmark after 0.1 s of wall time.
time_with_timeout.timeout = 0.1
class TimeWithRepeat(object):
    # Check that setup is re-run on each repeat
    called = None    # None outside a sample, False after setup, True after the timed call
    number = 1       # exactly one timed iteration per sample
    repeat = 10      # ten samples, so setup/teardown must each run ten times
    count = 0        # total number of timed calls across repeats
    warmup_time = 0
    def setup(self):
        # Fails if teardown did not reset state after the previous repeat.
        assert self.called is None
        self.called = False
    def teardown(self):
        # Fails if the timed call did not actually run for this repeat.
        assert self.called is True
        self.called = None
        print("<%d>" % (self.count,))
    def time_it(self):
        assert self.called is False
        self.called = True
        self.count += 1
class TimeWithRepeatCalibrate(object):
    # Check that setup is re-run on each repeat, apart from
    # autodetection of suitable `number`
    repeat = 1
    number = 0       # 0 lets the runner calibrate the iteration count itself
    sample_time = 0.1
    def setup(self):
        # Visible marker emitted once per setup invocation.
        print("setup")
    def time_it(self):
        pass
class TimeWithBadTimer(object):
    # Check that calibration of number is robust against bad timers
    repeat = 1
    number = 0       # force automatic calibration, which must not loop forever here
    sample_time = 0.1
    timeout = 5
    def timer(self):
        # Degenerate custom timer that always reports zero elapsed time.
        return 0.0
    def time_it(self):
        pass
| 18.732673 | 67 | 0.598309 |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import sys
if sys.version_info[0] == 3:
xrange = range
import warnings
class TimeSuite:
sample_time = 0.1
def setup(self):
self.n = 100
def time_example_benchmark_1(self):
s = ''
for i in xrange(self.n):
s = s + 'x'
def time_example_benchmark_2(self):
s = []
for i in xrange(self.n):
s.append('x')
''.join(s)
class TimeSuiteSub(TimeSuite):
pass
def time_with_warnings():
print('hi')
warnings.warn('before')
1 / 0
warnings.warn('after')
time_with_warnings.sample_time = 0.1
def time_with_timeout():
while True:
pass
time_with_timeout.timeout = 0.1
class TimeWithRepeat(object):
called = None
number = 1
repeat = 10
count = 0
warmup_time = 0
def setup(self):
assert self.called is None
self.called = False
def teardown(self):
assert self.called is True
self.called = None
print("<%d>" % (self.count,))
def time_it(self):
assert self.called is False
self.called = True
self.count += 1
class TimeWithRepeatCalibrate(object):
repeat = 1
number = 0
sample_time = 0.1
def setup(self):
print("setup")
def time_it(self):
pass
class TimeWithBadTimer(object):
repeat = 1
number = 0
sample_time = 0.1
timeout = 5
def timer(self):
return 0.0
def time_it(self):
pass
| true | true |
f731eba05d25ea0e31730128b001cfb74e109186 | 2,039 | py | Python | env/bin/thresholder.py | tarasen1/Django-Agregator-Site | 6d873fa3e845776242bca150330754fc98f82f24 | [
"Apache-2.0"
] | null | null | null | env/bin/thresholder.py | tarasen1/Django-Agregator-Site | 6d873fa3e845776242bca150330754fc98f82f24 | [
"Apache-2.0"
] | null | null | null | env/bin/thresholder.py | tarasen1/Django-Agregator-Site | 6d873fa3e845776242bca150330754fc98f82f24 | [
"Apache-2.0"
] | null | null | null | #!/home/tarasen/Studying/3kurs/kursova/Django-Agregator-Site/env/bin/python3
#
# The Python Imaging Library
# $Id$
#
# this demo script illustrates how a 1-bit BitmapImage can be used
# as a dynamically updated overlay
#
import sys
if sys.version_info[0] > 2:
import tkinter
else:
import Tkinter as tkinter
from PIL import Image, ImageTk
#
# an image viewer
class UI(tkinter.Frame):
    """Image viewer: grayscale backdrop with a live 1-bit threshold overlay."""
    def __init__(self, master, im, value=128):
        """Build the canvas showing *im* plus a 0-255 threshold slider."""
        tkinter.Frame.__init__(self, master)
        self.image = im
        self.value = value
        self.canvas = tkinter.Canvas(self, width=im.size[0], height=im.size[1])
        # Keep a reference on self so the PhotoImage is not garbage-collected.
        self.backdrop = ImageTk.PhotoImage(im)
        self.canvas.create_image(0, 0, image=self.backdrop, anchor=tkinter.NW)
        self.canvas.pack()
        scale = tkinter.Scale(self, orient=tkinter.HORIZONTAL, from_=0, to=255,
                              resolution=1, command=self.update_scale,
                              length=256)
        scale.set(value)
        scale.bind("<ButtonRelease-1>", self.redraw)
        scale.pack()
        # uncomment the following line for instant feedback (might
        # be too slow on some platforms)
        # self.redraw()
    def update_scale(self, value):
        # Slider callback; tkinter delivers the value as a string.
        self.value = float(value)
        self.redraw()
    def redraw(self, event=None):
        # create overlay (note the explicit conversion to mode "1")
        im = self.image.point(lambda v, t=self.value: v >= t, "1")
        # Reference kept on self so the bitmap stays alive while displayed.
        self.overlay = ImageTk.BitmapImage(im, foreground="green")
        # update canvas
        self.canvas.delete("overlay")
        self.canvas.create_image(0, 0, image=self.overlay, anchor=tkinter.NW,
                                 tags="overlay")
# --------------------------------------------------------------------
# main
# Expect exactly one argument: the image file to threshold.
if len(sys.argv) != 2:
    print("Usage: thresholder file")
    sys.exit(1)
root = tkinter.Tk()
im = Image.open(sys.argv[1])
# Thresholding works on a single channel, so force grayscale mode "L".
if im.mode != "L":
    im = im.convert("L")
# im.thumbnail((320,200))
UI(root, im).pack()
root.mainloop()
| 25.17284 | 79 | 0.59539 |
import sys
if sys.version_info[0] > 2:
import tkinter
else:
import Tkinter as tkinter
from PIL import Image, ImageTk
class UI(tkinter.Frame):
def __init__(self, master, im, value=128):
tkinter.Frame.__init__(self, master)
self.image = im
self.value = value
self.canvas = tkinter.Canvas(self, width=im.size[0], height=im.size[1])
self.backdrop = ImageTk.PhotoImage(im)
self.canvas.create_image(0, 0, image=self.backdrop, anchor=tkinter.NW)
self.canvas.pack()
scale = tkinter.Scale(self, orient=tkinter.HORIZONTAL, from_=0, to=255,
resolution=1, command=self.update_scale,
length=256)
scale.set(value)
scale.bind("<ButtonRelease-1>", self.redraw)
scale.pack()
def update_scale(self, value):
self.value = float(value)
self.redraw()
def redraw(self, event=None):
im = self.image.point(lambda v, t=self.value: v >= t, "1")
self.overlay = ImageTk.BitmapImage(im, foreground="green")
self.canvas.delete("overlay")
self.canvas.create_image(0, 0, image=self.overlay, anchor=tkinter.NW,
tags="overlay")
if len(sys.argv) != 2:
print("Usage: thresholder file")
sys.exit(1)
root = tkinter.Tk()
im = Image.open(sys.argv[1])
if im.mode != "L":
im = im.convert("L")
UI(root, im).pack()
root.mainloop()
| true | true |
f731ebd92d3e6a4eb9b1d1ad5a0b1c60aa2b84c8 | 235 | py | Python | main.py | gesa23/ds1hw1 | fe69bcfd311467611a9534bbeaa7705ed95fafdb | [
"MIT"
] | null | null | null | main.py | gesa23/ds1hw1 | fe69bcfd311467611a9534bbeaa7705ed95fafdb | [
"MIT"
] | null | null | null | main.py | gesa23/ds1hw1 | fe69bcfd311467611a9534bbeaa7705ed95fafdb | [
"MIT"
] | null | null | null | from sklearn.datasets import load_iris
import pandas as pd
# Load the classic iris dataset (sklearn Bunch with data / target / names).
ds = load_iris()
# Feature matrix as a DataFrame, columns named after the measurements.
df = pd.DataFrame(data= ds["data"], columns=ds["feature_names"])
# Map each numeric class label (0-2) to its species name.
target_names = [ds.target_names[x] for x in ds.target]
df['species'] = target_names
print(df) | 29.375 | 64 | 0.744681 | from sklearn.datasets import load_iris
import pandas as pd
ds = load_iris()
df = pd.DataFrame(data= ds["data"], columns=ds["feature_names"])
target_names = [ds.target_names[x] for x in ds.target]
df['species'] = target_names
print(df) | true | true |
f731ebf69d1b3147077df832bd6fc49c83be8a50 | 5,683 | py | Python | core/target_assigner.py | jacke121/MBMD | 2daf5edb4fb40ee652baead4f9332ca00fa111a5 | [
"MIT"
] | 220 | 2018-09-17T15:42:54.000Z | 2021-09-13T13:14:22.000Z | core/target_assigner.py | jacke121/MBMD | 2daf5edb4fb40ee652baead4f9332ca00fa111a5 | [
"MIT"
] | 12 | 2018-09-19T09:30:42.000Z | 2019-07-01T04:03:51.000Z | core/target_assigner.py | jacke121/MBMD | 2daf5edb4fb40ee652baead4f9332ca00fa111a5 | [
"MIT"
] | 60 | 2018-09-18T00:29:50.000Z | 2021-02-22T03:55:19.000Z | from object_detection.core.target_assigner import TargetAssigner
import tensorflow as tf
from object_detection.core import box_list
class TargetAssignerExtend(TargetAssigner):
    """TargetAssigner variant that zeroes regression weights per gt label.

    Differs from the base class only in _create_regression_weights: a matched
    anchor keeps regression weight 1 only when its matched groundtruth label
    satisfies `matched_label[:, 0] <= 0` (see the NOTE there).
    """
    def assign(self, anchors, groundtruth_boxes, groundtruth_labels=None,
               **params):
        """Assign classification and regression targets to each anchor.
        The extended version assigns 0 regression weight to matched anchors
        whose groundtruth label fails the test in _create_regression_weights.
        For a given set of anchors and groundtruth detections, match anchors
        to groundtruth_boxes and assign classification and regression targets to
        each anchor as well as weights based on the resulting match (specifying,
        e.g., which anchors should not contribute to training loss).
        Anchors that are not matched to anything are given a classification target
        of self._unmatched_cls_target which can be specified via the constructor.
        Args:
          anchors: a BoxList representing N anchors
          groundtruth_boxes: a BoxList representing M groundtruth boxes
          groundtruth_labels: a tensor of shape [num_gt_boxes, d_1, ... d_k]
            with labels for each of the ground_truth boxes. The subshape
            [d_1, ... d_k] can be empty (corresponding to scalar inputs). When set
            to None, groundtruth_labels assumes a binary problem where all
            ground_truth boxes get a positive label (of 1).
          **params: Additional keyword arguments for specific implementations of
            the Matcher.
        Returns:
          cls_targets: a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k],
            where the subshape [d_1, ..., d_k] is compatible with groundtruth_labels
            which has shape [num_gt_boxes, d_1, d_2, ... d_k].
          cls_weights: a float32 tensor with shape [num_anchors]
          reg_targets: a float32 tensor with shape [num_anchors, box_code_dimension]
          reg_weights: a float32 tensor with shape [num_anchors]
          match: a matcher.Match object encoding the match between anchors and
            groundtruth boxes, with rows corresponding to groundtruth boxes
            and columns corresponding to anchors.
        Raises:
          ValueError: if anchors or groundtruth_boxes are not of type
            box_list.BoxList
        """
        if not isinstance(anchors, box_list.BoxList):
            raise ValueError('anchors must be an BoxList')
        if not isinstance(groundtruth_boxes, box_list.BoxList):
            raise ValueError('groundtruth_boxes must be an BoxList')
        if groundtruth_labels is None:
            groundtruth_labels = tf.ones(tf.expand_dims(groundtruth_boxes.num_boxes(),
                                                        0))
            groundtruth_labels = tf.expand_dims(groundtruth_labels, -1)
        # Ensure each label's trailing dims match the unmatched-class target.
        shape_assert = tf.assert_equal(tf.shape(groundtruth_labels)[1:],
                                       tf.shape(self._unmatched_cls_target))
        with tf.control_dependencies([shape_assert]):
            match_quality_matrix = self._similarity_calc.compare(groundtruth_boxes,
                                                                 anchors)
            match = self._matcher.match(match_quality_matrix, **params)
            reg_targets = self._create_regression_targets(anchors,
                                                          groundtruth_boxes,
                                                          match)
            cls_targets = self._create_classification_targets(groundtruth_labels,
                                                              match)
            reg_weights = self._create_regression_weights(match, groundtruth_labels)
            cls_weights = self._create_classification_weights(
                match, self._positive_class_weight, self._negative_class_weight)
        num_anchors = anchors.num_boxes_static()
        if num_anchors is not None:
            reg_targets = self._reset_target_shape(reg_targets, num_anchors)
            cls_targets = self._reset_target_shape(cls_targets, num_anchors)
            reg_weights = self._reset_target_shape(reg_weights, num_anchors)
            cls_weights = self._reset_target_shape(cls_weights, num_anchors)
        return cls_targets, cls_weights, reg_targets, reg_weights, match
    def _create_regression_weights(self, match, groundtruth_labels):
        """Set regression weight for each anchor.
        A matched anchor keeps weight 1 only when the first element of its
        matched groundtruth label is <= 0; every other anchor gets weight 0.
        NOTE(review): the original docstring claimed "1 for every positive
        anchor", but the code keeps weight where matched_label[:, 0] <= 0 -
        confirm the label convention used by callers before relying on this.
        Args:
          match: a matcher.Match object that provides a matching between anchors
            and groundtruth boxes.
          groundtruth_labels: groundtruth label tensor; element [:, 0] is
            consulted for the weight test above.
        Returns:
          reg_weights: a float32 tensor with shape [num_anchors] representing
            regression weights
        """
        reg_weights = tf.cast(match.matched_column_indicator(), tf.float32)
        matched_gt_indices = match.matched_row_indices()
        matched_label = tf.gather(groundtruth_labels, matched_gt_indices)
        matched_is_foreground = tf.cast(matched_label[:,0] <= 0, tf.float32)
        matched_anchor_indices = match.matched_column_indices()
        unmatched_ignored_anchor_indices=match.unmatched_or_ignored_column_indices()
        unmatched_ignored_reg_weights = tf.gather(reg_weights, unmatched_ignored_anchor_indices)
        # Stitch matched and unmatched/ignored weights back into anchor order.
        reg_weights= tf.dynamic_stitch(
            [matched_anchor_indices, unmatched_ignored_anchor_indices],
            [matched_is_foreground, unmatched_ignored_reg_weights])
        return reg_weights
| 52.137615 | 96 | 0.656343 | from object_detection.core.target_assigner import TargetAssigner
import tensorflow as tf
from object_detection.core import box_list
class TargetAssignerExtend(TargetAssigner):
def assign(self, anchors, groundtruth_boxes, groundtruth_labels=None,
**params):
if not isinstance(anchors, box_list.BoxList):
raise ValueError('anchors must be an BoxList')
if not isinstance(groundtruth_boxes, box_list.BoxList):
raise ValueError('groundtruth_boxes must be an BoxList')
if groundtruth_labels is None:
groundtruth_labels = tf.ones(tf.expand_dims(groundtruth_boxes.num_boxes(),
0))
groundtruth_labels = tf.expand_dims(groundtruth_labels, -1)
shape_assert = tf.assert_equal(tf.shape(groundtruth_labels)[1:],
tf.shape(self._unmatched_cls_target))
with tf.control_dependencies([shape_assert]):
match_quality_matrix = self._similarity_calc.compare(groundtruth_boxes,
anchors)
match = self._matcher.match(match_quality_matrix, **params)
reg_targets = self._create_regression_targets(anchors,
groundtruth_boxes,
match)
cls_targets = self._create_classification_targets(groundtruth_labels,
match)
reg_weights = self._create_regression_weights(match, groundtruth_labels)
cls_weights = self._create_classification_weights(
match, self._positive_class_weight, self._negative_class_weight)
num_anchors = anchors.num_boxes_static()
if num_anchors is not None:
reg_targets = self._reset_target_shape(reg_targets, num_anchors)
cls_targets = self._reset_target_shape(cls_targets, num_anchors)
reg_weights = self._reset_target_shape(reg_weights, num_anchors)
cls_weights = self._reset_target_shape(cls_weights, num_anchors)
return cls_targets, cls_weights, reg_targets, reg_weights, match
def _create_regression_weights(self, match, groundtruth_labels):
reg_weights = tf.cast(match.matched_column_indicator(), tf.float32)
matched_gt_indices = match.matched_row_indices()
matched_label = tf.gather(groundtruth_labels, matched_gt_indices)
matched_is_foreground = tf.cast(matched_label[:,0] <= 0, tf.float32)
matched_anchor_indices = match.matched_column_indices()
unmatched_ignored_anchor_indices=match.unmatched_or_ignored_column_indices()
unmatched_ignored_reg_weights = tf.gather(reg_weights, unmatched_ignored_anchor_indices)
reg_weights= tf.dynamic_stitch(
[matched_anchor_indices, unmatched_ignored_anchor_indices],
[matched_is_foreground, unmatched_ignored_reg_weights])
return reg_weights
| true | true |
f731efb7559b666c4a6065e41852c7907e2377e0 | 1,575 | py | Python | tuframework/training/network_training/nnUNet_variants/loss_function/nnUNetTrainerV2_Loss_TopK10.py | Magnety/tuFramework | b31cb34d476ef306b52da955021f93c91c14ddf4 | [
"Apache-2.0"
] | null | null | null | tuframework/training/network_training/nnUNet_variants/loss_function/nnUNetTrainerV2_Loss_TopK10.py | Magnety/tuFramework | b31cb34d476ef306b52da955021f93c91c14ddf4 | [
"Apache-2.0"
] | null | null | null | tuframework/training/network_training/nnUNet_variants/loss_function/nnUNetTrainerV2_Loss_TopK10.py | Magnety/tuFramework | b31cb34d476ef306b52da955021f93c91c14ddf4 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tuframework.training.network_training.tuTrainerV2 import tuframeworkTrainerV2
from tuframework.training.loss_functions.TopK_loss import TopKLoss
class tuframeworkTrainerV2_Loss_TopK10(tuframeworkTrainerV2):
    """tuframeworkTrainerV2 variant that trains with TopKLoss(k=10)."""
    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
                         deterministic, fp16)
        # Override the default loss; k=10 is passed straight to TopKLoss -
        # presumably a top-k percentage, see TopKLoss for the exact semantics.
        self.loss = TopKLoss(k=10)
# Aliases exposing the same trainer under distinct names - presumably so the
# identical configuration can be launched several times; confirm with callers.
tuframeworkTrainerV2_Loss_TopK10_copy1 = tuframeworkTrainerV2_Loss_TopK10
tuframeworkTrainerV2_Loss_TopK10_copy2 = tuframeworkTrainerV2_Loss_TopK10
tuframeworkTrainerV2_Loss_TopK10_copy3 = tuframeworkTrainerV2_Loss_TopK10
tuframeworkTrainerV2_Loss_TopK10_copy4 = tuframeworkTrainerV2_Loss_TopK10
| 47.727273 | 114 | 0.780952 |
from tuframework.training.network_training.tuTrainerV2 import tuframeworkTrainerV2
from tuframework.training.loss_functions.TopK_loss import TopKLoss
class tuframeworkTrainerV2_Loss_TopK10(tuframeworkTrainerV2):
def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
unpack_data=True, deterministic=True, fp16=False):
super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
deterministic, fp16)
self.loss = TopKLoss(k=10)
tuframeworkTrainerV2_Loss_TopK10_copy1 = tuframeworkTrainerV2_Loss_TopK10
tuframeworkTrainerV2_Loss_TopK10_copy2 = tuframeworkTrainerV2_Loss_TopK10
tuframeworkTrainerV2_Loss_TopK10_copy3 = tuframeworkTrainerV2_Loss_TopK10
tuframeworkTrainerV2_Loss_TopK10_copy4 = tuframeworkTrainerV2_Loss_TopK10
| true | true |
f731f193fe6801caeef3fb2dbd500edd4a60f9c2 | 966 | py | Python | projects/controllers/experiments/runs/metrics.py | dnlcesilva/projects | 8e367d502aa0ffe3ed29f767d6ab039e6bee282f | [
"Apache-2.0"
] | 1 | 2021-06-26T19:13:49.000Z | 2021-06-26T19:13:49.000Z | projects/controllers/experiments/runs/metrics.py | ndarvishev/projects | 6a9855c5f8af8fad2799ef7a203e126b834c5056 | [
"Apache-2.0"
] | null | null | null | projects/controllers/experiments/runs/metrics.py | ndarvishev/projects | 6a9855c5f8af8fad2799ef7a203e126b834c5056 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""Experiments Metrics controller."""
import platiagro
from projects.exceptions import NotFound
class MetricController:
    """Experiment-run metrics controller.

    Thin wrapper around platiagro's metric storage that translates storage
    errors into API-level exceptions.
    """
    def __init__(self, session):
        # Database session; kept for a uniform controller interface.
        self.session = session
    def list_metrics(self, project_id: str, experiment_id: str, run_id: str, operator_id: str):
        """
        Lists all metrics from object storage.
        Parameters
        ----------
        project_id : str
            Unused here; kept so all controllers share the same signature.
        experiment_id : str
        run_id : str
            The run_id. If `run_id=latest`, then returns metrics from the latest run_id.
        operator_id : str
        Returns
        -------
        list
            A list of metrics.
        Raises
        ------
        NotFound
            When no metrics exist for the given run/operator.
        """
        try:
            return platiagro.list_metrics(experiment_id=experiment_id,
                                          operator_id=operator_id,
                                          run_id=run_id)
        except FileNotFoundError as e:
            # Chain the original error so the storage-level cause stays visible.
            raise NotFound(str(e)) from e
| 27.6 | 95 | 0.55176 |
import platiagro
from projects.exceptions import NotFound
class MetricController:
def __init__(self, session):
self.session = session
def list_metrics(self, project_id: str, experiment_id: str, run_id: str, operator_id: str):
try:
return platiagro.list_metrics(experiment_id=experiment_id,
operator_id=operator_id,
run_id=run_id)
except FileNotFoundError as e:
raise NotFound(str(e))
| true | true |
f731f1cac8881b492f15feffbb5c895e2bfc8902 | 13,143 | py | Python | diffusion_logic.py | marklr/vqgan-clip-app | 23edb7ae6234ab177a91865c02be160151fcf566 | [
"MIT"
] | null | null | null | diffusion_logic.py | marklr/vqgan-clip-app | 23edb7ae6234ab177a91865c02be160151fcf566 | [
"MIT"
] | null | null | null | diffusion_logic.py | marklr/vqgan-clip-app | 23edb7ae6234ab177a91865c02be160151fcf566 | [
"MIT"
] | null | null | null | import clip
import sys
import torch
from torchvision import transforms
from torchvision.transforms import functional as TF
from kornia import augmentation, filters
from torch import nn
from torch.nn import functional as F
import math
import lpips
from PIL import Image
sys.path.append("./guided-diffusion")
from guided_diffusion.script_util import (
create_model_and_diffusion,
model_and_diffusion_defaults,
)
# Maps the human-readable checkpoint name (used as `ckpt` by
# CLIPGuidedDiffusion) to the guided-diffusion weight file it loads.
DIFFUSION_METHODS_AND_WEIGHTS = {
    # "CLIP Guided Diffusion 256x256",
    "256x256 HQ Uncond": "256x256_diffusion_uncond.pt",
    "512x512 HQ Cond": "512x512_diffusion.pt",
    "512x512 HQ Uncond": "512x512_diffusion_uncond_finetune_008100.pt",
}
def spherical_dist_loss(x, y):
    """Squared great-circle distance between x and y after L2 normalization."""
    xn = F.normalize(x, dim=-1)
    yn = F.normalize(y, dim=-1)
    # Chord length -> arc length: dist = 2 * asin(|xn - yn| / 2), then squared.
    half_chord = (xn - yn).norm(dim=-1) / 2
    return 2 * half_chord.arcsin().pow(2)
def parse_prompt(prompt):
    """Split a "text:weight" prompt into (text, weight); weight defaults to 1."""
    text, sep, tail = prompt.rpartition(":")
    if not sep:
        # No ':' at all - rpartition leaves the whole input in the tail slot.
        return tail, 1.0
    return text, float(tail)
class MakeCutouts(nn.Module):
    """Sample `cutn` random square crops from an image batch, each rescaled
    to `cut_size` x `cut_size`; crop sizes are biased by `cut_pow`."""
    def __init__(self, cut_size, cutn, cut_pow=1.0):
        super().__init__()
        self.cut_size = cut_size
        self.cutn = cutn
        self.cut_pow = cut_pow
    def forward(self, input):
        height, width = input.shape[2:4]
        largest = min(width, height)
        smallest = min(width, height, self.cut_size)
        crops = []
        for _ in range(self.cutn):
            # Random crop size, then x/y offsets (same RNG draw order as before).
            span = int(
                torch.rand([]) ** self.cut_pow * (largest - smallest) + smallest
            )
            left = torch.randint(0, width - span + 1, ())
            top = torch.randint(0, height - span + 1, ())
            patch = input[:, :, top : top + span, left : left + span]
            crops.append(F.adaptive_avg_pool2d(patch, self.cut_size))
        return torch.cat(crops)
def tv_loss(input):
    """L2 total variation loss, as in Mahendran et al. (per-sample mean)."""
    # Replicate-pad one pixel on the right and bottom so the forward
    # differences are defined at the image border.
    padded = F.pad(input, (0, 1, 0, 1), "replicate")
    base = padded[..., :-1, :-1]
    horiz = padded[..., :-1, 1:] - base
    vert = padded[..., 1:, :-1] - base
    return (horiz.pow(2) + vert.pow(2)).mean([1, 2, 3])
def range_loss(input):
    """Penalize values outside [-1, 1]: squared overshoot, per-sample mean."""
    overshoot = input - torch.clamp(input, -1, 1)
    return (overshoot ** 2).mean([1, 2, 3])
class CLIPGuidedDiffusion:
    """CLIP-guided diffusion sampler.

    Steers an OpenAI guided-diffusion checkpoint toward a text prompt by
    adding CLIP-similarity gradients (plus TV / range / optional LPIPS
    regularizers) at every denoising step.
    Call order: load_model() -> model_init() -> iterate() once per step.
    """
    def __init__(
        self,
        prompt: str,
        ckpt: str,
        batch_size: int = 1,
        clip_guidance_scale: float = 1000,
        seed: int = 0,
        num_steps: int = 1000,
        continue_prev_run: bool = True,
        skip_timesteps: int = 0,
    ) -> None:
        # ckpt must be one of the names in DIFFUSION_METHODS_AND_WEIGHTS.
        assert ckpt in DIFFUSION_METHODS_AND_WEIGHTS.keys()
        self.ckpt = ckpt
        print(self.ckpt)
        # Default config
        self.model_config = model_and_diffusion_defaults()
        self.model_config.update(
            {
                "attention_resolutions": "32, 16, 8",
                "class_cond": True if ckpt == "512x512 HQ Cond" else False,
                "diffusion_steps": num_steps,
                "rescale_timesteps": True,
                "timestep_respacing": str(
                    num_steps
                ),  # modify this to decrease timesteps
                "image_size": 512 if ckpt.startswith("512") else 256,
                "learn_sigma": True,
                "noise_schedule": "linear",
                "num_channels": 256,
                "num_head_channels": 64,
                "num_res_blocks": 2,
                "resblock_updown": True,
                "use_checkpoint": False,
                "use_fp16": True,
                "use_scale_shift_norm": True,
            }
        )
        # Split text by "|" symbol into independently weighted sub-prompts.
        self.prompts = [phrase.strip() for phrase in prompt.split("|")]
        if self.prompts == [""]:
            self.prompts = []
        self.image_prompts = []  # TODO
        self.batch_size = batch_size
        # Controls how much the image should look like the prompt.
        self.clip_guidance_scale = clip_guidance_scale
        # Controls the smoothness of the final output.
        self.tv_scale = 150  # TODO add control widget
        # Controls how far out of range RGB values are allowed to be.
        self.range_scale = 50  # TODO add control widget
        self.cutn = 32  # TODO add control widget
        self.cutn_batches = 2  # TODO add control widget
        self.cut_pow = 0.5  # TODO add control widget
        # Removed, repeat batches by triggering a new run
        # self.n_batches = 1
        # This enhances the effect of the init image, a good value is 1000.
        self.init_scale = 1000  # TODO add control widget
        # This needs to be between approx. 200 and 500 when using an init image.
        # Higher values make the output look more like the init.
        self.skip_timesteps = skip_timesteps  # TODO add control widget
        self.seed = seed
        self.continue_prev_run = continue_prev_run
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        print("Using device:", self.device)
    def load_model(
        self,
        model_file_loc="assets/256x256_diffusion_uncond.pt",
        prev_model=None,
        prev_diffusion=None,
        prev_clip_model=None,
    ) -> tuple:
        """Load (or reuse) the diffusion model and the CLIP encoder.

        When continue_prev_run is set and all prev_* objects are supplied,
        they are reused instead of reloading weights from disk.
        Returns (model, diffusion, clip_model).
        """
        if (
            self.continue_prev_run is True
            and prev_model is not None
            and prev_diffusion is not None
            and prev_clip_model is not None
        ):
            self.model = prev_model
            self.diffusion = prev_diffusion
            self.clip_model = prev_clip_model
            self.clip_size = self.clip_model.visual.input_resolution
            # CLIP's published image normalization constants.
            self.normalize = transforms.Normalize(
                mean=[0.48145466, 0.4578275, 0.40821073],
                std=[0.26862954, 0.26130258, 0.27577711],
            )
        else:
            self.model, self.diffusion = create_model_and_diffusion(**self.model_config)
            self.model.load_state_dict(torch.load(model_file_loc, map_location="cpu"))
            self.model.eval().requires_grad_(False).to(self.device)
            if self.ckpt == "512x512 HQ Cond":
                # Re-enable gradients on selected layers for the conditional
                # model's guidance pass.
                for name, param in self.model.named_parameters():
                    if "qkv" in name or "norm" in name or "proj" in name:
                        param.requires_grad_()
            if self.model_config["use_fp16"]:
                self.model.convert_to_fp16()
            self.clip_model = (
                clip.load("ViT-B/16", jit=False)[0]
                .eval()
                .requires_grad_(False)
                .to(self.device)
            )
            self.clip_size = self.clip_model.visual.input_resolution
            # CLIP's published image normalization constants.
            self.normalize = transforms.Normalize(
                mean=[0.48145466, 0.4578275, 0.40821073],
                std=[0.26862954, 0.26130258, 0.27577711],
            )
        return self.model, self.diffusion, self.clip_model
    def cond_fn_conditional(self, x, t, y=None):
        """Guidance gradient for the conditional 512x512 model.

        Returns the negative gradient of the CLIP + TV loss w.r.t. x.
        """
        # From 512 HQ notebook using OpenAI's conditional 512x512 model
        # TODO: Merge with cond_fn's cutn_batches
        with torch.enable_grad():
            x = x.detach().requires_grad_()
            n = x.shape[0]
            my_t = torch.ones([n], device=self.device, dtype=torch.long) * self.cur_t
            out = self.diffusion.p_mean_variance(
                self.model, x, my_t, clip_denoised=False, model_kwargs={"y": y}
            )
            # Blend the denoised estimate with the current noisy x.
            fac = self.diffusion.sqrt_one_minus_alphas_cumprod[self.cur_t]
            x_in = out["pred_xstart"] * fac + x * (1 - fac)
            # Map [-1, 1] images to [0, 1] before CLIP preprocessing.
            clip_in = self.normalize(self.make_cutouts(x_in.add(1).div(2)))
            image_embeds = (
                self.clip_model.encode_image(clip_in).float().view([self.cutn, n, -1])
            )
            dists = spherical_dist_loss(image_embeds, self.target_embeds.unsqueeze(0))
            losses = dists.mean(0)
            tv_losses = tv_loss(x_in)
            loss = (
                losses.sum() * self.clip_guidance_scale
                + tv_losses.sum() * self.tv_scale
            )
            # TODO: Implement init image
            return -torch.autograd.grad(loss, x)[0]
    def cond_fn(self, x, t, out, y=None):
        """Guidance gradient for the unconditional models.

        Accumulates CLIP-similarity gradients over cutn_batches rounds of
        cutouts, then adds TV, range and (when an init image is set) LPIPS
        losses; returns the negative gradient w.r.t. x.
        """
        n = x.shape[0]
        fac = self.diffusion.sqrt_one_minus_alphas_cumprod[self.cur_t]
        x_in = out["pred_xstart"] * fac + x * (1 - fac)
        x_in_grad = torch.zeros_like(x_in)
        for i in range(self.cutn_batches):
            clip_in = self.normalize(self.make_cutouts(x_in.add(1).div(2)))
            image_embeds = self.clip_model.encode_image(clip_in).float()
            dists = spherical_dist_loss(
                image_embeds.unsqueeze(1), self.target_embeds.unsqueeze(0)
            )
            dists = dists.view([self.cutn, n, -1])
            # Weight each sub-prompt's distance, then average over cutouts.
            losses = dists.mul(self.weights).sum(2).mean(0)
            x_in_grad += (
                torch.autograd.grad(losses.sum() * self.clip_guidance_scale, x_in)[0]
                / self.cutn_batches
            )
        tv_losses = tv_loss(x_in)
        range_losses = range_loss(out["pred_xstart"])
        loss = tv_losses.sum() * self.tv_scale + range_losses.sum() * self.range_scale
        if self.init is not None and self.init_scale:
            init_losses = self.lpips_model(x_in, self.init)
            loss = loss + init_losses.sum() * self.init_scale
        x_in_grad += torch.autograd.grad(loss, x_in)[0]
        # Back-propagate the accumulated x_in gradient to x itself.
        grad = -torch.autograd.grad(x_in, x, x_in_grad)[0]
        return grad
    def model_init(self, init_image: Image.Image = None) -> None:
        """Encode prompts, set up cutouts/init image, and build the sampler.

        Must be called after load_model(); prepares self.samplesgen so that
        iterate() can step through the progressive sampling loop.
        """
        if self.seed is not None:
            torch.manual_seed(self.seed)
        self.make_cutouts = MakeCutouts(self.clip_size, self.cutn, self.cut_pow)
        self.side_x = self.side_y = self.model_config["image_size"]
        self.target_embeds, self.weights = [], []
        # Encode each text sub-prompt with CLIP; weights parsed from "text:w".
        for prompt in self.prompts:
            txt, weight = parse_prompt(prompt)
            self.target_embeds.append(
                self.clip_model.encode_text(clip.tokenize(txt).to(self.device)).float()
            )
            self.weights.append(weight)
        # TODO: Implement image prompt parsing
        # for prompt in self.image_prompts:
        #     path, weight = parse_prompt(prompt)
        #     img = Image.open(fetch(path)).convert('RGB')
        #     img = TF.resize(img, min(side_x, side_y, *img.size), transforms.InterpolationMode.LANCZOS)
        #     batch = make_cutouts(TF.to_tensor(img).unsqueeze(0).to(device))
        #     embed = clip_model.encode_image(normalize(batch)).float()
        #     target_embeds.append(embed)
        #     weights.extend([weight / cutn] * cutn)
        self.target_embeds = torch.cat(self.target_embeds)
        self.weights = torch.tensor(self.weights, device=self.device)
        if self.weights.sum().abs() < 1e-3:
            raise RuntimeError("The weights must not sum to 0.")
        self.weights /= self.weights.sum().abs()
        self.init = None
        if init_image is not None:
            # Resize and map the init image from [0, 1] to [-1, 1].
            self.init = init_image.resize((self.side_x, self.side_y), Image.LANCZOS)
            self.init = (
                TF.to_tensor(self.init).to(self.device).unsqueeze(0).mul(2).sub(1)
            )
        # LPIPS not required if init_image not used!
        if self.init is None:
            self.lpips_model = None
        else:
            self.lpips_model = lpips.LPIPS(net="vgg").to(self.device)
        if self.model_config["timestep_respacing"].startswith("ddim"):
            sample_fn = self.diffusion.ddim_sample_loop_progressive
        else:
            sample_fn = self.diffusion.p_sample_loop_progressive
        # Index of the current timestep, counted down by iterate().
        self.cur_t = self.diffusion.num_timesteps - self.skip_timesteps - 1
        if self.ckpt == "512x512 HQ Cond":
            print("Using conditional sampling fn")
            self.samples = sample_fn(
                self.model,
                (self.batch_size, 3, self.side_y, self.side_x),
                clip_denoised=False,
                model_kwargs={
                    "y": torch.zeros(
                        [self.batch_size], device=self.device, dtype=torch.long
                    )
                },
                cond_fn=self.cond_fn_conditional,
                progress=True,
                skip_timesteps=self.skip_timesteps,
                init_image=self.init,
                randomize_class=True,
            )
        else:
            print("Using unconditional sampling fn")
            self.samples = sample_fn(
                self.model,
                (self.batch_size, 3, self.side_y, self.side_x),
                clip_denoised=False,
                model_kwargs={},
                cond_fn=self.cond_fn,
                progress=True,
                skip_timesteps=self.skip_timesteps,
                init_image=self.init,
                randomize_class=True,
                cond_fn_with_grad=True,
            )
        self.samplesgen = enumerate(self.samples)
    def iterate(self):
        """Advance one diffusion step; return the current PIL image estimates."""
        self.cur_t -= 1
        _, sample = next(self.samplesgen)
        ims = []
        for _, image in enumerate(sample["pred_xstart"]):
            # Map from [-1, 1] back to [0, 1] for display.
            im = TF.to_pil_image(image.add(1).div(2).clamp(0, 1))
            ims.append(im)
        return ims
| 37.338068 | 104 | 0.575668 | import clip
import sys
import torch
from torchvision import transforms
from torchvision.transforms import functional as TF
from kornia import augmentation, filters
from torch import nn
from torch.nn import functional as F
import math
import lpips
from PIL import Image
sys.path.append("./guided-diffusion")
from guided_diffusion.script_util import (
create_model_and_diffusion,
model_and_diffusion_defaults,
)
DIFFUSION_METHODS_AND_WEIGHTS = {
"256x256 HQ Uncond": "256x256_diffusion_uncond.pt",
"512x512 HQ Cond": "512x512_diffusion.pt",
"512x512 HQ Uncond": "512x512_diffusion_uncond_finetune_008100.pt",
}
def spherical_dist_loss(x, y):
x = F.normalize(x, dim=-1)
y = F.normalize(y, dim=-1)
return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
def parse_prompt(prompt):
vals = prompt.rsplit(":", 1)
vals = vals + ["", "1"][len(vals) :]
return vals[0], float(vals[1])
class MakeCutouts(nn.Module):
def __init__(self, cut_size, cutn, cut_pow=1.0):
super().__init__()
self.cut_size = cut_size
self.cutn = cutn
self.cut_pow = cut_pow
def forward(self, input):
sideY, sideX = input.shape[2:4]
max_size = min(sideX, sideY)
min_size = min(sideX, sideY, self.cut_size)
cutouts = []
for _ in range(self.cutn):
size = int(
torch.rand([]) ** self.cut_pow * (max_size - min_size) + min_size
)
offsetx = torch.randint(0, sideX - size + 1, ())
offsety = torch.randint(0, sideY - size + 1, ())
cutout = input[:, :, offsety : offsety + size, offsetx : offsetx + size]
cutouts.append(F.adaptive_avg_pool2d(cutout, self.cut_size))
return torch.cat(cutouts)
def tv_loss(input):
input = F.pad(input, (0, 1, 0, 1), "replicate")
x_diff = input[..., :-1, 1:] - input[..., :-1, :-1]
y_diff = input[..., 1:, :-1] - input[..., :-1, :-1]
return (x_diff ** 2 + y_diff ** 2).mean([1, 2, 3])
def range_loss(input):
return (input - input.clamp(-1, 1)).pow(2).mean([1, 2, 3])
class CLIPGuidedDiffusion:
def __init__(
self,
prompt: str,
ckpt: str,
batch_size: int = 1,
clip_guidance_scale: float = 1000,
seed: int = 0,
num_steps: int = 1000,
continue_prev_run: bool = True,
skip_timesteps: int = 0,
) -> None:
assert ckpt in DIFFUSION_METHODS_AND_WEIGHTS.keys()
self.ckpt = ckpt
print(self.ckpt)
self.model_config = model_and_diffusion_defaults()
self.model_config.update(
{
"attention_resolutions": "32, 16, 8",
"class_cond": True if ckpt == "512x512 HQ Cond" else False,
"diffusion_steps": num_steps,
"rescale_timesteps": True,
"timestep_respacing": str(
num_steps
),
"image_size": 512 if ckpt.startswith("512") else 256,
"learn_sigma": True,
"noise_schedule": "linear",
"num_channels": 256,
"num_head_channels": 64,
"num_res_blocks": 2,
"resblock_updown": True,
"use_checkpoint": False,
"use_fp16": True,
"use_scale_shift_norm": True,
}
)
self.prompts = [phrase.strip() for phrase in prompt.split("|")]
if self.prompts == [""]:
self.prompts = []
self.image_prompts = []
self.batch_size = batch_size
self.clip_guidance_scale = clip_guidance_scale
self.tv_scale = 150
self.range_scale = 50
self.cutn = 32
self.cutn_batches = 2
self.cut_pow = 0.5
self.init_scale = 1000
self.skip_timesteps = skip_timesteps
self.seed = seed
self.continue_prev_run = continue_prev_run
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("Using device:", self.device)
def load_model(
self,
model_file_loc="assets/256x256_diffusion_uncond.pt",
prev_model=None,
prev_diffusion=None,
prev_clip_model=None,
) -> None:
if (
self.continue_prev_run is True
and prev_model is not None
and prev_diffusion is not None
and prev_clip_model is not None
):
self.model = prev_model
self.diffusion = prev_diffusion
self.clip_model = prev_clip_model
self.clip_size = self.clip_model.visual.input_resolution
self.normalize = transforms.Normalize(
mean=[0.48145466, 0.4578275, 0.40821073],
std=[0.26862954, 0.26130258, 0.27577711],
)
else:
self.model, self.diffusion = create_model_and_diffusion(**self.model_config)
self.model.load_state_dict(torch.load(model_file_loc, map_location="cpu"))
self.model.eval().requires_grad_(False).to(self.device)
if self.ckpt == "512x512 HQ Cond":
for name, param in self.model.named_parameters():
if "qkv" in name or "norm" in name or "proj" in name:
param.requires_grad_()
if self.model_config["use_fp16"]:
self.model.convert_to_fp16()
self.clip_model = (
clip.load("ViT-B/16", jit=False)[0]
.eval()
.requires_grad_(False)
.to(self.device)
)
self.clip_size = self.clip_model.visual.input_resolution
self.normalize = transforms.Normalize(
mean=[0.48145466, 0.4578275, 0.40821073],
std=[0.26862954, 0.26130258, 0.27577711],
)
return self.model, self.diffusion, self.clip_model
def cond_fn_conditional(self, x, t, y=None):
# TODO: Merge with cond_fn's cutn_batches
with torch.enable_grad():
x = x.detach().requires_grad_()
n = x.shape[0]
my_t = torch.ones([n], device=self.device, dtype=torch.long) * self.cur_t
out = self.diffusion.p_mean_variance(
self.model, x, my_t, clip_denoised=False, model_kwargs={"y": y}
)
fac = self.diffusion.sqrt_one_minus_alphas_cumprod[self.cur_t]
x_in = out["pred_xstart"] * fac + x * (1 - fac)
clip_in = self.normalize(self.make_cutouts(x_in.add(1).div(2)))
image_embeds = (
self.clip_model.encode_image(clip_in).float().view([self.cutn, n, -1])
)
dists = spherical_dist_loss(image_embeds, self.target_embeds.unsqueeze(0))
losses = dists.mean(0)
tv_losses = tv_loss(x_in)
loss = (
losses.sum() * self.clip_guidance_scale
+ tv_losses.sum() * self.tv_scale
)
return -torch.autograd.grad(loss, x)[0]
def cond_fn(self, x, t, out, y=None):
n = x.shape[0]
fac = self.diffusion.sqrt_one_minus_alphas_cumprod[self.cur_t]
x_in = out["pred_xstart"] * fac + x * (1 - fac)
x_in_grad = torch.zeros_like(x_in)
for i in range(self.cutn_batches):
clip_in = self.normalize(self.make_cutouts(x_in.add(1).div(2)))
image_embeds = self.clip_model.encode_image(clip_in).float()
dists = spherical_dist_loss(
image_embeds.unsqueeze(1), self.target_embeds.unsqueeze(0)
)
dists = dists.view([self.cutn, n, -1])
losses = dists.mul(self.weights).sum(2).mean(0)
x_in_grad += (
torch.autograd.grad(losses.sum() * self.clip_guidance_scale, x_in)[0]
/ self.cutn_batches
)
tv_losses = tv_loss(x_in)
range_losses = range_loss(out["pred_xstart"])
loss = tv_losses.sum() * self.tv_scale + range_losses.sum() * self.range_scale
if self.init is not None and self.init_scale:
init_losses = self.lpips_model(x_in, self.init)
loss = loss + init_losses.sum() * self.init_scale
x_in_grad += torch.autograd.grad(loss, x_in)[0]
grad = -torch.autograd.grad(x_in, x, x_in_grad)[0]
return grad
def model_init(self, init_image: Image.Image = None) -> None:
if self.seed is not None:
torch.manual_seed(self.seed)
self.make_cutouts = MakeCutouts(self.clip_size, self.cutn, self.cut_pow)
self.side_x = self.side_y = self.model_config["image_size"]
self.target_embeds, self.weights = [], []
for prompt in self.prompts:
txt, weight = parse_prompt(prompt)
self.target_embeds.append(
self.clip_model.encode_text(clip.tokenize(txt).to(self.device)).float()
)
self.weights.append(weight)
self.target_embeds = torch.cat(self.target_embeds)
self.weights = torch.tensor(self.weights, device=self.device)
if self.weights.sum().abs() < 1e-3:
raise RuntimeError("The weights must not sum to 0.")
self.weights /= self.weights.sum().abs()
self.init = None
if init_image is not None:
self.init = init_image.resize((self.side_x, self.side_y), Image.LANCZOS)
self.init = (
TF.to_tensor(self.init).to(self.device).unsqueeze(0).mul(2).sub(1)
)
if self.init is None:
self.lpips_model = None
else:
self.lpips_model = lpips.LPIPS(net="vgg").to(self.device)
if self.model_config["timestep_respacing"].startswith("ddim"):
sample_fn = self.diffusion.ddim_sample_loop_progressive
else:
sample_fn = self.diffusion.p_sample_loop_progressive
self.cur_t = self.diffusion.num_timesteps - self.skip_timesteps - 1
if self.ckpt == "512x512 HQ Cond":
print("Using conditional sampling fn")
self.samples = sample_fn(
self.model,
(self.batch_size, 3, self.side_y, self.side_x),
clip_denoised=False,
model_kwargs={
"y": torch.zeros(
[self.batch_size], device=self.device, dtype=torch.long
)
},
cond_fn=self.cond_fn_conditional,
progress=True,
skip_timesteps=self.skip_timesteps,
init_image=self.init,
randomize_class=True,
)
else:
print("Using unconditional sampling fn")
self.samples = sample_fn(
self.model,
(self.batch_size, 3, self.side_y, self.side_x),
clip_denoised=False,
model_kwargs={},
cond_fn=self.cond_fn,
progress=True,
skip_timesteps=self.skip_timesteps,
init_image=self.init,
randomize_class=True,
cond_fn_with_grad=True,
)
self.samplesgen = enumerate(self.samples)
def iterate(self):
self.cur_t -= 1
_, sample = next(self.samplesgen)
ims = []
for _, image in enumerate(sample["pred_xstart"]):
im = TF.to_pil_image(image.add(1).div(2).clamp(0, 1))
ims.append(im)
return ims
| true | true |
f731f4063b2323508a137e94df93708a25ff3792 | 11,583 | py | Python | evap/contributor/views.py | JannisBerndt/EvaP | a3ca8bcf091e811421084c4db14ae9666cf2a27f | [
"MIT"
] | 26 | 2015-01-18T18:01:57.000Z | 2018-10-12T14:37:15.000Z | evap/contributor/views.py | JannisBerndt/EvaP | a3ca8bcf091e811421084c4db14ae9666cf2a27f | [
"MIT"
] | 737 | 2015-01-02T17:43:25.000Z | 2018-12-10T20:45:10.000Z | evap/contributor/views.py | JannisBerndt/EvaP | a3ca8bcf091e811421084c4db14ae9666cf2a27f | [
"MIT"
] | 83 | 2015-01-14T12:39:41.000Z | 2018-10-29T16:36:43.000Z | from django.contrib import messages
from django.core.exceptions import PermissionDenied, SuspiciousOperation
from django.db import IntegrityError, transaction
from django.db.models import Exists, Max, OuterRef, Q
from django.forms.models import inlineformset_factory
from django.shortcuts import get_object_or_404, redirect, render
from django.utils.translation import gettext as _
from django.views.decorators.http import require_POST
from evap.contributor.forms import DelegateSelectionForm, EditorContributionForm, EvaluationForm
from evap.evaluation.auth import editor_or_delegate_required, responsible_or_contributor_or_delegate_required
from evap.evaluation.models import (
Contribution,
Course,
CourseType,
Degree,
EmailTemplate,
Evaluation,
Semester,
UserProfile,
)
from evap.evaluation.tools import (
FileResponse,
get_object_from_dict_pk_entry_or_logged_40x,
get_parameter_from_url_or_session,
sort_formset,
)
from evap.results.exporters import ResultsExporter
from evap.results.tools import annotate_distributions_and_grades, get_evaluations_with_course_result_attributes
from evap.staff.forms import ContributionFormset
from evap.student.views import get_valid_form_groups_or_render_vote_page
@responsible_or_contributor_or_delegate_required
def index(request):
user = request.user
show_delegated = get_parameter_from_url_or_session(request, "show_delegated", True)
represented_proxy_users = user.represented_users.filter(is_proxy_user=True)
contributor_visible_states = [
Evaluation.State.PREPARED,
Evaluation.State.EDITOR_APPROVED,
Evaluation.State.APPROVED,
Evaluation.State.IN_EVALUATION,
Evaluation.State.EVALUATED,
Evaluation.State.REVIEWED,
Evaluation.State.PUBLISHED,
]
own_courses = Course.objects.filter(
Q(evaluations__state__in=contributor_visible_states)
& (
Q(responsibles=user)
| Q(evaluations__contributions__contributor=user)
| Q(evaluations__contributions__contributor__in=represented_proxy_users)
| Q(responsibles__in=represented_proxy_users)
)
)
own_evaluations = (
Evaluation.objects.filter(course__in=own_courses)
.annotate(contributes_to=Exists(Evaluation.objects.filter(id=OuterRef("id"), contributions__contributor=user)))
.prefetch_related("course", "course__evaluations", "course__degrees", "course__type", "course__semester")
)
own_evaluations = [evaluation for evaluation in own_evaluations if evaluation.can_be_seen_by(user)]
displayed_evaluations = own_evaluations
if show_delegated:
represented_users = user.represented_users.exclude(is_proxy_user=True)
delegated_courses = Course.objects.filter(
Q(evaluations__state__in=contributor_visible_states)
& (
Q(responsibles__in=represented_users)
| Q(
evaluations__contributions__role=Contribution.Role.EDITOR,
evaluations__contributions__contributor__in=represented_users,
)
)
)
delegated_evaluations = Evaluation.objects.filter(course__in=delegated_courses).prefetch_related(
"course", "course__evaluations", "course__degrees", "course__type", "course__semester"
)
delegated_evaluations = [evaluation for evaluation in delegated_evaluations if evaluation.can_be_seen_by(user)]
for evaluation in delegated_evaluations:
evaluation.delegated_evaluation = True
displayed_evaluations += set(delegated_evaluations) - set(displayed_evaluations)
displayed_evaluations.sort(
key=lambda evaluation: (evaluation.course.name, evaluation.name)
) # evaluations must be sorted for regrouping them in the template
annotate_distributions_and_grades(e for e in displayed_evaluations if e.state == Evaluation.State.PUBLISHED)
displayed_evaluations = get_evaluations_with_course_result_attributes(displayed_evaluations)
semesters = Semester.objects.all()
semester_list = [
dict(
semester_name=semester.name,
id=semester.id,
is_active=semester.is_active,
evaluations=[
evaluation for evaluation in displayed_evaluations if evaluation.course.semester_id == semester.id
],
)
for semester in semesters
]
template_data = dict(
semester_list=semester_list,
show_delegated=show_delegated,
delegate_selection_form=DelegateSelectionForm(),
)
return render(request, "contributor_index.html", template_data)
@editor_or_delegate_required
def evaluation_view(request, evaluation_id):
user = request.user
evaluation = get_object_or_404(Evaluation, id=evaluation_id)
# check rights
if (
not evaluation.is_user_editor_or_delegate(user)
or not Evaluation.State.PREPARED <= evaluation.state <= Evaluation.State.REVIEWED
):
raise PermissionDenied
InlineContributionFormset = inlineformset_factory(
Evaluation, Contribution, formset=ContributionFormset, form=EditorContributionForm, extra=0
)
form = EvaluationForm(request.POST or None, instance=evaluation)
formset = InlineContributionFormset(request.POST or None, instance=evaluation)
# make everything read-only
for cform in formset.forms + [form]:
for field in cform.fields.values():
field.disabled = True
template_data = dict(
form=form,
formset=formset,
evaluation=evaluation,
editable=False,
questionnaires_with_answers_per_contributor={},
)
return render(request, "contributor_evaluation_form.html", template_data)
def render_preview(request, formset, evaluation_form, evaluation):
# open transaction to not let any other requests see anything of what we're doing here
try:
with transaction.atomic():
evaluation = evaluation_form.save()
formset.save()
request.POST = None # this prevents errors rendered in the vote form
preview_response = get_valid_form_groups_or_render_vote_page(
request, evaluation, preview=True, for_rendering_in_modal=True
)[1].content.decode()
raise IntegrityError # rollback transaction to discard the database writes
except IntegrityError:
pass
return preview_response
@editor_or_delegate_required
def evaluation_edit(request, evaluation_id):
evaluation = get_object_or_404(Evaluation, id=evaluation_id)
# check rights
if not (evaluation.is_user_editor_or_delegate(request.user) and evaluation.state == Evaluation.State.PREPARED):
raise PermissionDenied
post_operation = request.POST.get("operation") if request.POST else None
preview = post_operation == "preview"
InlineContributionFormset = inlineformset_factory(
Evaluation, Contribution, formset=ContributionFormset, form=EditorContributionForm, extra=1
)
evaluation_form = EvaluationForm(request.POST or None, instance=evaluation)
formset = InlineContributionFormset(
request.POST or None, instance=evaluation, form_kwargs={"evaluation": evaluation}
)
forms_are_valid = evaluation_form.is_valid() and formset.is_valid()
if forms_are_valid and not preview:
if post_operation not in ("save", "approve"):
raise SuspiciousOperation("Invalid POST operation")
form_has_changed = evaluation_form.has_changed() or formset.has_changed()
evaluation_form.save()
formset.save()
if post_operation == "approve":
evaluation.editor_approve()
evaluation.save()
if form_has_changed:
messages.success(request, _("Successfully updated and approved evaluation."))
else:
messages.success(request, _("Successfully approved evaluation."))
else:
messages.success(request, _("Successfully updated evaluation."))
return redirect("contributor:index")
preview_html = None
if preview and forms_are_valid:
preview_html = render_preview(request, formset, evaluation_form, evaluation)
if not forms_are_valid and (evaluation_form.errors or formset.errors):
if preview:
messages.error(request, _("The preview could not be rendered. Please resolve the errors shown below."))
else:
messages.error(request, _("The form was not saved. Please resolve the errors shown below."))
sort_formset(request, formset)
template_data = dict(
form=evaluation_form,
formset=formset,
evaluation=evaluation,
editable=True,
preview_html=preview_html,
questionnaires_with_answers_per_contributor={},
)
return render(request, "contributor_evaluation_form.html", template_data)
@responsible_or_contributor_or_delegate_required
def evaluation_preview(request, evaluation_id):
user = request.user
evaluation = get_object_or_404(Evaluation, id=evaluation_id)
# check rights
if not (
evaluation.is_user_responsible_or_contributor_or_delegate(user)
and Evaluation.State.PREPARED <= evaluation.state <= Evaluation.State.REVIEWED
):
raise PermissionDenied
return get_valid_form_groups_or_render_vote_page(request, evaluation, preview=True)[1]
@require_POST
@editor_or_delegate_required
def evaluation_direct_delegation(request, evaluation_id):
evaluation = get_object_or_404(Evaluation, id=evaluation_id)
delegate_user = get_object_from_dict_pk_entry_or_logged_40x(UserProfile, request.POST, "delegate_to")
contribution, created = Contribution.objects.update_or_create(
evaluation=evaluation,
contributor=delegate_user,
defaults={"role": Contribution.Role.EDITOR},
)
if created:
contribution.order = evaluation.contributions.all().aggregate(Max("order"))["order__max"] + 1
contribution.save()
template = EmailTemplate.objects.get(name=EmailTemplate.DIRECT_DELEGATION)
subject_params = {"evaluation": evaluation, "user": request.user, "delegate_user": delegate_user}
body_params = subject_params
# we don't provide the request here since send_to_user only uses it to display a warning message in case the user does not have
# an email address. In this special case, we don't want that warning. Instead, we want a mail to the admins.
template.send_to_user(delegate_user, subject_params, body_params, use_cc=True, additional_cc_users=[request.user])
messages.add_message(
request,
messages.SUCCESS,
_('{} was added as a contributor for evaluation "{}" and was sent an email with further information.').format(
str(delegate_user), str(evaluation)
),
)
return redirect("contributor:index")
def export_contributor_results(contributor):
filename = f"Evaluation_{contributor.full_name}.xls"
response = FileResponse(filename, content_type="application/vnd.ms-excel")
ResultsExporter().export(
response,
Semester.objects.all(),
[(Degree.objects.all(), CourseType.objects.all())],
include_not_enough_voters=True,
include_unpublished=False,
contributor=contributor,
)
return response
@responsible_or_contributor_or_delegate_required
def export(request):
return export_contributor_results(request.user)
| 39 | 131 | 0.723388 | from django.contrib import messages
from django.core.exceptions import PermissionDenied, SuspiciousOperation
from django.db import IntegrityError, transaction
from django.db.models import Exists, Max, OuterRef, Q
from django.forms.models import inlineformset_factory
from django.shortcuts import get_object_or_404, redirect, render
from django.utils.translation import gettext as _
from django.views.decorators.http import require_POST
from evap.contributor.forms import DelegateSelectionForm, EditorContributionForm, EvaluationForm
from evap.evaluation.auth import editor_or_delegate_required, responsible_or_contributor_or_delegate_required
from evap.evaluation.models import (
Contribution,
Course,
CourseType,
Degree,
EmailTemplate,
Evaluation,
Semester,
UserProfile,
)
from evap.evaluation.tools import (
FileResponse,
get_object_from_dict_pk_entry_or_logged_40x,
get_parameter_from_url_or_session,
sort_formset,
)
from evap.results.exporters import ResultsExporter
from evap.results.tools import annotate_distributions_and_grades, get_evaluations_with_course_result_attributes
from evap.staff.forms import ContributionFormset
from evap.student.views import get_valid_form_groups_or_render_vote_page
@responsible_or_contributor_or_delegate_required
def index(request):
user = request.user
show_delegated = get_parameter_from_url_or_session(request, "show_delegated", True)
represented_proxy_users = user.represented_users.filter(is_proxy_user=True)
contributor_visible_states = [
Evaluation.State.PREPARED,
Evaluation.State.EDITOR_APPROVED,
Evaluation.State.APPROVED,
Evaluation.State.IN_EVALUATION,
Evaluation.State.EVALUATED,
Evaluation.State.REVIEWED,
Evaluation.State.PUBLISHED,
]
own_courses = Course.objects.filter(
Q(evaluations__state__in=contributor_visible_states)
& (
Q(responsibles=user)
| Q(evaluations__contributions__contributor=user)
| Q(evaluations__contributions__contributor__in=represented_proxy_users)
| Q(responsibles__in=represented_proxy_users)
)
)
own_evaluations = (
Evaluation.objects.filter(course__in=own_courses)
.annotate(contributes_to=Exists(Evaluation.objects.filter(id=OuterRef("id"), contributions__contributor=user)))
.prefetch_related("course", "course__evaluations", "course__degrees", "course__type", "course__semester")
)
own_evaluations = [evaluation for evaluation in own_evaluations if evaluation.can_be_seen_by(user)]
displayed_evaluations = own_evaluations
if show_delegated:
represented_users = user.represented_users.exclude(is_proxy_user=True)
delegated_courses = Course.objects.filter(
Q(evaluations__state__in=contributor_visible_states)
& (
Q(responsibles__in=represented_users)
| Q(
evaluations__contributions__role=Contribution.Role.EDITOR,
evaluations__contributions__contributor__in=represented_users,
)
)
)
delegated_evaluations = Evaluation.objects.filter(course__in=delegated_courses).prefetch_related(
"course", "course__evaluations", "course__degrees", "course__type", "course__semester"
)
delegated_evaluations = [evaluation for evaluation in delegated_evaluations if evaluation.can_be_seen_by(user)]
for evaluation in delegated_evaluations:
evaluation.delegated_evaluation = True
displayed_evaluations += set(delegated_evaluations) - set(displayed_evaluations)
displayed_evaluations.sort(
key=lambda evaluation: (evaluation.course.name, evaluation.name)
)
annotate_distributions_and_grades(e for e in displayed_evaluations if e.state == Evaluation.State.PUBLISHED)
displayed_evaluations = get_evaluations_with_course_result_attributes(displayed_evaluations)
semesters = Semester.objects.all()
semester_list = [
dict(
semester_name=semester.name,
id=semester.id,
is_active=semester.is_active,
evaluations=[
evaluation for evaluation in displayed_evaluations if evaluation.course.semester_id == semester.id
],
)
for semester in semesters
]
template_data = dict(
semester_list=semester_list,
show_delegated=show_delegated,
delegate_selection_form=DelegateSelectionForm(),
)
return render(request, "contributor_index.html", template_data)
@editor_or_delegate_required
def evaluation_view(request, evaluation_id):
user = request.user
evaluation = get_object_or_404(Evaluation, id=evaluation_id)
if (
not evaluation.is_user_editor_or_delegate(user)
or not Evaluation.State.PREPARED <= evaluation.state <= Evaluation.State.REVIEWED
):
raise PermissionDenied
InlineContributionFormset = inlineformset_factory(
Evaluation, Contribution, formset=ContributionFormset, form=EditorContributionForm, extra=0
)
form = EvaluationForm(request.POST or None, instance=evaluation)
formset = InlineContributionFormset(request.POST or None, instance=evaluation)
for cform in formset.forms + [form]:
for field in cform.fields.values():
field.disabled = True
template_data = dict(
form=form,
formset=formset,
evaluation=evaluation,
editable=False,
questionnaires_with_answers_per_contributor={},
)
return render(request, "contributor_evaluation_form.html", template_data)
def render_preview(request, formset, evaluation_form, evaluation):
try:
with transaction.atomic():
evaluation = evaluation_form.save()
formset.save()
request.POST = None # this prevents errors rendered in the vote form
preview_response = get_valid_form_groups_or_render_vote_page(
request, evaluation, preview=True, for_rendering_in_modal=True
)[1].content.decode()
raise IntegrityError # rollback transaction to discard the database writes
except IntegrityError:
pass
return preview_response
@editor_or_delegate_required
def evaluation_edit(request, evaluation_id):
evaluation = get_object_or_404(Evaluation, id=evaluation_id)
# check rights
if not (evaluation.is_user_editor_or_delegate(request.user) and evaluation.state == Evaluation.State.PREPARED):
raise PermissionDenied
post_operation = request.POST.get("operation") if request.POST else None
preview = post_operation == "preview"
InlineContributionFormset = inlineformset_factory(
Evaluation, Contribution, formset=ContributionFormset, form=EditorContributionForm, extra=1
)
evaluation_form = EvaluationForm(request.POST or None, instance=evaluation)
formset = InlineContributionFormset(
request.POST or None, instance=evaluation, form_kwargs={"evaluation": evaluation}
)
forms_are_valid = evaluation_form.is_valid() and formset.is_valid()
if forms_are_valid and not preview:
if post_operation not in ("save", "approve"):
raise SuspiciousOperation("Invalid POST operation")
form_has_changed = evaluation_form.has_changed() or formset.has_changed()
evaluation_form.save()
formset.save()
if post_operation == "approve":
evaluation.editor_approve()
evaluation.save()
if form_has_changed:
messages.success(request, _("Successfully updated and approved evaluation."))
else:
messages.success(request, _("Successfully approved evaluation."))
else:
messages.success(request, _("Successfully updated evaluation."))
return redirect("contributor:index")
preview_html = None
if preview and forms_are_valid:
preview_html = render_preview(request, formset, evaluation_form, evaluation)
if not forms_are_valid and (evaluation_form.errors or formset.errors):
if preview:
messages.error(request, _("The preview could not be rendered. Please resolve the errors shown below."))
else:
messages.error(request, _("The form was not saved. Please resolve the errors shown below."))
sort_formset(request, formset)
template_data = dict(
form=evaluation_form,
formset=formset,
evaluation=evaluation,
editable=True,
preview_html=preview_html,
questionnaires_with_answers_per_contributor={},
)
return render(request, "contributor_evaluation_form.html", template_data)
@responsible_or_contributor_or_delegate_required
def evaluation_preview(request, evaluation_id):
user = request.user
evaluation = get_object_or_404(Evaluation, id=evaluation_id)
# check rights
if not (
evaluation.is_user_responsible_or_contributor_or_delegate(user)
and Evaluation.State.PREPARED <= evaluation.state <= Evaluation.State.REVIEWED
):
raise PermissionDenied
return get_valid_form_groups_or_render_vote_page(request, evaluation, preview=True)[1]
@require_POST
@editor_or_delegate_required
def evaluation_direct_delegation(request, evaluation_id):
evaluation = get_object_or_404(Evaluation, id=evaluation_id)
delegate_user = get_object_from_dict_pk_entry_or_logged_40x(UserProfile, request.POST, "delegate_to")
contribution, created = Contribution.objects.update_or_create(
evaluation=evaluation,
contributor=delegate_user,
defaults={"role": Contribution.Role.EDITOR},
)
if created:
contribution.order = evaluation.contributions.all().aggregate(Max("order"))["order__max"] + 1
contribution.save()
template = EmailTemplate.objects.get(name=EmailTemplate.DIRECT_DELEGATION)
subject_params = {"evaluation": evaluation, "user": request.user, "delegate_user": delegate_user}
body_params = subject_params
# we don't provide the request here since send_to_user only uses it to display a warning message in case the user does not have
template.send_to_user(delegate_user, subject_params, body_params, use_cc=True, additional_cc_users=[request.user])
messages.add_message(
request,
messages.SUCCESS,
_('{} was added as a contributor for evaluation "{}" and was sent an email with further information.').format(
str(delegate_user), str(evaluation)
),
)
return redirect("contributor:index")
def export_contributor_results(contributor):
filename = f"Evaluation_{contributor.full_name}.xls"
response = FileResponse(filename, content_type="application/vnd.ms-excel")
ResultsExporter().export(
response,
Semester.objects.all(),
[(Degree.objects.all(), CourseType.objects.all())],
include_not_enough_voters=True,
include_unpublished=False,
contributor=contributor,
)
return response
@responsible_or_contributor_or_delegate_required
def export(request):
return export_contributor_results(request.user)
| true | true |
f731f40f8cf167eb9de4c3331309f87f3f6d2a1d | 882 | py | Python | tests/test_deep/test_agents/test_a2c.py | Zeus3101/genrl | 03e7f693ede98b2afbcb3fc8505f8a2bc4eedffc | [
"MIT"
] | 1 | 2020-06-13T15:21:57.000Z | 2020-06-13T15:21:57.000Z | tests/test_deep/test_agents/test_a2c.py | Zeus3101/genrl | 03e7f693ede98b2afbcb3fc8505f8a2bc4eedffc | [
"MIT"
] | null | null | null | tests/test_deep/test_agents/test_a2c.py | Zeus3101/genrl | 03e7f693ede98b2afbcb3fc8505f8a2bc4eedffc | [
"MIT"
] | 2 | 2020-06-13T15:29:06.000Z | 2021-08-20T15:48:53.000Z | import shutil
from genrl.agents import A2C
from genrl.environments import VectorEnv
from genrl.trainers import OnPolicyTrainer
def test_a2c():
env = VectorEnv("CartPole-v0", 1)
algo = A2C("mlp", env, rollout_size=128)
trainer = OnPolicyTrainer(algo, env, log_mode=["csv"], logdir="./logs", epochs=1)
trainer.train()
shutil.rmtree("./logs")
def test_a2c_cnn():
env = VectorEnv("Pong-v0", 1, env_type="atari")
algo = A2C("cnn", env, rollout_size=128)
trainer = OnPolicyTrainer(algo, env, log_mode=["csv"], logdir="./logs", epochs=1)
trainer.train()
shutil.rmtree("./logs")
def test_a2c_shared():
env = VectorEnv("CartPole-v0", 1)
algo = A2C("mlp", env, shared_layers=(32, 32), rollout_size=128)
trainer = OnPolicyTrainer(algo, env, log_mode=["csv"], logdir="./logs", epochs=1)
trainer.train()
shutil.rmtree("./logs")
| 29.4 | 85 | 0.666667 | import shutil
from genrl.agents import A2C
from genrl.environments import VectorEnv
from genrl.trainers import OnPolicyTrainer
def test_a2c():
env = VectorEnv("CartPole-v0", 1)
algo = A2C("mlp", env, rollout_size=128)
trainer = OnPolicyTrainer(algo, env, log_mode=["csv"], logdir="./logs", epochs=1)
trainer.train()
shutil.rmtree("./logs")
def test_a2c_cnn():
env = VectorEnv("Pong-v0", 1, env_type="atari")
algo = A2C("cnn", env, rollout_size=128)
trainer = OnPolicyTrainer(algo, env, log_mode=["csv"], logdir="./logs", epochs=1)
trainer.train()
shutil.rmtree("./logs")
def test_a2c_shared():
env = VectorEnv("CartPole-v0", 1)
algo = A2C("mlp", env, shared_layers=(32, 32), rollout_size=128)
trainer = OnPolicyTrainer(algo, env, log_mode=["csv"], logdir="./logs", epochs=1)
trainer.train()
shutil.rmtree("./logs")
| true | true |
f731f473beaac8baa64a2a5996f9d8e18231c089 | 12,612 | py | Python | tests/cic_test.py | jurreht/cic | 95a5e32eeb26da8d18642add2259f164426e1a25 | [
"MIT"
] | null | null | null | tests/cic_test.py | jurreht/cic | 95a5e32eeb26da8d18642add2259f164426e1a25 | [
"MIT"
] | null | null | null | tests/cic_test.py | jurreht/cic | 95a5e32eeb26da8d18642add2259f164426e1a25 | [
"MIT"
] | null | null | null | import os
import numpy as np
from numpy.testing import assert_allclose
import pytest
import scipy.io
import scipy.stats
import cic
def cases():
"""
Loads all filenames of the pre-calculated test cases.
"""
case_dir = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'cases'
)
cases = []
for dir_path, _, files in os.walk(case_dir):
cases = cases + [os.path.join(dir_path, f) for f in files]
return cases
@pytest.mark.parametrize('inpath', cases())
# Run both serially and in parallel
@pytest.mark.parametrize('n_jobs', [None, -1])
def test_cic(inpath, n_jobs):
np.random.seed(323490)
# Load the case
objs = scipy.io.loadmat(inpath)
y00 = objs['y00'][:, 0]
y01 = objs['y01'][:, 0]
y10 = objs['y10'][:, 0]
y11 = objs['y11'][:, 0]
est_qte, se_qte, est_ate, se_ate = cic.calculate_cic(
y00, y01, y10, y11, n_bootstraps=499, n_draws=10000,
moments=[np.mean],
n_jobs=n_jobs,
# The original code uses some small (in my view unneccessary)
# numerical corrections when calculating cdf's and inverse cdf's.
# Without using them here also there will always be some test
# cases slightly off.
use_corrections=True
)
est_test = objs['est'][0, 1:10]
se_test = objs['se'][1, 1:10]
# Test quantile treatment effects
assert_allclose(est_qte, est_test)
assert_allclose(se_qte, se_test, atol=5e-2, rtol=1e-3)
# Test average treatment effect
# It is possible to get closer than an atol of 5e-3 by increasing n_draws
# above, at the cost of slower tests
assert_allclose(est_ate[0], objs['est'][0, 0], atol=5e-3)
assert_allclose(se_ate[0], objs['se'][1, 0], atol=5e-2, rtol=1e-3)
@pytest.mark.parametrize(
    'inpath',
    # exp8 and exp10 don't pass without use_corrections, which is only
    # supported for the simple case.
    [c for c in cases() if not ('exp8' in c or 'exp10' in c)])
def test_multiple_cic_from_simple_case(inpath):
    """CICModel on a 2x2 design must reproduce calculate_cic's results.

    Rebuilds the four reference samples as one (y, g, t) data set and
    checks the multi-group estimator against the same stored MATLAB
    reference values used by test_cic.
    """
    np.random.seed(442342234)

    # Load the case
    objs = scipy.io.loadmat(inpath)
    y00 = objs['y00'][:, 0]
    y01 = objs['y01'][:, 0]
    y10 = objs['y10'][:, 0]
    y11 = objs['y11'][:, 0]

    y = np.concatenate([y00, y01, y10, y11])
    g = np.concatenate([np.zeros(y00.shape[0] + y01.shape[0], dtype=np.int_),
                        np.ones(y10.shape[0] + y11.shape[0], dtype=np.int_)])
    t = np.concatenate([np.zeros(y00.shape[0], dtype=np.int_),
                        np.ones(y01.shape[0], dtype=np.int_),
                        np.zeros(y10.shape[0], dtype=np.int_),
                        np.ones(y11.shape[0], dtype=np.int_)])
    treat = np.array([[0, 0], [0, 1]], dtype=np.bool_)
    model = cic.CICModel(y, g, t, treat, n_bootstraps=499, moments=[np.mean],
                         n_draws=10000)
    assert np.all(model.treatment_for == np.array([[1, 1]], dtype=np.int_))

    est_test = objs['est'][0, 1:10]
    se_test = objs['se'][1, 1:10]
    assert_allclose(model.quantile_effect[0], est_test)
    assert_allclose(model.quantile_se[0], se_test, atol=5e-2, rtol=1e-3)

    # Test average treatment effect
    # It is possible to get closer than an atol of 5e-3 by increasing n_draws
    # above, at the cost of slower tests
    assert_allclose(model.moment_effect[0], objs['est'][0, 0], atol=5e-3)
    assert_allclose(model.moment_se[0], objs['se'][1, 0], atol=5e-2, rtol=1e-3)
def test_cic_model_no_effect():
    """
    Test a 3x3 CIC model where none of the treatments have any effect.

    The test is done by simulating and estimating the model many times
    and checking the coverage of the confidence intervals.
    """
    np.random.seed(45354354)
    # np.bool_ instead of the np.bool alias (removed in NumPy 1.24).
    treat = np.array([
        [0, 0, 0],
        [0, 0, 1],
        [0, 1, 1]
    ], dtype=np.bool_)
    n_trials = 250
    n_obs = 1000
    quantiles = np.array([0.1, .3, .5, .7, .9])
    effect_in_ci = np.zeros((3, quantiles.shape[0]), dtype=np.int_)
    for trial_ind in range(n_trials):
        g, t, y = generate_sample(n_obs)
        model = cic.CICModel(y, g, t, treat, quantiles)
        # Count how often the true (zero) effect lies inside the 95% CI.
        effect_in_ci += (
            (model.quantile_effect - 1.96 * model.quantile_se <= 0) &
            (model.quantile_effect + 1.96 * model.quantile_se >= 0))
    coverage = effect_in_ci / n_trials
    assert_allclose(coverage, np.ones_like(coverage) * .95, rtol=5e-2)
def test_cic_model_shift_effect():
    """
    Test a 3x3 CIC model where the treatments are linear shifts, but
    different for different groups and times.

    The test is done by simulating and estimating the model many times
    and checking the coverage of the confidence intervals.
    """
    np.random.seed(45354354)
    # np.bool_ instead of the np.bool alias (removed in NumPy 1.24).
    treat = np.array([
        [0, 0, 0],
        [0, 0, 1],
        [0, 1, 1]
    ], dtype=np.bool_)
    n_trials = 250
    n_obs = 1000
    quantiles = np.array([.25, .5, .75])
    moments = [np.mean, np.std]
    quantile_in_ci = np.zeros((3, 3, 3), dtype=np.int_)
    moment_in_ci = np.zeros((3, 3, 2), dtype=np.int_)
    for trial_ind in range(n_trials):
        g, t, y = generate_sample(n_obs)
        # Apply pure location shifts, different per treated cell.
        y[(g == 1) & (t == 2)] += 1
        y[(g == 2) & (t == 1)] -= 1
        y[(g == 2) & (t == 2)] -= 2
        model = cic.CICModel(y, g, t, treat, quantiles, moments)

        # A location shift moves every quantile by the shift amount.
        mean, se = model.treatment_quantile(1, 2)
        quantile_in_ci[:, 0] += ((mean - 1.96 * se <= 1) &
                                 (mean + 1.96 * se >= 1))
        mean, se = model.treatment_quantile(2, 1)
        quantile_in_ci[:, 1] += ((mean - 1.96 * se <= -1) &
                                 (mean + 1.96 * se >= -1))
        mean, se = model.treatment_quantile(2, 2)
        quantile_in_ci[:, 2] += ((mean - 1.96 * se <= -2) &
                                 (mean + 1.96 * se >= -2))

        # Means shift; standard deviations are unchanged (effect 0).
        mean, se = model.treatment_moment(1, 2)
        moment_in_ci[:, 0, 0] += ((mean[0] - 1.96 * se[0] <= 1) &
                                  (mean[0] + 1.96 * se[0] >= 1))
        moment_in_ci[:, 0, 1] += ((mean[1] - 1.96 * se[1] <= 0) &
                                  (mean[1] + 1.96 * se[1] >= 0))
        mean, se = model.treatment_moment(2, 1)
        moment_in_ci[:, 1, 0] += ((mean[0] - 1.96 * se[0] <= -1) &
                                  (mean[0] + 1.96 * se[0] >= -1))
        moment_in_ci[:, 1, 1] += ((mean[1] - 1.96 * se[1] <= 0) &
                                  (mean[1] + 1.96 * se[1] >= 0))
        mean, se = model.treatment_moment(2, 2)
        moment_in_ci[:, 2, 0] += ((mean[0] - 1.96 * se[0] <= -2) &
                                  (mean[0] + 1.96 * se[0] >= -2))
        moment_in_ci[:, 2, 1] += ((mean[1] - 1.96 * se[1] <= 0) &
                                  (mean[1] + 1.96 * se[1] >= 0))

    quantile_coverage = quantile_in_ci / n_trials
    assert_allclose(quantile_coverage,
                    np.ones_like(quantile_coverage) * .95,
                    rtol=5e-2)
    moment_coverage = moment_in_ci / n_trials
    assert_allclose(moment_coverage,
                    np.ones_like(moment_in_ci) * .95,
                    rtol=5e-2)
def test_cic_model_dispersion_effect():
    """
    Test a 3x3 CIC model where treatments are multiplying the distribution
    by some number, which differs by group and time.

    The test is done by simulating and estimating the model many times
    and checking the coverage of the confidence intervals.
    """
    np.random.seed(45354354)
    # np.bool_ instead of the np.bool alias (removed in NumPy 1.24).
    treat = np.array([
        [0, 0, 0],
        [0, 0, 1],
        [0, 1, 1]
    ], dtype=np.bool_)
    n_trials = 250
    n_obs = 2000
    quantiles = np.array([.5])
    moments = [np.mean, np.std]
    quantile_in_ci = np.zeros((3, 3, 1), dtype=np.int_)
    moment_in_ci = np.zeros((3, 3, 2), dtype=np.int_)
    for trial_ind in range(n_trials):
        g, t, y = generate_sample(n_obs)
        # Apply pure scale changes, different per treated cell.
        y[(g == 1) & (t == 2)] *= 2
        y[(g == 2) & (t == 1)] *= 3
        y[(g == 2) & (t == 2)] *= .5
        model = cic.CICModel(y, g, t, treat, quantiles, moments)

        # Q_{aX}(p) = a Q_X(p) for a quantile function Q and a > 0.
        # The median here is 1000, 2 * 1000 = 2000, hence the QTE is 1000
        mean, se = model.treatment_quantile(1, 2)
        quantile_in_ci[:, 0] += ((mean - 1.96 * se <= 1000) &
                                 (mean + 1.96 * se >= 1000))
        # The median here is 0, 3 * 0 = 0, hence the QTE is 0
        mean, se = model.treatment_quantile(2, 1)
        quantile_in_ci[:, 1] += ((mean - 1.96 * se <= 0) &
                                 (mean + 1.96 * se >= 0))
        # The median here is 1000, .5 * 1000 = 500, hence the QTE is -500
        mean, se = model.treatment_quantile(2, 2)
        quantile_in_ci[:, 2] += ((mean - 1.96 * se <= -500) &
                                 (mean + 1.96 * se >= -500))

        mean, se = model.treatment_moment(1, 2)
        # The mean goes from 1000 to 2000 => ATE = 1000
        moment_in_ci[:, 0, 0] += ((mean[0] - 1.96 * se[0] <= 1000) &
                                  (mean[0] + 1.96 * se[0] >= 1000))
        # The standard deviation goes from 1 to 2 => TE = 1
        moment_in_ci[:, 0, 1] += ((mean[1] - 1.96 * se[1] <= 1) &
                                  (mean[1] + 1.96 * se[1] >= 1))
        mean, se = model.treatment_moment(2, 1)
        # The mean goes from 0 to 0 => ATE = 0
        moment_in_ci[:, 1, 0] += ((mean[0] - 1.96 * se[0] <= 0) &
                                  (mean[0] + 1.96 * se[0] >= 0))
        # The standard deviation goes from 1/3 to 1 => TE = 2/3
        moment_in_ci[:, 1, 1] += ((mean[1] - 1.96 * se[1] <= 2 / 3) &
                                  (mean[1] + 1.96 * se[1] >= 2 / 3))
        mean, se = model.treatment_moment(2, 2)
        # The mean goes from 1000 to 500 => ATE = -500
        moment_in_ci[:, 2, 0] += ((mean[0] - 1.96 * se[0] <= -500) &
                                  (mean[0] + 1.96 * se[0] >= -500))
        # The standard deviation goes from 1 to .5 => TE = -.5
        moment_in_ci[:, 2, 1] += ((mean[1] - 1.96 * se[1] <= -.5) &
                                  (mean[1] + 1.96 * se[1] >= -.5))

    quantile_coverage = quantile_in_ci / n_trials
    assert_allclose(quantile_coverage,
                    np.ones_like(quantile_coverage) * .95,
                    rtol=5e-2)
    moment_coverage = moment_in_ci / n_trials
    assert_allclose(moment_coverage,
                    np.ones_like(moment_in_ci) * .95,
                    rtol=5e-2)
def test_test_model_based_on_quantile_valid():
    """The specification test should rarely reject a correctly specified model.

    Simulates data that satisfies the CIC assumptions many times and
    checks that the chi-squared model test rejects at most 5% of the time.
    """
    np.random.seed(3423482)
    # np.bool_ instead of the np.bool alias (removed in NumPy 1.24).
    treat = np.array([
        [0, 0, 0],
        [0, 0, 1],
        [0, 1, 1]
    ], dtype=np.bool_)
    n_trials = 100
    n_obs = 500
    quantiles = np.array([.5])
    reject = 0
    for trial_ind in range(n_trials):
        g, t, y = generate_sample(n_obs)
        model = cic.CICModel(y, g, t, treat, quantiles)
        test_stat, rank_dist = model.test_model_based_on_quantile(0)
        crit_val = scipy.stats.chi2.ppf(.95, rank_dist)
        if test_stat > crit_val:
            reject += 1
    reject_prob = reject / n_trials

    # Just check that the rejection probability is not too large.
    # To get reject_prob~0.05 increase n_obs above, but this slows
    # down the test too much.
    assert reject_prob <= 0.05
def test_combine_effects():
    """combine_effects must weight cell effects by their sample shares.

    Cell (1, 1) has 1000 treated observations with effect +1 and cell
    (1, 2) has 2000 with effect +2, so the combined effect is
    1 * 1/3 + 2 * 2/3.
    """
    np.random.seed(4545543)
    # np.bool_ instead of the np.bool alias (removed in NumPy 1.24).
    treat = np.array([
        [0, 0, 0],
        [0, 1, 1]
    ], dtype=np.bool_)
    g = np.concatenate((np.zeros(3000, dtype=np.int_), np.ones(4000, dtype=np.int_)))
    t = np.concatenate((np.full(1000, 0), np.full(1000, 1), np.full(1000, 2),
                        np.full(1000, 0), np.full(1000, 1), np.full(2000, 2)))
    y = np.random.randn(7000)
    y[(g == 1) & (t == 1)] += 1
    y[(g == 1) & (t == 2)] += 2
    model = cic.CICModel(y, g, t, treat, np.array([.5, .6]), [np.mean], n_draws=2000)
    qte_effect, _, moment_effect, _ = model.combine_effects([(1, 1), (1, 2)])
    true_effect = 1 / 3 + 2 * 2 / 3
    assert_allclose(qte_effect, true_effect, rtol=5e-2)
    assert_allclose(moment_effect, true_effect, rtol=5e-2)
def generate_sample(n_obs):
    """Draw a random sample of ``n_obs`` (group, time, outcome) triples.

    Groups and periods are uniform over {0, 1, 2}.  The outcome depends
    only on the period: a cubed normal in t=0, a scaled normal in t=1
    and a normal shifted by 1000 in t=2.
    """
    grupos = np.random.choice(np.arange(3), n_obs)
    periodos = np.random.choice(np.arange(3), n_obs)
    ruido = np.random.randn(n_obs)
    resultado = np.empty(n_obs)
    transformaciones = (
        (0, lambda u: u**3),
        (1, lambda u: u / 3),
        (2, lambda u: u + 1000),
    )
    for periodo, transforma in transformaciones:
        mascara = periodos == periodo
        resultado[mascara] = transforma(ruido[mascara])
    return grupos, periodos, resultado
| 37.20354 | 85 | 0.530526 | import os
import numpy as np
from numpy.testing import assert_allclose
import pytest
import scipy.io
import scipy.stats
import cic
def cases():
case_dir = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'cases'
)
cases = []
for dir_path, _, files in os.walk(case_dir):
cases = cases + [os.path.join(dir_path, f) for f in files]
return cases
@pytest.mark.parametrize('inpath', cases())
@pytest.mark.parametrize('n_jobs', [None, -1])
def test_cic(inpath, n_jobs):
np.random.seed(323490)
objs = scipy.io.loadmat(inpath)
y00 = objs['y00'][:, 0]
y01 = objs['y01'][:, 0]
y10 = objs['y10'][:, 0]
y11 = objs['y11'][:, 0]
est_qte, se_qte, est_ate, se_ate = cic.calculate_cic(
y00, y01, y10, y11, n_bootstraps=499, n_draws=10000,
moments=[np.mean],
n_jobs=n_jobs,
use_corrections=True
)
est_test = objs['est'][0, 1:10]
se_test = objs['se'][1, 1:10]
assert_allclose(est_qte, est_test)
assert_allclose(se_qte, se_test, atol=5e-2, rtol=1e-3)
assert_allclose(est_ate[0], objs['est'][0, 0], atol=5e-3)
assert_allclose(se_ate[0], objs['se'][1, 0], atol=5e-2, rtol=1e-3)
@pytest.mark.parametrize(
'inpath',
# supported for the simple case.
[c for c in cases() if not ('exp8' in c or 'exp10' in c)])
def test_multiple_cic_from_simple_case(inpath):
np.random.seed(442342234)
# Load the case
objs = scipy.io.loadmat(inpath)
y00 = objs['y00'][:, 0]
y01 = objs['y01'][:, 0]
y10 = objs['y10'][:, 0]
y11 = objs['y11'][:, 0]
y = np.concatenate([y00, y01, y10, y11])
g = np.concatenate([np.zeros(y00.shape[0] + y01.shape[0], dtype=np.int_),
np.ones(y10.shape[0] + y11.shape[0], dtype=np.int_)])
t = np.concatenate([np.zeros(y00.shape[0], dtype=np.int_),
np.ones(y01.shape[0], dtype=np.int_),
np.zeros(y10.shape[0], dtype=np.int_),
np.ones(y11.shape[0], dtype=np.int_)])
treat = np.array([[0, 0], [0, 1]], dtype=np.bool_)
model = cic.CICModel(y, g, t, treat, n_bootstraps=499, moments=[np.mean],
n_draws=10000)
assert np.all(model.treatment_for == np.array([[1, 1]], dtype=np.int_))
est_test = objs['est'][0, 1:10]
se_test = objs['se'][1, 1:10]
assert_allclose(model.quantile_effect[0], est_test)
assert_allclose(model.quantile_se[0], se_test, atol=5e-2, rtol=1e-3)
# Test average treatment effect
# It is possible to get closer than an atol of 5e-3 by increasing n_draws
# above, at the cost of slower tests
assert_allclose(model.moment_effect[0], objs['est'][0, 0], atol=5e-3)
assert_allclose(model.moment_se[0], objs['se'][1, 0], atol=5e-2, rtol=1e-3)
def test_cic_model_no_effect():
np.random.seed(45354354)
treat = np.array([
[0, 0, 0],
[0, 0, 1],
[0, 1, 1]
], dtype=np.bool)
n_trials = 250
n_obs = 1000
quantiles = np.array([0.1, .3, .5, .7, .9])
effect_in_ci = np.zeros((3, quantiles.shape[0]), dtype=np.int_)
for trial_ind in range(n_trials):
g, t, y = generate_sample(n_obs)
model = cic.CICModel(y, g, t, treat, quantiles)
effect_in_ci += (
(model.quantile_effect - 1.96 * model.quantile_se <= 0) &
(model.quantile_effect + 1.96 * model.quantile_se >= 0))
coverage = effect_in_ci / n_trials
assert_allclose(coverage, np.ones_like(coverage) * .95, rtol=5e-2)
def test_cic_model_shift_effect():
np.random.seed(45354354)
treat = np.array([
[0, 0, 0],
[0, 0, 1],
[0, 1, 1]
], dtype=np.bool)
n_trials = 250
n_obs = 1000
quantiles = np.array([.25, .5, .75])
moments = [np.mean, np.std]
quantile_in_ci = np.zeros((3, 3, 3), dtype=np.int_)
moment_in_ci = np.zeros((3, 3, 2), dtype=np.int_)
for trial_ind in range(n_trials):
g, t, y = generate_sample(n_obs)
y[(g == 1) & (t == 2)] += 1
y[(g == 2) & (t == 1)] -= 1
y[(g == 2) & (t == 2)] -= 2
model = cic.CICModel(y, g, t, treat, quantiles, moments)
mean, se = model.treatment_quantile(1, 2)
quantile_in_ci[:, 0] += ((mean - 1.96 * se <= 1) &
(mean + 1.96 * se >= 1))
mean, se = model.treatment_quantile(2, 1)
quantile_in_ci[:, 1] += ((mean - 1.96 * se <= -1) &
(mean + 1.96 * se >= -1))
mean, se = model.treatment_quantile(2, 2)
quantile_in_ci[:, 2] += ((mean - 1.96 * se <= -2) &
(mean + 1.96 * se >= -2))
mean, se = model.treatment_moment(1, 2)
moment_in_ci[:, 0, 0] += ((mean[0] - 1.96 * se[0] <= 1) &
(mean[0] + 1.96 * se[0] >= 1))
moment_in_ci[:, 0, 1] += ((mean[1] - 1.96 * se[1] <= 0) &
(mean[1] + 1.96 * se[1] >= 0))
mean, se = model.treatment_moment(2, 1)
moment_in_ci[:, 1, 0] += ((mean[0] - 1.96 * se[0] <= -1) &
(mean[0] + 1.96 * se[0] >= -1))
moment_in_ci[:, 1, 1] += ((mean[1] - 1.96 * se[1] <= 0) &
(mean[1] + 1.96 * se[1] >= 0))
mean, se = model.treatment_moment(2, 2)
moment_in_ci[:, 2, 0] += ((mean[0] - 1.96 * se[0] <= -2) &
(mean[0] + 1.96 * se[0] >= -2))
moment_in_ci[:, 2, 1] += ((mean[1] - 1.96 * se[1] <= 0) &
(mean[1] + 1.96 * se[1] >= 0))
quantile_coverage = quantile_in_ci / n_trials
assert_allclose(quantile_coverage,
np.ones_like(quantile_coverage) * .95,
rtol=5e-2)
moment_coverage = moment_in_ci / n_trials
assert_allclose(moment_coverage,
np.ones_like(moment_in_ci) * .95,
rtol=5e-2)
def test_cic_model_dispersion_effect():
np.random.seed(45354354)
treat = np.array([
[0, 0, 0],
[0, 0, 1],
[0, 1, 1]
], dtype=np.bool)
n_trials = 250
n_obs = 2000
quantiles = np.array([.5])
moments = [np.mean, np.std]
quantile_in_ci = np.zeros((3, 3, 1), dtype=np.int_)
moment_in_ci = np.zeros((3, 3, 2), dtype=np.int_)
for trial_ind in range(n_trials):
g, t, y = generate_sample(n_obs)
y[(g == 1) & (t == 2)] *= 2
y[(g == 2) & (t == 1)] *= 3
y[(g == 2) & (t == 2)] *= .5
model = cic.CICModel(y, g, t, treat, quantiles, moments)
# Q_{aX}(p) = a Q_X(p) for a quantile function Q and a > 0.
# The median here is 1000, 2 * 1000 = 2000, hence the QTE is 1000
mean, se = model.treatment_quantile(1, 2)
quantile_in_ci[:, 0] += ((mean - 1.96 * se <= 1000) &
(mean + 1.96 * se >= 1000))
# The median here is 0, 3 * 0 = 0, hence the QTE is 0
mean, se = model.treatment_quantile(2, 1)
quantile_in_ci[:, 1] += ((mean - 1.96 * se <= 0) &
(mean + 1.96 * se >= 0))
# The median here is 1000, .5 * 1000 = 500, hence the QTE is -500
mean, se = model.treatment_quantile(2, 2)
quantile_in_ci[:, 2] += ((mean - 1.96 * se <= -500) &
(mean + 1.96 * se >= -500))
mean, se = model.treatment_moment(1, 2)
# The mean goes from 1000 to 2000 => ATE = 1000
moment_in_ci[:, 0, 0] += ((mean[0] - 1.96 * se[0] <= 1000) &
(mean[0] + 1.96 * se[0] >= 1000))
# The standard deviation goes from 1 to 2 => TE = 1
moment_in_ci[:, 0, 1] += ((mean[1] - 1.96 * se[1] <= 1) &
(mean[1] + 1.96 * se[1] >= 1))
mean, se = model.treatment_moment(2, 1)
# The mean goes from 0 to 0 => ATE = 0
moment_in_ci[:, 1, 0] += ((mean[0] - 1.96 * se[0] <= 0) &
(mean[0] + 1.96 * se[0] >= 0))
# The standard deviation goes from 1/3 to 1 => TE = 2/3
moment_in_ci[:, 1, 1] += ((mean[1] - 1.96 * se[1] <= 2 / 3) &
(mean[1] + 1.96 * se[1] >= 2 / 3))
mean, se = model.treatment_moment(2, 2)
# The mean goes from 1000 to 500 => ATE = -500
moment_in_ci[:, 2, 0] += ((mean[0] - 1.96 * se[0] <= -500) &
(mean[0] + 1.96 * se[0] >= -500))
# The standard deviation goes from 1 to .5 => TE = -.5
moment_in_ci[:, 2, 1] += ((mean[1] - 1.96 * se[1] <= -.5) &
(mean[1] + 1.96 * se[1] >= -.5))
quantile_coverage = quantile_in_ci / n_trials
assert_allclose(quantile_coverage,
np.ones_like(quantile_coverage) * .95,
rtol=5e-2)
moment_coverage = moment_in_ci / n_trials
assert_allclose(moment_coverage,
np.ones_like(moment_in_ci) * .95,
rtol=5e-2)
def test_test_model_based_on_quantile_valid():
np.random.seed(3423482)
treat = np.array([
[0, 0, 0],
[0, 0, 1],
[0, 1, 1]
], dtype=np.bool)
n_trials = 100
n_obs = 500
quantiles = np.array([.5])
reject = 0
for trial_ind in range(n_trials):
g, t, y = generate_sample(n_obs)
# y[(g == 1) & (t == 2)] = 2 * y[(g == 1) & (t == 2)] - 3
# y[(g == 2) & (t == 1)] = np.exp(y[(g == 2) & (t == 1)])
# y[(g == 1) & (t == 2)] *= 2
# y[(g == 2) & (t == 1)] -= 3
# y[(g == 2) & (t == 2)] += 1
model = cic.CICModel(y, g, t, treat, quantiles)
test_stat, rank_dist = model.test_model_based_on_quantile(0)
crit_val = scipy.stats.chi2.ppf(.95, rank_dist)
# import pdb; pdb.set_trace()
if test_stat > crit_val:
reject += 1
reject_prob = reject / n_trials
# Just check that the rejection probability is not too large.
# To get reject_prob~0.05 increse n_obs above, but this slows
# down the test too much.
assert reject_prob <= 0.05
def test_combine_effects():
np.random.seed(4545543)
treat = np.array([
[0, 0, 0],
[0, 1, 1]
], dtype=np.bool)
g = np.concatenate((np.zeros(3000, dtype=np.int_), np.ones(4000, dtype=np.int_)))
t = np.concatenate((np.full(1000, 0), np.full(1000, 1), np.full(1000, 2),
np.full(1000, 0), np.full(1000, 1), np.full(2000, 2)))
y = np.random.randn(7000)
y[(g == 1) & (t == 1)] += 1
y[(g == 1) & (t == 2)] += 2
model = cic.CICModel(y, g, t, treat, np.array([.5, .6]), [np.mean], n_draws=2000)
qte_effect, _, moment_effect, _ = model.combine_effects([(1, 1), (1, 2)])
true_effect = 1 / 3 + 2 * 2 / 3
assert_allclose(qte_effect, true_effect, rtol=5e-2)
assert_allclose(moment_effect, true_effect, rtol=5e-2)
def generate_sample(n_obs):
g = np.random.choice(np.arange(3), n_obs)
t = np.random.choice(np.arange(3), n_obs)
u = np.random.randn(n_obs)
y = np.empty(n_obs)
y[t == 0] = u[t == 0]**3
y[t == 1] = u[t == 1] / 3
y[t == 2] = u[t == 2] + 1000
return g, t, y
| true | true |
f731f50ee094446322139452ad53777186ce7f20 | 15,626 | py | Python | Servidor/ServidorMusa.py | TheSgtPepper23/Musa | ed670b9052bbd37b3ed229b7e9d8307b89cb8ac2 | [
"MIT"
] | null | null | null | Servidor/ServidorMusa.py | TheSgtPepper23/Musa | ed670b9052bbd37b3ed229b7e9d8307b89cb8ac2 | [
"MIT"
] | null | null | null | Servidor/ServidorMusa.py | TheSgtPepper23/Musa | ed670b9052bbd37b3ed229b7e9d8307b89cb8ac2 | [
"MIT"
] | null | null | null | from flask import *
from peewee import *
import sys
from playhouse.shortcuts import model_to_dict, dict_to_model
from base64 import b64encode
app = Flask(__name__)
musa_db = MySQLDatabase(
"musa", host="localhost", port=3306, user="euterpe", passwd="An6248322")
class MySQLModel(Model):
    """Base peewee model binding every table to the `musa` MySQL database."""
    class Meta:
        database = musa_db
# Response codes returned by the web services:
# 5   - everything OK
# 51  - melomano (listener) login OK
# 52  - artist login OK
# 7   - generic failure
# 1   - wrong password
# 2   - user does not exist
# 3   - empty list
# 4   - user registered
# 6   - user already exists
# 10  - album registered
# 11  - error registering album
# 12  - artist added
# 13  - error adding artist
# 14  - song added
# 15  - could not add song
# 16  - artist updated
# 17  - error updating artist
# 18  - melomano updated
# 19  - error updating melomano
# 20  - song added to playlist
# 21  - could not add song to playlist
# 300 - passwords do not match
class Melomano(MySQLModel):
    """Listener (melomano) account."""
    idMelomano = PrimaryKeyField()
    nombreMelomano = CharField()  # login username (see /login)
    nombre = CharField()
    apellidos = CharField()
    password = CharField()  # compared as plain text in /login
    fotoPerfil = TextField()  # profile picture, presumably base64 -- TODO confirm
    correoElectronico = CharField()
class Genero(MySQLModel):
    """Music genre catalogue entry."""
    idGenero = PrimaryKeyField()
    genero = CharField()  # genre name
class Artista(MySQLModel):
    """Artist account; logs in with e-mail + password (see /login)."""
    idArtista = PrimaryKeyField()
    nombre = CharField()
    biografia = CharField()
    correoElectronico = CharField()
    password = CharField()  # compared as plain text in /login
    idGenero = ForeignKeyField(Genero, db_column = "idGenero")  # main genre
class Album(MySQLModel):
    """Album released by one artist."""
    idAlbum = PrimaryKeyField()
    nombre = CharField()
    portada = TextField()  # cover image, presumably base64 -- TODO confirm
    fechaLanzamiento = DateField()
    companiaDiscografica = CharField()  # record label
    idArtista = ForeignKeyField(Artista, db_column = "idArtista")
class Cancion(MySQLModel):
    """Song belonging to an album."""
    idCancion = PrimaryKeyField()
    nombre = CharField()
    idAlbum = ForeignKeyField(Album, db_column = "idAlbum")
    idGenero = ForeignKeyField(Genero, db_column = "idGenero")
    # Audio locator: /cancion/actualizarRuta stores a "ruta" (path) here.
    cancion = TextField()
    duracion = IntegerField()  # duration; unit not evident here, presumably seconds
class Playlist(MySQLModel):
    """Playlist owned by a melomano."""
    idPlaylist = PrimaryKeyField()
    nombre = CharField()
    portada = TextField()  # cover image, presumably base64 -- TODO confirm
    idMelomano = ForeignKeyField(Melomano, db_column = "idMelomano")
class CancionesPlaylist(MySQLModel):
    """Many-to-many join table between playlists and songs."""
    idPlaylist = ForeignKeyField(Playlist, db_column = "idPlaylist")
    idCancion = ForeignKeyField(Cancion, db_column = "idCancion")
class Calificacion(MySQLModel):
    """Rating a melomano gives to a song (no route in this file uses it)."""
    idCancion = ForeignKeyField(Cancion, db_column = "idCancion")
    # NOTE(review): field named nombreUsuario but mapped to column
    # idMelomano -- rename for consistency once callers are confirmed.
    nombreUsuario = ForeignKeyField(Melomano, db_column = "idMelomano")
    calificacion = IntegerField()
class CancionPropia(MySQLModel):
    """A melomano's own uploaded song (no route in this file uses it)."""
    idCancionPropia = PrimaryKeyField()
    nombre = CharField()
    cancion = TextField()
    # NOTE(review): field named nombreUsuario but mapped to column
    # idMelomano -- rename for consistency once callers are confirmed.
    nombreUsuario = ForeignKeyField(Melomano, db_column = "idMelomano")
class FotoArtista(MySQLModel):
    """Gallery photo attached to an artist profile."""
    idFoto = PrimaryKeyField()
    foto = TextField()  # image, presumably base64 -- TODO confirm
    idArtista = ForeignKeyField(Artista, db_column = "idArtista")
class Historial(MySQLModel):
    """One listening-history entry: melomano X played song Y."""
    idHistorial = PrimaryKeyField()
    idCancion = ForeignKeyField(Cancion, db_column = "idCancion")
    idMelomano = ForeignKeyField(Melomano, db_column = "idMelomano")
@app.route("/")
def main():
return jsonify("Musa server. versión 1.0")
@app.route("/login", methods=["POST"])
def iniciar_sesion():
mensaje = 2
for melomano in Melomano.select():
if (melomano.nombreMelomano == request.form['username']) & (melomano.password == request.form['password']):
mensaje = 51
for artista in Artista.select():
if (artista.correoElectronico == request.form['username']) & (artista.password == request.form['password']):
mensaje = 52
return jsonify(mensaje)
"""Melómano WS"""
@app.route("/melomano/agregar", methods=["POST"])
def registrar_melomano():
with musa_db.atomic():
try:
melomano = Melomano.create(
nombreMelomano = request.form['nombreMelomano'],
nombre = request.form['nombre'],
apellidos = request.form['apellidos'],
password = request.form['password'],
fotoPerfil = request.form['fotoPerfil'],
correoElectronico = request.form['correoElectronico'])
mensaje = 4
except IntegrityError:
mensaje = 6
return jsonify(mensaje)
@app.route("/melomano/recuperar", methods=["POST"])
def recuperarMelomano():
melomano = Melomano.get(Melomano.nombreMelomano == request.form['nombreMelomano'])
return jsonify(model_to_dict(melomano))
@app.route("/melomano/actualizar", methods=["POST"])
def actualizar_melomano():
try:
melomano = Melomano.select().where(Melomano.idMelomano == request.form["idMelomano"]).get()
melomano.nombre = request.form["nombre"]
melomano.apellidos = request.form["apellidos"]
melomano.password = request.form["password"]
melomano.fotoPerfil = request.form["fotoPerfil"]
melomano.correoElectronico = request.form["correoElectronico"]
melomano.save()
mensaje = 18
except IntegrityError:
mensaje = 19
return jsonify(mensaje)
"""Artista WS"""
@app.route("/artista/agregar", methods=["POST"])
def agregar_artista():
with musa_db.atomic():
try:
artista = Artista.create(
nombre = request.form['nombre'],
biografia = request.form['biografia'],
idGenero = request.form['genero'],
correoElectronico = request.form['correoElectronico'],
password = request.form['password']
)
mensaje = 12
except IntegrityError:
mensaje = 13
return jsonify(mensaje)
@app.route("/artista/actualizar", methods=["POST"])
def actualizar_artista():
try:
artista = Artista.select().where(Artista.idArtista == request.form["idArtista"]).get()
artista.biografia = request.form["biografia"]
artista.save()
mensaje = 16
except IntegrityError:
mensaje = 17
return jsonify(mensaje)
@app.route("/artista/recuperarArtista", methods=["POST"])
def recuperar_artista():
artista = Artista.select().where(Artista.correoElectronico == request.form["nombre"]).get()
resultado = {"idArtista": artista.idArtista, "nombre": artista.nombre, "biografia": artista.biografia,
"correoElectronico": artista.correoElectronico, "password": artista.password,
"idGenero": artista.idGenero.idGenero}
return jsonify(resultado)
@app.route("/artista/subirFoto", methods=["POST"])
def subir_foto_artista():
with musa_db.atomic():
try:
foto = FotoArtista.create(
foto = request.form['foto'],
idArtista = request.form['idArtista']
)
mensaje = 16
except IntegrityError:
mensaje = 17
return jsonify(mensaje)
@app.route("/artista/borrarFotos", methods=["DELETE"])
def eliminar_fotos_artista():
try:
FotoArtista.delete().where(FotoArtista.idArtista == request.form['idArtista']).execute()
mensaje = 16
except IntegrityError:
mensaje = 17
return jsonify(mensaje)
@app.route("/artista/recuperarFotos", methods=["POST"])
def recuperar_fotos_artista():
query = FotoArtista.select(FotoArtista.foto).where(FotoArtista.idArtista == request.form['idArtista'])
lista_foto = []
for foto in query:
lista_foto.append(model_to_dict(foto))
return (jsonify(lista_foto))
"""Canción WS"""
@app.route("/cancion/agregar", methods=["POST"])
def agregar_cancion():
with musa_db.atomic():
try:
cancion = Cancion.create(
nombre = request.form['nombre'],
idAlbum = int (request.form['idAlbum']),
idGenero = int (request.form['idGenero']),
cancion = request.form['cancion'],
duracion = int (request.form['duracion']),
)
mensaje = 14
except IntegrityError:
mensaje = 15
return jsonify(mensaje)
@app.route("/cancion/cancionesArtista", methods=["POST"])
def recuperar_canciones_artista():
query = Cancion.select().join(Album).join(Artista).where(Artista.nombre == request.form["nombreArtista"])
songs = []
for cancion in query:
song = {"idCancion": cancion.idCancion, "nombre": cancion.nombre, "artista": cancion.idAlbum.idArtista.nombre,
"album":cancion.idAlbum.nombre, "duracion": cancion.duracion}
songs.append(song)
return jsonify(songs)
@app.route("/cancion/buscar", methods=["POST"])
def buscar_canciones():
query = Cancion.select().join(Album).join(Artista).where(Artista.nombre.contains(request.form["nombre"]) |
(Cancion.nombre.contains(request.form["nombre"]) |
(Album.nombre.contains(request.form["nombre"]))))
songs = []
for cancion in query:
song = {"idCancion": cancion.idCancion, "nombre": cancion.nombre, "artista": cancion.idAlbum.idArtista.nombre,
"album":cancion.idAlbum.nombre, "duracion": cancion.duracion}
songs.append(song)
return jsonify(songs)
@app.route("/cancion/recuperarTodas", methods=["GET"])
def recuperar_todas_canciones():
query = Cancion.select().join(Album).join(Artista)
songs = []
for cancion in query:
song = {"idCancion": cancion.idCancion, "nombre": cancion.nombre, "artista": cancion.idAlbum.idArtista.nombre,
"album":cancion.idAlbum.nombre, "duracion": cancion.duracion}
songs.append(song)
return jsonify(songs)
@app.route("/cancion/actualizarRuta", methods=["POST"])
def actualizar_ruta():
try:
ultima = Cancion.select().order_by(Cancion.idCancion.desc()).get()
cancion = Cancion.select().where(Cancion.idCancion == ultima.idCancion).get()
cancion.cancion = request.form["ruta"]
cancion.save()
mensaje = 400
except IntegrityError:
mensaje = 401
return jsonify(mensaje)
@app.route("/cancion/nombreUltimaCancion", methods=["GET"])
def nombre_ultima_cancion():
query = Cancion.select().order_by(Cancion.idCancion.desc()).get()
return jsonify(query.nombre)
@app.route("/cancion/cancionAPartirDeID", methods=["POST"])
def cancion_ruta_id():
query = Cancion.select().where(Cancion.idCancion == request.form['idCancion']).get();
return jsonify(query.cancion)
"""Álbum WS"""
@app.route("/album/agregar", methods=["POST"])
def agregar_album():
with musa_db.atomic():
try:
album = Album.create(
nombre = request.form['nombre'],
portada = request.form['portada'],
fechaLanzamiento = request.form['fechaLanzamiento'],
companiaDiscografica = request.form['companiaDiscografica'],
idArtista = int (request.form['idArtista'])
)
mensaje = 10
except IntegrityError:
mensaje = 11
return jsonify(mensaje)
@app.route("/album/recuperarUltimo", methods=["GET"])
def recuperar_ultimo_album():
query = Album.select().join(Artista).order_by(Album.idAlbum.desc()).get()
album = {"idAlbum": query.idAlbum, "nombre": query.nombre, "portada": None, "fechaLanzamiento": None,
"companiaDiscografica": None, "idArtista": query.idArtista.idArtista}
return jsonify(album)
@app.route("/album/deArtista", methods=["POST"])
def recuperar_de_artista():
query = Album.select().where(Album.idArtista == request.form["idArtista"])
albumes = []
for album in query:
alb = {"idAlbum": album.idAlbum, "nombre": album.nombre, "portada": album.portada,
"fechaLanzamiento": album.fechaLanzamiento, "companiaDiscografica": album.companiaDiscografica,
"idArtista": album.idArtista.idArtista}
albumes.append(alb)
return jsonify(albumes)
@app.route("/album/recuperarFoto", methods=["POST"])
def recuperar_foto_album():
query = Album.select().where(Album.nombre == request.form["nombre"]).get()
return jsonify(query.portada)
"""Playlist WS"""
@app.route("/playlist/recuperarMelomano", methods=["POST"])
def recuperar_playlist():
listas = Playlist.select().where(Playlist.idMelomano == request.form["idMelomano"])
playlists = []
for lista in listas:
oneLista = {"idPlaylist": lista.idPlaylist, "nombre": lista.nombre, "portada": lista.portada}
playlists.append(oneLista)
return jsonify(playlists)
@app.route("/playlist/agregaAPlaylist", methods=["POST"])
def agregar_a_playlist():
with musa_db.atomic():
try:
playlist = CancionesPlaylist.create(
idPlaylist = request.form['idPlaylist'],
idCancion = request.form['idCancion']
)
mensaje = 20
except IntegrityError:
mensaje = 21
return jsonify(mensaje)
@app.route("/playlist/recuperarCanciones", methods=["POST"])
def recuperar_de_playlist():
canciones = Cancion.select().join(CancionesPlaylist).join(Playlist).where((Playlist.idMelomano == request.form["idMelomano"])
& (Playlist.idPlaylist == request.form["idPlaylist"]))
songs = []
for cancion in canciones:
song = {"idCancion": cancion.idCancion, "nombre": cancion.nombre, "artista": cancion.idAlbum.idArtista.nombre,
"album":cancion.idAlbum.nombre, "duracion": cancion.duracion}
songs.append(song)
return jsonify(songs)
@app.route("/playlist/agregarPlaylist", methods=["POST"])
def agregar_playlist():
with musa_db.atomic():
try:
playlist = Playlist.create(
nombre = request.form['nombre'],
portada = request.form['portada'],
idMelomano = request.form['idMelomano']
)
mensaje = 900
except IntegrityError:
mensaje = 901
return jsonify(mensaje)
"""Historial WS"""
@app.route("/historial/agregarHistorial", methods=["POST"])
def agregar_historial():
with musa_db.atomic():
try:
historial = Historial.create(
idCancion = int(request.form['idCancion']),
idMelomano = int(request.form['idMelomano'])
)
mensaje = 500
except IntegrityError:
mensaje = 501
return jsonify(mensaje)
@app.route("/historial/consultarMelomano", methods=["POST"])
def consultar_historial():
query = Historial.select().join(Cancion).join(Album).join(Artista).select().where(
Historial.idMelomano == request.form["idMelomano"])
songs = []
for cancion in query:
song = {"idCancion": cancion.idCancion.idCancion, "nombre": cancion.idCancion.nombre, "artista": cancion.idCancion.idAlbum.idArtista.nombre,
"album":cancion.idCancion.idAlbum.nombre, "duracion": cancion.idCancion.duracion}
songs.append(song)
return jsonify(songs)
@app.route("/historial/getUltimoHistorial", methods=["GET"])
def ultimo_historial():
query = Historial.select().join(Cancion).order_by(Historial.idHistorial.desc()).get()
return jsonify(query.idCancion.cancion)
"""Género WS"""
@app.route("/genero/recuperarGeneros", methods=["GET"])
def recuperar_generos():
generos = []
query_generos = Genero.select()
for genero in query_generos:
generos.append(model_to_dict(genero))
return jsonify(generos)
if __name__ == "__main__":
app.run(host = '206.189.124.168', port = '5555', debug = True)
| 33.822511 | 148 | 0.651222 | from flask import *
from peewee import *
import sys
from playhouse.shortcuts import model_to_dict, dict_to_model
from base64 import b64encode
app = Flask(__name__)
musa_db = MySQLDatabase(
"musa", host="localhost", port=3306, user="euterpe", passwd="An6248322")
class MySQLModel(Model):
class Meta:
database = musa_db
class Melomano(MySQLModel):
    """Registered listener account ("melomano" = music lover).

    NOTE(review): password is stored as plain text (CharField) — hash it.
    Field order is preserved: peewee uses declaration order for columns.
    """
    idMelomano = PrimaryKeyField()
    nombreMelomano = CharField()
    nombre = CharField()
    apellidos = CharField()
    password = CharField()
    fotoPerfil = TextField()
    correoElectronico = CharField()
class Genero(MySQLModel):
    """Music genre lookup table."""
    idGenero = PrimaryKeyField()
    genero = CharField()
class Artista(MySQLModel):
    """Artist account with credentials and a primary genre."""
    idArtista = PrimaryKeyField()
    nombre = CharField()
    biografia = CharField()
    correoElectronico = CharField()
    password = CharField()  # NOTE(review): plain-text password
    idGenero = ForeignKeyField(Genero, db_column = "idGenero")
class Album(MySQLModel):
    """Album owned by one artist; `portada` holds the cover image payload."""
    idAlbum = PrimaryKeyField()
    nombre = CharField()
    portada = TextField()
    fechaLanzamiento = DateField()
    companiaDiscografica = CharField()
    idArtista = ForeignKeyField(Artista, db_column = "idArtista")
class Cancion(MySQLModel):
    """Song row; `cancion` stores the audio file path/payload, `duracion` in seconds (presumably — confirm with clients)."""
    idCancion = PrimaryKeyField()
    nombre = CharField()
    idAlbum = ForeignKeyField(Album, db_column = "idAlbum")
    idGenero = ForeignKeyField(Genero, db_column = "idGenero")
    cancion = TextField()
    duracion = IntegerField()
class Playlist(MySQLModel):
    """A melomano-owned playlist (songs linked via CancionesPlaylist)."""
    idPlaylist = PrimaryKeyField()
    nombre = CharField()
    portada = TextField()
    idMelomano = ForeignKeyField(Melomano, db_column = "idMelomano")
class CancionesPlaylist(MySQLModel):
    """Many-to-many join table between Playlist and Cancion."""
    idPlaylist = ForeignKeyField(Playlist, db_column = "idPlaylist")
    idCancion = ForeignKeyField(Cancion, db_column = "idCancion")
class Calificacion(MySQLModel):
    """Song rating given by a melomano.

    NOTE(review): the attribute is named `nombreUsuario` but maps to the
    `idMelomano` column — confusing; consider renaming in a migration.
    """
    idCancion = ForeignKeyField(Cancion, db_column = "idCancion")
    nombreUsuario = ForeignKeyField(Melomano, db_column = "idMelomano")
    calificacion = IntegerField()
class CancionPropia(MySQLModel):
    """Song uploaded personally by a melomano (same naming caveat as Calificacion)."""
    idCancionPropia = PrimaryKeyField()
    nombre = CharField()
    cancion = TextField()
    nombreUsuario = ForeignKeyField(Melomano, db_column = "idMelomano")
class FotoArtista(MySQLModel):
    """Photo attached to an artist profile."""
    idFoto = PrimaryKeyField()
    foto = TextField()
    idArtista = ForeignKeyField(Artista, db_column = "idArtista")
class Historial(MySQLModel):
    """Listening history: one row per play of a song by a melomano."""
    idHistorial = PrimaryKeyField()
    idCancion = ForeignKeyField(Cancion, db_column = "idCancion")
    idMelomano = ForeignKeyField(Melomano, db_column = "idMelomano")
@app.route("/")
def main():
    """Health-check / banner endpoint."""
    saludo = "Musa server. versión 1.0"
    return jsonify(saludo)
@app.route("/login", methods=["POST"])
def iniciar_sesion():
    """Check credentials; returns 51 for a melomano match, 52 for an artista, 2 for none.

    NOTE(review): passwords are compared in plain text over full-table scans;
    hash passwords and filter with a WHERE clause instead. `&` (bitwise and)
    works here only because both operands are parenthesised booleans.
    """
    mensaje = 2
    for melomano in Melomano.select():
        if (melomano.nombreMelomano == request.form['username']) & (melomano.password == request.form['password']):
            mensaje = 51
    for artista in Artista.select():
        if (artista.correoElectronico == request.form['username']) & (artista.password == request.form['password']):
            mensaje = 52
    return jsonify(mensaje)
@app.route("/melomano/agregar", methods=["POST"])
def registrar_melomano():
    """Create a melomano account; returns 4 on success, 6 on duplicate/integrity error."""
    with musa_db.atomic():
        try:
            # NOTE(review): password stored as received — hash before saving.
            melomano = Melomano.create(
                nombreMelomano = request.form['nombreMelomano'],
                nombre = request.form['nombre'],
                apellidos = request.form['apellidos'],
                password = request.form['password'],
                fotoPerfil = request.form['fotoPerfil'],
                correoElectronico = request.form['correoElectronico'])
            mensaje = 4
        except IntegrityError:
            mensaje = 6
    return jsonify(mensaje)
@app.route("/melomano/recuperar", methods=["POST"])
def recuperarMelomano():
    """Look up a melomano by username and return it serialised as a dict."""
    nombre_buscado = request.form['nombreMelomano']
    encontrado = Melomano.get(Melomano.nombreMelomano == nombre_buscado)
    return jsonify(model_to_dict(encontrado))
@app.route("/melomano/actualizar", methods=["POST"])
def actualizar_melomano():
    """Overwrite a melomano's profile fields; returns 18 on success, 19 on integrity error.

    NOTE(review): Melomano.get (DoesNotExist) is not caught here, only
    IntegrityError — an unknown id will raise a 500.
    """
    try:
        melomano = Melomano.select().where(Melomano.idMelomano == request.form["idMelomano"]).get()
        melomano.nombre = request.form["nombre"]
        melomano.apellidos = request.form["apellidos"]
        melomano.password = request.form["password"]
        melomano.fotoPerfil = request.form["fotoPerfil"]
        melomano.correoElectronico = request.form["correoElectronico"]
        melomano.save()
        mensaje = 18
    except IntegrityError:
        mensaje = 19
    return jsonify(mensaje)
@app.route("/artista/agregar", methods=["POST"])
def agregar_artista():
    """Register an artist account; returns 12 on success, 13 on integrity error."""
    with musa_db.atomic():
        try:
            artista = Artista.create(
                nombre = request.form['nombre'],
                biografia = request.form['biografia'],
                # form field is named 'genero' although it carries the genre id
                idGenero = request.form['genero'],
                correoElectronico = request.form['correoElectronico'],
                password = request.form['password']
            )
            mensaje = 12
        except IntegrityError:
            mensaje = 13
    return jsonify(mensaje)
@app.route("/artista/actualizar", methods=["POST"])
def actualizar_artista():
    """Update an artist's biography; returns 16 on success, 17 on integrity error."""
    try:
        artista = Artista.select().where(Artista.idArtista == request.form["idArtista"]).get()
        artista.biografia = request.form["biografia"]
        artista.save()
        mensaje = 16
    except IntegrityError:
        mensaje = 17
    return jsonify(mensaje)
@app.route("/artista/recuperarArtista", methods=["POST"])
def recuperar_artista():
    """Fetch an artist by e-mail address and return its fields.

    NOTE(review): the form field "nombre" actually carries the e-mail, and the
    response leaks the raw password — both worth fixing with the clients.
    """
    artista = Artista.select().where(Artista.correoElectronico == request.form["nombre"]).get()
    resultado = {"idArtista": artista.idArtista, "nombre": artista.nombre, "biografia": artista.biografia,
                 "correoElectronico": artista.correoElectronico, "password": artista.password,
                 "idGenero": artista.idGenero.idGenero}
    return jsonify(resultado)
@app.route("/artista/subirFoto", methods=["POST"])
def subir_foto_artista():
    """Attach one photo to an artist; returns 16 on success, 17 on integrity error."""
    with musa_db.atomic():
        try:
            foto = FotoArtista.create(
                foto = request.form['foto'],
                idArtista = request.form['idArtista']
            )
            mensaje = 16
        except IntegrityError:
            mensaje = 17
    return jsonify(mensaje)
@app.route("/artista/borrarFotos", methods=["DELETE"])
def eliminar_fotos_artista():
    """Delete every photo of an artist; returns 16 on success, 17 on integrity error.

    NOTE(review): reads request.form on a DELETE — confirm clients send a
    form-encoded body with this method.
    """
    try:
        FotoArtista.delete().where(FotoArtista.idArtista == request.form['idArtista']).execute()
        mensaje = 16
    except IntegrityError:
        mensaje = 17
    return jsonify(mensaje)
@app.route("/artista/recuperarFotos", methods=["POST"])
def recuperar_fotos_artista():
    """Return every stored photo for the given artist."""
    consulta = FotoArtista.select(FotoArtista.foto).where(
        FotoArtista.idArtista == request.form['idArtista'])
    return jsonify([model_to_dict(registro) for registro in consulta])
@app.route("/cancion/agregar", methods=["POST"])
def agregar_cancion():
    """Insert a song row (its file path is patched later by actualizar_ruta);
    returns 14 on success, 15 on integrity error."""
    with musa_db.atomic():
        try:
            cancion = Cancion.create(
                nombre = request.form['nombre'],
                idAlbum = int (request.form['idAlbum']),
                idGenero = int (request.form['idGenero']),
                cancion = request.form['cancion'],
                duracion = int (request.form['duracion']),
            )
            mensaje = 14
        except IntegrityError:
            mensaje = 15
    return jsonify(mensaje)
@app.route("/cancion/cancionesArtista", methods=["POST"])
def recuperar_canciones_artista():
    """List every song by the named artist with album name and duration."""
    query = Cancion.select().join(Album).join(Artista).where(Artista.nombre == request.form["nombreArtista"])
    songs = []
    for cancion in query:
        # idAlbum/idArtista traversal may issue extra queries per row (N+1).
        song = {"idCancion": cancion.idCancion, "nombre": cancion.nombre, "artista": cancion.idAlbum.idArtista.nombre,
                "album":cancion.idAlbum.nombre, "duracion": cancion.duracion}
        songs.append(song)
    return jsonify(songs)
@app.route("/cancion/buscar", methods=["POST"])
def buscar_canciones():
    """Substring search of the form's "nombre" over artist, song and album names."""
    query = Cancion.select().join(Album).join(Artista).where(Artista.nombre.contains(request.form["nombre"]) |
                                                            (Cancion.nombre.contains(request.form["nombre"]) |
                                                            (Album.nombre.contains(request.form["nombre"]))))
    songs = []
    for cancion in query:
        song = {"idCancion": cancion.idCancion, "nombre": cancion.nombre, "artista": cancion.idAlbum.idArtista.nombre,
                "album":cancion.idAlbum.nombre, "duracion": cancion.duracion}
        songs.append(song)
    return jsonify(songs)
@app.route("/cancion/recuperarTodas", methods=["GET"])
def recuperar_todas_canciones():
    """Return the whole song catalogue (no pagination — fine only for small tables)."""
    query = Cancion.select().join(Album).join(Artista)
    songs = []
    for cancion in query:
        song = {"idCancion": cancion.idCancion, "nombre": cancion.nombre, "artista": cancion.idAlbum.idArtista.nombre,
                "album":cancion.idAlbum.nombre, "duracion": cancion.duracion}
        songs.append(song)
    return jsonify(songs)
@app.route("/cancion/actualizarRuta", methods=["POST"])
def actualizar_ruta():
    """Point the most recently inserted song at its uploaded file path.

    Returns 400 on success, 401 on integrity error (status codes kept as-is
    for existing clients even though they collide with HTTP numbers).

    Fix: the original fetched the newest row and then re-fetched the very same
    row by primary key before saving — the second query was redundant.
    """
    try:
        ultima = Cancion.select().order_by(Cancion.idCancion.desc()).get()
        ultima.cancion = request.form["ruta"]
        ultima.save()
        mensaje = 400
    except IntegrityError:
        mensaje = 401
    return jsonify(mensaje)
@app.route("/cancion/nombreUltimaCancion", methods=["GET"])
def nombre_ultima_cancion():
    """Return the name of the most recently inserted song."""
    mas_reciente = Cancion.select().order_by(Cancion.idCancion.desc()).get()
    return jsonify(mas_reciente.nombre)
@app.route("/cancion/cancionAPartirDeID", methods=["POST"])
def cancion_ruta_id():
    """Return the stored file path for the song with the given id."""
    buscada = Cancion.select().where(
        Cancion.idCancion == request.form['idCancion']).get()
    return jsonify(buscada.cancion)
@app.route("/album/agregar", methods=["POST"])
def agregar_album():
    """Create an album; returns 10 on success, 11 on integrity error."""
    with musa_db.atomic():
        try:
            album = Album.create(
                nombre = request.form['nombre'],
                portada = request.form['portada'],
                fechaLanzamiento = request.form['fechaLanzamiento'],
                companiaDiscografica = request.form['companiaDiscografica'],
                idArtista = int (request.form['idArtista'])
            )
            mensaje = 10
        except IntegrityError:
            mensaje = 11
    return jsonify(mensaje)
@app.route("/album/recuperarUltimo", methods=["GET"])
def recuperar_ultimo_album():
    """Return the newest album; heavy fields are deliberately nulled out
    (clients only need the id/name right after creating an album)."""
    query = Album.select().join(Artista).order_by(Album.idAlbum.desc()).get()
    album = {"idAlbum": query.idAlbum, "nombre": query.nombre, "portada": None, "fechaLanzamiento": None,
             "companiaDiscografica": None, "idArtista": query.idArtista.idArtista}
    return jsonify(album)
@app.route("/album/deArtista", methods=["POST"])
def recuperar_de_artista():
    """List every album of the given artist."""
    query = Album.select().where(Album.idArtista == request.form["idArtista"])
    albumes = []
    for album in query:
        alb = {"idAlbum": album.idAlbum, "nombre": album.nombre, "portada": album.portada,
               "fechaLanzamiento": album.fechaLanzamiento, "companiaDiscografica": album.companiaDiscografica,
               "idArtista": album.idArtista.idArtista}
        albumes.append(alb)
    return jsonify(albumes)
@app.route("/album/recuperarFoto", methods=["POST"])
def recuperar_foto_album():
    """Return the cover image of the album with the given name."""
    album = Album.select().where(Album.nombre == request.form["nombre"]).get()
    return jsonify(album.portada)
@app.route("/playlist/recuperarMelomano", methods=["POST"])
def recuperar_playlist():
    """List the playlists belonging to one melomano."""
    propias = Playlist.select().where(
        Playlist.idMelomano == request.form["idMelomano"])
    resumen = [{"idPlaylist": p.idPlaylist, "nombre": p.nombre, "portada": p.portada}
               for p in propias]
    return jsonify(resumen)
@app.route("/playlist/agregaAPlaylist", methods=["POST"])
def agregar_a_playlist():
    """Link an existing song to a playlist; returns 20 on success, 21 on integrity error."""
    with musa_db.atomic():
        try:
            playlist = CancionesPlaylist.create(
                idPlaylist = request.form['idPlaylist'],
                idCancion = request.form['idCancion']
            )
            mensaje = 20
        except IntegrityError:
            mensaje = 21
    return jsonify(mensaje)
@app.route("/playlist/recuperarCanciones", methods=["POST"])
def recuperar_de_playlist():
    """List the songs of one playlist, scoped to its owner (idMelomano)."""
    canciones = Cancion.select().join(CancionesPlaylist).join(Playlist).where((Playlist.idMelomano == request.form["idMelomano"])
                                                                             & (Playlist.idPlaylist == request.form["idPlaylist"]))
    songs = []
    for cancion in canciones:
        song = {"idCancion": cancion.idCancion, "nombre": cancion.nombre, "artista": cancion.idAlbum.idArtista.nombre,
                "album":cancion.idAlbum.nombre, "duracion": cancion.duracion}
        songs.append(song)
    return jsonify(songs)
@app.route("/playlist/agregarPlaylist", methods=["POST"])
def agregar_playlist():
    """Create a playlist from POST form data; returns 900 on success, 901 on integrity error."""
    with musa_db.atomic():
        try:
            playlist = Playlist.create(
                nombre = request.form['nombre'],
                portada = request.form['portada'],
                idMelomano = request.form['idMelomano']
            )
            mensaje = 900
        except IntegrityError:
            mensaje = 901
    return jsonify(mensaje)
@app.route("/historial/agregarHistorial", methods=["POST"])
def agregar_historial():
    """Record one listening event; returns 500 on success, 501 on integrity error."""
    with musa_db.atomic():
        try:
            historial = Historial.create(
                idCancion = int(request.form['idCancion']),
                idMelomano = int(request.form['idMelomano'])
            )
            mensaje = 500
        except IntegrityError:
            mensaje = 501
    return jsonify(mensaje)
@app.route("/historial/consultarMelomano", methods=["POST"])
def consultar_historial():
    """Return the listening history of one melomano as a list of song dicts.

    Fix: drops the duplicated ``.select()`` in the query chain; a single
    ``select`` with the same joins and filter yields identical rows.
    """
    query = (Historial
             .select()
             .join(Cancion)
             .join(Album)
             .join(Artista)
             .where(Historial.idMelomano == request.form["idMelomano"]))
    songs = []
    for registro in query:
        cancion = registro.idCancion
        songs.append({
            "idCancion": cancion.idCancion,
            "nombre": cancion.nombre,
            "artista": cancion.idAlbum.idArtista.nombre,
            "album": cancion.idAlbum.nombre,
            "duracion": cancion.duracion,
        })
    return jsonify(songs)
@app.route("/historial/getUltimoHistorial", methods=["GET"])
def ultimo_historial():
    """Return the audio path of the most recently recorded history entry."""
    mas_reciente = (Historial.select()
                    .join(Cancion)
                    .order_by(Historial.idHistorial.desc())
                    .get())
    return jsonify(mas_reciente.idCancion.cancion)
@app.route("/genero/recuperarGeneros", methods=["GET"])
def recuperar_generos():
    """Return every genre serialised as a list of dicts."""
    return jsonify([model_to_dict(genero) for genero in Genero.select()])
if __name__ == "__main__":
    # Development entry point. NOTE(review): debug=True exposes the Werkzeug
    # debugger on a public IP — disable it outside local development.
    app.run(host = '206.189.124.168', port = '5555', debug = True)
| true | true |
f731f62c057be653547c98aaf27b9d20dd30444f | 8,308 | py | Python | alipay/aop/api/domain/HealthServiceFamilyDoctorDrugDTO.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/HealthServiceFamilyDoctorDrugDTO.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/HealthServiceFamilyDoctorDrugDTO.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class HealthServiceFamilyDoctorDrugDTO(object):
    """Value object for a family-doctor drug item in the Alipay health-service API.

    Keeps the generated-SDK interface: one property pair per field,
    ``to_alipay_dict`` serialises only truthy fields (unset/falsy values are
    omitted, matching the original generated code), and ``from_alipay_dict``
    builds an instance from a response dict (returning ``None`` for a falsy
    input). The repetitive per-field bodies of ``__init__``,
    ``to_alipay_dict`` and ``from_alipay_dict`` are replaced by loops over a
    single field-name table so the three can no longer drift apart.
    """

    # Serialised field names, in the original declaration order; drives
    # __init__, to_alipay_dict and from_alipay_dict.
    _FIELD_NAMES = (
        'catalogue_listed',
        'dosage_forms',
        'drug_classification',
        'general_name',
        'inventory',
        'item_id',
        'item_name',
        'manufacturer_name',
        'max_purchase_quantity',
        'min_purchase_quantity',
        'price',
        'specifications',
        'support_emergency_delivery',
        'usage_dosage',
    )

    def __init__(self):
        # Every field starts unset (None) and is therefore omitted from
        # to_alipay_dict() until assigned.
        for name in self._FIELD_NAMES:
            setattr(self, '_' + name, None)

    @property
    def catalogue_listed(self):
        return self._catalogue_listed

    @catalogue_listed.setter
    def catalogue_listed(self, value):
        self._catalogue_listed = value
    @property
    def dosage_forms(self):
        return self._dosage_forms

    @dosage_forms.setter
    def dosage_forms(self, value):
        self._dosage_forms = value
    @property
    def drug_classification(self):
        return self._drug_classification

    @drug_classification.setter
    def drug_classification(self, value):
        self._drug_classification = value
    @property
    def general_name(self):
        return self._general_name

    @general_name.setter
    def general_name(self, value):
        self._general_name = value
    @property
    def inventory(self):
        return self._inventory

    @inventory.setter
    def inventory(self, value):
        self._inventory = value
    @property
    def item_id(self):
        return self._item_id

    @item_id.setter
    def item_id(self, value):
        self._item_id = value
    @property
    def item_name(self):
        return self._item_name

    @item_name.setter
    def item_name(self, value):
        self._item_name = value
    @property
    def manufacturer_name(self):
        return self._manufacturer_name

    @manufacturer_name.setter
    def manufacturer_name(self, value):
        self._manufacturer_name = value
    @property
    def max_purchase_quantity(self):
        return self._max_purchase_quantity

    @max_purchase_quantity.setter
    def max_purchase_quantity(self, value):
        self._max_purchase_quantity = value
    @property
    def min_purchase_quantity(self):
        return self._min_purchase_quantity

    @min_purchase_quantity.setter
    def min_purchase_quantity(self, value):
        self._min_purchase_quantity = value
    @property
    def price(self):
        return self._price

    @price.setter
    def price(self, value):
        self._price = value
    @property
    def specifications(self):
        return self._specifications

    @specifications.setter
    def specifications(self, value):
        self._specifications = value
    @property
    def support_emergency_delivery(self):
        return self._support_emergency_delivery

    @support_emergency_delivery.setter
    def support_emergency_delivery(self, value):
        self._support_emergency_delivery = value
    @property
    def usage_dosage(self):
        return self._usage_dosage

    @usage_dosage.setter
    def usage_dosage(self, value):
        self._usage_dosage = value

    def to_alipay_dict(self):
        """Serialise truthy fields into a plain dict.

        Values that themselves implement ``to_alipay_dict`` are serialised
        recursively. Falsy values (None, 0, '', False) are skipped — this
        preserves the original generated-code semantics.
        """
        params = dict()
        for name in self._FIELD_NAMES:
            value = getattr(self, name)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a DTO from a response dict; unknown keys are ignored and a
        falsy input yields ``None`` (unchanged behavior)."""
        if not d:
            return None
        o = HealthServiceFamilyDoctorDrugDTO()
        for name in HealthServiceFamilyDoctorDrugDTO._FIELD_NAMES:
            if name in d:
                setattr(o, name, d[name])
        return o
| 35.20339 | 103 | 0.630958 |
import json
from alipay.aop.api.constant.ParamConstants import *
class HealthServiceFamilyDoctorDrugDTO(object):
    """Value object for a family-doctor drug item in the Alipay health-service API.

    Keeps the generated-SDK interface: one property pair per field,
    ``to_alipay_dict`` serialises only truthy fields (unset/falsy values are
    omitted, matching the original generated code), and ``from_alipay_dict``
    builds an instance from a response dict (returning ``None`` for a falsy
    input). The repetitive per-field bodies of ``__init__``,
    ``to_alipay_dict`` and ``from_alipay_dict`` are replaced by loops over a
    single field-name table so the three can no longer drift apart.
    """

    # Serialised field names, in the original declaration order; drives
    # __init__, to_alipay_dict and from_alipay_dict.
    _FIELD_NAMES = (
        'catalogue_listed',
        'dosage_forms',
        'drug_classification',
        'general_name',
        'inventory',
        'item_id',
        'item_name',
        'manufacturer_name',
        'max_purchase_quantity',
        'min_purchase_quantity',
        'price',
        'specifications',
        'support_emergency_delivery',
        'usage_dosage',
    )

    def __init__(self):
        # Every field starts unset (None) and is therefore omitted from
        # to_alipay_dict() until assigned.
        for name in self._FIELD_NAMES:
            setattr(self, '_' + name, None)

    @property
    def catalogue_listed(self):
        return self._catalogue_listed

    @catalogue_listed.setter
    def catalogue_listed(self, value):
        self._catalogue_listed = value
    @property
    def dosage_forms(self):
        return self._dosage_forms

    @dosage_forms.setter
    def dosage_forms(self, value):
        self._dosage_forms = value
    @property
    def drug_classification(self):
        return self._drug_classification

    @drug_classification.setter
    def drug_classification(self, value):
        self._drug_classification = value
    @property
    def general_name(self):
        return self._general_name

    @general_name.setter
    def general_name(self, value):
        self._general_name = value
    @property
    def inventory(self):
        return self._inventory

    @inventory.setter
    def inventory(self, value):
        self._inventory = value
    @property
    def item_id(self):
        return self._item_id

    @item_id.setter
    def item_id(self, value):
        self._item_id = value
    @property
    def item_name(self):
        return self._item_name

    @item_name.setter
    def item_name(self, value):
        self._item_name = value
    @property
    def manufacturer_name(self):
        return self._manufacturer_name

    @manufacturer_name.setter
    def manufacturer_name(self, value):
        self._manufacturer_name = value
    @property
    def max_purchase_quantity(self):
        return self._max_purchase_quantity

    @max_purchase_quantity.setter
    def max_purchase_quantity(self, value):
        self._max_purchase_quantity = value
    @property
    def min_purchase_quantity(self):
        return self._min_purchase_quantity

    @min_purchase_quantity.setter
    def min_purchase_quantity(self, value):
        self._min_purchase_quantity = value
    @property
    def price(self):
        return self._price

    @price.setter
    def price(self, value):
        self._price = value
    @property
    def specifications(self):
        return self._specifications

    @specifications.setter
    def specifications(self, value):
        self._specifications = value
    @property
    def support_emergency_delivery(self):
        return self._support_emergency_delivery

    @support_emergency_delivery.setter
    def support_emergency_delivery(self, value):
        self._support_emergency_delivery = value
    @property
    def usage_dosage(self):
        return self._usage_dosage

    @usage_dosage.setter
    def usage_dosage(self, value):
        self._usage_dosage = value

    def to_alipay_dict(self):
        """Serialise truthy fields into a plain dict.

        Values that themselves implement ``to_alipay_dict`` are serialised
        recursively. Falsy values (None, 0, '', False) are skipped — this
        preserves the original generated-code semantics.
        """
        params = dict()
        for name in self._FIELD_NAMES:
            value = getattr(self, name)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a DTO from a response dict; unknown keys are ignored and a
        falsy input yields ``None`` (unchanged behavior)."""
        if not d:
            return None
        o = HealthServiceFamilyDoctorDrugDTO()
        for name in HealthServiceFamilyDoctorDrugDTO._FIELD_NAMES:
            if name in d:
                setattr(o, name, d[name])
        return o
| true | true |
f731f6480135c9b24ffbeee3ad28a14744c53924 | 35,263 | py | Python | shap/explainers/_deep/deep_tf.py | hannesdm/shap | ae96bef7879f47978c8a436ebf19c2f2747cd887 | [
"MIT"
] | null | null | null | shap/explainers/_deep/deep_tf.py | hannesdm/shap | ae96bef7879f47978c8a436ebf19c2f2747cd887 | [
"MIT"
] | null | null | null | shap/explainers/_deep/deep_tf.py | hannesdm/shap | ae96bef7879f47978c8a436ebf19c2f2747cd887 | [
"MIT"
] | null | null | null | import numpy as np
import warnings
from .._explainer import Explainer
from packaging import version
from ..tf_utils import _get_session, _get_graph, _get_model_inputs, _get_model_output
# Lazily-initialised module globals: TensorFlow, Keras and TF's private eager
# submodules are imported only when a TFDeep explainer is first constructed
# (see TFDeep.__init__), keeping "import shap" cheap for users who never use
# the TensorFlow backend.
keras = None
tf = None
tf_ops = None
tf_backprop = None
tf_execute = None
tf_gradients_impl = None
def custom_record_gradient(op_name, inputs, attrs, results):
    """ This overrides tensorflow.python.eager.backprop._record_gradient.

    We need to override _record_gradient in order to get gradient backprop to
    get called for ResourceGather operations. In order to make this work we
    temporarily "lie" about the input type to prevent the node from getting
    pruned from the gradient backprop process. We then reset the type directly
    afterwards back to what it was (an integer type).

    The op is recorded under the name "shap_<op_name>" so that the gradient
    registry dispatches to SHAP's custom gradient handlers.
    """
    reset_input = False
    if op_name == "ResourceGather" and inputs[1].dtype == tf.int32:
        # Disguise the int32 index tensor as float32 by poking the private
        # _dtype slot directly (no public API for this); restored below.
        inputs[1].__dict__["_dtype"] = tf.float32
        reset_input = True
    try:
        out = tf_backprop._record_gradient("shap_"+op_name, inputs, attrs, results) # old TF2.0 versions
    except AttributeError:
        # Newer TF releases renamed the private helper to record_gradient.
        out = tf_backprop.record_gradient("shap_"+op_name, inputs, attrs, results)
    if reset_input:
        inputs[1].__dict__["_dtype"] = tf.int32
    return out
class TFDeep(Explainer):
"""
Using tf.gradients to implement the backgropagation was
inspired by the gradient based implementation approach proposed by Ancona et al, ICLR 2018. Note
that this package does not currently use the reveal-cancel rule for ReLu units proposed in DeepLIFT.
"""
def __init__(self, model, data, session=None, learning_phase_flags=None):
""" An explainer object for a deep model using a given background dataset.
Note that the complexity of the method scales linearly with the number of background data
samples. Passing the entire training dataset as `data` will give very accurate expected
values, but be unreasonably expensive. The variance of the expectation estimates scale by
roughly 1/sqrt(N) for N background data samples. So 100 samples will give a good estimate,
and 1000 samples a very good estimate of the expected values.
Parameters
----------
model : tf.keras.Model or (input : [tf.Operation], output : tf.Operation)
A keras model object or a pair of TensorFlow operations (or a list and an op) that
specifies the input and output of the model to be explained. Note that SHAP values
are specific to a single output value, so you get an explanation for each element of
the output tensor (which must be a flat rank one vector).
data : [numpy.array] or [pandas.DataFrame] or function
The background dataset to use for integrating out features. DeepExplainer integrates
over all these samples for each explanation. The data passed here must match the input
operations given to the model. If a function is supplied, it must be a function that
takes a particular input example and generates the background dataset for that example
session : None or tensorflow.Session
The TensorFlow session that has the model we are explaining. If None is passed then
we do our best to find the right session, first looking for a keras session, then
falling back to the default TensorFlow session.
learning_phase_flags : None or list of tensors
If you have your own custom learning phase flags pass them here. When explaining a prediction
we need to ensure we are not in training mode, since this changes the behavior of ops like
batch norm or dropout. If None is passed then we look for tensors in the graph that look like
learning phase flags (this works for Keras models). Note that we assume all the flags should
have a value of False during predictions (and hence explanations).
"""
# try and import keras and tensorflow
global tf, tf_ops, tf_backprop, tf_execute, tf_gradients_impl
if tf is None:
from tensorflow.python.framework import ops as tf_ops # pylint: disable=E0611
from tensorflow.python.ops import gradients_impl as tf_gradients_impl # pylint: disable=E0611
from tensorflow.python.eager import backprop as tf_backprop
from tensorflow.python.eager import execute as tf_execute
if not hasattr(tf_gradients_impl, "_IsBackpropagatable"):
from tensorflow.python.ops import gradients_util as tf_gradients_impl
import tensorflow as tf
if version.parse(tf.__version__) < version.parse("1.4.0"):
warnings.warn("Your TensorFlow version is older than 1.4.0 and not supported.")
global keras
if keras is None:
try:
import keras
warnings.warn("keras is no longer supported, please use tf.keras instead.")
except:
pass
if version.parse(tf.__version__) >= version.parse("2.4.0"):
warnings.warn("Your TensorFlow version is newer than 2.4.0 and so graph support has been removed in eager mode and some static graphs may not be supported. See PR #1483 for discussion.")
# determine the model inputs and outputs
self.model_inputs = _get_model_inputs(model)
self.model_output = _get_model_output(model)
assert type(self.model_output) != list, "The model output to be explained must be a single tensor!"
assert len(self.model_output.shape) < 3, "The model output must be a vector or a single value!"
self.multi_output = True
if len(self.model_output.shape) == 1:
self.multi_output = False
if tf.executing_eagerly():
if type(model) is tuple or type(model) is list:
assert len(model) == 2, "When a tuple is passed it must be of the form (inputs, outputs)"
from tensorflow.keras import Model
self.model = Model(model[0], model[1])
else:
self.model = model
# check if we have multiple inputs
self.multi_input = True
if type(self.model_inputs) != list or len(self.model_inputs) == 1:
self.multi_input = False
if type(self.model_inputs) != list:
self.model_inputs = [self.model_inputs]
if type(data) != list and (hasattr(data, '__call__')==False):
data = [data]
self.data = data
self._vinputs = {} # used to track what op inputs depends on the model inputs
self.orig_grads = {}
if not tf.executing_eagerly():
self.session = _get_session(session)
self.graph = _get_graph(self)
# if no learning phase flags were given we go looking for them
# ...this will catch the one that keras uses
# we need to find them since we want to make sure learning phase flags are set to False
if learning_phase_flags is None:
self.learning_phase_ops = []
for op in self.graph.get_operations():
if 'learning_phase' in op.name and op.type == "Const" and len(op.outputs[0].shape) == 0:
if op.outputs[0].dtype == tf.bool:
self.learning_phase_ops.append(op)
self.learning_phase_flags = [op.outputs[0] for op in self.learning_phase_ops]
else:
self.learning_phase_ops = [t.op for t in learning_phase_flags]
# save the expected output of the model
# if self.data is a function, set self.expected_value to None
if (hasattr(self.data, '__call__')):
self.expected_value = None
else:
if self.data[0].shape[0] > 5000:
warnings.warn("You have provided over 5k background samples! For better performance consider using smaller random sample.")
if not tf.executing_eagerly():
self.expected_value = self.run(self.model_output, self.model_inputs, self.data).mean(0)
else:
if type(self.model)is tuple:
sel.fModel(cnn.inputs, cnn.get_layer(theNameYouWant).outputs)
self.expected_value = tf.reduce_mean(self.model(self.data), 0)
if not tf.executing_eagerly():
self._init_between_tensors(self.model_output.op, self.model_inputs)
# make a blank array that will get lazily filled in with the SHAP value computation
# graphs for each output. Lazy is important since if there are 1000 outputs and we
# only explain the top 5 it would be a waste to build graphs for the other 995
if not self.multi_output:
self.phi_symbolics = [None]
else:
noutputs = self.model_output.shape.as_list()[1]
if noutputs is not None:
self.phi_symbolics = [None for i in range(noutputs)]
else:
raise Exception("The model output tensor to be explained cannot have a static shape in dim 1 of None!")
def _get_model_output(self, model):
if len(model.layers[-1]._inbound_nodes) == 0:
if len(model.outputs) > 1:
warnings.warn("Only one model output supported.")
return model.outputs[0]
else:
return model.layers[-1].output
def _init_between_tensors(self, out_op, model_inputs):
# Record which ops/tensors lie on a path from the model inputs to out_op, so the
# custom gradient handlers know which inputs actually vary.
# find all the operations in the graph between our inputs and outputs
tensor_blacklist = tensors_blocked_by_false(self.learning_phase_ops) # don't follow learning phase branches
dependence_breakers = [k for k in op_handlers if op_handlers[k] == break_dependence]
back_ops = backward_walk_ops(
[out_op], tensor_blacklist,
dependence_breakers
)
# ops directly consuming a model input seed the forward walk
start_ops = []
for minput in model_inputs:
for op in minput.consumers():
start_ops.append(op)
# intersection of "reachable forward from inputs" and "reachable backward from output"
self.between_ops = forward_walk_ops(
start_ops,
tensor_blacklist, dependence_breakers,
within_ops=back_ops
)
# note all the tensors that are on the path between the inputs and the output
self.between_tensors = {}
for op in self.between_ops:
for t in op.outputs:
self.between_tensors[t.name] = True
for t in model_inputs:
self.between_tensors[t.name] = True
# save what types are being used
self.used_types = {}
for op in self.between_ops:
self.used_types[op.type] = True
def _variable_inputs(self, op):
""" Return which inputs of this operation are variable (i.e. depend on the model inputs).
"""
if op not in self._vinputs:
out = np.zeros(len(op.inputs), dtype=np.bool)
for i,t in enumerate(op.inputs):
out[i] = t.name in self.between_tensors
self._vinputs[op] = out
return self._vinputs[op]
def phi_symbolic(self, i):
""" Get the SHAP value computation graph for a given model output.
"""
# lazily build and cache one attribution graph per explained output index
if self.phi_symbolics[i] is None:
if not tf.executing_eagerly():
# graph mode: gradients of output i w.r.t. all model inputs, taken
# while our custom gradient overrides are installed
def anon():
out = self.model_output[:,i] if self.multi_output else self.model_output
return tf.gradients(out, self.model_inputs)
self.phi_symbolics[i] = self.execute_with_overridden_gradients(anon)
else:
# eager mode: a tf.function tracing a GradientTape over the model call
@tf.function
def grad_graph(shap_rAnD):
phase = tf.keras.backend.learning_phase()
tf.keras.backend.set_learning_phase(0)
with tf.GradientTape(watch_accessed_variables=False) as tape:
tape.watch(shap_rAnD)
out = self.model(shap_rAnD)
if self.multi_output:
out = out[:,i]
self._init_between_tensors(out.op, shap_rAnD)
x_grad = tape.gradient(out, shap_rAnD)
tf.keras.backend.set_learning_phase(phase)
return x_grad
self.phi_symbolics[i] = grad_graph
return self.phi_symbolics[i]
def shap_values(self, X, ranked_outputs=None, output_rank_order="max", check_additivity=True):
# Estimate SHAP values for the samples X: for each sample/output pair we pair the
# sample with every background sample, run the overridden-gradient graph, and
# average grad * (x - background). Returns arrays shaped like X (a list of such
# arrays per output when multi_output, plus the ranks when ranked_outputs is set).
# check if we have multiple inputs
if not self.multi_input:
if type(X) == list and len(X) != 1:
assert False, "Expected a single tensor as model input!"
elif type(X) != list:
X = [X]
else:
assert type(X) == list, "Expected a list of model inputs!"
assert len(self.model_inputs) == len(X), "Number of model inputs (%d) does not match the number given (%d)!" % (len(self.model_inputs), len(X))
# rank and determine the model outputs that we will explain
if ranked_outputs is not None and self.multi_output:
if not tf.executing_eagerly():
model_output_values = self.run(self.model_output, self.model_inputs, X)
else:
model_output_values = self.model(X)
if output_rank_order == "max":
model_output_ranks = np.argsort(-model_output_values)
elif output_rank_order == "min":
model_output_ranks = np.argsort(model_output_values)
elif output_rank_order == "max_abs":
model_output_ranks = np.argsort(np.abs(model_output_values))
else:
assert False, "output_rank_order must be max, min, or max_abs!"
model_output_ranks = model_output_ranks[:,:ranked_outputs]
else:
# explain every output, in index order, for every sample
model_output_ranks = np.tile(np.arange(len(self.phi_symbolics)), (X[0].shape[0], 1))
# compute the attributions
output_phis = []
for i in range(model_output_ranks.shape[1]):
phis = []
for k in range(len(X)):
phis.append(np.zeros(X[k].shape))
for j in range(X[0].shape[0]):
# background may be a fixed list or a function of the current sample
if (hasattr(self.data, '__call__')):
bg_data = self.data([X[l][j] for l in range(len(X))])
if type(bg_data) != list:
bg_data = [bg_data]
else:
bg_data = self.data
# tile the inputs to line up with the background data samples
tiled_X = [np.tile(X[l][j:j+1], (bg_data[l].shape[0],) + tuple([1 for k in range(len(X[l].shape)-1)])) for l in range(len(X))]
# we use the first sample for the current sample and the rest for the references
joint_input = [np.concatenate([tiled_X[l], bg_data[l]], 0) for l in range(len(X))]
# run attribution computation graph
feature_ind = model_output_ranks[j,i]
sample_phis = self.run(self.phi_symbolic(feature_ind), self.model_inputs, joint_input)
# assign the attributions to the right part of the output arrays
for l in range(len(X)):
phis[l][j] = (sample_phis[l][bg_data[l].shape[0]:] * (X[l][j] - bg_data[l])).mean(0)
output_phis.append(phis[0] if not self.multi_input else phis)
# check that the SHAP values sum up to the model output
if check_additivity:
if not tf.executing_eagerly():
model_output = self.run(self.model_output, self.model_inputs, X)
else:
model_output = self.model(X)
for l in range(len(self.expected_value)):
if not self.multi_input:
diffs = model_output[:, l] - self.expected_value[l] - output_phis[l].sum(axis=tuple(range(1, output_phis[l].ndim)))
else:
diffs = model_output[:, l] - self.expected_value[l]
for i in range(len(output_phis[l])):
diffs -= output_phis[l][i].sum(axis=tuple(range(1, output_phis[l][i].ndim)))
assert np.abs(diffs).max() < 1e-2, "The SHAP explanations do not sum up to the model's output! This is either because of a " \
"rounding error or because an operator in your computation graph was not fully supported. If " \
"the sum difference of %f is significant compared the scale of your model outputs please post " \
"as a github issue, with a reproducable example if possible so we can debug it." % np.abs(diffs).max()
if not self.multi_output:
return output_phis[0]
elif ranked_outputs is not None:
return output_phis, model_output_ranks
else:
return output_phis
def run(self, out, model_inputs, X):
""" Runs the model while also setting the learning phase flags to False.
"""
if not tf.executing_eagerly():
# graph mode: feed the data and force every learning-phase flag to False
feed_dict = dict(zip(model_inputs, X))
for t in self.learning_phase_flags:
feed_dict[t] = False
return self.session.run(out, feed_dict)
else:
# eager mode: temporarily route gradient recording through our custom hook
def anon():
tf_execute.record_gradient = custom_record_gradient
# build inputs that are correctly shaped, typed, and tf-wrapped
inputs = []
for i in range(len(X)):
shape = list(self.model_inputs[i].shape)
shape[0] = -1
data = X[i].reshape(shape)
v = tf.constant(data, dtype=self.model_inputs[i].dtype)
inputs.append(v)
final_out = out(inputs)
try:
tf_execute.record_gradient = tf_backprop._record_gradient # old TF2 versions
except AttributeError:
tf_execute.record_gradient = tf_backprop.record_gradient
return final_out
return self.execute_with_overridden_gradients(anon)
def custom_grad(self, op, *grads):
    """ Passes a gradient op creation request to the correct handler.
    """
    # strip the "shap_" prefix we registered under before looking up the handler
    handler_name = op.type[5:] if op.type.startswith("shap_") else op.type
    handler = op_handlers[handler_name]
    return handler(self, op, *grads)
def execute_with_overridden_gradients(self, f):
# Temporarily replace every registered TF gradient we handle with our SHAP
# version, run f(), then restore the registry — even if f() raises.
# replace the gradients for all the non-linear activations
# we do this by hacking our way into the registry (TODO: find a public API for this if it exists)
reg = tf_ops._gradient_registry._registry
ops_not_in_registry = ['TensorListReserve']
# NOTE: location_tag taken from tensorflow source for None type ops
location_tag = ("UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN")
# TODO: unclear why some ops are not in the registry with TF 2.0 like TensorListReserve
for non_reg_ops in ops_not_in_registry:
reg[non_reg_ops] = {'type': None, 'location': location_tag}
for n in op_handlers:
if n in reg:
# remember the original gradient so handlers (and the restore step) can use it
self.orig_grads[n] = reg[n]["type"]
reg["shap_"+n] = {
"type": self.custom_grad,
"location": reg[n]["location"]
}
reg[n]["type"] = self.custom_grad
# In TensorFlow 1.10 they started pruning out nodes that they think can't be backpropped
# unfortunately that includes the index of embedding layers so we disable that check here
if hasattr(tf_gradients_impl, "_IsBackpropagatable"):
orig_IsBackpropagatable = tf_gradients_impl._IsBackpropagatable
tf_gradients_impl._IsBackpropagatable = lambda tensor: True
# define the computation graph for the attribution values using a custom gradient-like computation
try:
out = f()
finally:
# reinstate the backpropagatable check
if hasattr(tf_gradients_impl, "_IsBackpropagatable"):
tf_gradients_impl._IsBackpropagatable = orig_IsBackpropagatable
# restore the original gradient definitions
for n in op_handlers:
if n in reg:
del reg["shap_"+n]
reg[n]["type"] = self.orig_grads[n]
for non_reg_ops in ops_not_in_registry:
del reg[non_reg_ops]
if not tf.executing_eagerly():
return out
else:
return [v.numpy() for v in out]
def tensors_blocked_by_false(ops):
    """ Follows a set of ops assuming their value is False and find blocked Switch paths.
    This is used to prune away parts of the model graph that are only used during the training
    phase (like dropout, batch norm, etc.).
    """
    blocked = []
    def _follow(op):
        if op.type == "Switch":
            # with a False predicate only output 0 stays live; output 1 is blocked
            blocked.append(op.outputs[1])
            return
        for tensor in op.outputs:
            for consumer in tensor.consumers():
                _follow(consumer)
    for root in ops:
        _follow(root)
    return blocked
def backward_walk_ops(start_ops, tensor_blacklist, op_type_blacklist):
    """Walk the graph backwards from ``start_ops`` and return every reachable op.

    Ops whose type is in ``op_type_blacklist`` are not entered, and edges whose
    tensor is in ``tensor_blacklist`` are not followed. The returned list keeps
    the original discovery order.
    """
    found_ops = []
    seen = set()  # O(1) membership; the original list scan was O(n) per op
    op_stack = list(start_ops)
    while op_stack:
        op = op_stack.pop()
        if op.type not in op_type_blacklist and op not in seen:
            seen.add(op)
            found_ops.append(op)
            # "input" renamed so we don't shadow the builtin
            for tensor in op.inputs:
                if tensor not in tensor_blacklist:
                    op_stack.append(tensor.op)
    return found_ops
def forward_walk_ops(start_ops, tensor_blacklist, op_type_blacklist, within_ops):
    """Walk the graph forwards from ``start_ops`` and return every reachable op.

    Only ops contained in ``within_ops`` are accepted; blacklisted op types are
    not entered and blacklisted tensors are not followed. Discovery order is
    preserved in the returned list.
    """
    within = set(within_ops)  # O(1) membership instead of scanning the list
    found_ops = []
    seen = set()
    op_stack = list(start_ops)
    while op_stack:
        op = op_stack.pop()
        if op.type not in op_type_blacklist and op in within and op not in seen:
            seen.add(op)
            found_ops.append(op)
            for out in op.outputs:
                if out not in tensor_blacklist:
                    for consumer in out.consumers():
                        op_stack.append(consumer)
    return found_ops
def softmax(explainer, op, *grads):
    """ Just decompose softmax into its components and recurse, we can handle all of them :)
    We assume the 'axis' is the last dimension because the TF codebase swaps the 'axis' to
    the last dimension before the softmax op if 'axis' is not already the last dimension.
    We also don't subtract the max before tf.exp for numerical stability since that might
    mess up the attributions and it seems like TensorFlow doesn't define softmax that way
    (according to the docs)
    """
    in0 = op.inputs[0]
    in0_max = tf.reduce_max(in0, axis=-1, keepdims=True, name="in0_max")
    in0_centered = in0 - in0_max
    evals = tf.exp(in0_centered, name="custom_exp")
    rsum = tf.reduce_sum(evals, axis=-1, keepdims=True)
    div = evals / rsum
    # mark these as in-between the inputs and outputs
    # (loop variable renamed: the original shadowed the `op` parameter)
    intermediate_ops = [evals.op, rsum.op, div.op, in0_centered.op]
    for inner_op in intermediate_ops:
        for t in inner_op.outputs:
            if t.name not in explainer.between_tensors:
                explainer.between_tensors[t.name] = False
    out = tf.gradients(div, in0_centered, grad_ys=grads[0])[0]
    # remove the names we just added
    for inner_op in intermediate_ops:
        for t in inner_op.outputs:
            if explainer.between_tensors[t.name] is False:
                del explainer.between_tensors[t.name]
    # rescale to account for our shift by in0_max (which we did for numerical stability)
    xin0, rin0 = tf.split(in0, 2)
    xin0_centered, rin0_centered = tf.split(in0_centered, 2)
    delta_in0 = xin0 - rin0
    dup0 = [2] + [1 for i in delta_in0.shape[1:]]
    return tf.where(
        tf.tile(tf.abs(delta_in0), dup0) < 1e-6,
        out,
        out * tf.tile((xin0_centered - rin0_centered) / delta_in0, dup0)
    )
def maxpool(explainer, op, *grads):
# SHAP handler for MaxPool: the doubled batch holds the sample half (xin0) and the
# reference half (rin0); credit flows through the joint max of the two outputs.
xin0,rin0 = tf.split(op.inputs[0], 2)
xout,rout = tf.split(op.outputs[0], 2)
delta_in0 = xin0 - rin0
dup0 = [2] + [1 for i in delta_in0.shape[1:]]
cross_max = tf.maximum(xout, rout)
diffs = tf.concat([cross_max - rout, xout - cross_max], 0)
# strip our "shap_" registration prefix so the original gradient lookup works
if op.type.startswith("shap_"):
op.type = op.type[5:]
xmax_pos,rmax_pos = tf.split(explainer.orig_grads[op.type](op, grads[0] * diffs), 2)
# avoid divide-by-zero where the sample and reference inputs coincide
return tf.tile(tf.where(
tf.abs(delta_in0) < 1e-7,
tf.zeros_like(delta_in0),
(xmax_pos + rmax_pos) / delta_in0
), dup0)
def gather(explainer, op, *grads):
# SHAP handler for GatherV2/ResourceGather: attribute either through the indices
# (nonlinear rescale rule) or through the params (plain linear gradient).
#params = op.inputs[0]
indices = op.inputs[1]
#axis = op.inputs[2]
var = explainer._variable_inputs(op)
if var[1] and not var[0]:
# only the indices vary: rescale grads by (delta output)/(delta index)
assert len(indices.shape) == 2, "Only scalar indices supported right now in GatherV2!"
xin1,rin1 = tf.split(tf.cast(op.inputs[1], tf.float32), 2)
xout,rout = tf.split(op.outputs[0], 2)
dup_in1 = [2] + [1 for i in xin1.shape[1:]]
dup_out = [2] + [1 for i in xout.shape[1:]]
delta_in1_t = tf.tile(xin1 - rin1, dup_in1)
out_sum = tf.reduce_sum(grads[0] * tf.tile(xout - rout, dup_out), list(range(len(indices.shape), len(grads[0].shape))))
# ResourceGather has 2 inputs, GatherV2 has 3 (extra axis input gets None)
if op.type == "ResourceGather":
return [None, tf.where(
tf.abs(delta_in1_t) < 1e-6,
tf.zeros_like(delta_in1_t),
out_sum / delta_in1_t
)]
return [None, tf.where(
tf.abs(delta_in1_t) < 1e-6,
tf.zeros_like(delta_in1_t),
out_sum / delta_in1_t
), None]
elif var[0] and not var[1]:
# only the params vary: gather is linear, reuse the original gradient
if op.type.startswith("shap_"):
op.type = op.type[5:]
return [explainer.orig_grads[op.type](op, grads[0]), None] # linear in this case
else:
assert False, "Axis not yet supported to be varying for gather op!"
def linearity_1d_nonlinearity_2d(input_ind0, input_ind1, op_func):
    """Handler factory for ops that are linear when only one of the two given
    inputs varies, and need the 2D nonlinear rule when both vary."""
    def _handler(explainer, op, *grads):
        varies = explainer._variable_inputs(op)
        v0, v1 = varies[input_ind0], varies[input_ind1]
        if v0 and v1:
            return nonlinearity_2d_handler(input_ind0, input_ind1, op_func, explainer, op, *grads)
        if v0:
            return linearity_1d_handler(input_ind0, explainer, op, *grads)
        if v1:
            return linearity_1d_handler(input_ind1, explainer, op, *grads)
        # no inputs vary, we must be hidden by a switch function
        return [None for _ in op.inputs]
    return _handler
def nonlinearity_1d_nonlinearity_2d(input_ind0, input_ind1, op_func):
    """Handler factory for ops that need the 1D nonlinear rule when one of the
    two given inputs varies, and the 2D rule when both vary."""
    def _handler(explainer, op, *grads):
        varies = explainer._variable_inputs(op)
        v0, v1 = varies[input_ind0], varies[input_ind1]
        if v0 and v1:
            return nonlinearity_2d_handler(input_ind0, input_ind1, op_func, explainer, op, *grads)
        if v0:
            return nonlinearity_1d_handler(input_ind0, explainer, op, *grads)
        if v1:
            return nonlinearity_1d_handler(input_ind1, explainer, op, *grads)
        # no inputs vary, we must be hidden by a switch function
        return [None for _ in op.inputs]
    return _handler
def nonlinearity_1d(input_ind):
    """Handler factory: op is nonlinear and only input ``input_ind`` may vary."""
    def _handler(explainer, op, *grads):
        return nonlinearity_1d_handler(input_ind, explainer, op, *grads)
    return _handler
def nonlinearity_1d_handler(input_ind, explainer, op, *grads):
# Rescale-rule attribution for an op nonlinear in exactly one input: replace the
# gradient by (delta output)/(delta input) between the sample and reference halves.
# make sure only the given input varies
op_inputs = op.inputs
if op_inputs is None:
op_inputs = op.outputs[0].op.inputs
for i in range(len(op_inputs)):
if i != input_ind:
assert not explainer._variable_inputs(op)[i], str(i) + "th input to " + op.name + " cannot vary!"
xin0, rin0 = tf.split(op_inputs[input_ind], 2)
# NOTE(review): indexes op.outputs with input_ind — assumes outputs align with
# inputs for these single-output ops; confirm for multi-output op types
xout, rout = tf.split(op.outputs[input_ind], 2)
delta_in0 = xin0 - rin0
if delta_in0.shape is None:
dup0 = [2, 1]
else:
dup0 = [2] + [1 for i in delta_in0.shape[1:]]
out = [None for _ in op_inputs]
if op.type.startswith("shap_"):
op.type = op.type[5:]
orig_grad = explainer.orig_grads[op.type](op, grads[0])
# fall back to the exact gradient where delta input is ~0 (avoids 0/0)
out[input_ind] = tf.where(
tf.tile(tf.abs(delta_in0), dup0) < 1e-6,
orig_grad[input_ind] if len(op_inputs) > 1 else orig_grad,
grads[0] * tf.tile((xout - rout) / delta_in0, dup0)
)
return out
def nonlinearity_2d_handler(input_ind0, input_ind1, op_func, explainer, op, *grads):
# Shapley attribution for a binary op where both inputs vary: average over the
# two orderings of switching each input from its reference to its sample value.
assert input_ind0 == 0 and input_ind1 == 1, "TODO: Can't yet handle double inputs that are not first!"
xout,rout = tf.split(op.outputs[0], 2)
in0 = op.inputs[input_ind0]
in1 = op.inputs[input_ind1]
xin0,rin0 = tf.split(in0, 2)
xin1,rin1 = tf.split(in1, 2)
delta_in0 = xin0 - rin0
delta_in1 = xin1 - rin1
dup0 = [2] + [1 for i in delta_in0.shape[1:]]
# out{ab}: op evaluated with input0 in state a and input1 in state b (1=sample, 0=reference)
out10 = op_func(xin0, rin1)
out01 = op_func(rin0, xin1)
out11,out00 = xout,rout
out0 = 0.5 * (out11 - out01 + out10 - out00)
out0 = grads[0] * tf.tile(out0 / delta_in0, dup0)
out1 = 0.5 * (out11 - out10 + out01 - out00)
out1 = grads[0] * tf.tile(out1 / delta_in1, dup0)
# Avoid divide by zero nans
out0 = tf.where(tf.abs(tf.tile(delta_in0, dup0)) < 1e-7, tf.zeros_like(out0), out0)
out1 = tf.where(tf.abs(tf.tile(delta_in1, dup0)) < 1e-7, tf.zeros_like(out1), out1)
# see if due to broadcasting our gradient shapes don't match our input shapes
if (np.any(np.array(out1.shape) != np.array(in1.shape))):
broadcast_index = np.where(np.array(out1.shape) != np.array(in1.shape))[0][0]
out1 = tf.reduce_sum(out1, axis=broadcast_index, keepdims=True)
elif (np.any(np.array(out0.shape) != np.array(in0.shape))):
broadcast_index = np.where(np.array(out0.shape) != np.array(in0.shape))[0][0]
out0 = tf.reduce_sum(out0, axis=broadcast_index, keepdims=True)
return [out0, out1]
def linearity_1d(input_ind):
    """Handler factory: op is linear and only input ``input_ind`` may vary."""
    def _handler(explainer, op, *grads):
        return linearity_1d_handler(input_ind, explainer, op, *grads)
    return _handler
def linearity_1d_handler(input_ind, explainer, op, *grads):
    """Attribution for an op linear in input ``input_ind``; every other input
    must be constant, so TensorFlow's original gradient is exact."""
    # make sure only the given input varies (negative means only that input cannot vary, and is measured from the end of the list)
    varies = explainer._variable_inputs(op)
    for i in range(len(op.inputs)):
        if i == input_ind:
            continue
        assert not varies[i], str(i) + "th input to " + op.name + " cannot vary!"
    # strip our "shap_" registration prefix before the original-gradient lookup
    if op.type.startswith("shap_"):
        op.type = op.type[5:]
    return explainer.orig_grads[op.type](op, *grads)
def linearity_with_excluded(input_inds):
    """Handler factory: op is linear, but the inputs listed in ``input_inds``
    must stay constant (negative indices count from the end)."""
    def _handler(explainer, op, *grads):
        return linearity_with_excluded_handler(input_inds, explainer, op, *grads)
    return _handler
def linearity_with_excluded_handler(input_inds, explainer, op, *grads):
    """Attribution for a linear op after verifying none of the excluded inputs
    vary (negative indices are measured from the end of the input list)."""
    n_inputs = len(op.inputs)
    varies = explainer._variable_inputs(op)
    for i in range(n_inputs):
        if i in input_inds or i - n_inputs in input_inds:
            assert not varies[i], str(i) + "th input to " + op.name + " cannot vary!"
    # strip our "shap_" registration prefix before the original-gradient lookup
    if op.type.startswith("shap_"):
        op.type = op.type[5:]
    return explainer.orig_grads[op.type](op, *grads)
def passthrough(explainer, op, *grads):
    """Forward the gradient request unchanged to TensorFlow's original gradient
    (used for ops that are linear in all of their inputs)."""
    op_type = op.type
    if op_type.startswith("shap_"):
        # restore the real op type that we prefixed during registration
        op_type = op_type[5:]
        op.type = op_type
    original_grad_fn = explainer.orig_grads[op_type]
    return original_grad_fn(op, *grads)
def break_dependence(explainer, op, *grads):
    """ This function name is used to break attribution dependence in the graph traversal.
    These operation types may be connected above input data values in the graph but their outputs
    don't depend on the input values (for example they just depend on the shape).
    """
    # a None gradient for every input stops attribution flow entirely
    return [None] * len(op.inputs)
# Map from TF op type -> attribution handler installed while gradients are overridden.
op_handlers = {}

# ops that are always linear
op_handlers["Identity"] = passthrough
op_handlers["StridedSlice"] = passthrough
op_handlers["Squeeze"] = passthrough
op_handlers["ExpandDims"] = passthrough
op_handlers["Pack"] = passthrough
op_handlers["BiasAdd"] = passthrough
op_handlers["Unpack"] = passthrough
op_handlers["Add"] = passthrough
op_handlers["Sub"] = passthrough
op_handlers["Merge"] = passthrough
op_handlers["Sum"] = passthrough
op_handlers["Mean"] = passthrough
op_handlers["Cast"] = passthrough
op_handlers["Transpose"] = passthrough
op_handlers["Enter"] = passthrough
op_handlers["Exit"] = passthrough
op_handlers["NextIteration"] = passthrough
op_handlers["Tile"] = passthrough
op_handlers["TensorArrayScatterV3"] = passthrough
op_handlers["TensorArrayReadV3"] = passthrough
op_handlers["TensorArrayWriteV3"] = passthrough
# NEW
op_handlers["AddV2"] = passthrough
op_handlers["StatelessWhile"] = passthrough  # duplicate assignment removed
op_handlers["TensorListStack"] = passthrough
op_handlers["TensorListFromTensor"] = passthrough

# ops that don't pass any attributions to their inputs
op_handlers["Shape"] = break_dependence
op_handlers["RandomUniform"] = break_dependence
op_handlers["ZerosLike"] = break_dependence
#op_handlers["StopGradient"] = break_dependence # this allows us to stop attributions when we want to (like softmax re-centering)

# ops that are linear and only allow a single input to vary
op_handlers["Reshape"] = linearity_1d(0)
op_handlers["Pad"] = linearity_1d(0)
op_handlers["ReverseV2"] = linearity_1d(0)
op_handlers["ConcatV2"] = linearity_with_excluded([-1])
op_handlers["Conv2D"] = linearity_1d(0)
op_handlers["Switch"] = linearity_1d(0)
op_handlers["AvgPool"] = linearity_1d(0)
op_handlers["FusedBatchNorm"] = linearity_1d(0)

# ops that are nonlinear and only allow a single input to vary
op_handlers["Relu"] = nonlinearity_1d(0)
op_handlers["Elu"] = nonlinearity_1d(0)
op_handlers["Sigmoid"] = nonlinearity_1d(0)
op_handlers["Tanh"] = nonlinearity_1d(0)
op_handlers["Softplus"] = nonlinearity_1d(0)
op_handlers["Exp"] = nonlinearity_1d(0)
op_handlers["ClipByValue"] = nonlinearity_1d(0)
op_handlers["Rsqrt"] = nonlinearity_1d(0)
op_handlers["Square"] = nonlinearity_1d(0)
op_handlers["Max"] = nonlinearity_1d(0)
# NEW
op_handlers["Sin"] = nonlinearity_1d(0)

# ops that are nonlinear and allow two inputs to vary
op_handlers["SquaredDifference"] = nonlinearity_1d_nonlinearity_2d(0, 1, lambda x, y: (x - y) * (x - y))
op_handlers["Minimum"] = nonlinearity_1d_nonlinearity_2d(0, 1, lambda x, y: tf.minimum(x, y))
op_handlers["Maximum"] = nonlinearity_1d_nonlinearity_2d(0, 1, lambda x, y: tf.maximum(x, y))

# ops that allow up to two inputs to vary and are linear when only one input varies
op_handlers["Mul"] = linearity_1d_nonlinearity_2d(0, 1, lambda x, y: x * y)
op_handlers["RealDiv"] = linearity_1d_nonlinearity_2d(0, 1, lambda x, y: x / y)
op_handlers["MatMul"] = linearity_1d_nonlinearity_2d(0, 1, lambda x, y: tf.matmul(x, y))

# ops that need their own custom attribution functions
op_handlers["GatherV2"] = gather
op_handlers["ResourceGather"] = gather
op_handlers["MaxPool"] = maxpool
op_handlers["Softmax"] = softmax

# TODO items
# TensorArrayGatherV3
# Max
# TensorArraySizeV3
# Range
| 45.208974 | 198 | 0.636588 | import numpy as np
import warnings
from .._explainer import Explainer
from packaging import version
from ..tf_utils import _get_session, _get_graph, _get_model_inputs, _get_model_output
# Lazily-populated module handles: TFDeep.__init__ imports TensorFlow (and its
# private backprop/registry internals) on first use and stores them here.
keras = None
tf = None
tf_ops = None
tf_backprop = None
tf_execute = None
tf_gradients_impl = None
def custom_record_gradient(op_name, inputs, attrs, results):
# Eager-mode hook: record gradients under our "shap_"-prefixed op name so the
# custom SHAP gradient handlers fire during backprop.
reset_input = False
if op_name == "ResourceGather" and inputs[1].dtype == tf.int32:
# temporarily relabel int32 indices as float32 — presumably so the backprop
# machinery accepts a gradient for them; TODO confirm against TF internals
inputs[1].__dict__["_dtype"] = tf.float32
reset_input = True
try:
out = tf_backprop._record_gradient("shap_"+op_name, inputs, attrs, results)
except AttributeError:
# newer TF versions renamed _record_gradient -> record_gradient
out = tf_backprop.record_gradient("shap_"+op_name, inputs, attrs, results)
if reset_input:
inputs[1].__dict__["_dtype"] = tf.int32
return out
class TFDeep(Explainer):
def __init__(self, model, data, session=None, learning_phase_flags=None):
    """Build a deep-SHAP explainer for a TensorFlow/Keras model.

    model : a Keras model (or, in eager mode, an (inputs, outputs) pair).
    data : background samples — a list of arrays, a single array, or a
        function mapping a sample to background samples.
    session : optional TF session for graph mode; autodetected when None.
    learning_phase_flags : tensors to force to False during prediction so ops
        like dropout/batch norm run in inference mode; when None we search the
        graph for Keras-style learning-phase constants.
    """
    # lazily import TensorFlow internals the first time an explainer is built
    global tf, tf_ops, tf_backprop, tf_execute, tf_gradients_impl
    if tf is None:
        from tensorflow.python.framework import ops as tf_ops  # pylint: disable=E0611
        from tensorflow.python.ops import gradients_impl as tf_gradients_impl  # pylint: disable=E0611
        from tensorflow.python.eager import backprop as tf_backprop
        from tensorflow.python.eager import execute as tf_execute
        if not hasattr(tf_gradients_impl, "_IsBackpropagatable"):
            from tensorflow.python.ops import gradients_util as tf_gradients_impl
        import tensorflow as tf
        if version.parse(tf.__version__) < version.parse("1.4.0"):
            warnings.warn("Your TensorFlow version is older than 1.4.0 and not supported.")
    global keras
    if keras is None:
        try:
            import keras
            warnings.warn("keras is no longer supported, please use tf.keras instead.")
        except Exception:  # was a bare except; keras is optional, so best-effort only
            pass
    if version.parse(tf.__version__) >= version.parse("2.4.0"):
        warnings.warn("Your TensorFlow version is newer than 2.4.0 and so graph support has been removed in eager mode and some static graphs may not be supported. See PR #1483 for discussion.")

    # determine the model inputs and outputs
    self.model_inputs = _get_model_inputs(model)
    self.model_output = _get_model_output(model)
    assert type(self.model_output) != list, "The model output to be explained must be a single tensor!"
    assert len(self.model_output.shape) < 3, "The model output must be a vector or a single value!"
    self.multi_output = len(self.model_output.shape) != 1

    if tf.executing_eagerly():
        if type(model) is tuple or type(model) is list:
            assert len(model) == 2, "When a tuple is passed it must be of the form (inputs, outputs)"
            from tensorflow.keras import Model
            self.model = Model(model[0], model[1])
        else:
            self.model = model

    # normalize the model inputs and background data into list form
    self.multi_input = True
    if type(self.model_inputs) != list or len(self.model_inputs) == 1:
        self.multi_input = False
        if type(self.model_inputs) != list:
            self.model_inputs = [self.model_inputs]
    if type(data) != list and not hasattr(data, '__call__'):
        data = [data]
    self.data = data

    self._vinputs = {}  # cache: which inputs of each op depend on the model inputs
    self.orig_grads = {}  # original TF gradient functions we temporarily override

    if not tf.executing_eagerly():
        self.session = _get_session(session)
        self.graph = _get_graph(self)
        # if no learning phase flags were given we go looking for them (this
        # catches the one Keras uses); they must be False during explanation
        if learning_phase_flags is None:
            self.learning_phase_ops = []
            for op in self.graph.get_operations():
                if 'learning_phase' in op.name and op.type == "Const" and len(op.outputs[0].shape) == 0:
                    if op.outputs[0].dtype == tf.bool:
                        self.learning_phase_ops.append(op)
            self.learning_phase_flags = [op.outputs[0] for op in self.learning_phase_ops]
        else:
            self.learning_phase_ops = [t.op for t in learning_phase_flags]

    # save the expected output of the model (None when data is a function)
    if hasattr(self.data, '__call__'):
        self.expected_value = None
    else:
        if self.data[0].shape[0] > 5000:
            warnings.warn("You have provided over 5k background samples! For better performance consider using smaller random sample.")
        if not tf.executing_eagerly():
            self.expected_value = self.run(self.model_output, self.model_inputs, self.data).mean(0)
        else:
            # NOTE: removed a dead, broken line here that referenced undefined
            # names ("sel.fModel(cnn.inputs, ...)"); self.model can never be a
            # tuple at this point because tuples were converted to a Model above.
            self.expected_value = tf.reduce_mean(self.model(self.data), 0)

    if not tf.executing_eagerly():
        self._init_between_tensors(self.model_output.op, self.model_inputs)

    # lazily-built SHAP computation graphs, one per explained output; lazy so
    # that explaining only the top few of many outputs doesn't build the rest
    if not self.multi_output:
        self.phi_symbolics = [None]
    else:
        noutputs = self.model_output.shape.as_list()[1]
        if noutputs is None:
            raise Exception("The model output tensor to be explained cannot have a static shape in dim 1 of None!")
        self.phi_symbolics = [None] * noutputs
def _get_model_output(self, model):
# Pick the single output tensor to explain from a Keras model: fall back to the
# declared model outputs when the last layer has no inbound nodes.
if len(model.layers[-1]._inbound_nodes) == 0:
if len(model.outputs) > 1:
warnings.warn("Only one model output supported.")
return model.outputs[0]
else:
# otherwise use the last layer's own output tensor
return model.layers[-1].output
def _init_between_tensors(self, out_op, model_inputs):
# Record which ops/tensors lie on a path from the model inputs to out_op
# (forward-reachable from the inputs AND backward-reachable from the output).
tensor_blacklist = tensors_blocked_by_false(self.learning_phase_ops)
dependence_breakers = [k for k in op_handlers if op_handlers[k] == break_dependence]
back_ops = backward_walk_ops(
[out_op], tensor_blacklist,
dependence_breakers
)
start_ops = []
for minput in model_inputs:
for op in minput.consumers():
start_ops.append(op)
self.between_ops = forward_walk_ops(
start_ops,
tensor_blacklist, dependence_breakers,
within_ops=back_ops
)
# note all the tensors that are on the path between the inputs and the output
self.between_tensors = {}
for op in self.between_ops:
for t in op.outputs:
self.between_tensors[t.name] = True
for t in model_inputs:
self.between_tensors[t.name] = True
# save what types are being used
self.used_types = {}
for op in self.between_ops:
self.used_types[op.type] = True
def _variable_inputs(self, op):
    """Return a boolean mask over ``op.inputs`` marking which depend on the model inputs.

    An input is "variable" when its tensor name was recorded in
    ``self.between_tensors``. Results are cached per-op in ``self._vinputs``.
    """
    if op not in self._vinputs:
        # np.bool was deprecated and removed in NumPy 1.24; the builtin bool
        # is the supported dtype alias and behaves identically here
        mask = np.zeros(len(op.inputs), dtype=bool)
        for i, tensor in enumerate(op.inputs):
            mask[i] = tensor.name in self.between_tensors
        self._vinputs[op] = mask
    return self._vinputs[op]
def phi_symbolic(self, i):
# Lazily build and cache the SHAP gradient computation for model output i.
if self.phi_symbolics[i] is None:
if not tf.executing_eagerly():
# graph mode: gradients of output i w.r.t. the model inputs, taken while
# our custom gradient overrides are installed
def anon():
out = self.model_output[:,i] if self.multi_output else self.model_output
return tf.gradients(out, self.model_inputs)
self.phi_symbolics[i] = self.execute_with_overridden_gradients(anon)
else:
# eager mode: a tf.function tracing a GradientTape over the model call
@tf.function
def grad_graph(shap_rAnD):
phase = tf.keras.backend.learning_phase()
tf.keras.backend.set_learning_phase(0)
with tf.GradientTape(watch_accessed_variables=False) as tape:
tape.watch(shap_rAnD)
out = self.model(shap_rAnD)
if self.multi_output:
out = out[:,i]
self._init_between_tensors(out.op, shap_rAnD)
x_grad = tape.gradient(out, shap_rAnD)
tf.keras.backend.set_learning_phase(phase)
return x_grad
self.phi_symbolics[i] = grad_graph
return self.phi_symbolics[i]
def shap_values(self, X, ranked_outputs=None, output_rank_order="max", check_additivity=True):
    """Estimate SHAP values for the samples in X.

    For each sample, each model input is interpolated against every background
    sample and the rescaled gradients are averaged.  When ``ranked_outputs`` is
    given on a multi-output model, only the top outputs (ordered per
    ``output_rank_order``: "max", "min" or "max_abs") are explained, and the
    rank matrix is returned alongside the attributions.  With
    ``check_additivity`` the attributions are verified to sum to
    model_output - expected_value (within 1e-2).
    """
    # check if we have multiple inputs
    if not self.multi_input:
        if type(X) == list and len(X) != 1:
            assert False, "Expected a single tensor as model input!"
        elif type(X) != list:
            X = [X]
    else:
        assert type(X) == list, "Expected a list of model inputs!"
    assert len(self.model_inputs) == len(X), "Number of model inputs (%d) does not match the number given (%d)!" % (len(self.model_inputs), len(X))
    # rank and determine the model outputs that we will explain
    if ranked_outputs is not None and self.multi_output:
        if not tf.executing_eagerly():
            model_output_values = self.run(self.model_output, self.model_inputs, X)
        else:
            model_output_values = self.model(X)
        if output_rank_order == "max":
            model_output_ranks = np.argsort(-model_output_values)
        elif output_rank_order == "min":
            model_output_ranks = np.argsort(model_output_values)
        elif output_rank_order == "max_abs":
            model_output_ranks = np.argsort(np.abs(model_output_values))
        else:
            assert False, "output_rank_order must be max, min, or max_abs!"
        model_output_ranks = model_output_ranks[:,:ranked_outputs]
    else:
        # explain every output, in index order, for every sample
        model_output_ranks = np.tile(np.arange(len(self.phi_symbolics)), (X[0].shape[0], 1))
    # compute the attributions
    output_phis = []
    for i in range(model_output_ranks.shape[1]):
        phis = []
        for k in range(len(X)):
            phis.append(np.zeros(X[k].shape))
        for j in range(X[0].shape[0]):
            # the background may be a fixed dataset or a function of the current sample
            if (hasattr(self.data, '__call__')):
                bg_data = self.data([X[l][j] for l in range(len(X))])
                if type(bg_data) != list:
                    bg_data = [bg_data]
            else:
                bg_data = self.data
            # tile the inputs to line up with the background data samples
            tiled_X = [np.tile(X[l][j:j+1], (bg_data[l].shape[0],) + tuple([1 for k in range(len(X[l].shape)-1)])) for l in range(len(X))]
            # we use the first sample for the current sample and the rest for the references
            joint_input = [np.concatenate([tiled_X[l], bg_data[l]], 0) for l in range(len(X))]
            # run attribution computation graph
            feature_ind = model_output_ranks[j,i]
            sample_phis = self.run(self.phi_symbolic(feature_ind), self.model_inputs, joint_input)
            # assign the attributions to the right part of the output arrays
            for l in range(len(X)):
                phis[l][j] = (sample_phis[l][bg_data[l].shape[0]:] * (X[l][j] - bg_data[l])).mean(0)
        output_phis.append(phis[0] if not self.multi_input else phis)
    # check that the SHAP values sum up to the model output
    if check_additivity:
        if not tf.executing_eagerly():
            model_output = self.run(self.model_output, self.model_inputs, X)
        else:
            model_output = self.model(X)
        for l in range(len(self.expected_value)):
            if not self.multi_input:
                diffs = model_output[:, l] - self.expected_value[l] - output_phis[l].sum(axis=tuple(range(1, output_phis[l].ndim)))
            else:
                diffs = model_output[:, l] - self.expected_value[l]
                for i in range(len(output_phis[l])):
                    diffs -= output_phis[l][i].sum(axis=tuple(range(1, output_phis[l][i].ndim)))
            assert np.abs(diffs).max() < 1e-2, "The SHAP explanations do not sum up to the model's output! This is either because of a " \
                                               "rounding error or because an operator in your computation graph was not fully supported. If " \
                                               "the sum difference of %f is significant compared the scale of your model outputs please post " \
                                               "as a github issue, with a reproducable example if possible so we can debug it." % np.abs(diffs).max()
    if not self.multi_output:
        return output_phis[0]
    elif ranked_outputs is not None:
        return output_phis, model_output_ranks
    else:
        return output_phis
def run(self, out, model_inputs, X):
    """Evaluate ``out`` on the data X, in either graph or eager mode.

    Graph mode feeds a session; eager mode calls the compiled tf.function with
    gradient recording temporarily routed through our custom recorder.
    """
    if not tf.executing_eagerly():
        feed_dict = dict(zip(model_inputs, X))
        for t in self.learning_phase_flags:
            # learning phase False => inference behavior (e.g. no dropout)
            feed_dict[t] = False
        return self.session.run(out, feed_dict)
    else:
        def anon():
            # swap in our gradient recorder while the function executes
            tf_execute.record_gradient = custom_record_gradient

            # build inputs that are x followed by ref divided batch_size
            inputs = []
            for i in range(len(X)):
                shape = list(self.model_inputs[i].shape)
                shape[0] = -1  # leave the batch dimension free
                data = X[i].reshape(shape)
                v = tf.constant(data, dtype=self.model_inputs[i].dtype)
                inputs.append(v)
            final_out = out(inputs)
            try:
                tf_execute.record_gradient = tf_backprop._record_gradient
            except AttributeError:
                # the private name became public in newer TF releases
                tf_execute.record_gradient = tf_backprop.record_gradient

            return final_out
        return self.execute_with_overridden_gradients(anon)
def custom_grad(self, op, *grads):
    """Dispatch the gradient of ``op`` to the matching SHAP handler in op_handlers."""
    # strip the "shap_" prefix added when we registered the overridden gradient
    type_name = op.type[5:] if op.type.startswith("shap_") else op.type
    out = op_handlers[type_name](self, op, *grads)
    return out
def execute_with_overridden_gradients(self, f):
    """Run f() while TF's gradient registry is patched with our SHAP handlers.

    The registry entries are swapped in place before the call and always
    restored in the ``finally`` block, so a failure in f() cannot leave the
    global registry corrupted.
    """
    # replace the gradients for all the non-linear operations
    reg = tf_ops._gradient_registry._registry
    ops_not_in_registry = ['TensorListReserve']
    location_tag = ("UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN")
    for non_reg_ops in ops_not_in_registry:
        # add placeholder entries so the patch loop below can find these ops
        reg[non_reg_ops] = {'type': None, 'location': location_tag}
    for n in op_handlers:
        if n in reg:
            self.orig_grads[n] = reg[n]["type"]  # remember the original gradient fn
            reg["shap_"+n] = {
                "type": self.custom_grad,
                "location": reg[n]["location"]
            }
            reg[n]["type"] = self.custom_grad
    # unfortunately that includes the index of embedding layers so we disable that check here
    if hasattr(tf_gradients_impl, "_IsBackpropagatable"):
        orig_IsBackpropagatable = tf_gradients_impl._IsBackpropagatable
        tf_gradients_impl._IsBackpropagatable = lambda tensor: True
    # define the computation graph for the attribution values using a custom gradient-like computation
    try:
        out = f()
    finally:
        # reinstate the backpropagatable check
        if hasattr(tf_gradients_impl, "_IsBackpropagatable"):
            tf_gradients_impl._IsBackpropagatable = orig_IsBackpropagatable
        # restore the original gradient definitions
        for n in op_handlers:
            if n in reg:
                del reg["shap_"+n]
                reg[n]["type"] = self.orig_grads[n]
        for non_reg_ops in ops_not_in_registry:
            del reg[non_reg_ops]
    if not tf.executing_eagerly():
        return out
    else:
        return [v.numpy() for v in out]
def tensors_blocked_by_false(ops):
    """Collect the true-branch output tensor of every Switch op reachable from ops.

    Because we trace the graph with the learning phase fixed to False, the
    True branch of each Switch is dead and should block graph walks.
    """
    blocked = []

    def visit(operation):
        if operation.type == "Switch":
            # the true path is blocked since we assume the ops we trace are False
            blocked.append(operation.outputs[1])
            return
        for tensor in operation.outputs:
            for consumer in tensor.consumers():
                visit(consumer)

    for operation in ops:
        visit(operation)
    return blocked
def backward_walk_ops(start_ops, tensor_blacklist, op_type_blacklist):
    """Depth-first walk backward from start_ops, collecting every op reached.

    Ops whose type is blacklisted are neither collected nor expanded, and the
    walk never crosses a blacklisted tensor.
    """
    visited = []
    pending = list(start_ops)
    while pending:
        current = pending.pop()
        if current.type in op_type_blacklist or current in visited:
            continue
        visited.append(current)
        pending.extend(tensor.op for tensor in current.inputs
                       if tensor not in tensor_blacklist)
    return visited
def forward_walk_ops(start_ops, tensor_blacklist, op_type_blacklist, within_ops):
    """Depth-first walk forward from start_ops, restricted to ops in within_ops.

    Ops with a blacklisted type are skipped, and consumers of blacklisted
    tensors are never visited through those tensors.
    """
    visited = []
    pending = list(start_ops)
    while pending:
        current = pending.pop()
        keep = (current.type not in op_type_blacklist
                and current in within_ops
                and current not in visited)
        if not keep:
            continue
        visited.append(current)
        for tensor in current.outputs:
            if tensor in tensor_blacklist:
                continue
            pending.extend(tensor.consumers())
    return visited
def softmax(explainer, op, *grads):
    """Attribution handler for Softmax: re-centers inputs for numerical stability.

    Rebuilds the softmax from a max-shifted input, backprops through that
    reconstruction, then rescales to account for the shift.  NOTE: the loop
    variable below shadows the ``op`` parameter, which is safe because the
    parameter is not used after this point.
    """
    in0 = op.inputs[0]
    in0_max = tf.reduce_max(in0, axis=-1, keepdims=True, name="in0_max")
    in0_centered = in0 - in0_max
    evals = tf.exp(in0_centered, name="custom_exp")
    rsum = tf.reduce_sum(evals, axis=-1, keepdims=True)
    div = evals / rsum
    # mark these as in-between the inputs and outputs
    for op in [evals.op, rsum.op, div.op, in0_centered.op]:
        for t in op.outputs:
            if t.name not in explainer.between_tensors:
                explainer.between_tensors[t.name] = False
    out = tf.gradients(div, in0_centered, grad_ys=grads[0])[0]
    # remove the names we just added
    for op in [evals.op, rsum.op, div.op, in0_centered.op]:
        for t in op.outputs:
            if explainer.between_tensors[t.name] is False:
                del explainer.between_tensors[t.name]
    # rescale to account for our shift by in0_max (which we did for numerical stability)
    xin0,rin0 = tf.split(in0, 2)
    xin0_centered,rin0_centered = tf.split(in0_centered, 2)
    delta_in0 = xin0 - rin0
    dup0 = [2] + [1 for i in delta_in0.shape[1:]]
    # guard against division by ~0 when sample and reference inputs coincide
    return tf.where(
        tf.tile(tf.abs(delta_in0), dup0) < 1e-6,
        out,
        out * tf.tile((xin0_centered - rin0_centered) / delta_in0, dup0)
    )
def maxpool(explainer, op, *grads):
    """Attribution handler for MaxPool using the cross-max of sample and reference.

    The batch is split into the sample half (x*) and the reference half (r*);
    attributions are the original gradient of the max differences, rescaled by
    the input delta (zeroed where the delta is ~0 to avoid dividing by zero).
    """
    xin0,rin0 = tf.split(op.inputs[0], 2)
    xout,rout = tf.split(op.outputs[0], 2)
    delta_in0 = xin0 - rin0
    dup0 = [2] + [1 for i in delta_in0.shape[1:]]
    cross_max = tf.maximum(xout, rout)
    diffs = tf.concat([cross_max - rout, xout - cross_max], 0)
    if op.type.startswith("shap_"):
        op.type = op.type[5:]  # restore the real type so the original grad fn resolves
    xmax_pos,rmax_pos = tf.split(explainer.orig_grads[op.type](op, grads[0] * diffs), 2)
    return tf.tile(tf.where(
        tf.abs(delta_in0) < 1e-7,
        tf.zeros_like(delta_in0),
        (xmax_pos + rmax_pos) / delta_in0
    ), dup0)
def gather(explainer, op, *grads):
    """Attribution handler for GatherV2/ResourceGather.

    If only the indices vary the attribution is the summed output delta divided
    by the index delta; if only the params vary, gather is linear and the
    original gradient applies.  Both varying is unsupported.
    """
    #params = op.inputs[0]
    indices = op.inputs[1]
    #axis = op.inputs[2]
    var = explainer._variable_inputs(op)
    if var[1] and not var[0]:
        assert len(indices.shape) == 2, "Only scalar indices supported right now in GatherV2!"

        xin1,rin1 = tf.split(tf.cast(op.inputs[1], tf.float32), 2)
        xout,rout = tf.split(op.outputs[0], 2)
        dup_in1 = [2] + [1 for i in xin1.shape[1:]]
        dup_out = [2] + [1 for i in xout.shape[1:]]
        delta_in1_t = tf.tile(xin1 - rin1, dup_in1)
        # sum the gradient-weighted output delta over the gathered dimensions
        out_sum = tf.reduce_sum(grads[0] * tf.tile(xout - rout, dup_out), list(range(len(indices.shape), len(grads[0].shape))))
        # ResourceGather has two inputs, GatherV2 has three (params, indices, axis)
        if op.type == "ResourceGather":
            return [None, tf.where(
                tf.abs(delta_in1_t) < 1e-6,
                tf.zeros_like(delta_in1_t),
                out_sum / delta_in1_t
            )]
        return [None, tf.where(
            tf.abs(delta_in1_t) < 1e-6,
            tf.zeros_like(delta_in1_t),
            out_sum / delta_in1_t
        ), None]
    elif var[0] and not var[1]:
        if op.type.startswith("shap_"):
            op.type = op.type[5:]  # restore the real type so the original grad fn resolves
        return [explainer.orig_grads[op.type](op, grads[0]), None] # linear in this case
    else:
        assert False, "Axis not yet supported to be varying for gather op!"
def linearity_1d_nonlinearity_2d(input_ind0, input_ind1, op_func):
    """Build a handler for ops that are linear when one of two inputs varies.

    op_func recomputes the op on split halves when both inputs vary.
    """
    def handler(explainer, op, *grads):
        varies = explainer._variable_inputs(op)
        v0, v1 = varies[input_ind0], varies[input_ind1]
        if v0 and v1:
            return nonlinearity_2d_handler(input_ind0, input_ind1, op_func, explainer, op, *grads)
        if v0:
            return linearity_1d_handler(input_ind0, explainer, op, *grads)
        if v1:
            return linearity_1d_handler(input_ind1, explainer, op, *grads)
        # no inputs vary, we must be hidden by a switch function
        return [None] * len(op.inputs)
    return handler
def nonlinearity_1d_nonlinearity_2d(input_ind0, input_ind1, op_func):
    """Build a handler for ops that are nonlinear in each input alone and jointly.

    op_func recomputes the op on split halves when both inputs vary.
    """
    def handler(explainer, op, *grads):
        varies = explainer._variable_inputs(op)
        v0, v1 = varies[input_ind0], varies[input_ind1]
        if v0 and v1:
            return nonlinearity_2d_handler(input_ind0, input_ind1, op_func, explainer, op, *grads)
        if v0:
            return nonlinearity_1d_handler(input_ind0, explainer, op, *grads)
        if v1:
            return nonlinearity_1d_handler(input_ind1, explainer, op, *grads)
        # no inputs vary, we must be hidden by a switch function
        return [None] * len(op.inputs)
    return handler
def nonlinearity_1d(input_ind):
    """Curry nonlinearity_1d_handler over the index of the (single) varying input."""
    return lambda explainer, op, *grads: nonlinearity_1d_handler(input_ind, explainer, op, *grads)
def nonlinearity_1d_handler(input_ind, explainer, op, *grads):
    """Rescaled-gradient attribution for an op that is nonlinear in one varying input.

    Replaces the gradient with (delta out / delta in) where sample and reference
    differ, and falls back to the original gradient where they (nearly) coincide.
    """
    # make sure only the given input varies
    op_inputs = op.inputs
    if op_inputs is None:
        op_inputs = op.outputs[0].op.inputs

    for i in range(len(op_inputs)):
        if i != input_ind:
            assert not explainer._variable_inputs(op)[i], str(i) + "th input to " + op.name + " cannot vary!"

    # batch layout is [sample half, reference half]
    xin0, rin0 = tf.split(op_inputs[input_ind], 2)
    xout, rout = tf.split(op.outputs[input_ind], 2)
    delta_in0 = xin0 - rin0
    if delta_in0.shape is None:
        dup0 = [2, 1]
    else:
        dup0 = [2] + [1 for i in delta_in0.shape[1:]]
    out = [None for _ in op_inputs]
    if op.type.startswith("shap_"):
        op.type = op.type[5:]  # restore the real type so the original grad fn resolves
    orig_grad = explainer.orig_grads[op.type](op, grads[0])
    # use the original gradient when delta ~ 0 to avoid dividing by zero
    out[input_ind] = tf.where(
        tf.tile(tf.abs(delta_in0), dup0) < 1e-6,
        orig_grad[input_ind] if len(op_inputs) > 1 else orig_grad,
        grads[0] * tf.tile((xout - rout) / delta_in0, dup0)
    )
    return out
def nonlinearity_2d_handler(input_ind0, input_ind1, op_func, explainer, op, *grads):
    """Shapley-style attribution when both inputs of a binary op vary.

    Uses op_func to recompute the op on mixed sample/reference halves and
    averages the two possible orderings; broadcast dimensions in the result
    are summed back down to match the input shapes.
    """
    assert input_ind0 == 0 and input_ind1 == 1, "TODO: Can't yet handle double inputs that are not first!"
    xout,rout = tf.split(op.outputs[0], 2)
    in0 = op.inputs[input_ind0]
    in1 = op.inputs[input_ind1]
    xin0,rin0 = tf.split(in0, 2)
    xin1,rin1 = tf.split(in1, 2)
    delta_in0 = xin0 - rin0
    delta_in1 = xin1 - rin1
    dup0 = [2] + [1 for i in delta_in0.shape[1:]]
    # cross terms: op evaluated with one input from the sample, the other from the reference
    out10 = op_func(xin0, rin1)
    out01 = op_func(rin0, xin1)
    out11,out00 = xout,rout
    out0 = 0.5 * (out11 - out01 + out10 - out00)
    out0 = grads[0] * tf.tile(out0 / delta_in0, dup0)
    out1 = 0.5 * (out11 - out10 + out01 - out00)
    out1 = grads[0] * tf.tile(out1 / delta_in1, dup0)

    # Avoid divide by zero nans
    out0 = tf.where(tf.abs(tf.tile(delta_in0, dup0)) < 1e-7, tf.zeros_like(out0), out0)
    out1 = tf.where(tf.abs(tf.tile(delta_in1, dup0)) < 1e-7, tf.zeros_like(out1), out1)

    # see if due to broadcasting our gradient shapes don't match our input shapes
    if (np.any(np.array(out1.shape) != np.array(in1.shape))):
        broadcast_index = np.where(np.array(out1.shape) != np.array(in1.shape))[0][0]
        out1 = tf.reduce_sum(out1, axis=broadcast_index, keepdims=True)
    elif (np.any(np.array(out0.shape) != np.array(in0.shape))):
        broadcast_index = np.where(np.array(out0.shape) != np.array(in0.shape))[0][0]
        out0 = tf.reduce_sum(out0, axis=broadcast_index, keepdims=True)

    return [out0, out1]
def linearity_1d(input_ind):
    """Curry linearity_1d_handler over the index of the (single) varying input."""
    return lambda explainer, op, *grads: linearity_1d_handler(input_ind, explainer, op, *grads)
def linearity_1d_handler(input_ind, explainer, op, *grads):
    """Use the op's original gradient: the op is linear in its single varying input."""
    # make sure only the given input varies (negative means only that input cannot vary, and is measured from the end of the list)
    for i in range(len(op.inputs)):
        if i != input_ind:
            assert not explainer._variable_inputs(op)[i], str(i) + "th input to " + op.name + " cannot vary!"
    if op.type.startswith("shap_"):
        op.type = op.type[5:]  # restore the real type so the original grad fn resolves
    return explainer.orig_grads[op.type](op, *grads)
def linearity_with_excluded(input_inds):
    """Curry linearity_with_excluded_handler over the indices that must not vary."""
    return lambda explainer, op, *grads: linearity_with_excluded_handler(input_inds, explainer, op, *grads)
def linearity_with_excluded_handler(input_inds, explainer, op, *grads):
    """Use the op's original gradient after asserting the excluded inputs are constant."""
    # make sure the given inputs don't vary (negative is measured from the end of the list)
    for i in range(len(op.inputs)):
        if i in input_inds or i - len(op.inputs) in input_inds:
            assert not explainer._variable_inputs(op)[i], str(i) + "th input to " + op.name + " cannot vary!"
    if op.type.startswith("shap_"):
        op.type = op.type[5:]  # restore the real type so the original grad fn resolves
    return explainer.orig_grads[op.type](op, *grads)
def passthrough(explainer, op, *grads):
    """Fall through to the op's original TF gradient (linear for attribution purposes)."""
    if op.type.startswith("shap_"):
        op.type = op.type[5:]  # restore the real type so the original grad fn resolves
    return explainer.orig_grads[op.type](op, *grads)
def break_dependence(explainer, op, *grads):
    """Stop attribution flow through this op: a None gradient for every input."""
    return [None] * len(op.inputs)
# Dispatch table: TF op type -> attribution handler used while gradients are overridden.
op_handlers = {}

# ops that are always linear, so attributions pass straight through via the original gradient
op_handlers["Identity"] = passthrough
op_handlers["StridedSlice"] = passthrough
op_handlers["Squeeze"] = passthrough
op_handlers["ExpandDims"] = passthrough
op_handlers["Pack"] = passthrough
op_handlers["BiasAdd"] = passthrough
op_handlers["Unpack"] = passthrough
op_handlers["Add"] = passthrough
op_handlers["Sub"] = passthrough
op_handlers["Merge"] = passthrough
op_handlers["Sum"] = passthrough
op_handlers["Mean"] = passthrough
op_handlers["Cast"] = passthrough
op_handlers["Transpose"] = passthrough
op_handlers["Enter"] = passthrough
op_handlers["Exit"] = passthrough
op_handlers["NextIteration"] = passthrough
op_handlers["Tile"] = passthrough
op_handlers["TensorArrayScatterV3"] = passthrough
op_handlers["TensorArrayReadV3"] = passthrough
op_handlers["TensorArrayWriteV3"] = passthrough
op_handlers["AddV2"] = passthrough
op_handlers["StatelessWhile"] = passthrough  # was assigned twice in the original; once suffices
op_handlers["TensorListStack"] = passthrough
op_handlers["TensorListFromTensor"] = passthrough

# ops that should not propagate attributions at all
op_handlers["Shape"] = break_dependence
op_handlers["RandomUniform"] = break_dependence
op_handlers["ZerosLike"] = break_dependence
#op_handlers["StopGradient"] = break_dependence # this allows us to stop attributions when we want to (like softmax re-centering)

# ops that are linear and only allow a single input to vary
op_handlers["Reshape"] = linearity_1d(0)
op_handlers["Pad"] = linearity_1d(0)
op_handlers["ReverseV2"] = linearity_1d(0)
op_handlers["ConcatV2"] = linearity_with_excluded([-1])
op_handlers["Conv2D"] = linearity_1d(0)
op_handlers["Switch"] = linearity_1d(0)
op_handlers["AvgPool"] = linearity_1d(0)
op_handlers["FusedBatchNorm"] = linearity_1d(0)

# ops that are nonlinear and only allow a single input to vary
op_handlers["Relu"] = nonlinearity_1d(0)
op_handlers["Elu"] = nonlinearity_1d(0)
op_handlers["Sigmoid"] = nonlinearity_1d(0)
op_handlers["Tanh"] = nonlinearity_1d(0)
op_handlers["Softplus"] = nonlinearity_1d(0)
op_handlers["Exp"] = nonlinearity_1d(0)
op_handlers["ClipByValue"] = nonlinearity_1d(0)
op_handlers["Rsqrt"] = nonlinearity_1d(0)
op_handlers["Square"] = nonlinearity_1d(0)
op_handlers["Max"] = nonlinearity_1d(0)
# NEW
op_handlers["Sin"] = nonlinearity_1d(0)

# ops that are nonlinear and allow two inputs to vary
op_handlers["SquaredDifference"] = nonlinearity_1d_nonlinearity_2d(0, 1, lambda x, y: (x - y) * (x - y))
op_handlers["Minimum"] = nonlinearity_1d_nonlinearity_2d(0, 1, lambda x, y: tf.minimum(x, y))
op_handlers["Maximum"] = nonlinearity_1d_nonlinearity_2d(0, 1, lambda x, y: tf.maximum(x, y))

# ops that allow up to two inputs to vary and are linear when only one input varies
op_handlers["Mul"] = linearity_1d_nonlinearity_2d(0, 1, lambda x, y: x * y)
op_handlers["RealDiv"] = linearity_1d_nonlinearity_2d(0, 1, lambda x, y: x / y)
op_handlers["MatMul"] = linearity_1d_nonlinearity_2d(0, 1, lambda x, y: tf.matmul(x, y))

# ops that need their own custom attribution functions
op_handlers["GatherV2"] = gather
op_handlers["ResourceGather"] = gather
op_handlers["MaxPool"] = maxpool
op_handlers["Softmax"] = softmax

# TODO items
# TensorArrayGatherV3
# Max
# TensorArraySizeV3
# Range
| true | true |
f731f68560aa545d23d46048a4fc173d215dc10a | 4,303 | py | Python | Univ_individual_files/133_uni_tech_sydney.py | srsarangi/univscanner | 8735628690f7f50662ee5abf14ac1d27a657b613 | [
"Apache-2.0"
] | null | null | null | Univ_individual_files/133_uni_tech_sydney.py | srsarangi/univscanner | 8735628690f7f50662ee5abf14ac1d27a657b613 | [
"Apache-2.0"
] | null | null | null | Univ_individual_files/133_uni_tech_sydney.py | srsarangi/univscanner | 8735628690f7f50662ee5abf14ac1d27a657b613 | [
"Apache-2.0"
] | 2 | 2021-05-18T07:50:15.000Z | 2021-05-18T11:16:04.000Z | import requests
import urllib.request
import time
import urllib
import re
import csv
import sys
from bs4 import BeautifulSoup
def uni_tech_sydney():
url = "https://www.uts.edu.au/about/faculty-engineering-and-information-technology/computer-science/school-computer-science-staff"
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'}
r = requests.get(url, headers=headers)
# getting the soup by parsing the html parsel to text to request r
soup = BeautifulSoup(r.text, "html5lib")
# print(soup.prettify)
# file initialization to write
file_name = sys.argv[0]
# file_name = file_name[4:]
txt_file = file_name.replace(".py", ".txt")
f = open(txt_file, "w")
csv_file = file_name.replace(".py", ".csv")
f2 = open(csv_file, "w")
csvwriter = csv.writer(f2)
overall_file = "all_emails.csv"
f3 = open(overall_file, "a")
csvwriter2 = csv.writer(f3)
u_name = "University of Technology, Sydney"
country = "Australia"
grabage_emails = []
var = [f, csvwriter, csvwriter2, u_name, country, grabage_emails]
# d gives the array of all profs on the dept homepage
dd = soup.find('table', {'class':"staff-list"})
d = dd.find_all('tr')
#iterating for every prof
for i in d:
td = i.find('td', {'class':'table--cell-width-l'})
if td == None:
continue
a = td.find('a')
if a == None:
continue
link = "https://www.uts.edu.au"+a.get('href')
name = (a.get_text()).strip()
name = " ".join(name.split())
name = name.split(',')
name = name[1]+' '+name[0]
# print(name, link)
# check if link is valid on Not
try:
prof_resp = requests.get(link, headers=headers)
except:
continue
email = "Not Found"
print(name, email, link)
filterandgetEmail(var, grabage_emails, name, link, email, prof_resp)
f.close()
f2.close()
f3.close()
print("Finished")
def filterandgetEmail(var, grabage_emails, name, link, email, prof_resp):
f = var[0]
csvwriter = var[1]
csvwriter2 = var[2]
u_name = var[3]
country = var[4]
keyword_list = ['Computer Architecture','hardware and system architecture', 'hardware and architecture', 'Computerarchitectuur', 'embedded system', 'computer organization','VLSI Design', 'Computer and System',
'multiprocessor architecture']
flag = 1
prof_soup = BeautifulSoup(prof_resp.text, "html.parser")
# print(prof_soup)
research_text = prof_soup.text
for pattern in keyword_list:
if re.search(pattern, research_text, re.IGNORECASE):
flag = 0
if email != 'Not Found':
f.write(link + '\n' + name + "\t"+ email + "\n")
csvwriter.writerow([u_name, country, name, email, link])
csvwriter2.writerow([u_name, country, name, email, link])
else:
new_emails = set(re.findall(r"[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,4}", prof_resp.text))
for eemail in grabage_emails:
if eemail in new_emails:
new_emails.remove(eemail)
if len(new_emails) == 0:
email = "Email Not Found"
f.write(link + '\n' + name + "\t"+ email + "\n")
csvwriter.writerow([u_name, country, name, email, link])
csvwriter2.writerow([u_name, country, name, email, link])
else:
# f.write(link + '\n' + name)
for email in new_emails:
f.write(link + '\n' + name + '\t\t' + email + '\n')
csvwriter.writerow([u_name, country, name, email, link])
csvwriter2.writerow([u_name, country, name, email, link])
# f.write("\n")
f.write(pattern)
f.write('\n\n')
break
# Entry point: run the scraper when invoked directly.
if __name__ == '__main__':
    uni_tech_sydney()
| 34.424 | 214 | 0.550314 | import requests
import urllib.request
import time
import urllib
import re
import csv
import sys
from bs4 import BeautifulSoup
def uni_tech_sydney():
url = "https://www.uts.edu.au/about/faculty-engineering-and-information-technology/computer-science/school-computer-science-staff"
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'}
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.text, "html5lib")
file_name = sys.argv[0]
txt_file = file_name.replace(".py", ".txt")
f = open(txt_file, "w")
csv_file = file_name.replace(".py", ".csv")
f2 = open(csv_file, "w")
csvwriter = csv.writer(f2)
overall_file = "all_emails.csv"
f3 = open(overall_file, "a")
csvwriter2 = csv.writer(f3)
u_name = "University of Technology, Sydney"
country = "Australia"
grabage_emails = []
var = [f, csvwriter, csvwriter2, u_name, country, grabage_emails]
dd = soup.find('table', {'class':"staff-list"})
d = dd.find_all('tr')
for i in d:
td = i.find('td', {'class':'table--cell-width-l'})
if td == None:
continue
a = td.find('a')
if a == None:
continue
link = "https://www.uts.edu.au"+a.get('href')
name = (a.get_text()).strip()
name = " ".join(name.split())
name = name.split(',')
name = name[1]+' '+name[0]
try:
prof_resp = requests.get(link, headers=headers)
except:
continue
email = "Not Found"
print(name, email, link)
filterandgetEmail(var, grabage_emails, name, link, email, prof_resp)
f.close()
f2.close()
f3.close()
print("Finished")
def filterandgetEmail(var, grabage_emails, name, link, email, prof_resp):
    """Write the professor to the output files if the profile mentions an architecture keyword.

    var bundles [txt file handle, per-university csv writer, shared csv writer,
    university name, country, garbage-email list].  When email is the sentinel
    "Not Found", addresses are scraped off the page with a regex instead.
    """
    f = var[0]
    csvwriter = var[1]
    csvwriter2 = var[2]
    u_name = var[3]
    country = var[4]
    keyword_list = ['Computer Architecture','hardware and system architecture', 'hardware and architecture', 'Computerarchitectuur', 'embedded system', 'computer organization','VLSI Design', 'Computer and System',
    'multiprocessor architecture']
    flag = 1
    prof_soup = BeautifulSoup(prof_resp.text, "html.parser")
    research_text = prof_soup.text
    for pattern in keyword_list:
        if re.search(pattern, research_text, re.IGNORECASE):
            flag = 0
            if email != 'Not Found':
                f.write(link + '\n' + name + "\t"+ email + "\n")
                csvwriter.writerow([u_name, country, name, email, link])
                csvwriter2.writerow([u_name, country, name, email, link])
            else:
                # scrape every email-looking string off the raw page, minus known-bad ones
                new_emails = set(re.findall(r"[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,4}", prof_resp.text))
                for eemail in grabage_emails:
                    if eemail in new_emails:
                        new_emails.remove(eemail)
                if len(new_emails) == 0:
                    email = "Email Not Found"
                    f.write(link + '\n' + name + "\t"+ email + "\n")
                    csvwriter.writerow([u_name, country, name, email, link])
                    csvwriter2.writerow([u_name, country, name, email, link])
                else:
                    for email in new_emails:
                        f.write(link + '\n' + name + '\t\t' + email + '\n')
                        csvwriter.writerow([u_name, country, name, email, link])
                        csvwriter2.writerow([u_name, country, name, email, link])
            # record which keyword matched, then stop after the first match
            f.write(pattern)
            f.write('\n\n')
            break
# Entry point: run the scraper when invoked directly.
if __name__ == '__main__':
    uni_tech_sydney()
| true | true |
f731f71d1eeb1bef0d1a1cfd91093af40b01de85 | 7,291 | py | Python | train.py | HustQBW/Single-Object-Localization | 3a6bd87cd75543f55eb3eed12b6d09475f05b8fd | [
"MIT"
] | 1 | 2021-12-21T16:14:21.000Z | 2021-12-21T16:14:21.000Z | train.py | HustQBW/Single-Object-Localization | 3a6bd87cd75543f55eb3eed12b6d09475f05b8fd | [
"MIT"
] | null | null | null | train.py | HustQBW/Single-Object-Localization | 3a6bd87cd75543f55eb3eed12b6d09475f05b8fd | [
"MIT"
] | null | null | null | from dataset import tiny_dataset
from bbox_codec import bbox_encode
from resnet50_base import Localization_net2
from torch.utils.data import DataLoader,random_split
import torch as t
import tqdm
from torch.utils.tensorboard import SummaryWriter
import torch.nn as nn
import torch.optim as optim
import argparse
from loss import Loss_for_localization
from evaluate import compute_three_acc
import os
def parser():
def parser():
    """Build the command-line interface for the localization training script."""
    cli = argparse.ArgumentParser()
    # optimization hyper-parameters
    cli.add_argument('--lr', help='learning rate', type=float, default=1e-2, dest='lr')
    cli.add_argument('--batch-size', help='batchsize', type=int, default=32, dest='batch_size')
    cli.add_argument('--lr-decay', help='the decay of lr', type=float, default=0.1, dest='lr_decay')
    # data location
    cli.add_argument('--root', help='root directory of dataset', type=str,
                     default=r'E:\BS_learning\4_1\CV_basis\experiment\2\tiny_vid', dest='root')
    cli.add_argument('--weight-decay', help='weight decay of optimizer', type=float,
                     default=1e-5, dest='weight_decay')
    cli.add_argument('--epochs', help='set the num of epochs', type=int, default=100)
    # logging / checkpoint destinations
    cli.add_argument('--log-dir', help='tensorboard log dir', type=str, required=True)
    cli.add_argument('--save-file-name', help='the pth file name', type=str, required=True)
    # loss term weights
    cli.add_argument('--class-weight', help='the weight of classification of the loss', default=1, type=int)
    cli.add_argument('--regre-weight', help='the weight of regression of the loss', default=2, type=int)
    return cli
def weight_init(net):
    """Kaiming-initialize the direct Conv2d/Linear children of ``net`` in place.

    The child named ``feature_extraction`` (the pretrained backbone) is left
    untouched.  Note: only immediate children are visited, matching the
    original behavior — nested submodules are not re-initialized.

    Returns the same network for call-chaining convenience.
    """
    for name, child in net.named_children():
        if name == 'feature_extraction':
            continue  # keep pretrained backbone weights
        if isinstance(child, (nn.Conv2d, nn.Linear)):
            nn.init.kaiming_normal_(child.weight)
            # ``is not None`` instead of ``!= None``: identity test is the correct idiom,
            # and bias can legitimately be disabled on these layers
            if child.bias is not None:
                nn.init.zeros_(child.bias)
    return net
def train():
    """Train the single-object localization network on the tiny_vid dataset.

    Splits the data 150/30 images per class (seeded for reproducibility),
    trains with SGD (pretrained backbone at 0.1x the base lr), steps the lr
    down at 1/4, 1/2 and 3/4 of the epochs, logs curves to TensorBoard, and
    saves a checkpoint every epoch.  Requires CUDA.
    """
    args = parser().parse_args()
    # fix seeds so the random split and initialization are reproducible
    t.manual_seed(777)
    t.cuda.manual_seed(777)
    dataset = tiny_dataset(root=args.root)
    train_set,val_set = random_split(dataset=dataset,lengths=[150*5,30*5],
                                     generator=t.Generator().manual_seed(777))
    train_loader = DataLoader(dataset=train_set,batch_size=args.batch_size,shuffle=True,num_workers=2)
    val_loader = DataLoader(dataset=val_set,batch_size=1,shuffle=False,num_workers=0)
    print('establish the net ...')
    net = Localization_net2(class_num=5).cuda()
    print('initialize the net')
    net = weight_init(net=net)
    # pretrained backbone params train at 0.1x the base learning rate
    high_lr_list = []
    low_lr_list = []
    for name,param in net.named_parameters():
        if 'feature_extraction' in name:
            low_lr_list.append(param)
        else:
            high_lr_list.append(param)
    optimizer = optim.SGD([{'params':low_lr_list,'lr':0.1*args.lr},{'params':high_lr_list}],
                          lr=args.lr,weight_decay=args.weight_decay,momentum=0.9)
    # scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer=optimizer,
    #                         mode='min', patience=2,factor=args.lr_decay)
    writer = SummaryWriter(log_dir=args.log_dir,comment='curves_log')
    criterion = Loss_for_localization().cuda()
    for i in tqdm.tqdm(range(args.epochs)):
        # per-epoch accumulators: loss, classification / regression / joint accuracy
        t_loss = 0.
        tc_acc = 0.
        tr_acc = 0.
        t_acc = 0.
        v_loss = 0.
        vc_acc = 0.
        vr_acc = 0.
        v_acc = 0.
        print('\n%dth epoch'%(i+1))
        # step both param-group lrs down at 1/4, 1/2 and 3/4 of total epochs
        if i+1 == args.epochs//4:
            optimizer.param_groups[0]['lr'] *= args.lr_decay
            optimizer.param_groups[1]['lr'] *= args.lr_decay
        if i+1 == args.epochs//2:
            optimizer.param_groups[0]['lr'] *= args.lr_decay
            optimizer.param_groups[1]['lr'] *= args.lr_decay
        if i+1 == 3*args.epochs//4:
            optimizer.param_groups[0]['lr'] *= args.lr_decay
            optimizer.param_groups[1]['lr'] *= args.lr_decay
        for item in train_loader:
            tc_acc_num = 0
            tr_acc_num = 0
            t_acc_num = 0
            net.train()
            img = item['img'].cuda()
            label = item['label'].cuda()
            bbox = item['bbox'].cuda()
            objects, scores, locs = net(img)
            # encode ground-truth boxes onto the 4x4 feature map grid
            gt = bbox_encode(bbox=bbox,feature_map_size=(4,4),img_size=(128,128)).cuda()
            loss = criterion(objects,scores,locs,label,gt,args.regre_weight,0.5,args.class_weight)
            t_loss += loss.item()
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # per-sample accuracy, accumulated then averaged over the batch
            for j in range(img.size()[0]):
                a,b,c = compute_three_acc(objects=objects[j].view(1,*objects[j].size()),
                    score=scores[j].view(1,*scores[j].size()), loc=locs[j].view(1,*locs[j].size()),
                    label=label[j].view(1,*label[j].size()), bbox=bbox[j].view(1,*bbox[j].size()))
                tc_acc_num += a
                tr_acc_num += b
                t_acc_num += c
            tc_acc += tc_acc_num/float(img.size()[0])
            tr_acc += tr_acc_num / float(img.size()[0])
            t_acc += t_acc_num / float(img.size()[0])
        # validation pass (batch size 1, no gradients)
        net.eval()
        with t.no_grad():
            for item2 in val_loader:
                img = item2['img'].cuda()
                label = item2['label'].cuda()
                bbox = item2['bbox'].cuda()
                objects, scores, locs = net(img)
                class_acc,regression_acc,acc = compute_three_acc(objects=objects,score=scores,
                          loc=locs,label=label,bbox=bbox)
                gt = bbox_encode(bbox=bbox, feature_map_size=(4, 4), img_size=(128, 128)).cuda()
                vc_acc += class_acc
                vr_acc += regression_acc
                v_acc += acc
                loss = criterion(objects, scores, locs,label, gt,args.regre_weight,0.5,args.class_weight)
                v_loss +=loss.item()
            v_loss /= len(val_loader)
            vc_acc /= len(val_loader)
            vr_acc /= len(val_loader)
            v_acc /= len(val_loader)
        # scheduler.step(v_loss)
        print('train_loss: %.5f val_loss : %.5f' % (t_loss/len(train_loader),v_loss))
        # TensorBoard curves: learning rates, losses, and the three accuracies
        writer.add_scalar('low_lr_curve', optimizer.param_groups[0]["lr"], i + 1)
        writer.add_scalar('high_lr_curve', optimizer.param_groups[1]["lr"], i + 1)
        writer.add_scalars('loss', {'Train':t_loss / len(train_loader)}, i+1)
        writer.add_scalars('loss', {'Val':v_loss}, i+1)
        writer.add_scalars('train_acc', {'class_acc': tc_acc/ len(train_loader)}, i + 1)
        writer.add_scalars('train_acc', {'regression_acc': tr_acc/ len(train_loader)}, i + 1)
        writer.add_scalars('train_acc', {'two_task_acc': t_acc/ len(train_loader)}, i + 1)
        writer.add_scalars('val_acc',{'class_acc':vc_acc},i+1)
        writer.add_scalars('val_acc', {'regression_acc': vr_acc}, i + 1)
        writer.add_scalars('val_acc', {'two_task_acc': v_acc}, i + 1)
        # stop once the lr has decayed to (effectively) zero; NOTE(review): the
        # break happens before the save, so the final epoch is not checkpointed
        if optimizer.param_groups[0]['lr'] <= 1e-8:
            break
        t.save(net,os.path.join(args.log_dir,args.save_file_name + 'epoch%d.pth'%i))
if __name__ == '__main__':
train() | 38.57672 | 108 | 0.603758 | from dataset import tiny_dataset
from bbox_codec import bbox_encode
from resnet50_base import Localization_net2
from torch.utils.data import DataLoader,random_split
import torch as t
import tqdm
from torch.utils.tensorboard import SummaryWriter
import torch.nn as nn
import torch.optim as optim
import argparse
from loss import Loss_for_localization
from evaluate import compute_three_acc
import os
def parser():
    """Build the command-line argument parser for the localization training script."""
    parser = argparse.ArgumentParser()
    # optimization hyper-parameters
    parser.add_argument('--lr',help='learning rate',type=float,default=1e-2,dest='lr')
    parser.add_argument('--batch-size',help='batchsize',type=int,default=32,dest='batch_size')
    parser.add_argument('--lr-decay',help='the decay of lr',type=float,default=0.1,dest='lr_decay')
    # data location
    parser.add_argument('--root',help='root directory of dataset',type=str,
                        default=r'E:\BS_learning\4_1\CV_basis\experiment\2\tiny_vid',dest='root')
    parser.add_argument('--weight-decay',help='weight decay of optimizer',type=float,
                        default=1e-5,dest='weight_decay')
    parser.add_argument('--epochs',help='set the num of epochs',type=int,default=100)
    # logging / checkpoint destinations
    parser.add_argument('--log-dir',help='tensorboard log dir',type=str,required=True)
    parser.add_argument('--save-file-name', help='the pth file name', type=str,required=True)
    # loss term weights
    parser.add_argument('--class-weight',help='the weight of classification of the loss',default=1,type=int)
    parser.add_argument('--regre-weight', help='the weight of regression of the loss', default=2,type=int)
    return parser
def weight_init(net):
    """Initialize the weights of the direct children of ``net`` in place.

    Conv2d and Linear children receive Kaiming-normal weights and zero biases;
    the child named 'feature_extraction' (presumably the pretrained backbone --
    verify against the network definition) is left untouched.

    NOTE(review): ``named_children`` only visits *direct* children, so layers
    nested inside submodules are not re-initialized.

    Parameters
    ----------
    net : torch.nn.Module
        Network whose children are (re-)initialized.

    Returns
    -------
    torch.nn.Module
        The same ``net`` instance, for call chaining.
    """
    for name, child in net.named_children():
        if name == 'feature_extraction':
            # keep the backbone weights as-is
            continue
        # Conv2d and Linear were handled by two identical branches; merged here.
        if isinstance(child, (nn.Conv2d, nn.Linear)):
            nn.init.kaiming_normal_(child.weight)
            # Bug fix: use an identity check. `child.bias != None` triggers an
            # elementwise tensor comparison on the Parameter instead of testing
            # for a missing bias.
            if child.bias is not None:
                nn.init.zeros_(child.bias)
    return net
def train():
    """Train Localization_net2 on the tiny_vid dataset.

    Parses the command line, makes a seeded 150/30-per-class train/val split,
    trains with SGD (the 'feature_extraction' backbone at 0.1x the base lr),
    logs losses and accuracies to tensorboard and saves the network after
    every epoch.
    """
    args = parser().parse_args()
    # fixed seeds so the random_split below is reproducible
    t.manual_seed(777)
    t.cuda.manual_seed(777)
    dataset = tiny_dataset(root=args.root)
    train_set,val_set = random_split(dataset=dataset,lengths=[150*5,30*5],
                                     generator=t.Generator().manual_seed(777))
    train_loader = DataLoader(dataset=train_set,batch_size=args.batch_size,shuffle=True,num_workers=2)
    val_loader = DataLoader(dataset=val_set,batch_size=1,shuffle=False,num_workers=0)
    print('establish the net ...')
    net = Localization_net2(class_num=5).cuda()
    print('initialize the net')
    net = weight_init(net=net)
    # split parameters into two lr groups: backbone at 0.1x lr, head at full lr
    high_lr_list = []
    low_lr_list = []
    for name,param in net.named_parameters():
        if 'feature_extraction' in name:
            low_lr_list.append(param)
        else:
            high_lr_list.append(param)
    optimizer = optim.SGD([{'params':low_lr_list,'lr':0.1*args.lr},{'params':high_lr_list}],
                          lr=args.lr,weight_decay=args.weight_decay,momentum=0.9)
    writer = SummaryWriter(log_dir=args.log_dir,comment='curves_log')
    criterion = Loss_for_localization().cuda()
    for i in tqdm.tqdm(range(args.epochs)):
        # per-epoch accumulators: t* = train, v* = val;
        # *c_acc = classification, *r_acc = bbox regression, *_acc = both tasks
        t_loss = 0.
        tc_acc = 0.
        tr_acc = 0.
        t_acc = 0.
        v_loss = 0.
        vc_acc = 0.
        vr_acc = 0.
        v_acc = 0.
        print('\n%dth epoch'%(i+1))
        # step-wise lr schedule: decay both groups at 1/4, 1/2 and 3/4 of the budget
        if i+1 == args.epochs//4:
            optimizer.param_groups[0]['lr'] *= args.lr_decay
            optimizer.param_groups[1]['lr'] *= args.lr_decay
        if i+1 == args.epochs//2:
            optimizer.param_groups[0]['lr'] *= args.lr_decay
            optimizer.param_groups[1]['lr'] *= args.lr_decay
        if i+1 == 3*args.epochs//4:
            optimizer.param_groups[0]['lr'] *= args.lr_decay
            optimizer.param_groups[1]['lr'] *= args.lr_decay
        for item in train_loader:
            tc_acc_num = 0
            tr_acc_num = 0
            t_acc_num = 0
            net.train()
            img = item['img'].cuda()
            label = item['label'].cuda()
            bbox = item['bbox'].cuda()
            objects, scores, locs = net(img)
            # encode the ground-truth box onto the 4x4 feature-map grid
            gt = bbox_encode(bbox=bbox,feature_map_size=(4,4),img_size=(128,128)).cuda()
            loss = criterion(objects,scores,locs,label,gt,args.regre_weight,0.5,args.class_weight)
            t_loss += loss.item()
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # per-sample accuracies; compute_three_acc expects a batch dim of 1
            for j in range(img.size()[0]):
                a,b,c = compute_three_acc(objects=objects[j].view(1,*objects[j].size()),
                                          score=scores[j].view(1,*scores[j].size()), loc=locs[j].view(1,*locs[j].size()),
                                          label=label[j].view(1,*label[j].size()), bbox=bbox[j].view(1,*bbox[j].size()))
                tc_acc_num += a
                tr_acc_num += b
                t_acc_num += c
            tc_acc += tc_acc_num/float(img.size()[0])
            tr_acc += tr_acc_num / float(img.size()[0])
            t_acc += t_acc_num / float(img.size()[0])
        net.eval()
        with t.no_grad():
            for item2 in val_loader:
                img = item2['img'].cuda()
                label = item2['label'].cuda()
                bbox = item2['bbox'].cuda()
                objects, scores, locs = net(img)
                class_acc,regression_acc,acc = compute_three_acc(objects=objects,score=scores,
                                                                 loc=locs,label=label,bbox=bbox)
                gt = bbox_encode(bbox=bbox, feature_map_size=(4, 4), img_size=(128, 128)).cuda()
                vc_acc += class_acc
                vr_acc += regression_acc
                v_acc += acc
                loss = criterion(objects, scores, locs,label, gt,args.regre_weight,0.5,args.class_weight)
                v_loss +=loss.item()
        # val_loader has batch_size=1, so len(val_loader) equals the sample count
        v_loss /= len(val_loader)
        vc_acc /= len(val_loader)
        vr_acc /= len(val_loader)
        v_acc /= len(val_loader)
        print('train_loss: %.5f val_loss : %.5f' % (t_loss/len(train_loader),v_loss))
        writer.add_scalar('low_lr_curve', optimizer.param_groups[0]["lr"], i + 1)
        writer.add_scalar('high_lr_curve', optimizer.param_groups[1]["lr"], i + 1)
        writer.add_scalars('loss', {'Train':t_loss / len(train_loader)}, i+1)
        writer.add_scalars('loss', {'Val':v_loss}, i+1)
        writer.add_scalars('train_acc', {'class_acc': tc_acc/ len(train_loader)}, i + 1)
        writer.add_scalars('train_acc', {'regression_acc': tr_acc/ len(train_loader)}, i + 1)
        writer.add_scalars('train_acc', {'two_task_acc': t_acc/ len(train_loader)}, i + 1)
        writer.add_scalars('val_acc',{'class_acc':vc_acc},i+1)
        writer.add_scalars('val_acc', {'regression_acc': vr_acc}, i + 1)
        writer.add_scalars('val_acc', {'two_task_acc': v_acc}, i + 1)
        # stop once the backbone lr has decayed to (almost) nothing
        if optimizer.param_groups[0]['lr'] <= 1e-8:
            break
        t.save(net,os.path.join(args.log_dir,args.save_file_name + 'epoch%d.pth'%i))
if __name__ == '__main__':
train() | true | true |
f731f79225b7db086a4396618b46b21004ef1931 | 7,642 | py | Python | armi/physics/neutronics/isotopicDepletion/isotopicDepletionInterface.py | albeanth/armi | 3755ffd2fcd1f7b6c557ef3e3f36126706a84c70 | [
"Apache-2.0"
] | null | null | null | armi/physics/neutronics/isotopicDepletion/isotopicDepletionInterface.py | albeanth/armi | 3755ffd2fcd1f7b6c557ef3e3f36126706a84c70 | [
"Apache-2.0"
] | null | null | null | armi/physics/neutronics/isotopicDepletion/isotopicDepletionInterface.py | albeanth/armi | 3755ffd2fcd1f7b6c557ef3e3f36126706a84c70 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
An abstract class for interfaces between ARMI and programs that simulate transmutation and decay.
"""
import collections
from armi import interfaces
from armi.nucDirectory import nuclideBases
from armi.nuclearDataIO import xsLibraries
from armi.physics.neutronics.isotopicDepletion.crossSectionTable import (
CrossSectionTable,
)
from armi.reactor import composites
from armi.reactor.flags import Flags
def isDepletable(obj: composites.ArmiObject):
    """
    Return True if ``obj`` or any of its children carries the DEPLETABLE flag.

    Depletion plugins typically interpret this as "apply depletion here", so
    analysts can switch depletion on or off per object (e.g. deplete control
    rods when studying replacement intervals, skip them in conceptual design).

    .. warning:: The ``DEPLETABLE`` flag is added automatically to compositions
        that contain active nuclides, but only when no flags were defined
        explicitly. If you define any flags yourself you must also include
        ``DEPLETABLE``, or the objects will silently not deplete.

    Notes
    -----
    The auto-flagging happens while blueprints are constructed rather than in
    a plugin hook, because the reactor does not yet exist when the hook runs.

    See Also
    --------
    armi.reactor.blueprints.componentBlueprint.insertDepletableNuclideKeys
    """
    flaggedDirectly = obj.hasFlags(Flags.DEPLETABLE)
    return flaggedDirectly or obj.containsAtLeastOneChildWithFlags(Flags.DEPLETABLE)
class AbstractIsotopicDepleter:
    r"""
    Base class for interacting with an external transmutation/decay solver.

    Subclasses deplete under a flux that is defined outside this interface;
    the result depends only on the flux, the material vectors, the nuclear
    data and the continuous source/loss objects.  Depleters derived from this
    class use all the fission products ARMI can handle, i.e. they do not form
    lumped fission products.

    The ARMI objects to deplete are stored in ``_depleteByName``, keyed (and
    ordered) by object name.
    """

    name = None
    function = "depletion"

    def __init__(self, r=None, cs=None, o=None):
        self.r = r
        self.cs = cs
        self.o = o

        # Ordered mapping of object name -> ARMI object. Insertion order is
        # significant: some depletion input formats (e.g. cinder) depend on a
        # consistent object ordering.
        self._depleteByName = collections.OrderedDict()

        self.efpdToBurn = None
        self.allNuclidesInProblem = r.blueprints.allNuclidesInProblem if r else []

    def addToDeplete(self, armiObj):
        """Register a single object for depletion, keyed by its name."""
        self._depleteByName[armiObj.getName()] = armiObj

    def setToDeplete(self, armiObjects):
        """Replace the registered objects with ``armiObjects`` (order kept)."""
        self._depleteByName = collections.OrderedDict(
            (obj.getName(), obj) for obj in armiObjects
        )

    def getToDeplete(self):
        """Return the registered objects in insertion order."""
        return list(self._depleteByName.values())

    def run(self):
        r"""
        Submit the depletion case to the external solver and wait for it to
        finish; must be overridden by concrete depleters.
        """
        raise NotImplementedError
def makeXsecTable(
    compositeName,
    xsType,
    mgFlux,
    isotxs,
    headerFormat="$ xsecs for {}",
    tableFormat="\n{mcnpId} {nG:.5e} {nF:.5e} {n2n:.5e} {n3n:.5e} {nA:.5e} {nP:.5e}",
):
    """
    Make a cross section table for depletion physics input decks.

    Parameters
    ----------
    compositeName : str
        Name of the composite (e.g. a block or batch) the table is for; used
        to label the table.
    xsType : str
        Cross section type suffix; only nuclides whose ISOTXS label carries
        this suffix are included.
    mgFlux : sequence of float
        Multi-group flux of the composite, used to collapse the multi-group
        cross sections. If empty/zero, an empty list is returned.
    isotxs : isotxs object
        Microscopic multi-group cross section library.
    headerFormat: string (optional)
        this is the format in which the elements of the header with be returned
        -- i.e. if you use a .format() call with the case name you'll return a
        formatted list of string elements
    tableFormat: string (optional)
        this is the format in which the elements of the table with be returned
        -- i.e. if you use a .format() call with mcnpId, nG, nF, n2n, n3n, nA,
        and nP you'll get the format you want.

    Returns
    -------
    output: list
        a list of string elements that together make a xsec card

    See Also
    --------
    crossSectionTable.makeCrossSectionTable
        Makes a table for arbitrary ArmiObjects
    """
    xsTable = CrossSectionTable()
    # nothing to tabulate without a type suffix or a non-zero flux
    if not xsType or not sum(mgFlux) > 0:
        return []
    xsTable.setName(compositeName)
    totalFlux = sum(mgFlux)
    for nucLabel, nuc in isotxs.items():
        # only include entries of the requested cross section type
        if xsType != xsLibraries.getSuffixFromNuclideLabel(nucLabel):
            continue
        nucName = nuc.name
        nb = nuclideBases.byName[nucName]
        # lumped and dummy nuclides carry no real depletion data
        if isinstance(
            nb, (nuclideBases.LumpNuclideBase, nuclideBases.DummyNuclideBase)
        ):
            continue
        microMultiGroupXS = isotxs[nucLabel].micros
        # natural-abundance pseudo-nuclides are excluded from the table as well
        if not isinstance(nb, nuclideBases.NaturalNuclideBase):
            xsTable.addMultiGroupXS(nucName, microMultiGroupXS, mgFlux, totalFlux)
    return xsTable.getXsecTable(headerFormat=headerFormat, tableFormat=tableFormat)
class AbstractIsotopicDepletionReader(interfaces.OutputReader):
    r"""
    Reads number-density output produced by an isotopic depletion code.
    """

    def read(self):
        r"""
        Read an isotopic depletion output file and apply the results to the
        ARMI objects registered for depletion; must be overridden by concrete
        readers.
        """
        raise NotImplementedError
class Csrc:
    """
    Writes a continuous source term card in a depletion interface.

    The chemical vector maps chemicals to a removal rate constant, which acts
    like a decay constant.  The isotopic vector is used to build a source
    material in continuous source definitions.  This class also serves as the
    base for continuous loss cards.
    """

    def __init__(self):
        # chemical -> removal rate constant
        self._chemicalVector = {}
        # isotopic composition of the source material
        self._isotopicVector = {}
        self.defaultVector = {"0": 0}

    def getChemicalVector(self):
        """Return the chemical removal-rate vector."""
        return self._chemicalVector

    def setChemicalVector(self, chemicalVector):
        """Set the chemical removal-rate vector."""
        self._chemicalVector = chemicalVector

    def write(self):
        """Return the list of lines of the csrc card; must be overridden."""
        raise NotImplementedError
| 33.964444 | 110 | 0.691965 |
import collections
from armi import interfaces
from armi.nucDirectory import nuclideBases
from armi.nuclearDataIO import xsLibraries
from armi.physics.neutronics.isotopicDepletion.crossSectionTable import (
CrossSectionTable,
)
from armi.reactor import composites
from armi.reactor.flags import Flags
def isDepletable(obj: composites.ArmiObject):
    """Return True if ``obj`` or any of its children is flagged DEPLETABLE."""
    return obj.hasFlags(Flags.DEPLETABLE) or obj.containsAtLeastOneChildWithFlags(
        Flags.DEPLETABLE
    )
class AbstractIsotopicDepleter:
    """
    Base class for interfaces to external depletion/transmutation solvers.

    The flux driving the depletion is defined outside this interface; the
    ARMI objects to deplete are kept (ordered) in ``_depleteByName``.
    """

    name = None
    function = "depletion"

    def __init__(self, r=None, cs=None, o=None):
        self.r = r
        self.cs = cs
        self.o = o
        # ordered mapping of object name -> ARMI object; insertion order matters
        # for depletion input formats that are sensitive to object ordering
        self._depleteByName = collections.OrderedDict()
        self.efpdToBurn = None
        self.allNuclidesInProblem = r.blueprints.allNuclidesInProblem if r else []

    def addToDeplete(self, armiObj):
        """Register one object for depletion, keyed by its name."""
        self._depleteByName[armiObj.getName()] = armiObj

    def setToDeplete(self, armiObjects):
        """Replace the registered objects with ``armiObjects`` (order kept)."""
        listOfTuples = [(obj.getName(), obj) for obj in armiObjects]
        self._depleteByName = collections.OrderedDict(listOfTuples)

    def getToDeplete(self):
        """Return the registered objects in insertion order."""
        return list(self._depleteByName.values())

    def run(self):
        """Submit the depletion case to the external solver; must be overridden."""
        raise NotImplementedError
def makeXsecTable(
    compositeName,
    xsType,
    mgFlux,
    isotxs,
    headerFormat="$ xsecs for {}",
    tableFormat="\n{mcnpId} {nG:.5e} {nF:.5e} {n2n:.5e} {n3n:.5e} {nA:.5e} {nP:.5e}",
):
    """
    Make a cross section table for depletion physics input decks.

    Only nuclides from ``isotxs`` whose label suffix matches ``xsType`` are
    included; lumped, dummy and natural-abundance nuclide bases are skipped.
    Returns an empty list when ``xsType`` is empty or ``mgFlux`` sums to zero;
    otherwise returns the list of strings forming the xsec card, formatted
    with ``headerFormat`` (composite name) and ``tableFormat`` (mcnpId, nG,
    nF, n2n, n3n, nA, nP).
    """
    xsTable = CrossSectionTable()
    # nothing to tabulate without a type suffix or a non-zero flux
    if not xsType or not sum(mgFlux) > 0:
        return []
    xsTable.setName(compositeName)
    totalFlux = sum(mgFlux)
    for nucLabel, nuc in isotxs.items():
        # only include entries of the requested cross section type
        if xsType != xsLibraries.getSuffixFromNuclideLabel(nucLabel):
            continue
        nucName = nuc.name
        nb = nuclideBases.byName[nucName]
        # lumped and dummy nuclides carry no real depletion data
        if isinstance(
            nb, (nuclideBases.LumpNuclideBase, nuclideBases.DummyNuclideBase)
        ):
            continue
        microMultiGroupXS = isotxs[nucLabel].micros
        # natural-abundance pseudo-nuclides are excluded from the table as well
        if not isinstance(nb, nuclideBases.NaturalNuclideBase):
            xsTable.addMultiGroupXS(nucName, microMultiGroupXS, mgFlux, totalFlux)
    return xsTable.getXsecTable(headerFormat=headerFormat, tableFormat=tableFormat)
class AbstractIsotopicDepletionReader(interfaces.OutputReader):
    """Reads number-density output produced by an isotopic depletion code."""

    def read(self):
        """Read the depletion output and apply the results; must be overridden."""
        raise NotImplementedError
class Csrc:
    """
    Writes a continuous source term card in a depletion interface.

    The chemical vector maps chemicals to a removal rate constant (works like
    a decay constant); the isotopic vector builds a source material in
    continuous source definitions.  Also the base class for continuous loss
    cards.
    """

    def __init__(self):
        # chemical -> removal rate constant
        self._chemicalVector = {}
        # isotopic composition of the source material
        self._isotopicVector = {}
        self.defaultVector = {"0": 0}

    def setChemicalVector(self, chemicalVector):
        """Set the chemical removal-rate vector."""
        self._chemicalVector = chemicalVector

    def getChemicalVector(self):
        """Return the chemical removal-rate vector."""
        return self._chemicalVector

    def write(self):
        """Return the list of lines of the csrc card; must be overridden."""
        raise NotImplementedError
| true | true |
f731f79d2b20fb9001ff30e91ee5ab29fa4434c9 | 3,269 | py | Python | building_boundary/footprint.py | Geodan/building-boundary | d0eb88d99743af917568131e8609f481b10e4520 | [
"MIT"
] | 13 | 2018-12-14T05:34:48.000Z | 2021-12-23T13:32:51.000Z | building_boundary/footprint.py | Geodan/building-boundary | d0eb88d99743af917568131e8609f481b10e4520 | [
"MIT"
] | 20 | 2018-10-18T13:42:09.000Z | 2022-03-22T08:46:13.000Z | building_boundary/footprint.py | Geodan/building-boundary | d0eb88d99743af917568131e8609f481b10e4520 | [
"MIT"
] | 10 | 2018-09-28T20:01:30.000Z | 2020-11-30T17:40:19.000Z | # -*- coding: utf-8 -*-
"""
@author: Chris Lucas
"""
import math
import numpy as np
from shapely.geometry import (
Polygon, MultiPolygon, LineString, MultiLineString, LinearRing
)
from shapely import wkt
from building_boundary import utils
def line_orientations(lines):
    """
    Compute the distinct orientations of the given lines.

    Parameters
    ----------
    lines : list of (2x2) array
        The lines defined by the coordinates two points.

    Returns
    -------
    orientations : list of float
        The orientations of the lines in radians from
        0 to pi (east to west counterclockwise)
        0 to -pi (east to west clockwise)
    """
    unique_orientations = []
    for line in lines:
        dx, dy = line[0] - line[1]
        angle = math.atan2(dy, dx)
        # only keep orientations that are not (numerically) already present
        already_seen = any(np.isclose(angle, seen) for seen in unique_orientations)
        if not already_seen:
            unique_orientations.append(angle)
    return unique_orientations
def geometry_orientations(geom):
    """
    Computes the orientations of the lines of a geometry (Polygon,
    MultiPolygon, LineString, MultiLineString, or LinearRing).

    Parameters
    ----------
    geom : Polygon, MultiPolygon, LineString, MultiLineString, or LinearRing
        The geometry

    Returns
    -------
    orientations : list of float
        The orientations of the lines of the geometry in radians from
        0 to pi (east to west counterclockwise)
        0 to -pi (east to west clockwise)

    Raises
    ------
    TypeError
        If ``geom`` is not one of the supported geometry types.
    """
    orientations = []
    # isinstance instead of `type(geom) ==` so subclasses are handled; the
    # LinearRing branch must come before LineString because LinearRing
    # subclasses LineString in shapely.
    if isinstance(geom, Polygon):
        lines = utils.create_pairs(geom.exterior.coords[:-1])
        orientations = line_orientations(lines)
    elif isinstance(geom, MultiPolygon):
        # .geoms exists on shapely 1.x and is required on shapely >= 2.0,
        # where multi-part geometries are no longer directly iterable
        for polygon in geom.geoms:
            lines = utils.create_pairs(polygon.exterior.coords[:-1])
            orientations.extend(line_orientations(lines))
    elif isinstance(geom, LinearRing):
        lines = utils.create_pairs(geom.coords[:-1])
        orientations = line_orientations(lines)
    elif isinstance(geom, LineString):
        if geom.coords[0] == geom.coords[-1]:
            # closed linestring: drop the duplicated closing vertex
            lines = utils.create_pairs(geom.coords[:-1])
        else:
            # open linestring: drop the wrap-around pair created by create_pairs
            lines = list(utils.create_pairs(geom.coords))[:-1]
        orientations = line_orientations(lines)
    elif isinstance(geom, MultiLineString):
        for linestring in geom.geoms:
            if linestring.coords[0] == linestring.coords[-1]:
                lines = utils.create_pairs(linestring.coords[:-1])
            else:
                lines = list(utils.create_pairs(linestring.coords))[:-1]
            orientations.extend(line_orientations(lines))
    else:
        raise TypeError('Invalid geometry type. Expects Polygon, '
                        'MultiPolygon, LineString, MultiLineString, '
                        'or LinearRing.')
    return orientations
def compute_orientations(footprint_wkt):
    """
    Computes the orientations of the footprint.

    Parameters
    ----------
    footprint_wkt : string
        The footprint geometry defined by a WKT string.

    Returns
    -------
    orientations : list of float
        The orientations of the lines of the geometry in radians from
        0 to pi (east to west counterclockwise)
        0 to -pi (east to west clockwise)
    """
    footprint = wkt.loads(footprint_wkt)
    return geometry_orientations(footprint)
| 29.718182 | 76 | 0.624962 |
import math
import numpy as np
from shapely.geometry import (
Polygon, MultiPolygon, LineString, MultiLineString, LinearRing
)
from shapely import wkt
from building_boundary import utils
def line_orientations(lines):
    """Return the distinct orientations (radians, via atan2) of 2x2-point lines.

    Near-duplicate angles (per ``np.isclose``) are collapsed to the first
    occurrence.
    """
    orientations = []
    for l in lines:
        dx, dy = l[0] - l[1]
        orientation = math.atan2(dy, dx)
        # only keep orientations that are not (numerically) already present
        if not any([np.isclose(orientation, o) for o in orientations]):
            orientations.append(orientation)
    return orientations
def geometry_orientations(geom):
    """Return the line orientations (radians) of a shapely geometry.

    Supports Polygon, MultiPolygon, LineString, MultiLineString and
    LinearRing; raises TypeError otherwise.

    NOTE(review): exact `type(geom) ==` checks are used instead of isinstance,
    so subclasses would not match; and `for p in geom` iteration over
    multi-part geometries is removed in shapely >= 2.0 (use `.geoms`) --
    verify against the shapely version this project pins.
    """
    orientations = []
    if type(geom) == Polygon:
        lines = utils.create_pairs(geom.exterior.coords[:-1])
        orientations = line_orientations(lines)
    elif type(geom) == MultiPolygon:
        for p in geom:
            lines = utils.create_pairs(p.exterior.coords[:-1])
            orientations.extend(line_orientations(lines))
    elif type(geom) == LineString:
        if geom.coords[0] == geom.coords[-1]:
            # closed linestring: drop the duplicated closing vertex
            lines = utils.create_pairs(geom.coords[:-1])
        else:
            # open linestring: drop the wrap-around pair created by create_pairs
            lines = list(utils.create_pairs(geom.coords))[:-1]
        orientations = line_orientations(lines)
    elif type(geom) == MultiLineString:
        for l in geom:
            if l.coords[0] == l.coords[-1]:
                lines = utils.create_pairs(l.coords[:-1])
            else:
                lines = list(utils.create_pairs(l.coords))[:-1]
            orientations.extend(line_orientations(lines))
    elif type(geom) == LinearRing:
        lines = utils.create_pairs(geom.coords[:-1])
        orientations = line_orientations(lines)
    else:
        raise TypeError('Invalid geometry type. Expects Polygon, '
                        'MultiPolygon, LineString, MultiLineString, '
                        'or LinearRing.')
    return orientations
def compute_orientations(footprint_wkt):
    """Parse a WKT footprint string and return its line orientations (radians)."""
    footprint_geom = wkt.loads(footprint_wkt)
    orientations = geometry_orientations(footprint_geom)
    return orientations
| true | true |
f731f9a8d0502796635be32c92304f9df4b62384 | 25,546 | py | Python | nnunet/training/network_training/network_trainer.py | mangoyuan/Unifed-Seg3d | 74c82464dbe901cf18e38afb0e1b74cc159a8850 | [
"Apache-2.0"
] | null | null | null | nnunet/training/network_training/network_trainer.py | mangoyuan/Unifed-Seg3d | 74c82464dbe901cf18e38afb0e1b74cc159a8850 | [
"Apache-2.0"
] | null | null | null | nnunet/training/network_training/network_trainer.py | mangoyuan/Unifed-Seg3d | 74c82464dbe901cf18e38afb0e1b74cc159a8850 | [
"Apache-2.0"
] | null | null | null | from _warnings import warn
import matplotlib
from batchgenerators.utilities.file_and_folder_operations import *
from sklearn.model_selection import KFold
matplotlib.use("agg")
from time import time, sleep
import torch
import numpy as np
from torch.optim import lr_scheduler
import matplotlib.pyplot as plt
import sys
from collections import OrderedDict
from datetime import datetime
import torch.backends.cudnn as cudnn
from abc import abstractmethod
from datetime import datetime
try:
from apex import amp
except ImportError:
amp = None
class NetworkTrainer(object):
    def __init__(self, deterministic=True, fp16=False):
        """
        A generic class that can train almost any neural network (RNNs excluded). It provides basic functionality such
        as the training loop, tracking of training and validation losses (and the target metric if you implement it)
        Training can be terminated early if the validation loss (or the target metric if implemented) do not improve
        anymore. This is based on a moving average (MA) of the loss/metric instead of the raw values to get more smooth
        results.

        What you need to override:
        - __init__
        - initialize
        - run_online_evaluation (optional)
        - finish_online_evaluation (optional)
        - validate
        - predict_test_case

        :param deterministic: make cudnn deterministic and disable benchmarking (slower, reproducible)
        :param fp16: request mixed-precision training via nvidia apex (see _maybe_init_amp)
        """
        # fixed seeds so runs are reproducible
        np.random.seed(12345)
        torch.manual_seed(12345)
        torch.cuda.manual_seed_all(12345)
        self.fp16 = fp16

        if deterministic:
            cudnn.deterministic = True
            torch.backends.cudnn.benchmark = False
        else:
            cudnn.deterministic = False
            torch.backends.cudnn.benchmark = True

        ################# SET THESE IN self.initialize() ###################################
        self.network = None
        self.optimizer = None
        self.lr_scheduler = None
        self.tr_gen = self.val_gen = None  # training/validation batch generators
        self.was_initialized = False

        ################# SET THESE IN INIT ################################################
        self.output_folder = None
        self.fold = None
        self.loss = None
        self.dataset_directory = None

        ################# SET THESE IN LOAD_DATASET OR DO_SPLIT ############################
        self.dataset = None  # these can be None for inference mode
        self.dataset_tr = self.dataset_val = None  # do not need to be used, they just appear if you are using the suggested load_dataset_and_do_split

        ################# THESE DO NOT NECESSARILY NEED TO BE MODIFIED #####################
        self.patience = 50  # epochs without improvement before early stopping
        self.val_eval_criterion_alpha = 0.9  # alpha * old + (1-alpha) * new
        # if this is too low then the moving average will be too noisy and the training may terminate early. If it is
        # too high the training will take forever
        self.train_loss_MA_alpha = 0.93  # alpha * old + (1-alpha) * new
        self.train_loss_MA_eps = 5e-4  # new MA must be at least this much better (smaller)
        self.save_every = 50  # checkpoint interval in epochs (see maybe_save_checkpoint)
        self.save_latest_only = True
        self.max_num_epochs = 1000
        self.num_batches_per_epoch = 250
        self.num_val_batches_per_epoch = 50
        self.also_val_in_tr_mode = False
        self.lr_threshold = 1e-6  # the network will not terminate training if the lr is still above this threshold

        ################# LEAVE THESE ALONE ################################################
        self.val_eval_criterion_MA = None  # moving average of the validation criterion (higher is better)
        self.train_loss_MA = None  # moving average of the training loss
        self.best_val_eval_criterion_MA = None
        self.best_MA_tr_loss_for_patience = None
        self.best_epoch_based_on_MA_tr_loss = None
        self.all_tr_losses = []
        self.all_val_losses = []
        self.all_val_losses_tr_mode = []
        self.all_val_eval_metrics = []  # does not have to be used
        self.epoch = 0
        self.log_file = None
        self.deterministic = deterministic
    @abstractmethod
    def initialize(self, training=True):
        """
        Set up everything needed before training/inference. Implementations must:

        create self.output_folder

        modify self.output_folder if you are doing cross-validation (one folder per fold)

        set self.tr_gen and self.val_gen

        set self.network, self.optimizer and self.lr_scheduler

        finally set self.was_initialized to True

        NOTE(review): NetworkTrainer derives from ``object``, not an ABCMeta
        metaclass, so @abstractmethod is not enforced at instantiation here.

        :param training: whether training-specific setup (generators, optimizer) is required
        :return:
        """
    @abstractmethod
    def load_dataset(self):
        """Populate ``self.dataset`` with the available cases; must be overridden."""
        pass
def do_split(self):
"""
This is a suggestion for if your dataset is a dictionary (my personal standard)
:return:
"""
splits_file = join(self.dataset_directory, "splits_final.pkl")
if not isfile(splits_file):
self.print_to_log_file("Creating new split...")
splits = []
all_keys_sorted = np.sort(list(self.dataset.keys()))
kfold = KFold(n_splits=5, shuffle=True, random_state=12345)
for i, (train_idx, test_idx) in enumerate(kfold.split(all_keys_sorted)):
train_keys = np.array(all_keys_sorted)[train_idx]
test_keys = np.array(all_keys_sorted)[test_idx]
splits.append(OrderedDict())
splits[-1]['train'] = train_keys
splits[-1]['val'] = test_keys
save_pickle(splits, splits_file)
splits = load_pickle(splits_file)
if self.fold == "all":
tr_keys = val_keys = list(self.dataset.keys())
else:
tr_keys = splits[self.fold]['train']
val_keys = splits[self.fold]['val']
tr_keys.sort()
val_keys.sort()
self.dataset_tr = OrderedDict()
for i in tr_keys:
self.dataset_tr[i] = self.dataset[i]
self.dataset_val = OrderedDict()
for i in val_keys:
self.dataset_val[i] = self.dataset[i]
    def plot_progress(self):
        """
        Plot the train/val loss curves (and, on a second y axis, the
        evaluation metric) over all epochs so far and save the figure as
        progress.png in the output folder.  IOErrors are logged, not raised.
        """
        try:
            font = {'weight': 'normal',
                    'size': 18}

            matplotlib.rc('font', **font)

            fig = plt.figure(figsize=(30, 24))
            ax = fig.add_subplot(111)
            ax2 = ax.twinx()  # second y axis: the metric is scaled differently than the loss

            x_values = list(range(self.epoch + 1))

            ax.plot(x_values, self.all_tr_losses, color='b', ls='-', label="loss_tr")

            ax.plot(x_values, self.all_val_losses, color='r', ls='-', label="loss_val, train=False")

            if len(self.all_val_losses_tr_mode) > 0:
                ax.plot(x_values, self.all_val_losses_tr_mode, color='g', ls='-', label="loss_val, train=True")
            if len(self.all_val_eval_metrics) == len(self.all_val_losses):
                # only plot the metric if one value was recorded per epoch
                ax2.plot(x_values, self.all_val_eval_metrics, color='g', ls='--', label="evaluation metric")

            ax.set_xlabel("epoch")
            ax.set_ylabel("loss")
            ax2.set_ylabel("evaluation metric")
            ax.legend()
            ax2.legend(loc=9)

            fig.savefig(join(self.output_folder, "progress.png"))
            plt.close()
        except IOError:
            self.print_to_log_file("failed to plot: ", sys.exc_info())
    def print_to_log_file(self, *args, also_print_to_console=True, add_timestamp=True):
        """
        Append *args to the training log file (created lazily on first use)
        and optionally echo them to the console.  Writing is retried up to 5
        times to tolerate transient IOErrors.

        :param also_print_to_console: also print the message via print()
        :param add_timestamp: prefix the message with the current date/time
        """

        timestamp = time()
        dt_object = datetime.fromtimestamp(timestamp)

        if add_timestamp:
            args = ("%s:" % dt_object, *args)

        if self.log_file is None:
            # first call: create the output folder and a time-stamped log file
            maybe_mkdir_p(self.output_folder)
            timestamp = datetime.now()
            self.log_file = join(self.output_folder, "training_log_%d_%d_%d_%02.0d_%02.0d_%02.0d.txt" %
                                 (timestamp.year, timestamp.month, timestamp.day, timestamp.hour, timestamp.minute, timestamp.second))
            with open(self.log_file, 'w') as f:
                f.write("Starting... \n")
        successful = False
        max_attempts = 5
        ctr = 0
        while not successful and ctr < max_attempts:
            try:
                with open(self.log_file, 'a+') as f:
                    for a in args:
                        f.write(str(a))
                        f.write(" ")
                    f.write("\n")
                successful = True
            except IOError:
                print("%s: failed to log: " % datetime.fromtimestamp(timestamp), sys.exc_info())
                sleep(0.5)
                ctr += 1
        if also_print_to_console:
            print(*args)
    def save_checkpoint(self, fname, save_optimizer=True):
        """
        Serialize the trainer state to ``fname`` via torch.save: network
        weights (moved to CPU), optionally the optimizer state, the lr
        scheduler state, and the loss/metric history used for plotting.

        :param fname: target file name
        :param save_optimizer: include the optimizer state dict (needed to resume training)
        """
        start_time = time()
        state_dict = self.network.state_dict()
        # move weights to CPU so the checkpoint can be loaded on any device
        for key in state_dict.keys():
            state_dict[key] = state_dict[key].cpu()
        lr_sched_state_dct = None
        # ReduceLROnPlateau state is deliberately not saved here
        if self.lr_scheduler is not None and not isinstance(self.lr_scheduler, lr_scheduler.ReduceLROnPlateau):
            lr_sched_state_dct = self.lr_scheduler.state_dict()
            # NOTE(review): this loop is a no-op (assigns each value to itself)
            for key in lr_sched_state_dct.keys():
                lr_sched_state_dct[key] = lr_sched_state_dct[key]
        if save_optimizer:
            optimizer_state_dict = self.optimizer.state_dict()
        else:
            optimizer_state_dict = None

        self.print_to_log_file("saving checkpoint...")
        torch.save({
            'epoch': self.epoch + 1,
            'state_dict': state_dict,
            'optimizer_state_dict': optimizer_state_dict,
            'lr_scheduler_state_dict': lr_sched_state_dct,
            'plot_stuff': (self.all_tr_losses, self.all_val_losses, self.all_val_losses_tr_mode,
                           self.all_val_eval_metrics)},
            fname)
        self.print_to_log_file("done, saving took %.2f seconds" % (time() - start_time))
def load_best_checkpoint(self, train=True):
if self.fold is None:
raise RuntimeError("Cannot load best checkpoint if self.fold is None")
self.load_checkpoint(join(self.output_folder, "model_best.model"), train=train)
def load_latest_checkpoint(self, train=True):
if isfile(join(self.output_folder, "model_final_checkpoint.model")):
return self.load_checkpoint(join(self.output_folder, "model_final_checkpoint.model"), train=train)
if isfile(join(self.output_folder, "model_latest.model")):
return self.load_checkpoint(join(self.output_folder, "model_latest.model"), train=train)
all_checkpoints = [i for i in os.listdir(self.output_folder) if i.endswith(".model") and i.find("_ep_") != -1]
if len(all_checkpoints) == 0:
return self.load_best_checkpoint(train=train)
corresponding_epochs = [int(i.split("_")[-1].split(".")[0]) for i in all_checkpoints]
checkpoint = all_checkpoints[np.argmax(corresponding_epochs)]
self.load_checkpoint(join(self.output_folder, checkpoint), train=train)
def load_checkpoint(self, fname, train=True):
self.print_to_log_file("loading checkpoint", fname, "train=", train)
if not self.was_initialized:
self.initialize(train)
saved_model = torch.load(fname, map_location=torch.device('cuda', torch.cuda.current_device()))
self.load_checkpoint_ram(saved_model, train)
    def load_checkpoint_ram(self, saved_model, train=True):
        """
        Restore trainer state from a checkpoint dict that is already in RAM
        (e.g. the result of torch.load).

        :param saved_model: checkpoint dict as written by save_checkpoint
        :param train: also restore optimizer and lr scheduler state
        :return:
        """
        if not self.was_initialized:
            self.initialize(train)

        new_state_dict = OrderedDict()
        curr_state_dict_keys = list(self.network.state_dict().keys())
        # if state dict comes form nn.DataParallel but we use non-parallel model here then the state dict keys do not
        # match. Use heuristic to make it match
        for k, value in saved_model['state_dict'].items():
            key = k
            if key not in curr_state_dict_keys:
                key = key[7:]  # strip the "module." prefix added by nn.DataParallel
            new_state_dict[key] = value
        self.network.load_state_dict(new_state_dict)
        self.epoch = saved_model['epoch']
        if train:
            optimizer_state_dict = saved_model['optimizer_state_dict']
            if optimizer_state_dict is not None:
                self.optimizer.load_state_dict(optimizer_state_dict)
            # ReduceLROnPlateau state is never saved, so it is not restored either
            if self.lr_scheduler is not None and not isinstance(self.lr_scheduler, lr_scheduler.ReduceLROnPlateau):
                self.lr_scheduler.load_state_dict(saved_model['lr_scheduler_state_dict'])
        self.all_tr_losses, self.all_val_losses, self.all_val_losses_tr_mode, self.all_val_eval_metrics = saved_model['plot_stuff']
def _maybe_init_amp(self):
# we use fp16 for training only, not inference
if self.fp16:
if amp is not None:
self.network, self.optimizer = amp.initialize(self.network, self.optimizer, opt_level="O1")
else:
self.print_to_log_file("WARNING: FP16 training was requested but nvidia apex is not installed. "
"Install it from https://github.com/NVIDIA/apex")
    def run_training(self):
        """
        Main training loop.

        Runs up to max_num_epochs epochs, each consisting of
        num_batches_per_epoch training iterations and
        num_val_batches_per_epoch validation iterations, then updates the
        training-loss moving average and calls on_epoch_end (implemented
        elsewhere) to decide whether to continue.  Saves
        model_final_checkpoint.model when done and removes the now-redundant
        model_latest checkpoint.
        """
        torch.cuda.empty_cache()

        self._maybe_init_amp()

        if cudnn.benchmark and cudnn.deterministic:
            warn("torch.backends.cudnn.deterministic is True indicating a deterministic training is desired. "
                 "But torch.backends.cudnn.benchmark is True as well and this will prevent deterministic training! "
                 "If you want deterministic then set benchmark=False")

        maybe_mkdir_p(self.output_folder)

        if not self.was_initialized:
            self.initialize(True)

        while self.epoch < self.max_num_epochs:
            self.print_to_log_file("\nepoch: ", self.epoch)
            epoch_start_time = time()
            train_losses_epoch = []

            # train one epoch
            self.network.train()
            for b in range(self.num_batches_per_epoch):
                l = self.run_iteration(self.tr_gen, True)
                train_losses_epoch.append(l)

            self.all_tr_losses.append(np.mean(train_losses_epoch))
            self.print_to_log_file("train loss : %.4f" % self.all_tr_losses[-1])

            with torch.no_grad():
                # validation with train=False
                self.network.eval()
                val_losses = []
                for b in range(self.num_val_batches_per_epoch):
                    l = self.run_iteration(self.val_gen, False, True)
                    val_losses.append(l)
                self.all_val_losses.append(np.mean(val_losses))
                self.print_to_log_file("val loss (train=False): %.4f" % self.all_val_losses[-1])

                if self.also_val_in_tr_mode:
                    self.network.train()
                    # validation with train=True
                    val_losses = []
                    for b in range(self.num_val_batches_per_epoch):
                        l = self.run_iteration(self.val_gen, False)
                        val_losses.append(l)
                    self.all_val_losses_tr_mode.append(np.mean(val_losses))
                    self.print_to_log_file("val loss (train=True): %.4f" % self.all_val_losses_tr_mode[-1])

            epoch_end_time = time()

            self.update_train_loss_MA()  # needed for lr scheduler and stopping of training

            continue_training = self.on_epoch_end()
            if not continue_training:
                # allows for early stopping
                break

            self.epoch += 1
            self.print_to_log_file("This epoch took %f s\n" % (epoch_end_time-epoch_start_time))

        self.save_checkpoint(join(self.output_folder, "model_final_checkpoint.model"))
        # now we can delete latest as it will be identical with final
        if isfile(join(self.output_folder, "model_latest.model")):
            os.remove(join(self.output_folder, "model_latest.model"))
        if isfile(join(self.output_folder, "model_latest.model.pkl")):
            os.remove(join(self.output_folder, "model_latest.model.pkl"))
def maybe_update_lr(self):
# maybe update learning rate
if self.lr_scheduler is not None:
assert isinstance(self.lr_scheduler, (lr_scheduler.ReduceLROnPlateau, lr_scheduler._LRScheduler))
if isinstance(self.lr_scheduler, lr_scheduler.ReduceLROnPlateau):
# lr scheduler is updated with moving average val loss. should be more robust
self.lr_scheduler.step(self.train_loss_MA)
else:
self.lr_scheduler.step(self.epoch + 1)
self.print_to_log_file("lr is now (scheduler) %s" % str(self.optimizer.param_groups[0]['lr']))
def maybe_save_checkpoint(self):
"""
Saves a checkpoint every save_ever epochs.
:return:
"""
if self.epoch % self.save_every == (self.save_every - 1):
self.print_to_log_file("saving scheduled checkpoint file...")
if not self.save_latest_only:
self.save_checkpoint(join(self.output_folder, "model_ep_%03.0d.model" % (self.epoch + 1)))
self.save_checkpoint(join(self.output_folder, "model_latest.model"))
self.print_to_log_file("done")
def update_eval_criterion_MA(self):
"""
If self.all_val_eval_metrics is unused (len=0) then we fall back to using -self.all_val_losses for the MA to determine early stopping
(not a minimization, but a maximization of a metric and therefore the - in the latter case)
:return:
"""
if self.val_eval_criterion_MA is None:
if len(self.all_val_eval_metrics) == 0:
self.val_eval_criterion_MA = - self.all_val_losses[-1]
else:
self.val_eval_criterion_MA = self.all_val_eval_metrics[-1]
else:
if len(self.all_val_eval_metrics) == 0:
"""
We here use alpha * old - (1 - alpha) * new because new in this case is the vlaidation loss and lower
is better, so we need to negate it.
"""
self.val_eval_criterion_MA = self.val_eval_criterion_alpha * self.val_eval_criterion_MA - (
1 - self.val_eval_criterion_alpha) * \
self.all_val_losses[-1]
else:
self.val_eval_criterion_MA = self.val_eval_criterion_alpha * self.val_eval_criterion_MA + (
1 - self.val_eval_criterion_alpha) * \
self.all_val_eval_metrics[-1]
def manage_patience(self):
# update patience
continue_training = True
if self.patience is not None:
# if best_MA_tr_loss_for_patience and best_epoch_based_on_MA_tr_loss were not yet initialized,
# initialize them
if self.best_MA_tr_loss_for_patience is None:
self.best_MA_tr_loss_for_patience = self.train_loss_MA
if self.best_epoch_based_on_MA_tr_loss is None:
self.best_epoch_based_on_MA_tr_loss = self.epoch
if self.best_val_eval_criterion_MA is None:
self.best_val_eval_criterion_MA = self.val_eval_criterion_MA
# check if the current epoch is the best one according to moving average of validation criterion. If so
# then save 'best' model
# Do not use this for validation. This is intended for test set prediction only.
self.print_to_log_file("current best_val_eval_criterion_MA is %.4f0" % self.best_val_eval_criterion_MA)
self.print_to_log_file("current val_eval_criterion_MA is %.4f" % self.val_eval_criterion_MA)
if self.val_eval_criterion_MA > self.best_val_eval_criterion_MA:
self.best_val_eval_criterion_MA = self.val_eval_criterion_MA
self.print_to_log_file("saving best epoch checkpoint...")
self.save_checkpoint(join(self.output_folder, "model_best.model"))
# Now see if the moving average of the train loss has improved. If yes then reset patience, else
# increase patience
if self.train_loss_MA + self.train_loss_MA_eps < self.best_MA_tr_loss_for_patience:
self.best_MA_tr_loss_for_patience = self.train_loss_MA
self.best_epoch_based_on_MA_tr_loss = self.epoch
self.print_to_log_file("New best epoch (train loss MA): %03.4f" % self.best_MA_tr_loss_for_patience)
else:
self.print_to_log_file("No improvement: current train MA %03.4f, best: %03.4f, eps is %03.4f" %
(self.train_loss_MA, self.best_MA_tr_loss_for_patience, self.train_loss_MA_eps))
# if patience has reached its maximum then finish training (provided lr is low enough)
if self.epoch - self.best_epoch_based_on_MA_tr_loss > self.patience:
if self.optimizer.param_groups[0]['lr'] > self.lr_threshold:
self.print_to_log_file("My patience ended, but I believe I need more time (lr > 1e-6)")
self.best_epoch_based_on_MA_tr_loss = self.epoch - self.patience // 2
else:
self.print_to_log_file("My patience ended")
continue_training = False
else:
self.print_to_log_file(
"Patience: %d/%d" % (self.epoch - self.best_epoch_based_on_MA_tr_loss, self.patience))
return continue_training
    def on_epoch_end(self):
        # End-of-epoch housekeeping. Order matters: online-evaluation results
        # must be finalized before they feed into the eval-criterion moving
        # average; lr update and checkpointing run on the current epoch.
        self.finish_online_evaluation()  # does not have to do anything, but can be used to update self.all_val_eval_
        # metrics
        self.plot_progress()
        self.maybe_update_lr()
        self.maybe_save_checkpoint()
        self.update_eval_criterion_MA()
        continue_training = self.manage_patience()
        # NOTE(review): the line below unconditionally overrides the result of
        # manage_patience(), i.e. early stopping is effectively disabled here
        # (patience bookkeeping/logging still runs). Confirm this is intended
        # rather than leftover debug code.
        continue_training = True
        return continue_training
def update_train_loss_MA(self):
if self.train_loss_MA is None:
self.train_loss_MA = self.all_tr_losses[-1]
else:
self.train_loss_MA = self.train_loss_MA_alpha * self.train_loss_MA + (1 - self.train_loss_MA_alpha) * \
self.all_tr_losses[-1]
def run_iteration(self, data_generator, do_backprop=True, run_online_evaluation=False):
data_dict = next(data_generator)
data = data_dict['data']
target = data_dict['target']
if not isinstance(data, torch.Tensor):
data = torch.from_numpy(data).float()
if not isinstance(target, torch.Tensor):
target = torch.from_numpy(target).float()
data = data.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
self.optimizer.zero_grad()
output = self.network(data)
del data
l = self.loss(output, target)
if run_online_evaluation:
self.run_online_evaluation(output, target)
del target
if do_backprop:
if not self.fp16 or amp is None:
l.backward()
else:
with amp.scale_loss(l, self.optimizer) as scaled_loss:
scaled_loss.backward()
self.optimizer.step()
return l.detach().cpu().numpy()
def run_online_evaluation(self, *args, **kwargs):
"""
Can be implemented, does not have to
:param output_torch:
:param target_npy:
:return:
"""
pass
def finish_online_evaluation(self):
"""
Can be implemented, does not have to
:return:
"""
pass
    @abstractmethod
    def validate(self, *args, **kwargs):
        # Must be implemented by subclasses: run validation/inference.
        # The signature is intentionally open-ended.
        pass
def find_lr(self, num_iters=1000, init_value=1e-6, final_value=10., beta=0.98):
"""
stolen and adapted from here: https://sgugger.github.io/how-do-you-find-a-good-learning-rate.html
:param num_iters:
:param init_value:
:param final_value:
:param beta:
:return:
"""
import math
self._maybe_init_amp()
mult = (final_value / init_value) ** (1/num_iters)
lr = init_value
self.optimizer.param_groups[0]['lr'] = lr
avg_loss = 0.
best_loss = 0.
losses = []
log_lrs = []
for batch_num in range(1, num_iters + 1):
# +1 because this one here is not designed to have negative loss...
loss = self.run_iteration(self.tr_gen, do_backprop=True, run_online_evaluation=False).data.item() + 1
# Compute the smoothed loss
avg_loss = beta * avg_loss + (1-beta) * loss
smoothed_loss = avg_loss / (1 - beta**batch_num)
# Stop if the loss is exploding
if batch_num > 1 and smoothed_loss > 4 * best_loss:
break
# Record the best loss
if smoothed_loss < best_loss or batch_num==1:
best_loss = smoothed_loss
# Store the values
losses.append(smoothed_loss)
log_lrs.append(math.log10(lr))
# Update the lr for the next step
lr *= mult
self.optimizer.param_groups[0]['lr'] = lr
import matplotlib.pyplot as plt
lrs = [10 ** i for i in log_lrs]
fig = plt.figure()
plt.xscale('log')
plt.plot(lrs[10:-5], losses[10:-5])
plt.savefig(join(self.output_folder, "lr_finder.png"))
plt.close()
return log_lrs, losses
| 41.605863 | 149 | 0.607257 | from _warnings import warn
import matplotlib
from batchgenerators.utilities.file_and_folder_operations import *
from sklearn.model_selection import KFold
matplotlib.use("agg")
from time import time, sleep
import torch
import numpy as np
from torch.optim import lr_scheduler
import matplotlib.pyplot as plt
import sys
from collections import OrderedDict
from datetime import datetime
import torch.backends.cudnn as cudnn
from abc import abstractmethod
from datetime import datetime
try:
from apex import amp
except ImportError:
amp = None
class NetworkTrainer(object):
def __init__(self, deterministic=True, fp16=False):
np.random.seed(12345)
torch.manual_seed(12345)
torch.cuda.manual_seed_all(12345)
self.fp16 = fp16
if deterministic:
cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
else:
cudnn.deterministic = False
torch.backends.cudnn.benchmark = True
erministic is True indicating a deterministic training is desired. "
"But torch.backends.cudnn.benchmark is True as well and this will prevent deterministic training! "
"If you want deterministic then set benchmark=False")
maybe_mkdir_p(self.output_folder)
if not self.was_initialized:
self.initialize(True)
while self.epoch < self.max_num_epochs:
self.print_to_log_file("\nepoch: ", self.epoch)
epoch_start_time = time()
train_losses_epoch = []
self.network.train()
for b in range(self.num_batches_per_epoch):
l = self.run_iteration(self.tr_gen, True)
train_losses_epoch.append(l)
self.all_tr_losses.append(np.mean(train_losses_epoch))
self.print_to_log_file("train loss : %.4f" % self.all_tr_losses[-1])
with torch.no_grad():
self.network.eval()
val_losses = []
for b in range(self.num_val_batches_per_epoch):
l = self.run_iteration(self.val_gen, False, True)
val_losses.append(l)
self.all_val_losses.append(np.mean(val_losses))
self.print_to_log_file("val loss (train=False): %.4f" % self.all_val_losses[-1])
if self.also_val_in_tr_mode:
self.network.train()
val_losses = []
for b in range(self.num_val_batches_per_epoch):
l = self.run_iteration(self.val_gen, False)
val_losses.append(l)
self.all_val_losses_tr_mode.append(np.mean(val_losses))
self.print_to_log_file("val loss (train=True): %.4f" % self.all_val_losses_tr_mode[-1])
epoch_end_time = time()
self.update_train_loss_MA()
continue_training = self.on_epoch_end()
if not continue_training:
break
self.epoch += 1
self.print_to_log_file("This epoch took %f s\n" % (epoch_end_time-epoch_start_time))
self.save_checkpoint(join(self.output_folder, "model_final_checkpoint.model"))
if isfile(join(self.output_folder, "model_latest.model")):
os.remove(join(self.output_folder, "model_latest.model"))
if isfile(join(self.output_folder, "model_latest.model.pkl")):
os.remove(join(self.output_folder, "model_latest.model.pkl"))
def maybe_update_lr(self):
if self.lr_scheduler is not None:
assert isinstance(self.lr_scheduler, (lr_scheduler.ReduceLROnPlateau, lr_scheduler._LRScheduler))
if isinstance(self.lr_scheduler, lr_scheduler.ReduceLROnPlateau):
self.lr_scheduler.step(self.train_loss_MA)
else:
self.lr_scheduler.step(self.epoch + 1)
self.print_to_log_file("lr is now (scheduler) %s" % str(self.optimizer.param_groups[0]['lr']))
def maybe_save_checkpoint(self):
if self.epoch % self.save_every == (self.save_every - 1):
self.print_to_log_file("saving scheduled checkpoint file...")
if not self.save_latest_only:
self.save_checkpoint(join(self.output_folder, "model_ep_%03.0d.model" % (self.epoch + 1)))
self.save_checkpoint(join(self.output_folder, "model_latest.model"))
self.print_to_log_file("done")
def update_eval_criterion_MA(self):
if self.val_eval_criterion_MA is None:
if len(self.all_val_eval_metrics) == 0:
self.val_eval_criterion_MA = - self.all_val_losses[-1]
else:
self.val_eval_criterion_MA = self.all_val_eval_metrics[-1]
else:
if len(self.all_val_eval_metrics) == 0:
"""
We here use alpha * old - (1 - alpha) * new because new in this case is the vlaidation loss and lower
is better, so we need to negate it.
"""
self.val_eval_criterion_MA = self.val_eval_criterion_alpha * self.val_eval_criterion_MA - (
1 - self.val_eval_criterion_alpha) * \
self.all_val_losses[-1]
else:
self.val_eval_criterion_MA = self.val_eval_criterion_alpha * self.val_eval_criterion_MA + (
1 - self.val_eval_criterion_alpha) * \
self.all_val_eval_metrics[-1]
def manage_patience(self):
continue_training = True
if self.patience is not None:
if self.best_MA_tr_loss_for_patience is None:
self.best_MA_tr_loss_for_patience = self.train_loss_MA
if self.best_epoch_based_on_MA_tr_loss is None:
self.best_epoch_based_on_MA_tr_loss = self.epoch
if self.best_val_eval_criterion_MA is None:
self.best_val_eval_criterion_MA = self.val_eval_criterion_MA
self.print_to_log_file("current best_val_eval_criterion_MA is %.4f0" % self.best_val_eval_criterion_MA)
self.print_to_log_file("current val_eval_criterion_MA is %.4f" % self.val_eval_criterion_MA)
if self.val_eval_criterion_MA > self.best_val_eval_criterion_MA:
self.best_val_eval_criterion_MA = self.val_eval_criterion_MA
self.print_to_log_file("saving best epoch checkpoint...")
self.save_checkpoint(join(self.output_folder, "model_best.model"))
if self.train_loss_MA + self.train_loss_MA_eps < self.best_MA_tr_loss_for_patience:
self.best_MA_tr_loss_for_patience = self.train_loss_MA
self.best_epoch_based_on_MA_tr_loss = self.epoch
self.print_to_log_file("New best epoch (train loss MA): %03.4f" % self.best_MA_tr_loss_for_patience)
else:
self.print_to_log_file("No improvement: current train MA %03.4f, best: %03.4f, eps is %03.4f" %
(self.train_loss_MA, self.best_MA_tr_loss_for_patience, self.train_loss_MA_eps))
if self.epoch - self.best_epoch_based_on_MA_tr_loss > self.patience:
if self.optimizer.param_groups[0]['lr'] > self.lr_threshold:
self.print_to_log_file("My patience ended, but I believe I need more time (lr > 1e-6)")
self.best_epoch_based_on_MA_tr_loss = self.epoch - self.patience // 2
else:
self.print_to_log_file("My patience ended")
continue_training = False
else:
self.print_to_log_file(
"Patience: %d/%d" % (self.epoch - self.best_epoch_based_on_MA_tr_loss, self.patience))
return continue_training
def on_epoch_end(self):
self.finish_online_evaluation()
self.plot_progress()
self.maybe_update_lr()
self.maybe_save_checkpoint()
self.update_eval_criterion_MA()
continue_training = self.manage_patience()
continue_training = True
return continue_training
def update_train_loss_MA(self):
if self.train_loss_MA is None:
self.train_loss_MA = self.all_tr_losses[-1]
else:
self.train_loss_MA = self.train_loss_MA_alpha * self.train_loss_MA + (1 - self.train_loss_MA_alpha) * \
self.all_tr_losses[-1]
def run_iteration(self, data_generator, do_backprop=True, run_online_evaluation=False):
data_dict = next(data_generator)
data = data_dict['data']
target = data_dict['target']
if not isinstance(data, torch.Tensor):
data = torch.from_numpy(data).float()
if not isinstance(target, torch.Tensor):
target = torch.from_numpy(target).float()
data = data.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
self.optimizer.zero_grad()
output = self.network(data)
del data
l = self.loss(output, target)
if run_online_evaluation:
self.run_online_evaluation(output, target)
del target
if do_backprop:
if not self.fp16 or amp is None:
l.backward()
else:
with amp.scale_loss(l, self.optimizer) as scaled_loss:
scaled_loss.backward()
self.optimizer.step()
return l.detach().cpu().numpy()
def run_online_evaluation(self, *args, **kwargs):
pass
def finish_online_evaluation(self):
pass
@abstractmethod
def validate(self, *args, **kwargs):
pass
def find_lr(self, num_iters=1000, init_value=1e-6, final_value=10., beta=0.98):
import math
self._maybe_init_amp()
mult = (final_value / init_value) ** (1/num_iters)
lr = init_value
self.optimizer.param_groups[0]['lr'] = lr
avg_loss = 0.
best_loss = 0.
losses = []
log_lrs = []
for batch_num in range(1, num_iters + 1):
loss = self.run_iteration(self.tr_gen, do_backprop=True, run_online_evaluation=False).data.item() + 1
avg_loss = beta * avg_loss + (1-beta) * loss
smoothed_loss = avg_loss / (1 - beta**batch_num)
if batch_num > 1 and smoothed_loss > 4 * best_loss:
break
if smoothed_loss < best_loss or batch_num==1:
best_loss = smoothed_loss
losses.append(smoothed_loss)
log_lrs.append(math.log10(lr))
lr *= mult
self.optimizer.param_groups[0]['lr'] = lr
import matplotlib.pyplot as plt
lrs = [10 ** i for i in log_lrs]
fig = plt.figure()
plt.xscale('log')
plt.plot(lrs[10:-5], losses[10:-5])
plt.savefig(join(self.output_folder, "lr_finder.png"))
plt.close()
return log_lrs, losses
| true | true |
f731fa3b1b137c89eee62971f1b7005782a10a8c | 4,079 | py | Python | huaweicloud-sdk-rds/huaweicloudsdkrds/v3/model/list_postgresql_db_user_paginated_response.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 64 | 2020-06-12T07:05:07.000Z | 2022-03-30T03:32:50.000Z | huaweicloud-sdk-rds/huaweicloudsdkrds/v3/model/list_postgresql_db_user_paginated_response.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 11 | 2020-07-06T07:56:54.000Z | 2022-01-11T11:14:40.000Z | huaweicloud-sdk-rds/huaweicloudsdkrds/v3/model/list_postgresql_db_user_paginated_response.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 24 | 2020-06-08T11:42:13.000Z | 2022-03-04T06:44:08.000Z | # coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListPostgresqlDbUserPaginatedResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'users': 'list[PostgresqlUserForList]',
'total_count': 'int'
}
attribute_map = {
'users': 'users',
'total_count': 'total_count'
}
def __init__(self, users=None, total_count=None):
"""ListPostgresqlDbUserPaginatedResponse - a model defined in huaweicloud sdk"""
super(ListPostgresqlDbUserPaginatedResponse, self).__init__()
self._users = None
self._total_count = None
self.discriminator = None
if users is not None:
self.users = users
if total_count is not None:
self.total_count = total_count
@property
def users(self):
"""Gets the users of this ListPostgresqlDbUserPaginatedResponse.
列表中每个元素表示一个数据库用户。
:return: The users of this ListPostgresqlDbUserPaginatedResponse.
:rtype: list[PostgresqlUserForList]
"""
return self._users
@users.setter
def users(self, users):
"""Sets the users of this ListPostgresqlDbUserPaginatedResponse.
列表中每个元素表示一个数据库用户。
:param users: The users of this ListPostgresqlDbUserPaginatedResponse.
:type: list[PostgresqlUserForList]
"""
self._users = users
@property
def total_count(self):
"""Gets the total_count of this ListPostgresqlDbUserPaginatedResponse.
数据库用户总数。
:return: The total_count of this ListPostgresqlDbUserPaginatedResponse.
:rtype: int
"""
return self._total_count
@total_count.setter
def total_count(self, total_count):
"""Sets the total_count of this ListPostgresqlDbUserPaginatedResponse.
数据库用户总数。
:param total_count: The total_count of this ListPostgresqlDbUserPaginatedResponse.
:type: int
"""
self._total_count = total_count
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListPostgresqlDbUserPaginatedResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 28.725352 | 90 | 0.592792 |
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListPostgresqlDbUserPaginatedResponse(SdkResponse):
sensitive_list = []
openapi_types = {
'users': 'list[PostgresqlUserForList]',
'total_count': 'int'
}
attribute_map = {
'users': 'users',
'total_count': 'total_count'
}
def __init__(self, users=None, total_count=None):
super(ListPostgresqlDbUserPaginatedResponse, self).__init__()
self._users = None
self._total_count = None
self.discriminator = None
if users is not None:
self.users = users
if total_count is not None:
self.total_count = total_count
@property
def users(self):
return self._users
@users.setter
def users(self, users):
self._users = users
@property
def total_count(self):
return self._total_count
@total_count.setter
def total_count(self, total_count):
self._total_count = total_count
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, ListPostgresqlDbUserPaginatedResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
f731fa7e3673025c258022c3f2774631787e4331 | 3,643 | py | Python | partpipeline/parser.py | CIDARLAB/3DuF-server | 312da5e4dc3b6212db634ce6b0c0235d9779d27b | [
"BSD-3-Clause"
] | null | null | null | partpipeline/parser.py | CIDARLAB/3DuF-server | 312da5e4dc3b6212db634ce6b0c0235d9779d27b | [
"BSD-3-Clause"
] | null | null | null | partpipeline/parser.py | CIDARLAB/3DuF-server | 312da5e4dc3b6212db634ce6b0c0235d9779d27b | [
"BSD-3-Clause"
] | 3 | 2021-01-25T20:12:21.000Z | 2021-06-18T21:41:18.000Z | import json
import sys
from parchmint import Device
sys.path.append("/usr/lib/freecad-python3/lib")
import Draft
import FreeCAD
import Mesh
import Part
from threedprinting.components.box import Box
from threedprinting.components.connection import createConnection
from threedprinting.components.droplet import DropletGenerator
from threedprinting.components.port import Port
from threedprinting.export import exportToSTL
myDocument = FreeCAD.newDocument()

UM_MM = 1000  # conversion factor: device JSON uses micrometers, FreeCAD uses mm

file_path = sys.argv[1]
print("File Name: " + file_path)

# Parse the parchmint device description from the given JSON file.
device = None
with open(file_path) as data_file:
    text = data_file.read()
    device_json = json.loads(text)
    device = Device(device_json)

# Instantiate a FreeCAD object for every component in the device.
for component in device.components:
    # BUGFIX: extract this component's own params up front. Previously the
    # fallback branch reused `params` from a preceding loop iteration (or
    # raised NameError if the first component was neither a PORT nor a
    # droplet generator), so generic components got the wrong dimensions.
    params = component.__dict__["params"]
    x = component.xpos / UM_MM
    y = component.ypos / UM_MM
    height = params.get_param("height") / UM_MM
    # components are centered vertically on the z=0 plane
    pos = [x, y, -height / 2]
    if component.entity == "PORT":
        print("port")
        radius = params.get_param("portRadius") / UM_MM
        port = FreeCAD.ActiveDocument.addObject("Part::FeaturePython", "Port")
        Port(port, pos, radius=radius, height=height)
        myDocument.recompute()
    elif component.entity == "NOZZLE DROPLET GENERATOR":
        print("droplet")
        droplet = FreeCAD.ActiveDocument.addObject(
            "Part::FeaturePython", "DropletGenerator"
        )
        DropletGenerator(
            droplet,
            pos,
            params.get_param("waterInputWidth") / UM_MM,
            params.get_param("oilInputWidth") / UM_MM,
            params.get_param("orificeSize") / UM_MM,
            params.get_param("orificeLength") / UM_MM,
            params.get_param("outputLength") / UM_MM,
            params.get_param("outputWidth") / UM_MM,
            height,
        )
        myDocument.recompute()
    else:
        # unknown component types are approximated by their bounding box
        print(component.entity, "not implemented")
        width = component.xspan / UM_MM
        length = component.yspan / UM_MM
        box = FreeCAD.ActiveDocument.addObject("Part::FeaturePython", "Box")
        Box(box, pos, height=height, width=width, length=length)
        myDocument.recompute()

# Route every connection as a circular-profile channel along its waypoints.
connections = []
for connection in device.connections:
    print(connection.name)
    conn_params = connection.__dict__["params"]
    waypoints = conn_params.get_param("wayPoints")
    channelWidth = conn_params.get_param("channelWidth")
    height = conn_params.get_param("height")
    P = [(wx / UM_MM, wy / UM_MM, 0) for (wx, wy) in waypoints]
    connectionObject = createConnection(
        P,
        myDocument,
        Type="CIRCLE",
        channelWidth=channelWidth / UM_MM,
        height=height / UM_MM,
    )
    connections.append(connectionObject)

# Collect every document object plus the routed channels and export to STL.
objects = []
for obj in FreeCAD.ActiveDocument.Objects:
    print(obj.Name)
    objects.append(obj)
objects += connections

exportToSTL(objects, "DropletTest")
| 30.358333 | 78 | 0.6437 | import json
import sys
from parchmint import Device
sys.path.append("/usr/lib/freecad-python3/lib")
import Draft
import FreeCAD
import Mesh
import Part
from threedprinting.components.box import Box
from threedprinting.components.connection import createConnection
from threedprinting.components.droplet import DropletGenerator
from threedprinting.components.port import Port
from threedprinting.export import exportToSTL
myDocument = FreeCAD.newDocument()
UM_MM = 1000
file_path = sys.argv[1]
print("File Name: " + file_path)
device = None
with open(file_path) as data_file:
text = data_file.read()
device_json = json.loads(text)
device = Device(device_json)
for component in device.components:
if component.entity == "PORT":
print("port")
x = (component.xpos) / UM_MM
y = (component.ypos) / UM_MM
dictionary = component.__dict__
params = dictionary["params"]
height = params.get_param("height") / UM_MM
pos = [x, y, -height / 2]
radius = params.get_param("portRadius") / UM_MM
port = FreeCAD.ActiveDocument.addObject("Part::FeaturePython", "Port")
Port(port, pos, radius=radius, height=height)
myDocument.recompute()
elif component.entity == "NOZZLE DROPLET GENERATOR":
print("droplet")
x = (component.xpos) / UM_MM
y = (component.ypos) / UM_MM
dictionary = component.__dict__
params = dictionary["params"]
waterInputWidth = params.get_param("waterInputWidth") / UM_MM
oilInputWidth = params.get_param("oilInputWidth") / UM_MM
orificeSize = params.get_param("orificeSize") / UM_MM
orificeLength = params.get_param("orificeLength") / UM_MM
outputLength = params.get_param("outputLength") / UM_MM
outputWidth = params.get_param("outputWidth") / UM_MM
height = params.get_param("height") / UM_MM
pos = [x, y, -height / 2]
droplet = FreeCAD.ActiveDocument.addObject(
"Part::FeaturePython", "DropletGenerator"
)
DropletGenerator(
droplet,
pos,
waterInputWidth,
oilInputWidth,
orificeSize,
orificeLength,
outputLength,
outputWidth,
height,
)
myDocument.recompute()
else:
print(component.entity, "not implemented")
x = (component.xpos) / UM_MM
y = (component.ypos) / UM_MM
height = params.get_param("height") / UM_MM
pos = [x, y, -height / 2]
width = component.xspan / UM_MM
length = component.yspan / UM_MM
box = FreeCAD.ActiveDocument.addObject("Part::FeaturePython", "Box")
Box(box, pos, height=height, width=width, length=length)
myDocument.recompute()
connections = []
for connection in device.connections:
print(connection.name)
dictionary = connection.__dict__
waypoints = dictionary["params"].get_param("wayPoints")
channelWidth = dictionary["params"].get_param("channelWidth")
height = dictionary["params"].get_param("height")
P = []
for (x, y) in waypoints:
x = x / UM_MM
y = y / UM_MM
P.append((x, y, 0))
connectionObject = createConnection(
P,
myDocument,
Type="CIRCLE",
channelWidth=channelWidth / UM_MM,
height=height / UM_MM,
)
connections.append(connectionObject)
objects = []
for obj in FreeCAD.ActiveDocument.Objects:
print(obj.Name)
objects.append(obj)
objects += connections
exportToSTL(objects, "DropletTest")
| true | true |
f731fb049cc31abc6393352c8ef73b9b55622cf5 | 1,941 | py | Python | realtor/forms/staffCreation.py | franklin18ru/Indigo | ca559945de4724ea8d64af45a87d14b67740a146 | [
"MIT"
] | null | null | null | realtor/forms/staffCreation.py | franklin18ru/Indigo | ca559945de4724ea8d64af45a87d14b67740a146 | [
"MIT"
] | null | null | null | realtor/forms/staffCreation.py | franklin18ru/Indigo | ca559945de4724ea8d64af45a87d14b67740a146 | [
"MIT"
] | null | null | null | from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from realtor.models import Realtors, Positions
class Realtor(forms.ModelForm):
    """Model form for creating/editing a realtor profile."""

    name = forms.CharField(required=True, widget=forms.TextInput(attrs={'class': 'form-control validate'}))
    email = forms.EmailField(required=True, widget=forms.EmailInput(attrs={'class': 'form-control validate'}))
    description = forms.CharField(required=True, widget=forms.Textarea(attrs={'class': 'form-control validate'}))

    class Meta:
        model = Realtors
        # 'fields' is the explicit whitelist; 'exclude' additionally hides
        # internal columns (redundant with the whitelist, kept as-is).
        exclude = ['id', 'password', 'image']
        fields = ['name', 'email', 'description', 'positions']
class RegisterForm(UserCreationForm):
    """Staff registration form built on Django's ``UserCreationForm``.

    The username field is an e-mail address; ``save`` copies it into
    ``user.email`` so both stay in sync.
    """

    first_name = forms.CharField(
        required=True,
        widget=forms.TextInput(attrs={'class': 'form-control validate', 'id': 'orangeForm-pass'}))
    username = forms.EmailField(
        required=True,
        widget=forms.EmailInput(attrs={'class': 'form-control validate', 'id': 'orangeForm-pass'}))
    password1 = forms.CharField(
        required=True,
        widget=forms.PasswordInput(attrs={'class': 'form-control validate', 'id': 'orangeForm-pass'}))
    password2 = forms.CharField(
        required=True,
        min_length=4,
        widget=forms.PasswordInput(attrs={'class': 'form-control validate', 'id': 'orangeForm-pass'}))

    class Meta:
        model = User
        fields = ['first_name', 'username', 'password1', 'password2', 'is_staff']

    def save(self, commit=True):
        """Create the User, mirroring the username into the email field."""
        user = super(RegisterForm, self).save(commit=False)
        user.first_name = self.cleaned_data['first_name']
        user.email = user.username
        if commit:
            user.save()
        return user

    def getUser(self):
        """Return the cleaned username (e-mail address) after validation."""
        return self.cleaned_data['username']
return self.cleaned_data['username'] | 32.898305 | 113 | 0.623905 | from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from realtor.models import Realtors, Positions
class Realtor(forms.ModelForm):
name = forms.CharField(required=True, widget=forms.TextInput(attrs={'class': 'form-control validate'}))
email = forms.EmailField(required=True, widget=forms.EmailInput(attrs={'class': 'form-control validate'}))
description = forms.CharField(required=True, widget=forms.Textarea(attrs={'class': 'form-control validate'}))
class Meta:
model = Realtors
exclude = ['id','password','image']
fields = [
'name',
'email',
'description',
'positions',
]
class RegisterForm(UserCreationForm):
first_name = forms.CharField(
widget=forms.TextInput(attrs={'class': 'form-control validate', 'id': 'orangeForm-pass'}),
required=True)
username = forms.EmailField(
widget=forms.EmailInput(attrs={'class': 'form-control validate', 'id': 'orangeForm-pass'}),
required=True)
password1 = forms.CharField(
widget=forms.PasswordInput(attrs={'class': 'form-control validate', 'id': 'orangeForm-pass'}),
required=True)
password2 = forms.CharField(
widget=forms.PasswordInput(attrs={'class': 'form-control validate', 'id': 'orangeForm-pass'}),
required=True,min_length=4)
class Meta:
model = User
fields = [
'first_name',
'username',
'password1',
'password2',
'is_staff',
]
def save(self, commit=True):
user = super(RegisterForm, self).save(commit=False)
user.first_name = self.cleaned_data['first_name']
user.email = user.username
if commit:
user.save()
return user
def getUser(self):
return self.cleaned_data['username'] | true | true |
f731fb11331a48d87e7ec2442b5be6c0a5ed4a52 | 34,160 | py | Python | azure-mgmt-web/azure/mgmt/web/operations/global_model_operations.py | HydAu/AzureSDKForPython | 5cbe34e9e0b8ea1faacc9f205633ccc0b885c0f3 | [
"Apache-2.0"
] | null | null | null | azure-mgmt-web/azure/mgmt/web/operations/global_model_operations.py | HydAu/AzureSDKForPython | 5cbe34e9e0b8ea1faacc9f205633ccc0b885c0f3 | [
"Apache-2.0"
] | null | null | null | azure-mgmt-web/azure/mgmt/web/operations/global_model_operations.py | HydAu/AzureSDKForPython | 5cbe34e9e0b8ea1faacc9f205633ccc0b885c0f3 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
import uuid
from .. import models
class GlobalModelOperations(object):
    """GlobalModelOperations operations.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer

        self.config = config

    # ------------------------------------------------------------------
    # Shared request plumbing.
    #
    # Every operation in this class follows the same generated pattern:
    # format a subscription-scoped URL, add the standard headers, send the
    # request, raise CloudError on any non-200 status, and deserialize the
    # body on 200.  The private helpers below factor out that boilerplate;
    # observable behavior is identical to the previous inline copies.
    # ------------------------------------------------------------------

    def _format_url(self, url_template, **extra_path_args):
        """Fill ``{subscriptionId}`` (plus any extra path args) into *url_template*."""
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        for arg_name, arg_value in extra_path_args.items():
            # All extra path parameters in this class are plain strings.
            path_format_arguments[arg_name] = self._serialize.url(arg_name, arg_value, 'str')
        return self._client.format_url(url_template, **path_format_arguments)

    def _build_query(self, extra=None):
        """Return the query dict: any pre-serialized *extra* pairs plus api-version."""
        query_parameters = dict(extra) if extra else {}
        query_parameters['api-version'] = self._serialize.query(
            "self.config.api_version", self.config.api_version, 'str')
        return query_parameters

    def _build_headers(self, custom_headers):
        """Assemble the standard JSON / request-id / accept-language headers."""
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header(
                "self.config.accept_language", self.config.accept_language, 'str')
        return header_parameters

    def _run(self, request, header_parameters, deserialize_as, raw,
             operation_config, body_content=None):
        """Send *request*; raise CloudError on non-200, deserialize on 200.

        :param deserialize_as: msrest type name used for deserialization.
        :param raw: when True, wrap the result in ClientRawResponse.
        :param body_content: optional serialized request body (PUT/POST).
        """
        if body_content is not None:
            response = self._client.send(
                request, header_parameters, body_content, **operation_config)
        else:
            response = self._client.send(
                request, header_parameters, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize(deserialize_as, response)

        if raw:
            return ClientRawResponse(deserialized, response)
        return deserialized

    def _get(self, url_template, deserialize_as, custom_headers, raw,
             operation_config, extra_query=None, **extra_path_args):
        """Shared implementation for all plain GET operations in this class."""
        url = self._format_url(url_template, **extra_path_args)
        query_parameters = self._build_query(extra_query)
        header_parameters = self._build_headers(custom_headers)
        request = self._client.get(url, query_parameters)
        return self._run(request, header_parameters, deserialize_as, raw,
                         operation_config)

    # ------------------------------------------------------------------
    # Public operations (interface unchanged).
    # ------------------------------------------------------------------

    def get_subscription_publishing_credentials(
            self, custom_headers=None, raw=False, **operation_config):
        """
        Gets publishing credentials for the subscription owner

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`User <azure.mgmt.web.models.User>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        return self._get(
            '/subscriptions/{subscriptionId}/providers/Microsoft.Web/publishingCredentials',
            'User', custom_headers, raw, operation_config)

    def update_subscription_publishing_credentials(
            self, request_message, custom_headers=None, raw=False, **operation_config):
        """
        Updates publishing credentials for the subscription owner

        :param request_message: requestMessage with new publishing credentials
        :type request_message: :class:`User <azure.mgmt.web.models.User>`
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`User <azure.mgmt.web.models.User>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        url = self._format_url(
            '/subscriptions/{subscriptionId}/providers/Microsoft.Web/publishingCredentials')
        query_parameters = self._build_query()
        header_parameters = self._build_headers(custom_headers)
        body_content = self._serialize.body(request_message, 'User')
        request = self._client.put(url, query_parameters)
        return self._run(request, header_parameters, 'User', raw,
                         operation_config, body_content=body_content)

    def get_subscription_geo_regions(
            self, sku=None, custom_headers=None, raw=False, **operation_config):
        """
        Gets list of available geo regions

        :param sku: Filter only to regions that support this sku
        :type sku: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`GeoRegionCollection
         <azure.mgmt.web.models.GeoRegionCollection>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        extra_query = {}
        if sku is not None:
            extra_query['sku'] = self._serialize.query("sku", sku, 'str')
        return self._get(
            '/subscriptions/{subscriptionId}/providers/Microsoft.Web/geoRegions',
            'GeoRegionCollection', custom_headers, raw, operation_config,
            extra_query=extra_query)

    def get_all_certificates(
            self, custom_headers=None, raw=False, **operation_config):
        """
        Get all certificates for a subscription

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`CertificateCollection
         <azure.mgmt.web.models.CertificateCollection>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        return self._get(
            '/subscriptions/{subscriptionId}/providers/Microsoft.Web/certificates',
            'CertificateCollection', custom_headers, raw, operation_config)

    def get_all_server_farms(
            self, detailed=None, custom_headers=None, raw=False, **operation_config):
        """
        Gets all App Service Plans for a subscription

        :param detailed: False to return a subset of App Service Plan
         properties, true to return all of the properties.
         Retrieval of all properties may increase the API latency.
        :type detailed: bool
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`ServerFarmCollection
         <azure.mgmt.web.models.ServerFarmCollection>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        extra_query = {}
        if detailed is not None:
            extra_query['detailed'] = self._serialize.query("detailed", detailed, 'bool')
        return self._get(
            '/subscriptions/{subscriptionId}/providers/Microsoft.Web/serverfarms',
            'ServerFarmCollection', custom_headers, raw, operation_config,
            extra_query=extra_query)

    def get_all_sites(
            self, custom_headers=None, raw=False, **operation_config):
        """
        Gets all Web Apps for a subscription

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`SiteCollection <azure.mgmt.web.models.SiteCollection>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        return self._get(
            '/subscriptions/{subscriptionId}/providers/Microsoft.Web/sites',
            'SiteCollection', custom_headers, raw, operation_config)

    def get_all_hosting_environments(
            self, custom_headers=None, raw=False, **operation_config):
        """
        Gets all hostingEnvironments (App Service Environment) for a
        subscription

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`HostingEnvironmentCollection
         <azure.mgmt.web.models.HostingEnvironmentCollection>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        return self._get(
            '/subscriptions/{subscriptionId}/providers/Microsoft.Web/hostingEnvironments',
            'HostingEnvironmentCollection', custom_headers, raw, operation_config)

    def get_all_managed_hosting_environments(
            self, custom_headers=None, raw=False, **operation_config):
        """
        Gets all managed hosting environments for a subscription

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`ManagedHostingEnvironmentCollection
         <azure.mgmt.web.models.ManagedHostingEnvironmentCollection>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        return self._get(
            '/subscriptions/{subscriptionId}/providers/Microsoft.Web/managedHostingEnvironments',
            'ManagedHostingEnvironmentCollection', custom_headers, raw,
            operation_config)

    def get_all_classic_mobile_services(
            self, custom_headers=None, raw=False, **operation_config):
        """
        Gets all mobile services for a subscription

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`ClassicMobileServiceCollection
         <azure.mgmt.web.models.ClassicMobileServiceCollection>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        return self._get(
            '/subscriptions/{subscriptionId}/providers/Microsoft.Web/classicMobileServices',
            'ClassicMobileServiceCollection', custom_headers, raw,
            operation_config)

    def list_premier_add_on_offers(
            self, custom_headers=None, raw=False, **operation_config):
        """
        List premier add on offers

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: object
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        return self._get(
            '/subscriptions/{subscriptionId}/providers/Microsoft.Web/premieraddonoffers',
            'object', custom_headers, raw, operation_config)

    def is_hosting_environment_name_available(
            self, name, custom_headers=None, raw=False, **operation_config):
        """
        Whether hosting environment name is available

        :param name: Hosting environment name
        :type name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: object
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        # The candidate name is passed as a query parameter here (contrast
        # with the "legacy" variant below, which puts it in the URL path).
        extra_query = {'name': self._serialize.query("name", name, 'str')}
        return self._get(
            '/subscriptions/{subscriptionId}/providers/Microsoft.Web/ishostingenvironmentnameavailable',
            'object', custom_headers, raw, operation_config,
            extra_query=extra_query)

    def is_hosting_environment_with_legacy_name_available(
            self, name, custom_headers=None, raw=False, **operation_config):
        """
        Whether hosting environment name is available

        :param name: Hosting environment name
        :type name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: object
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        return self._get(
            '/subscriptions/{subscriptionId}/providers/Microsoft.Web/ishostingenvironmentnameavailable/{name}',
            'object', custom_headers, raw, operation_config, name=name)

    def check_name_availability(
            self, request, custom_headers=None, raw=False, **operation_config):
        """
        Check if resource name is available

        :param request: Name availability request
        :type request: :class:`ResourceNameAvailabilityRequest
         <azure.mgmt.web.models.ResourceNameAvailabilityRequest>`
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`ResourceNameAvailability
         <azure.mgmt.web.models.ResourceNameAvailability>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        url = self._format_url(
            '/subscriptions/{subscriptionId}/providers/Microsoft.Web/checknameavailability')
        query_parameters = self._build_query()
        header_parameters = self._build_headers(custom_headers)
        body_content = self._serialize.body(request, 'ResourceNameAvailabilityRequest')
        # Use a distinct local name so the ``request`` parameter is not
        # shadowed by the HTTP request object (the original code shadowed it).
        http_request = self._client.post(url, query_parameters)
        return self._run(http_request, header_parameters,
                         'ResourceNameAvailability', raw, operation_config,
                         body_content=body_content)
| 42.860728 | 140 | 0.666598 |
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
import uuid
from .. import models
class GlobalModelOperations(object):
    def __init__(self, client, config, serializer, deserializer):
        # Thin holder for the shared service client, (de)serializers and
        # client configuration used by every operation on this class.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self.config = config
    def get_subscription_publishing_credentials(
            self, custom_headers=None, raw=False, **operation_config):
        """Gets publishing credentials for the subscription owner.

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: operation configuration overrides.
        :rtype: ``User`` model, or ``ClientRawResponse`` if raw=true
        :raises CloudError: on any non-200 response.
        """
        # Construct the subscription-scoped URL.
        url = '/subscriptions/{subscriptionId}/providers/Microsoft.Web/publishingCredentials'
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Query parameters: api-version is always required.
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
        # Standard headers: JSON content type, optional client request id,
        # caller-supplied headers, optional accept-language.
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Send; surface anything other than 200 as CloudError.
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('User', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    def update_subscription_publishing_credentials(
            self, request_message, custom_headers=None, raw=False, **operation_config):
        """Updates publishing credentials for the subscription owner.

        :param request_message: ``User`` model carrying the new credentials.
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: operation configuration overrides.
        :rtype: ``User`` model, or ``ClientRawResponse`` if raw=true
        :raises CloudError: on any non-200 response.
        """
        # Construct the subscription-scoped URL.
        url = '/subscriptions/{subscriptionId}/providers/Microsoft.Web/publishingCredentials'
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Query parameters: api-version is always required.
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
        # Standard headers.
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Serialize the body and send as a PUT.
        body_content = self._serialize.body(request_message, 'User')
        request = self._client.put(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('User', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
def get_subscription_geo_regions(
        self, sku=None, custom_headers=None, raw=False, **operation_config):
    """List the geographical regions available to the subscription.

    :param sku: optional SKU name used to filter the regions
    :type sku: str
    :param custom_headers: headers that will be added to the request
    :type custom_headers: dict
    :param raw: return the direct ClientRawResponse alongside the
     deserialized response
    :type raw: bool
    :rtype: ``GeoRegionCollection``, or ClientRawResponse if raw=true
    :raises: CloudError if the service returns a non-200 status
    """
    # Expand the ARM template path with the serialized subscription id.
    template = '/subscriptions/{subscriptionId}/providers/Microsoft.Web/geoRegions'
    url = self._client.format_url(
        template,
        subscriptionId=self._serialize.url(
            "self.config.subscription_id", self.config.subscription_id, 'str'))

    # Query string: optional sku filter first, then the API version.
    params = {}
    if sku is not None:
        params['sku'] = self._serialize.query("sku", sku, 'str')
    params['api-version'] = self._serialize.query(
        "self.config.api_version", self.config.api_version, 'str')

    # JSON content type plus the optional tracing / language headers.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header(
            "self.config.accept_language", self.config.accept_language, 'str')

    # Issue the GET and fail fast on anything but HTTP 200.
    request = self._client.get(url, params)
    response = self._client.send(request, headers, **operation_config)
    if response.status_code != 200:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    result = self._deserialize('GeoRegionCollection', response)
    if raw:
        return ClientRawResponse(result, response)
    return result
def get_all_certificates(
        self, custom_headers=None, raw=False, **operation_config):
    """List all certificates in the current subscription.

    :param custom_headers: headers that will be added to the request
    :type custom_headers: dict
    :param raw: return the direct ClientRawResponse alongside the
     deserialized response
    :type raw: bool
    :rtype: ``CertificateCollection``, or ClientRawResponse if raw=true
    :raises: CloudError if the service returns a non-200 status
    """
    # Expand the ARM template path with the serialized subscription id.
    template = '/subscriptions/{subscriptionId}/providers/Microsoft.Web/certificates'
    url = self._client.format_url(
        template,
        subscriptionId=self._serialize.url(
            "self.config.subscription_id", self.config.subscription_id, 'str'))

    # Query string: only the API version is required.
    params = {
        'api-version': self._serialize.query(
            "self.config.api_version", self.config.api_version, 'str'),
    }

    # JSON content type plus the optional tracing / language headers.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header(
            "self.config.accept_language", self.config.accept_language, 'str')

    # Issue the GET and fail fast on anything but HTTP 200.
    request = self._client.get(url, params)
    response = self._client.send(request, headers, **operation_config)
    if response.status_code != 200:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    result = self._deserialize('CertificateCollection', response)
    if raw:
        return ClientRawResponse(result, response)
    return result
def get_all_server_farms(
        self, detailed=None, custom_headers=None, raw=False, **operation_config):
    """List all App Service plans (server farms) in the subscription.

    :param detailed: when set, asks the service for the detailed view
    :type detailed: bool
    :param custom_headers: headers that will be added to the request
    :type custom_headers: dict
    :param raw: return the direct ClientRawResponse alongside the
     deserialized response
    :type raw: bool
    :rtype: ``ServerFarmCollection``, or ClientRawResponse if raw=true
    :raises: CloudError if the service returns a non-200 status
    """
    # Expand the ARM template path with the serialized subscription id.
    template = '/subscriptions/{subscriptionId}/providers/Microsoft.Web/serverfarms'
    url = self._client.format_url(
        template,
        subscriptionId=self._serialize.url(
            "self.config.subscription_id", self.config.subscription_id, 'str'))

    # Query string: optional detail flag first, then the API version.
    params = {}
    if detailed is not None:
        params['detailed'] = self._serialize.query("detailed", detailed, 'bool')
    params['api-version'] = self._serialize.query(
        "self.config.api_version", self.config.api_version, 'str')

    # JSON content type plus the optional tracing / language headers.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header(
            "self.config.accept_language", self.config.accept_language, 'str')

    # Issue the GET and fail fast on anything but HTTP 200.
    request = self._client.get(url, params)
    response = self._client.send(request, headers, **operation_config)
    if response.status_code != 200:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    result = self._deserialize('ServerFarmCollection', response)
    if raw:
        return ClientRawResponse(result, response)
    return result
def get_all_sites(
        self, custom_headers=None, raw=False, **operation_config):
    """List all web apps (sites) in the current subscription.

    :param custom_headers: headers that will be added to the request
    :type custom_headers: dict
    :param raw: return the direct ClientRawResponse alongside the
     deserialized response
    :type raw: bool
    :rtype: ``SiteCollection``, or ClientRawResponse if raw=true
    :raises: CloudError if the service returns a non-200 status
    """
    # Expand the ARM template path with the serialized subscription id.
    template = '/subscriptions/{subscriptionId}/providers/Microsoft.Web/sites'
    url = self._client.format_url(
        template,
        subscriptionId=self._serialize.url(
            "self.config.subscription_id", self.config.subscription_id, 'str'))

    # Query string: only the API version is required.
    params = {
        'api-version': self._serialize.query(
            "self.config.api_version", self.config.api_version, 'str'),
    }

    # JSON content type plus the optional tracing / language headers.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header(
            "self.config.accept_language", self.config.accept_language, 'str')

    # Issue the GET and fail fast on anything but HTTP 200.
    request = self._client.get(url, params)
    response = self._client.send(request, headers, **operation_config)
    if response.status_code != 200:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    result = self._deserialize('SiteCollection', response)
    if raw:
        return ClientRawResponse(result, response)
    return result
def get_all_hosting_environments(
        self, custom_headers=None, raw=False, **operation_config):
    """List all App Service Environments in the current subscription.

    :param custom_headers: headers that will be added to the request
    :type custom_headers: dict
    :param raw: return the direct ClientRawResponse alongside the
     deserialized response
    :type raw: bool
    :rtype: ``HostingEnvironmentCollection``, or ClientRawResponse if raw=true
    :raises: CloudError if the service returns a non-200 status
    """
    # Expand the ARM template path with the serialized subscription id.
    template = '/subscriptions/{subscriptionId}/providers/Microsoft.Web/hostingEnvironments'
    url = self._client.format_url(
        template,
        subscriptionId=self._serialize.url(
            "self.config.subscription_id", self.config.subscription_id, 'str'))

    # Query string: only the API version is required.
    params = {
        'api-version': self._serialize.query(
            "self.config.api_version", self.config.api_version, 'str'),
    }

    # JSON content type plus the optional tracing / language headers.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header(
            "self.config.accept_language", self.config.accept_language, 'str')

    # Issue the GET and fail fast on anything but HTTP 200.
    request = self._client.get(url, params)
    response = self._client.send(request, headers, **operation_config)
    if response.status_code != 200:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    result = self._deserialize('HostingEnvironmentCollection', response)
    if raw:
        return ClientRawResponse(result, response)
    return result
def get_all_managed_hosting_environments(
        self, custom_headers=None, raw=False, **operation_config):
    """List all managed hosting environments in the current subscription.

    :param custom_headers: headers that will be added to the request
    :type custom_headers: dict
    :param raw: return the direct ClientRawResponse alongside the
     deserialized response
    :type raw: bool
    :rtype: ``ManagedHostingEnvironmentCollection``, or ClientRawResponse
     if raw=true
    :raises: CloudError if the service returns a non-200 status
    """
    # Expand the ARM template path with the serialized subscription id.
    template = '/subscriptions/{subscriptionId}/providers/Microsoft.Web/managedHostingEnvironments'
    url = self._client.format_url(
        template,
        subscriptionId=self._serialize.url(
            "self.config.subscription_id", self.config.subscription_id, 'str'))

    # Query string: only the API version is required.
    params = {
        'api-version': self._serialize.query(
            "self.config.api_version", self.config.api_version, 'str'),
    }

    # JSON content type plus the optional tracing / language headers.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header(
            "self.config.accept_language", self.config.accept_language, 'str')

    # Issue the GET and fail fast on anything but HTTP 200.
    request = self._client.get(url, params)
    response = self._client.send(request, headers, **operation_config)
    if response.status_code != 200:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    result = self._deserialize('ManagedHostingEnvironmentCollection', response)
    if raw:
        return ClientRawResponse(result, response)
    return result
def get_all_classic_mobile_services(
        self, custom_headers=None, raw=False, **operation_config):
    """List all classic mobile services in the current subscription.

    :param custom_headers: headers that will be added to the request
    :type custom_headers: dict
    :param raw: return the direct ClientRawResponse alongside the
     deserialized response
    :type raw: bool
    :rtype: ``ClassicMobileServiceCollection``, or ClientRawResponse
     if raw=true
    :raises: CloudError if the service returns a non-200 status
    """
    # Expand the ARM template path with the serialized subscription id.
    template = '/subscriptions/{subscriptionId}/providers/Microsoft.Web/classicMobileServices'
    url = self._client.format_url(
        template,
        subscriptionId=self._serialize.url(
            "self.config.subscription_id", self.config.subscription_id, 'str'))

    # Query string: only the API version is required.
    params = {
        'api-version': self._serialize.query(
            "self.config.api_version", self.config.api_version, 'str'),
    }

    # JSON content type plus the optional tracing / language headers.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header(
            "self.config.accept_language", self.config.accept_language, 'str')

    # Issue the GET and fail fast on anything but HTTP 200.
    request = self._client.get(url, params)
    response = self._client.send(request, headers, **operation_config)
    if response.status_code != 200:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    result = self._deserialize('ClassicMobileServiceCollection', response)
    if raw:
        return ClientRawResponse(result, response)
    return result
def list_premier_add_on_offers(
        self, custom_headers=None, raw=False, **operation_config):
    """List the premier add-on offers available to the subscription.

    :param custom_headers: headers that will be added to the request
    :type custom_headers: dict
    :param raw: return the direct ClientRawResponse alongside the
     deserialized response
    :type raw: bool
    :rtype: object (untyped service payload), or ClientRawResponse
     if raw=true
    :raises: CloudError if the service returns a non-200 status
    """
    # Expand the ARM template path with the serialized subscription id.
    template = '/subscriptions/{subscriptionId}/providers/Microsoft.Web/premieraddonoffers'
    url = self._client.format_url(
        template,
        subscriptionId=self._serialize.url(
            "self.config.subscription_id", self.config.subscription_id, 'str'))

    # Query string: only the API version is required.
    params = {
        'api-version': self._serialize.query(
            "self.config.api_version", self.config.api_version, 'str'),
    }

    # JSON content type plus the optional tracing / language headers.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header(
            "self.config.accept_language", self.config.accept_language, 'str')

    # Issue the GET and fail fast on anything but HTTP 200.
    request = self._client.get(url, params)
    response = self._client.send(request, headers, **operation_config)
    if response.status_code != 200:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    result = self._deserialize('object', response)
    if raw:
        return ClientRawResponse(result, response)
    return result
def is_hosting_environment_name_available(
        self, name, custom_headers=None, raw=False, **operation_config):
    """Check whether a hosting-environment name is available.

    :param name: the hosting environment name to check
    :type name: str
    :param custom_headers: headers that will be added to the request
    :type custom_headers: dict
    :param raw: return the direct ClientRawResponse alongside the
     deserialized response
    :type raw: bool
    :rtype: object (untyped availability payload), or ClientRawResponse
     if raw=true
    :raises: CloudError if the service returns a non-200 status
    """
    # Expand the ARM template path with the serialized subscription id.
    template = '/subscriptions/{subscriptionId}/providers/Microsoft.Web/ishostingenvironmentnameavailable'
    url = self._client.format_url(
        template,
        subscriptionId=self._serialize.url(
            "self.config.subscription_id", self.config.subscription_id, 'str'))

    # Query string: the candidate name, then the API version.
    params = {
        'name': self._serialize.query("name", name, 'str'),
        'api-version': self._serialize.query(
            "self.config.api_version", self.config.api_version, 'str'),
    }

    # JSON content type plus the optional tracing / language headers.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header(
            "self.config.accept_language", self.config.accept_language, 'str')

    # Issue the GET and fail fast on anything but HTTP 200.
    request = self._client.get(url, params)
    response = self._client.send(request, headers, **operation_config)
    if response.status_code != 200:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    result = self._deserialize('object', response)
    if raw:
        return ClientRawResponse(result, response)
    return result
def is_hosting_environment_with_legacy_name_available(
        self, name, custom_headers=None, raw=False, **operation_config):
    """Check hosting-environment name availability via the legacy route
    (name embedded in the URL path instead of the query string).

    :param name: the hosting environment name to check
    :type name: str
    :param custom_headers: headers that will be added to the request
    :type custom_headers: dict
    :param raw: return the direct ClientRawResponse alongside the
     deserialized response
    :type raw: bool
    :rtype: object (untyped availability payload), or ClientRawResponse
     if raw=true
    :raises: CloudError if the service returns a non-200 status
    """
    # Expand the ARM template path; the candidate name is a path segment here.
    template = '/subscriptions/{subscriptionId}/providers/Microsoft.Web/ishostingenvironmentnameavailable/{name}'
    url = self._client.format_url(
        template,
        name=self._serialize.url("name", name, 'str'),
        subscriptionId=self._serialize.url(
            "self.config.subscription_id", self.config.subscription_id, 'str'))

    # Query string: only the API version is required.
    params = {
        'api-version': self._serialize.query(
            "self.config.api_version", self.config.api_version, 'str'),
    }

    # JSON content type plus the optional tracing / language headers.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header(
            "self.config.accept_language", self.config.accept_language, 'str')

    # Issue the GET and fail fast on anything but HTTP 200.
    request = self._client.get(url, params)
    response = self._client.send(request, headers, **operation_config)
    if response.status_code != 200:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    result = self._deserialize('object', response)
    if raw:
        return ClientRawResponse(result, response)
    return result
def check_name_availability(
        self, request, custom_headers=None, raw=False, **operation_config):
    """Check whether a resource name is available in the subscription.

    :param request: the name-availability request message (serialized as
     a ``ResourceNameAvailabilityRequest`` model)
    :param custom_headers: headers that will be added to the request
    :type custom_headers: dict
    :param raw: return the direct ClientRawResponse alongside the
     deserialized response
    :type raw: bool
    :rtype: ``ResourceNameAvailability``, or ClientRawResponse if raw=true
    :raises: CloudError if the service returns a non-200 status
    """
    # Construct URL.
    url = '/subscriptions/{subscriptionId}/providers/Microsoft.Web/checknameavailability'
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct query parameters.
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')

    # Construct headers.
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Serialize the body BEFORE building the HTTP request object. The
    # generated code reused the name `request` for the outgoing HTTP
    # request, shadowing the `request` parameter; use a distinct name so
    # the message is still reachable after this point.
    body_content = self._serialize.body(request, 'ResourceNameAvailabilityRequest')

    # Construct and send the POST request.
    http_request = self._client.post(url, query_parameters)
    response = self._client.send(
        http_request, header_parameters, body_content, **operation_config)

    if response.status_code not in [200]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('ResourceNameAvailability', response)

    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response

    return deserialized
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.