Schema:

| Column | Type | Range / values | Nullable |
|---|---|---|---|
| hexsha | string | length 40 | no |
| size | int64 | 4 to 996k | no |
| ext | string | 8 distinct values | no |
| lang | string | 1 distinct value | no |
| max_stars_repo_path | string | length 4 to 245 | no |
| max_stars_repo_name | string | length 6 to 130 | no |
| max_stars_repo_head_hexsha | string | length 40 | no |
| max_stars_repo_licenses | list | length 1 to 10 | no |
| max_stars_count | int64 | 1 to 191k | yes |
| max_stars_repo_stars_event_min_datetime | string | length 24 | yes |
| max_stars_repo_stars_event_max_datetime | string | length 24 | yes |
| max_issues_repo_path | string | length 4 to 245 | no |
| max_issues_repo_name | string | length 6 to 130 | no |
| max_issues_repo_head_hexsha | string | length 40 | no |
| max_issues_repo_licenses | list | length 1 to 10 | no |
| max_issues_count | int64 | 1 to 67k | yes |
| max_issues_repo_issues_event_min_datetime | string | length 24 | yes |
| max_issues_repo_issues_event_max_datetime | string | length 24 | yes |
| max_forks_repo_path | string | length 4 to 245 | no |
| max_forks_repo_name | string | length 6 to 130 | no |
| max_forks_repo_head_hexsha | string | length 40 | no |
| max_forks_repo_licenses | list | length 1 to 10 | no |
| max_forks_count | int64 | 1 to 105k | yes |
| max_forks_repo_forks_event_min_datetime | string | length 24 | yes |
| max_forks_repo_forks_event_max_datetime | string | length 24 | yes |
| content | string | length 4 to 996k | no |
| avg_line_length | float64 | 1.33 to 58.2k | no |
| max_line_length | int64 | 2 to 323k | no |
| alphanum_fraction | float64 | 0 to 0.97 | no |
| content_no_comment | string | length 0 to 946k | no |
| is_comment_constant_removed | bool | 2 classes | no |
| is_sharp_comment_removed | bool | 1 class | no |
Row 1 (pre_commit/main.py):

| Field | Value |
|---|---|
| hexsha | 790b34773520eea8188589b367d6905a106d2963 |
| size | 14,543 |
| ext | py |
| lang | Python |
| max_stars/issues/forks_repo_path | pre_commit/main.py |
| max_stars/issues/forks_repo_name | utek/pre-commit |
| max_stars/issues/forks_repo_head_hexsha | 282527ef16588b943fe7c2ab8aadd3269946922f |
| max_stars/issues/forks_repo_licenses | ["MIT"] |
| star/issue/fork counts and event datetimes | null |

content:

```python
import argparse
import logging
import os
import sys
from typing import Any
from typing import Optional
from typing import Sequence
from typing import Union
import pre_commit.constants as C
from pre_commit import color
from pre_commit import git
from pre_commit.commands.autoupdate import autoupdate
from pre_commit.commands.clean import clean
from pre_commit.commands.gc import gc
from pre_commit.commands.hook_impl import hook_impl
from pre_commit.commands.init_templatedir import init_templatedir
from pre_commit.commands.install_uninstall import install
from pre_commit.commands.install_uninstall import install_hooks
from pre_commit.commands.install_uninstall import uninstall
from pre_commit.commands.migrate_config import migrate_config
from pre_commit.commands.run import run
from pre_commit.commands.sample_config import sample_config
from pre_commit.commands.try_repo import try_repo
from pre_commit.error_handler import error_handler
from pre_commit.error_handler import FatalError
from pre_commit.logging_handler import logging_handler
from pre_commit.store import Store
from pre_commit.util import CalledProcessError
logger = logging.getLogger('pre_commit')
# https://github.com/pre-commit/pre-commit/issues/217
# On OSX, making a virtualenv using pyvenv at . causes `virtualenv` and `pip`
# to install packages to the wrong place. We don't want anything to deal with
# pyvenv
os.environ.pop('__PYVENV_LAUNCHER__', None)
COMMANDS_NO_GIT = {'clean', 'gc', 'init-templatedir', 'sample-config'}
def _add_color_option(parser: argparse.ArgumentParser) -> None:
parser.add_argument(
'--color', default=os.environ.get('PRE_COMMIT_COLOR', 'auto'),
type=color.use_color,
metavar='{' + ','.join(color.COLOR_CHOICES) + '}',
help='Whether to use color in output. Defaults to `%(default)s`.',
)
def _add_config_option(parser: argparse.ArgumentParser) -> None:
parser.add_argument(
'-c', '--config', default=C.CONFIG_FILE,
help='Path to alternate config file',
)
class AppendReplaceDefault(argparse.Action):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.appended = False
def __call__(
self,
parser: argparse.ArgumentParser,
namespace: argparse.Namespace,
values: Union[str, Sequence[str], None],
option_string: Optional[str] = None,
) -> None:
if not self.appended:
setattr(namespace, self.dest, [])
self.appended = True
getattr(namespace, self.dest).append(values)
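# Illustrative behavior (sketch): with `default=['pre-commit']`, parsing
# `-t pre-push -t commit-msg` clears the default on the first occurrence and
# appends after that, so `args.hook_types == ['pre-push', 'commit-msg']`;
# with no `-t` flags the default list is kept as-is.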
def _add_hook_type_option(parser: argparse.ArgumentParser) -> None:
parser.add_argument(
'-t', '--hook-type', choices=(
'pre-commit', 'pre-merge-commit', 'pre-push',
'prepare-commit-msg', 'commit-msg', 'post-checkout',
),
action=AppendReplaceDefault,
default=['pre-commit'],
dest='hook_types',
)
def _add_run_options(parser: argparse.ArgumentParser) -> None:
parser.add_argument('hook', nargs='?', help='A single hook-id to run')
parser.add_argument('--verbose', '-v', action='store_true', default=False)
mutex_group = parser.add_mutually_exclusive_group(required=False)
mutex_group.add_argument(
'--all-files', '-a', action='store_true', default=False,
help='Run on all the files in the repo.',
)
mutex_group.add_argument(
'--files', nargs='*', default=[],
help='Specific filenames to run hooks on.',
)
parser.add_argument(
'--show-diff-on-failure', action='store_true',
help='When hooks fail, run `git diff` directly afterward.',
)
parser.add_argument(
'--hook-stage', choices=C.STAGES, default='commit',
help='The stage during which the hook is fired. One of %(choices)s',
)
parser.add_argument(
'--from-ref', '--source', '-s',
help=(
            '(for usage with `--to-ref`) -- this option represents the '
'original ref in a `from_ref...to_ref` diff expression. '
'For `pre-push` hooks, this represents the branch you are pushing '
'to. '
'For `post-checkout` hooks, this represents the branch that was '
'previously checked out.'
),
)
parser.add_argument(
'--to-ref', '--origin', '-o',
help=(
            '(for usage with `--from-ref`) -- this option represents the '
'destination ref in a `from_ref...to_ref` diff expression. '
'For `pre-push` hooks, this represents the branch being pushed. '
'For `post-checkout` hooks, this represents the branch that is '
'now checked out.'
),
)
parser.add_argument(
'--commit-msg-filename',
help='Filename to check when running during `commit-msg`',
)
parser.add_argument(
'--remote-name', help='Remote name used by `git push`.',
)
parser.add_argument('--remote-url', help='Remote url used by `git push`.')
parser.add_argument(
'--checkout-type',
help=(
'Indicates whether the checkout was a branch checkout '
'(changing branches, flag=1) or a file checkout (retrieving a '
'file from the index, flag=0).'
),
)
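# A few illustrative invocations of these options (hook ids hypothetical):
#   pre-commit run my-hook --all-files
#   pre-commit run --files setup.py pre_commit/main.py
#   pre-commit run --hook-stage push --from-ref HEAD^ --to-ref HEAD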
def _adjust_args_and_chdir(args: argparse.Namespace) -> None:
# `--config` was specified relative to the non-root working directory
if os.path.exists(args.config):
args.config = os.path.abspath(args.config)
if args.command in {'run', 'try-repo'}:
args.files = [os.path.abspath(filename) for filename in args.files]
if args.command == 'try-repo' and os.path.exists(args.repo):
args.repo = os.path.abspath(args.repo)
try:
toplevel = git.get_root()
except CalledProcessError:
raise FatalError(
'git failed. Is it installed, and are you in a Git repository '
'directory?',
)
else:
if toplevel == '': # pragma: no cover (old git)
raise FatalError(
'git toplevel unexpectedly empty! make sure you are not '
'inside the `.git` directory of your repository.',
)
else:
os.chdir(toplevel)
args.config = os.path.relpath(args.config)
if args.command in {'run', 'try-repo'}:
args.files = [os.path.relpath(filename) for filename in args.files]
if args.command == 'try-repo' and os.path.exists(args.repo):
args.repo = os.path.relpath(args.repo)
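# For example, `pre-commit run --files foo.py` invoked from a subdirectory
# first resolves `foo.py` and the config path to absolute paths, chdirs to
# the git toplevel, then re-expresses both relative to that toplevel.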
def main(argv: Optional[Sequence[str]] = None) -> int:
argv = argv if argv is not None else sys.argv[1:]
parser = argparse.ArgumentParser(prog='pre-commit')
# https://stackoverflow.com/a/8521644/812183
parser.add_argument(
'-V', '--version',
action='version',
version=f'%(prog)s {C.VERSION}',
)
subparsers = parser.add_subparsers(dest='command')
autoupdate_parser = subparsers.add_parser(
'autoupdate',
help="Auto-update pre-commit config to the latest repos' versions.",
)
_add_color_option(autoupdate_parser)
_add_config_option(autoupdate_parser)
autoupdate_parser.add_argument(
'--bleeding-edge', action='store_true',
help=(
'Update to the bleeding edge of `master` instead of the latest '
'tagged version (the default behavior).'
),
)
autoupdate_parser.add_argument(
'--freeze', action='store_true',
help='Store "frozen" hashes in `rev` instead of tag names',
)
autoupdate_parser.add_argument(
'--repo', dest='repos', action='append', metavar='REPO',
help='Only update this repository -- may be specified multiple times.',
)
clean_parser = subparsers.add_parser(
'clean', help='Clean out pre-commit files.',
)
_add_color_option(clean_parser)
_add_config_option(clean_parser)
hook_impl_parser = subparsers.add_parser('hook-impl')
_add_color_option(hook_impl_parser)
_add_config_option(hook_impl_parser)
hook_impl_parser.add_argument('--hook-type')
hook_impl_parser.add_argument('--hook-dir')
hook_impl_parser.add_argument(
'--skip-on-missing-config', action='store_true',
)
hook_impl_parser.add_argument(dest='rest', nargs=argparse.REMAINDER)
gc_parser = subparsers.add_parser('gc', help='Clean unused cached repos.')
_add_color_option(gc_parser)
_add_config_option(gc_parser)
init_templatedir_parser = subparsers.add_parser(
'init-templatedir',
help=(
'Install hook script in a directory intended for use with '
'`git config init.templateDir`.'
),
)
_add_color_option(init_templatedir_parser)
_add_config_option(init_templatedir_parser)
init_templatedir_parser.add_argument(
'directory', help='The directory in which to write the hook script.',
)
_add_hook_type_option(init_templatedir_parser)
install_parser = subparsers.add_parser(
'install', help='Install the pre-commit script.',
)
_add_color_option(install_parser)
_add_config_option(install_parser)
install_parser.add_argument(
'-f', '--overwrite', action='store_true',
help='Overwrite existing hooks / remove migration mode.',
)
install_parser.add_argument(
'--install-hooks', action='store_true',
help=(
'Whether to install hook environments for all environments '
'in the config file.'
),
)
_add_hook_type_option(install_parser)
install_parser.add_argument(
'--allow-missing-config', action='store_true', default=False,
help=(
'Whether to allow a missing `pre-commit` configuration file '
'or exit with a failure code.'
),
)
install_hooks_parser = subparsers.add_parser(
'install-hooks',
help=(
'Install hook environments for all environments in the config '
'file. You may find `pre-commit install --install-hooks` more '
'useful.'
),
)
_add_color_option(install_hooks_parser)
_add_config_option(install_hooks_parser)
migrate_config_parser = subparsers.add_parser(
'migrate-config',
help='Migrate list configuration to new map configuration.',
)
_add_color_option(migrate_config_parser)
_add_config_option(migrate_config_parser)
run_parser = subparsers.add_parser('run', help='Run hooks.')
_add_color_option(run_parser)
_add_config_option(run_parser)
_add_run_options(run_parser)
sample_config_parser = subparsers.add_parser(
'sample-config', help=f'Produce a sample {C.CONFIG_FILE} file',
)
_add_color_option(sample_config_parser)
_add_config_option(sample_config_parser)
try_repo_parser = subparsers.add_parser(
'try-repo',
help='Try the hooks in a repository, useful for developing new hooks.',
)
_add_color_option(try_repo_parser)
_add_config_option(try_repo_parser)
try_repo_parser.add_argument(
'repo', help='Repository to source hooks from.',
)
try_repo_parser.add_argument(
'--ref', '--rev',
help=(
'Manually select a rev to run against, otherwise the `HEAD` '
'revision will be used.'
),
)
_add_run_options(try_repo_parser)
uninstall_parser = subparsers.add_parser(
'uninstall', help='Uninstall the pre-commit script.',
)
_add_color_option(uninstall_parser)
_add_config_option(uninstall_parser)
_add_hook_type_option(uninstall_parser)
help = subparsers.add_parser(
'help', help='Show help for a specific command.',
)
help.add_argument('help_cmd', nargs='?', help='Command to show help for.')
# argparse doesn't really provide a way to use a `default` subparser
if len(argv) == 0:
argv = ['run']
args = parser.parse_args(argv)
if args.command == 'help' and args.help_cmd:
parser.parse_args([args.help_cmd, '--help'])
elif args.command == 'help':
parser.parse_args(['--help'])
with error_handler(), logging_handler(args.color):
if args.command not in COMMANDS_NO_GIT:
_adjust_args_and_chdir(args)
git.check_for_cygwin_mismatch()
store = Store()
store.mark_config_used(args.config)
if args.command == 'autoupdate':
return autoupdate(
args.config, store,
tags_only=not args.bleeding_edge,
freeze=args.freeze,
repos=args.repos,
)
elif args.command == 'clean':
return clean(store)
elif args.command == 'gc':
return gc(store)
elif args.command == 'hook-impl':
return hook_impl(
store,
config=args.config,
color=args.color,
hook_type=args.hook_type,
hook_dir=args.hook_dir,
skip_on_missing_config=args.skip_on_missing_config,
args=args.rest[1:],
)
elif args.command == 'install':
return install(
args.config, store,
hook_types=args.hook_types,
overwrite=args.overwrite,
hooks=args.install_hooks,
skip_on_missing_config=args.allow_missing_config,
)
elif args.command == 'init-templatedir':
return init_templatedir(
args.config, store, args.directory,
hook_types=args.hook_types,
)
elif args.command == 'install-hooks':
return install_hooks(args.config, store)
elif args.command == 'migrate-config':
return migrate_config(args.config)
elif args.command == 'run':
return run(args.config, store, args)
elif args.command == 'sample-config':
return sample_config()
elif args.command == 'try-repo':
return try_repo(args)
elif args.command == 'uninstall':
return uninstall(hook_types=args.hook_types)
else:
raise NotImplementedError(
f'Command {args.command} not implemented.',
)
raise AssertionError(
f'Command {args.command} failed to exit with a returncode',
)
if __name__ == '__main__':
    exit(main())
```

| Field | Value |
|---|---|
| avg_line_length | 35.384428 |
| max_line_length | 79 |
| alphanum_fraction | 0.638864 |
| content_no_comment | near-verbatim duplicate of content with some comment blocks stripped |
| is_comment_constant_removed | true |
| is_sharp_comment_removed | true |

Row 2 (tensorflow_model_analysis/api/model_eval_lib.py):

| Field | Value |
|---|---|
| hexsha | 790b34d7ab15c08ea0abc256b7e65c13114a4651 |
| size | 45,383 |
| ext | py |
| lang | Python |
| max_stars/issues/forks_repo_path | tensorflow_model_analysis/api/model_eval_lib.py |
| max_stars/issues/forks_repo_name | Bobgy/model-analysis |
| max_stars/issues/forks_repo_head_hexsha | a964d2e8430b447c898d271fb6e6d8f5b99adf4b |
| max_stars/issues/forks_repo_licenses | ["Apache-2.0"] |
| max_issues_count | 1 |
| max_issues_repo_issues_event_min/max_datetime | 2020-03-03T03:34:37.000Z |
| star and fork counts and event datetimes | null |

content:

```python
# Lint as: python3
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API for Tensorflow Model Analysis."""
# TODO(b/149126671): Put ValidationResultsWriter in a separate file.
from __future__ import absolute_import
from __future__ import division
# Standard __future__ imports
from __future__ import print_function
import os
import pickle
import tempfile
from typing import Any, Dict, List, NamedTuple, Optional, Text, Tuple, Union
import apache_beam as beam
import six
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tensorflow_model_analysis import config
from tensorflow_model_analysis import constants
from tensorflow_model_analysis import model_util
from tensorflow_model_analysis import types
from tensorflow_model_analysis import version as tfma_version
from tensorflow_model_analysis.eval_saved_model import constants as eval_constants
from tensorflow_model_analysis.evaluators import evaluator
from tensorflow_model_analysis.evaluators import metrics_and_plots_evaluator
from tensorflow_model_analysis.evaluators import metrics_and_plots_evaluator_v2
from tensorflow_model_analysis.extractors import extractor
from tensorflow_model_analysis.extractors import input_extractor
from tensorflow_model_analysis.extractors import predict_extractor
from tensorflow_model_analysis.extractors import predict_extractor_v2
from tensorflow_model_analysis.extractors import slice_key_extractor
from tensorflow_model_analysis.extractors import tflite_predict_extractor
from tensorflow_model_analysis.post_export_metrics import post_export_metrics
from tensorflow_model_analysis.proto import config_pb2
from tensorflow_model_analysis.proto import validation_result_pb2
from tensorflow_model_analysis.slicer import slicer_lib as slicer
from tensorflow_model_analysis.validators import validator
from tensorflow_model_analysis.writers import metrics_and_plots_serialization
from tensorflow_model_analysis.writers import metrics_plots_and_validations_writer
from tensorflow_model_analysis.writers import writer
from google.protobuf import json_format
_EVAL_CONFIG_FILE = 'eval_config.json'
def _assert_tensorflow_version():
"""Check that we're using a compatible TF version."""
# Fail with a clear error in case we are not using a compatible TF version.
major, minor, _ = tf.version.VERSION.split('.')
if (int(major) not in (1, 2)) or (int(major) == 1 and int(minor) < 15):
raise RuntimeError(
'Tensorflow version >= 1.15, < 3 is required. Found (%s). Please '
'install the latest 1.x or 2.x version from '
'https://github.com/tensorflow/tensorflow. ' % tf.version.VERSION)
if int(major) == 2:
tf.compat.v1.logging.warning(
'Tensorflow version (%s) found. Note that TFMA support for TF 2.0 '
'is currently in beta' % tf.version.VERSION)
def _check_version(version: Text, path: Text):
if not version:
raise ValueError(
'could not find TFMA version in raw deserialized dictionary for '
'file at %s' % path)
# We don't actually do any checking for now, since we don't have any
# compatibility issues.
def _is_legacy_eval(eval_shared_model: Optional[types.EvalSharedModel],
eval_config: Optional[config.EvalConfig]):
"""Returns True if legacy evaluation is being used."""
  # A legacy evaluation is an evaluation that uses only a single
  # EvalSharedModel,
# has no tags (or uses "eval" as its tag), and does not specify an eval_config
# (or specifies an eval_config with no metrics). The legacy evaluation is
# based on using add_metrics_callbacks to create a modified version of the
# graph saved with an EvalSavedModel. The newer version of evaluation supports
# both add_metrics_callbacks as well as metrics defined in MetricsSpecs inside
# of EvalConfig. The newer version works with both "eval" and serving models
# and also supports multi-model evaluation. This function is used by code to
# support backwards compatibility for callers that have not updated to use the
# new EvalConfig.
return (eval_shared_model and not isinstance(eval_shared_model, dict) and
((not eval_shared_model.model_loader.tags or
eval_constants.EVAL_TAG in eval_shared_model.model_loader.tags) and
(not eval_config or not eval_config.metrics_specs)))
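# For example, a single (non-dict) EvalSharedModel whose loader has
# tags=['eval'] and an EvalConfig with no metrics_specs is treated as legacy,
# whereas tags=['serve'] or any metrics_specs selects the newer code path.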
def _serialize_eval_run(eval_config: config.EvalConfig, data_location: Text,
file_format: Text, model_locations: Dict[Text,
Text]) -> Text:
return json_format.MessageToJson(
config_pb2.EvalRun(
eval_config=eval_config,
version=tfma_version.VERSION_STRING,
data_location=data_location,
file_format=file_format,
model_locations=model_locations))
def _load_eval_run(
output_path: Text
) -> Tuple[config.EvalConfig, Text, Text, Dict[Text, Text]]:
"""Returns eval config, data location, file format, and model locations."""
path = os.path.join(output_path, _EVAL_CONFIG_FILE)
if tf.io.gfile.exists(path):
with tf.io.gfile.GFile(path, 'r') as f:
pb = json_format.Parse(f.read(), config_pb2.EvalRun())
_check_version(pb.version, output_path)
return (pb.eval_config, pb.data_location, pb.file_format,
pb.model_locations)
else:
    # Legacy support (to be removed in future).
# The previous version did not include file extension.
path = os.path.splitext(path)[0]
serialized_record = six.next(
tf.compat.v1.python_io.tf_record_iterator(path))
final_dict = pickle.loads(serialized_record)
_check_version(final_dict, output_path)
old_config = final_dict['eval_config']
slicing_specs = None
if old_config.slice_spec:
slicing_specs = [s.to_proto() for s in old_config.slice_spec]
options = config.Options()
options.compute_confidence_intervals.value = (
old_config.compute_confidence_intervals)
options.k_anonymization_count.value = old_config.k_anonymization_count
return (config.EvalConfig(slicing_specs=slicing_specs,
options=options), old_config.data_location, '', {
'': old_config.model_location
})
# The field slicing_metrics is a nested dictionary representing metrics for
# different configurations as defined by MetricKey in metrics_for_slice.proto.
# The levels correspond to output name, class id, metric name, and metric value
# in this order. Note MetricValue uses oneof, so metric values will always
# contain only a single key representing the type in the oneof, and the actual
# metric value is in the value.
EvalResult = NamedTuple( # pylint: disable=invalid-name
'EvalResult',
[('slicing_metrics',
List[Tuple[slicer.SliceKeyType,
Dict[Text, Dict[Text, Dict[Text, Dict[Text, Dict[Text,
Any]]]]]]]),
('plots', List[Tuple[slicer.SliceKeyType, Dict[Text, Any]]]),
('config', config.EvalConfig), ('data_location', Text),
('file_format', Text), ('model_location', Text)])
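# Rough shape of one slicing_metrics entry (names and values hypothetical):
#   ((('age', 5),),                # slice key
#    {'output_name': {'class_id': {'auc': {'doubleValue': 0.92}}}})
# where the innermost key names the MetricValue oneof field that is set.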
# Define types here to avoid type errors between OSS and internal code.
ValidationResult = validation_result_pb2.ValidationResult
def load_validation_result(
validations_file: Text) -> Optional[ValidationResult]:
"""Read and deserialize the ValidationResult."""
validation_records = []
for record in tf.compat.v1.python_io.tf_record_iterator(validations_file):
validation_records.append(ValidationResult.FromString(record))
if validation_records:
assert len(validation_records) == 1
return validation_records[0]
class EvalResults(object):
"""Class for results from multiple model analysis run."""
def __init__(self,
results: List[EvalResult],
mode: Text = constants.UNKNOWN_EVAL_MODE):
supported_modes = [
constants.DATA_CENTRIC_MODE,
constants.MODEL_CENTRIC_MODE,
]
if mode not in supported_modes:
raise ValueError('Mode ' + mode + ' must be one of ' +
Text(supported_modes))
self._results = results
self._mode = mode
def get_results(self) -> List[EvalResult]:
return self._results
def get_mode(self) -> Text:
return self._mode
def make_eval_results(results: List[EvalResult], mode: Text) -> EvalResults:
"""Run model analysis for a single model on multiple data sets.
Args:
results: A list of TFMA evaluation results.
mode: The mode of the evaluation. Currently, tfma.DATA_CENTRIC_MODE and
tfma.MODEL_CENTRIC_MODE are supported.
Returns:
An EvalResults containing all evaluation results. This can be used to
construct a time series view.
"""
return EvalResults(results, mode)
def load_eval_results(output_paths: List[Text],
mode: Text,
model_name: Optional[Text] = None) -> EvalResults:
"""Run model analysis for a single model on multiple data sets.
Args:
output_paths: A list of output paths of completed tfma runs.
mode: The mode of the evaluation. Currently, tfma.DATA_CENTRIC_MODE and
tfma.MODEL_CENTRIC_MODE are supported.
model_name: The name of the model if multiple models are evaluated together.
Returns:
An EvalResults containing the evaluation results serialized at output_paths.
This can be used to construct a time series view.
"""
results = [
load_eval_result(output_path, model_name=model_name)
for output_path in output_paths
]
return make_eval_results(results, mode)
def load_eval_result(output_path: Text,
model_name: Optional[Text] = None) -> EvalResult:
"""Creates an EvalResult object for use with the visualization functions."""
eval_config, data_location, file_format, model_locations = (
_load_eval_run(output_path))
metrics_proto_list = (
metrics_and_plots_serialization.load_and_deserialize_metrics(
path=os.path.join(output_path, constants.METRICS_KEY),
model_name=model_name))
plots_proto_list = (
metrics_and_plots_serialization.load_and_deserialize_plots(
path=os.path.join(output_path, constants.PLOTS_KEY)))
if model_name is None:
model_location = list(model_locations.values())[0]
else:
model_location = model_locations[model_name]
return EvalResult(
slicing_metrics=metrics_proto_list,
plots=plots_proto_list,
config=eval_config,
data_location=data_location,
file_format=file_format,
model_location=model_location)
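# Example usage (output path hypothetical):
#   result = load_eval_result('/tmp/tfma_output')
#   # result.slicing_metrics and result.plots feed the visualization APIs.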
def default_eval_shared_model(
eval_saved_model_path: Text,
add_metrics_callbacks: Optional[List[types.AddMetricsCallbackType]] = None,
include_default_metrics: Optional[bool] = True,
example_weight_key: Optional[Union[Text, Dict[Text, Text]]] = None,
additional_fetches: Optional[List[Text]] = None,
blacklist_feature_fetches: Optional[List[Text]] = None,
tags: Optional[List[Text]] = None,
eval_config: Optional[config.EvalConfig] = None) -> types.EvalSharedModel:
"""Returns default EvalSharedModel.
Args:
eval_saved_model_path: Path to EvalSavedModel.
add_metrics_callbacks: Optional list of callbacks for adding additional
metrics to the graph (see EvalSharedModel for more information on how to
configure additional metrics). Metrics for example count and example
weights will be added automatically.
include_default_metrics: True to include the default metrics that are part
of the saved model graph during evaluation. Note that
eval_config.options.include_default_metrics must also be true.
example_weight_key: Example weight key (single-output model) or dict of
example weight keys (multi-output model) keyed by output name.
additional_fetches: Prefixes of additional tensors stored in
signature_def.inputs that should be fetched at prediction time. The
"features" and "labels" tensors are handled automatically and should not
be included.
blacklist_feature_fetches: List of tensor names in the features dictionary
which should be excluded from the fetches request. This is useful in
scenarios where features are large (e.g. images) and can lead to excessive
memory use if stored.
tags: Model tags (e.g. 'serve' for serving or 'eval' for EvalSavedModel).
eval_config: Eval config. Only used for setting default tags.
"""
if tags is None:
if eval_config:
# Default to serving unless all the signature_names are eval. We do not
# support running with a mixture of eval and non-eval tags.
signatures = [s.signature_name for s in eval_config.model_specs]
if eval_constants.EVAL_TAG in signatures:
if not all(s == eval_constants.EVAL_TAG for s in signatures):
tf.compat.v1.logging.warning(
'mixture of eval and non-eval signatures used: '
'eval_config={}'.format(eval_config))
tags = [eval_constants.EVAL_TAG]
else:
tags = [tf.saved_model.SERVING]
else:
tags = [eval_constants.EVAL_TAG]
# Backwards compatibility for legacy add_metrics_callbacks implementation.
if tags == [eval_constants.EVAL_TAG]:
# PyType doesn't know about the magic exports we do in post_export_metrics.
# Additionally, the lines seem to get reordered in compilation, so we can't
# just put the disable-attr on the add_metrics_callbacks lines.
# pytype: disable=module-attr
if not add_metrics_callbacks:
add_metrics_callbacks = []
# Always compute example weight and example count.
example_count_callback = post_export_metrics.example_count()
add_metrics_callbacks.append(example_count_callback)
if example_weight_key:
if isinstance(example_weight_key, dict):
for output_name, key in example_weight_key.items():
example_weight_callback = post_export_metrics.example_weight(
key, metric_tag=output_name)
add_metrics_callbacks.append(example_weight_callback)
else:
example_weight_callback = post_export_metrics.example_weight(
example_weight_key)
add_metrics_callbacks.append(example_weight_callback)
# pytype: enable=module-attr
return types.EvalSharedModel(
model_path=eval_saved_model_path,
add_metrics_callbacks=add_metrics_callbacks,
include_default_metrics=include_default_metrics,
example_weight_key=example_weight_key,
additional_fetches=additional_fetches,
model_loader=types.ModelLoader(
tags=tags,
construct_fn=model_util.model_construct_fn(
eval_saved_model_path=eval_saved_model_path,
add_metrics_callbacks=add_metrics_callbacks,
include_default_metrics=include_default_metrics,
additional_fetches=additional_fetches,
blacklist_feature_fetches=blacklist_feature_fetches,
tags=tags)))
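# Minimal usage sketch (the model path is an assumption):
#   eval_shared_model = default_eval_shared_model(
#       eval_saved_model_path='/models/my_model',
#       tags=[tf.saved_model.SERVING])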
def default_extractors( # pylint: disable=invalid-name
eval_shared_model: Union[types.EvalSharedModel,
Dict[Text, types.EvalSharedModel]] = None,
eval_config: config.EvalConfig = None,
slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
desired_batch_size: Optional[int] = None,
materialize: Optional[bool] = True) -> List[extractor.Extractor]:
"""Returns the default extractors for use in ExtractAndEvaluate.
Args:
eval_shared_model: Shared model (single-model evaluation) or dict of shared
models keyed by model name (multi-model evaluation). Required unless the
predictions are provided alongside of the features (i.e. model-agnostic
evaluations).
eval_config: Eval config.
slice_spec: Deprecated (use EvalConfig).
desired_batch_size: Optional batch size for batching in Predict.
materialize: True to have extractors create materialized output.
Raises:
NotImplementedError: If eval_config contains mixed serving and eval models.
"""
if eval_config is not None:
eval_config = config.update_eval_config_with_defaults(eval_config)
slice_spec = [
slicer.SingleSliceSpec(spec=spec) for spec in eval_config.slicing_specs
]
if _is_legacy_eval(eval_shared_model, eval_config):
# Backwards compatibility for previous add_metrics_callbacks implementation.
return [
predict_extractor.PredictExtractor(
eval_shared_model, desired_batch_size, materialize=materialize),
slice_key_extractor.SliceKeyExtractor(
slice_spec, materialize=materialize)
]
elif eval_shared_model:
model_types = model_util.get_model_types(eval_config)
if not model_types.issubset(constants.VALID_MODEL_TYPES):
raise NotImplementedError(
'model type must be one of: {}. evalconfig={}'.format(
str(constants.VALID_MODEL_TYPES), eval_config))
if model_types == set([constants.TF_LITE]):
return [
input_extractor.InputExtractor(eval_config=eval_config),
tflite_predict_extractor.TFLitePredictExtractor(
eval_config=eval_config,
eval_shared_model=eval_shared_model,
desired_batch_size=desired_batch_size),
slice_key_extractor.SliceKeyExtractor(
slice_spec, materialize=materialize)
]
elif constants.TF_LITE in model_types:
raise NotImplementedError(
'support for mixing tf_lite and non-tf_lite models is not '
'implemented: eval_config={}'.format(eval_config))
elif (eval_config and all(s.signature_name == eval_constants.EVAL_TAG
for s in eval_config.model_specs)):
return [
predict_extractor.PredictExtractor(
eval_shared_model,
desired_batch_size,
materialize=materialize,
eval_config=eval_config),
slice_key_extractor.SliceKeyExtractor(
slice_spec, materialize=materialize)
]
elif (eval_config and any(s.signature_name == eval_constants.EVAL_TAG
for s in eval_config.model_specs)):
raise NotImplementedError(
'support for mixing eval and non-eval models is not implemented: '
'eval_config={}'.format(eval_config))
else:
return [
input_extractor.InputExtractor(eval_config=eval_config),
predict_extractor_v2.PredictExtractor(
eval_config=eval_config,
eval_shared_model=eval_shared_model,
desired_batch_size=desired_batch_size),
slice_key_extractor.SliceKeyExtractor(
slice_spec, materialize=materialize)
]
else:
return [
input_extractor.InputExtractor(eval_config=eval_config),
slice_key_extractor.SliceKeyExtractor(
slice_spec, materialize=materialize)
]
def default_evaluators( # pylint: disable=invalid-name
eval_shared_model: Optional[Union[types.EvalSharedModel,
Dict[Text,
types.EvalSharedModel]]] = None,
eval_config: config.EvalConfig = None,
compute_confidence_intervals: Optional[bool] = False,
k_anonymization_count: int = 1,
desired_batch_size: Optional[int] = None,
serialize: bool = False,
random_seed_for_testing: Optional[int] = None) -> List[evaluator.Evaluator]:
"""Returns the default evaluators for use in ExtractAndEvaluate.
Args:
eval_shared_model: Optional shared model (single-model evaluation) or dict
of shared models keyed by model name (multi-model evaluation). Only
required if there are metrics to be computed in-graph using the model.
eval_config: Eval config.
compute_confidence_intervals: Deprecated (use eval_config).
k_anonymization_count: Deprecated (use eval_config).
desired_batch_size: Optional batch size for batching in combiner.
serialize: Deprecated.
random_seed_for_testing: Provide for deterministic tests only.
"""
disabled_outputs = []
if eval_config:
eval_config = config.update_eval_config_with_defaults(eval_config)
disabled_outputs = eval_config.options.disabled_outputs.values
if model_util.get_model_types(eval_config) == set([constants.TF_LITE]):
# no in-graph metrics present when tflite is used.
if eval_shared_model:
if isinstance(eval_shared_model, dict):
eval_shared_model = {
k: v._replace(include_default_metrics=False)
for k, v in eval_shared_model.items()
}
else:
eval_shared_model = eval_shared_model._replace(
include_default_metrics=False)
if (constants.METRICS_KEY in disabled_outputs and
constants.PLOTS_KEY in disabled_outputs):
return []
if _is_legacy_eval(eval_shared_model, eval_config):
# Backwards compatibility for previous add_metrics_callbacks implementation.
if eval_config is not None:
if eval_config.options.HasField('compute_confidence_intervals'):
compute_confidence_intervals = (
eval_config.options.compute_confidence_intervals.value)
if eval_config.options.HasField('k_anonymization_count'):
k_anonymization_count = eval_config.options.k_anonymization_count.value
return [
metrics_and_plots_evaluator.MetricsAndPlotsEvaluator(
eval_shared_model,
compute_confidence_intervals=compute_confidence_intervals,
k_anonymization_count=k_anonymization_count,
desired_batch_size=desired_batch_size,
serialize=serialize,
random_seed_for_testing=random_seed_for_testing)
]
else:
return [
metrics_and_plots_evaluator_v2.MetricsAndPlotsEvaluator(
eval_config=eval_config, eval_shared_model=eval_shared_model)
]
def default_writers(
output_path: Optional[Text],
eval_shared_model: Optional[Union[types.EvalSharedModel,
Dict[Text, types.EvalSharedModel]]] = None
) -> List[writer.Writer]: # pylint: disable=invalid-name
"""Returns the default writers for use in WriteResults.
Args:
output_path: Output path.
eval_shared_model: Optional shared model (single-model evaluation) or dict
of shared models keyed by model name (multi-model evaluation). Only
required if legacy add_metrics_callbacks are used.
"""
add_metric_callbacks = []
# The add_metric_callbacks are used in the metrics and plots serialization
# code to post process the metric data by calling populate_stats_and_pop.
# While both the legacy (V1) and new (V2) evaluation implementations support
# EvalSavedModels using add_metric_callbacks, this particular code is only
# required for the legacy evaluation based on the MetricsAndPlotsEvaluator.
# The V2 MetricsAndPlotsEvaluator output requires no additional processing.
# Since the V1 code only supports a single EvalSharedModel, we only set the
# add_metrics_callbacks if a dict is not passed.
if eval_shared_model and not isinstance(eval_shared_model, dict):
add_metric_callbacks = eval_shared_model.add_metrics_callbacks
output_paths = {
constants.METRICS_KEY:
os.path.join(output_path, constants.METRICS_KEY),
constants.PLOTS_KEY:
os.path.join(output_path, constants.PLOTS_KEY),
constants.VALIDATIONS_KEY:
os.path.join(output_path, constants.VALIDATIONS_KEY)
}
return [
metrics_plots_and_validations_writer.MetricsPlotsAndValidationsWriter(
output_paths=output_paths,
add_metrics_callbacks=add_metric_callbacks),
]
@beam.ptransform_fn
@beam.typehints.with_input_types(bytes)
@beam.typehints.with_output_types(types.Extracts)
def InputsToExtracts( # pylint: disable=invalid-name
inputs: beam.pvalue.PCollection):
"""Converts serialized inputs (e.g. examples) to Extracts."""
return inputs | beam.Map(lambda x: {constants.INPUT_KEY: x})
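# For example, a PCollection of serialized tf.Examples b'...' becomes a
# PCollection of extracts of the form {constants.INPUT_KEY: b'...'}.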
@beam.ptransform_fn
@beam.typehints.with_input_types(types.Extracts)
@beam.typehints.with_output_types(evaluator.Evaluation)
def ExtractAndEvaluate( # pylint: disable=invalid-name
extracts: beam.pvalue.PCollection, extractors: List[extractor.Extractor],
evaluators: List[evaluator.Evaluator]):
"""Performs Extractions and Evaluations in provided order."""
# evaluation[k] = list of values for k
evaluation = {}
def update(evaluation: Dict[Text, Any], new_evaluation: Dict[Text, Any]):
for k, v in new_evaluation.items():
if k not in evaluation:
evaluation[k] = []
evaluation[k].append(v)
return evaluation
# Run evaluators that run before extraction (i.e. that only require
# the incoming input extract added by ReadInputs)
for v in evaluators:
if not v.run_after:
update(evaluation, extracts | v.stage_name >> v.ptransform)
for x in extractors:
extracts = (extracts | x.stage_name >> x.ptransform)
for v in evaluators:
if v.run_after == x.stage_name:
update(evaluation, extracts | v.stage_name >> v.ptransform)
for v in evaluators:
if v.run_after == extractor.LAST_EXTRACTOR_STAGE_NAME:
update(evaluation, extracts | v.stage_name >> v.ptransform)
# Merge multi-valued keys if necessary.
result = {}
for k, v in evaluation.items():
if len(v) == 1:
result[k] = v[0]
continue
# Note that we assume that if a key is multivalued, its values are
# dictionaries with disjoint keys. The combined value will simply be the
# disjoint union of all the dictionaries.
result[k] = (
v
| 'FlattenEvaluationOutput(%s)' % k >> beam.Flatten()
| 'CombineEvaluationOutput(%s)' % k >> beam.CombinePerKey(
_CombineEvaluationDictionariesFn()))
return result
class _CombineEvaluationDictionariesFn(beam.CombineFn):
"""CombineFn to combine dictionaries generated by different evaluators."""
def create_accumulator(self) -> Dict[Text, Any]:
return {}
def _merge(self, accumulator: Dict[Text, Any],
output_dict: Dict[Text, Any]) -> None:
intersection = set(accumulator) & set(output_dict)
if intersection:
raise ValueError(
'Dictionaries generated by different evaluators should have '
'different keys, but keys %s appeared in the output of multiple '
'evaluators' % intersection)
accumulator.update(output_dict)
def add_input(self, accumulator: Dict[Text, Any],
output_dict: Dict[Text, Any]) -> Dict[Text, Any]:
if not isinstance(output_dict, dict):
raise TypeError(
'for outputs written to by multiple evaluators, the outputs must all '
'be dictionaries, but got output of type %s, value %s' %
(type(output_dict), str(output_dict)))
self._merge(accumulator, output_dict)
return accumulator
def merge_accumulators(
self, accumulators: List[Dict[Text, Any]]) -> Dict[Text, Any]:
result = self.create_accumulator()
for acc in accumulators:
self._merge(result, acc)
return result
def extract_output(self, accumulator: Dict[Text, Any]) -> Dict[Text, Any]:
return accumulator
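# For instance, if two evaluators both write to evaluation key k, one
# producing {'metrics': ...} and the other {'plots': ...}, the combined
# output for k is {'metrics': ..., 'plots': ...}; a shared key raises the
# ValueError above.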
@beam.ptransform_fn
@beam.typehints.with_input_types(Union[evaluator.Evaluation,
validator.Validation])
@beam.typehints.with_output_types(beam.pvalue.PDone)
def WriteResults( # pylint: disable=invalid-name
evaluation_or_validation: Union[evaluator.Evaluation, validator.Validation],
writers: List[writer.Writer]):
"""Writes Evaluation or Validation results using given writers.
Args:
evaluation_or_validation: Evaluation or Validation output.
writers: Writes to use for writing out output.
Raises:
ValueError: If Evaluation or Validation is empty.
Returns:
beam.pvalue.PDone.
"""
if not evaluation_or_validation:
raise ValueError('Evaluations and Validations cannot be empty')
for w in writers:
_ = evaluation_or_validation | w.stage_name >> w.ptransform
return beam.pvalue.PDone(list(evaluation_or_validation.values())[0].pipeline)
@beam.ptransform_fn
@beam.typehints.with_input_types(beam.Pipeline)
@beam.typehints.with_output_types(beam.pvalue.PDone)
def WriteEvalConfig( # pylint: disable=invalid-name
pipeline: beam.Pipeline,
eval_config: config.EvalConfig,
output_path: Text,
data_location: Optional[Text] = '',
file_format: Optional[Text] = '',
model_locations: Optional[Dict[Text, Text]] = None):
"""Writes EvalConfig to file.
Args:
pipeline: Beam pipeline.
eval_config: EvalConfig.
output_path: Output path.
data_location: Optional location for data used with config.
file_format: Optional format for data used with config.
model_locations: Optional location(s) for model(s) used with config.
Returns:
beam.pvalue.PDone.
"""
return (
pipeline
| 'CreateEvalConfig' >> beam.Create([
_serialize_eval_run(eval_config, data_location, file_format,
model_locations)
])
| 'WriteEvalConfig' >> beam.io.WriteToText(
os.path.join(output_path, _EVAL_CONFIG_FILE), shard_name_template=''))
@beam.ptransform_fn
@beam.typehints.with_output_types(beam.pvalue.PDone)
def ExtractEvaluateAndWriteResults( # pylint: disable=invalid-name
examples: beam.pvalue.PCollection,
eval_shared_model: Optional[Union[types.EvalSharedModel,
Dict[Text,
types.EvalSharedModel]]] = None,
eval_config: config.EvalConfig = None,
extractors: Optional[List[extractor.Extractor]] = None,
evaluators: Optional[List[evaluator.Evaluator]] = None,
writers: Optional[List[writer.Writer]] = None,
output_path: Optional[Text] = None,
display_only_data_location: Optional[Text] = None,
display_only_file_format: Optional[Text] = None,
slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
write_config: Optional[bool] = True,
compute_confidence_intervals: Optional[bool] = False,
k_anonymization_count: int = 1,
desired_batch_size: Optional[int] = None,
random_seed_for_testing: Optional[int] = None) -> beam.pvalue.PDone:
"""PTransform for performing extraction, evaluation, and writing results.
Users who want to construct their own Beam pipelines instead of using the
lightweight run_model_analysis functions should use this PTransform.
Example usage:
eval_config = tfma.EvalConfig(slicing_specs=[...], metrics_specs=[...])
eval_shared_model = tfma.default_eval_shared_model(
eval_saved_model_path=model_location, eval_config=eval_config)
with beam.Pipeline(runner=...) as p:
_ = (p
| 'ReadData' >> beam.io.ReadFromTFRecord(data_location)
| 'ExtractEvaluateAndWriteResults' >>
tfma.ExtractEvaluateAndWriteResults(
eval_shared_model=eval_shared_model,
eval_config=eval_config,
...))
result = tfma.load_eval_result(output_path=output_path)
tfma.view.render_slicing_metrics(result)
Note that the exact serialization format is an internal implementation detail
and subject to change. Users should only use the TFMA functions to write and
read the results.
Args:
examples: PCollection of input examples. Can be any format the model accepts
(e.g. string containing CSV row, TensorFlow.Example, etc).
eval_shared_model: Optional shared model (single-model evaluation) or dict
of shared models keyed by model name (multi-model evaluation). Only
required if needed by default extractors, evaluators, or writers and for
display purposes of the model path.
eval_config: Eval config.
extractors: Optional list of Extractors to apply to Extracts. Typically
these will be added by calling the default_extractors function. If no
extractors are provided, default_extractors (non-materialized) will be
used.
evaluators: Optional list of Evaluators for evaluating Extracts. Typically
these will be added by calling the default_evaluators function. If no
evaluators are provided, default_evaluators will be used.
writers: Optional list of Writers for writing Evaluation output. Typically
these will be added by calling the default_writers function. If no writers
are provided, default_writers will be used.
output_path: Path to output metrics and plots results.
display_only_data_location: Optional path indicating where the examples were
read from. This is used only for display purposes - data will not actually
be read from this path.
display_only_file_format: Optional format of the examples. This is used only
for display purposes.
slice_spec: Deprecated (use EvalConfig).
write_config: Deprecated (use EvalConfig).
compute_confidence_intervals: Deprecated (use EvalConfig).
k_anonymization_count: Deprecated (use EvalConfig).
desired_batch_size: Optional batch size for batching in Predict.
random_seed_for_testing: Provide for deterministic tests only.
Raises:
ValueError: If EvalConfig invalid or matching Extractor not found for an
Evaluator.
Returns:
PDone.
"""
eval_shared_models = eval_shared_model
if not isinstance(eval_shared_model, dict):
eval_shared_models = {'': eval_shared_model}
if eval_config is None:
model_specs = []
for model_name, shared_model in eval_shared_models.items():
example_weight_key = shared_model.example_weight_key
example_weight_keys = {}
if example_weight_key and isinstance(example_weight_key, dict):
example_weight_keys = example_weight_key
example_weight_key = ''
model_specs.append(
config.ModelSpec(
name=model_name,
example_weight_key=example_weight_key,
example_weight_keys=example_weight_keys))
slicing_specs = None
if slice_spec:
slicing_specs = [s.to_proto() for s in slice_spec]
options = config.Options()
options.compute_confidence_intervals.value = compute_confidence_intervals
options.k_anonymization_count.value = k_anonymization_count
if not write_config:
options.disabled_outputs.values.append(_EVAL_CONFIG_FILE)
eval_config = config.EvalConfig(
model_specs=model_specs, slicing_specs=slicing_specs, options=options)
else:
eval_config = config.update_eval_config_with_defaults(eval_config)
config.verify_eval_config(eval_config)
if not extractors:
extractors = default_extractors(
eval_config=eval_config,
eval_shared_model=eval_shared_model,
materialize=False,
desired_batch_size=desired_batch_size)
if not evaluators:
evaluators = default_evaluators(
eval_config=eval_config,
eval_shared_model=eval_shared_model,
random_seed_for_testing=random_seed_for_testing)
for v in evaluators:
evaluator.verify_evaluator(v, extractors)
if not writers:
writers = default_writers(
output_path=output_path, eval_shared_model=eval_shared_model)
# pylint: disable=no-value-for-parameter
_ = (
examples
| 'InputsToExtracts' >> InputsToExtracts()
| 'ExtractAndEvaluate' >> ExtractAndEvaluate(
extractors=extractors, evaluators=evaluators)
| 'WriteResults' >> WriteResults(writers=writers))
if _EVAL_CONFIG_FILE not in eval_config.options.disabled_outputs.values:
data_location = '<user provided PCollection>'
if display_only_data_location is not None:
data_location = display_only_data_location
file_format = '<unknown>'
if display_only_file_format is not None:
file_format = display_only_file_format
model_locations = {}
for k, v in eval_shared_models.items():
model_locations[k] = ('<unknown>' if v is None or v.model_path is None
else v.model_path)
_ = (
examples.pipeline
| WriteEvalConfig(eval_config, output_path, data_location, file_format,
model_locations))
# pylint: enable=no-value-for-parameter
return beam.pvalue.PDone(examples.pipeline)
def run_model_analysis(
eval_shared_model: Optional[Union[types.EvalSharedModel,
Dict[Text,
types.EvalSharedModel]]] = None,
eval_config: config.EvalConfig = None,
data_location: Text = '',
file_format: Text = 'tfrecords',
output_path: Optional[Text] = None,
extractors: Optional[List[extractor.Extractor]] = None,
evaluators: Optional[List[evaluator.Evaluator]] = None,
writers: Optional[List[writer.Writer]] = None,
pipeline_options: Optional[Any] = None,
slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
write_config: Optional[bool] = True,
compute_confidence_intervals: Optional[bool] = False,
k_anonymization_count: int = 1,
desired_batch_size: Optional[int] = None,
random_seed_for_testing: Optional[int] = None
) -> Union[EvalResult, EvalResults]:
"""Runs TensorFlow model analysis.
It runs a Beam pipeline to compute the slicing metrics exported in TensorFlow
Eval SavedModel and returns the results.
This is a simplified API for users who want to quickly get something running
locally. Users who wish to create their own Beam pipelines can use the
Evaluate PTransform instead.
Args:
eval_shared_model: Optional shared model (single-model evaluation) or dict
of shared models keyed by model name (multi-model evaluation). Only
required if needed by default extractors, evaluators, or writers.
eval_config: Eval config.
data_location: The location of the data files.
file_format: The file format of the data, can be either 'text' or
'tfrecords' for now. By default, 'tfrecords' will be used.
output_path: The directory to output metrics and results to. If None, we use
a temporary directory.
extractors: Optional list of Extractors to apply to Extracts. Typically
these will be added by calling the default_extractors function. If no
extractors are provided, default_extractors (non-materialized) will be
used.
evaluators: Optional list of Evaluators for evaluating Extracts. Typically
these will be added by calling the default_evaluators function. If no
evaluators are provided, default_evaluators will be used.
writers: Optional list of Writers for writing Evaluation output. Typically
these will be added by calling the default_writers function. If no writers
are provided, default_writers will be used.
pipeline_options: Optional arguments to run the Pipeline, for instance
whether to run directly.
slice_spec: Deprecated (use EvalConfig).
write_config: Deprecated (use EvalConfig).
compute_confidence_intervals: Deprecated (use EvalConfig).
k_anonymization_count: Deprecated (use EvalConfig).
desired_batch_size: Optional batch size for batching in Predict.
random_seed_for_testing: Provide for deterministic tests only.
Returns:
An EvalResult that can be used with the TFMA visualization functions.
Raises:
ValueError: If the file_format is unknown to us.
"""
_assert_tensorflow_version()
if output_path is None:
output_path = tempfile.mkdtemp()
if not tf.io.gfile.exists(output_path):
tf.io.gfile.makedirs(output_path)
if eval_config is None:
model_specs = []
eval_shared_models = eval_shared_model
if not isinstance(eval_shared_model, dict):
eval_shared_models = {'': eval_shared_model}
for model_name, shared_model in eval_shared_models.items():
example_weight_key = shared_model.example_weight_key
example_weight_keys = {}
if example_weight_key and isinstance(example_weight_key, dict):
example_weight_keys = example_weight_key
example_weight_key = ''
model_specs.append(
config.ModelSpec(
name=model_name,
example_weight_key=example_weight_key,
example_weight_keys=example_weight_keys))
slicing_specs = None
if slice_spec:
slicing_specs = [s.to_proto() for s in slice_spec]
options = config.Options()
options.compute_confidence_intervals.value = compute_confidence_intervals
options.k_anonymization_count.value = k_anonymization_count
if not write_config:
options.disabled_outputs.values.append(_EVAL_CONFIG_FILE)
eval_config = config.EvalConfig(
model_specs=model_specs, slicing_specs=slicing_specs, options=options)
with beam.Pipeline(options=pipeline_options) as p:
if file_format == 'tfrecords':
data = p | 'ReadFromTFRecord' >> beam.io.ReadFromTFRecord(
file_pattern=data_location,
compression_type=beam.io.filesystem.CompressionTypes.AUTO)
elif file_format == 'text':
data = p | 'ReadFromText' >> beam.io.textio.ReadFromText(data_location)
else:
raise ValueError('unknown file_format: {}'.format(file_format))
# pylint: disable=no-value-for-parameter
_ = (
data
| 'ExtractEvaluateAndWriteResults' >> ExtractEvaluateAndWriteResults(
eval_config=eval_config,
eval_shared_model=eval_shared_model,
display_only_data_location=data_location,
display_only_file_format=file_format,
output_path=output_path,
extractors=extractors,
evaluators=evaluators,
writers=writers,
desired_batch_size=desired_batch_size,
random_seed_for_testing=random_seed_for_testing))
# pylint: enable=no-value-for-parameter
if len(eval_config.model_specs) <= 1:
return load_eval_result(output_path)
else:
results = []
for spec in eval_config.model_specs:
results.append(load_eval_result(output_path, model_name=spec.name))
return EvalResults(results, constants.MODEL_CENTRIC_MODE)
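# Illustrative local-run sketch (placeholder paths, not part of this module):
#
#   eval_shared_model = default_eval_shared_model(
#       eval_saved_model_path='/path/to/eval_saved_model')
#   eval_result = run_model_analysis(
#       eval_shared_model=eval_shared_model,
#       data_location='/path/to/examples.tfrecord',
#       file_format='tfrecords',
#       output_path='/path/to/output')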
def single_model_analysis(
model_location: Text,
data_location: Text,
    output_path: Optional[Text] = None,
slice_spec: Optional[List[slicer.SingleSliceSpec]] = None) -> EvalResult:
"""Run model analysis for a single model on a single data set.
This is a convenience wrapper around run_model_analysis for a single model
with a single data set. For more complex use cases, use
tfma.run_model_analysis.
Args:
model_location: Path to the export eval saved model.
data_location: The location of the data files.
output_path: The directory to output metrics and results to. If None, we use
a temporary directory.
slice_spec: A list of tfma.slicer.SingleSliceSpec.
Returns:
An EvalResult that can be used with the TFMA visualization functions.
"""
# Get working_dir ready.
if output_path is None:
output_path = tempfile.mkdtemp()
if not tf.io.gfile.exists(output_path):
tf.io.gfile.makedirs(output_path)
  eval_config = config.EvalConfig(
      slicing_specs=[s.to_proto() for s in slice_spec] if slice_spec else None)
return run_model_analysis(
eval_config=eval_config,
eval_shared_model=default_eval_shared_model(
eval_saved_model_path=model_location),
data_location=data_location,
output_path=output_path) # pytype: disable=bad-return-type
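# Illustrative sketch (placeholder paths; an empty SingleSliceSpec() denotes
# the overall slice):
#
#   result = single_model_analysis(
#       model_location='/path/to/eval_saved_model',
#       data_location='/path/to/examples.tfrecord',
#       slice_spec=[slicer.SingleSliceSpec()])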
def multiple_model_analysis(model_locations: List[Text], data_location: Text,
**kwargs) -> EvalResults:
"""Run model analysis for multiple models on the same data set.
Args:
model_locations: A list of paths to the export eval saved model.
data_location: The location of the data files.
**kwargs: The args used for evaluation. See tfma.single_model_analysis() for
details.
Returns:
A tfma.EvalResults containing all the evaluation results with the same order
as model_locations.
"""
results = []
for m in model_locations:
results.append(single_model_analysis(m, data_location, **kwargs))
return EvalResults(results, constants.MODEL_CENTRIC_MODE)
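# Illustrative sketch comparing two exported models on one data set
# (placeholder paths):
#
#   results = multiple_model_analysis(
#       ['/path/to/model_a', '/path/to/model_b'],
#       '/path/to/examples.tfrecord',
#       slice_spec=[slicer.SingleSliceSpec()])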
def multiple_data_analysis(model_location: Text, data_locations: List[Text],
**kwargs) -> EvalResults:
"""Run model analysis for a single model on multiple data sets.
Args:
model_location: The location of the exported eval saved model.
data_locations: A list of data set locations.
**kwargs: The args used for evaluation. See tfma.run_model_analysis() for
details.
Returns:
A tfma.EvalResults containing all the evaluation results with the same order
as data_locations.
"""
results = []
for d in data_locations:
results.append(single_model_analysis(model_location, d, **kwargs))
return EvalResults(results, constants.DATA_CENTRIC_MODE)
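# Illustrative sketch evaluating one model against two data sets
# (placeholder paths):
#
#   results = multiple_data_analysis(
#       '/path/to/eval_saved_model',
#       ['/path/to/train.tfrecord', '/path/to/eval.tfrecord'],
#       slice_spec=[slicer.SingleSliceSpec()])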
| 42.138347
| 82
| 0.721019
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import pickle
import tempfile
from typing import Any, Dict, List, NamedTuple, Optional, Text, Tuple, Union
import apache_beam as beam
import six
import tensorflow as tf
from tensorflow_model_analysis import config
from tensorflow_model_analysis import constants
from tensorflow_model_analysis import model_util
from tensorflow_model_analysis import types
from tensorflow_model_analysis import version as tfma_version
from tensorflow_model_analysis.eval_saved_model import constants as eval_constants
from tensorflow_model_analysis.evaluators import evaluator
from tensorflow_model_analysis.evaluators import metrics_and_plots_evaluator
from tensorflow_model_analysis.evaluators import metrics_and_plots_evaluator_v2
from tensorflow_model_analysis.extractors import extractor
from tensorflow_model_analysis.extractors import input_extractor
from tensorflow_model_analysis.extractors import predict_extractor
from tensorflow_model_analysis.extractors import predict_extractor_v2
from tensorflow_model_analysis.extractors import slice_key_extractor
from tensorflow_model_analysis.extractors import tflite_predict_extractor
from tensorflow_model_analysis.post_export_metrics import post_export_metrics
from tensorflow_model_analysis.proto import config_pb2
from tensorflow_model_analysis.proto import validation_result_pb2
from tensorflow_model_analysis.slicer import slicer_lib as slicer
from tensorflow_model_analysis.validators import validator
from tensorflow_model_analysis.writers import metrics_and_plots_serialization
from tensorflow_model_analysis.writers import metrics_plots_and_validations_writer
from tensorflow_model_analysis.writers import writer
from google.protobuf import json_format
_EVAL_CONFIG_FILE = 'eval_config.json'
def _assert_tensorflow_version():
major, minor, _ = tf.version.VERSION.split('.')
if (int(major) not in (1, 2)) or (int(major) == 1 and int(minor) < 15):
raise RuntimeError(
'Tensorflow version >= 1.15, < 3 is required. Found (%s). Please '
'install the latest 1.x or 2.x version from '
'https://github.com/tensorflow/tensorflow. ' % tf.version.VERSION)
if int(major) == 2:
tf.compat.v1.logging.warning(
'Tensorflow version (%s) found. Note that TFMA support for TF 2.0 '
'is currently in beta' % tf.version.VERSION)
def _check_version(version: Text, path: Text):
if not version:
raise ValueError(
'could not find TFMA version in raw deserialized dictionary for '
'file at %s' % path)
def _is_legacy_eval(eval_shared_model: Optional[types.EvalSharedModel],
eval_config: Optional[config.EvalConfig]):
return (eval_shared_model and not isinstance(eval_shared_model, dict) and
((not eval_shared_model.model_loader.tags or
eval_constants.EVAL_TAG in eval_shared_model.model_loader.tags) and
(not eval_config or not eval_config.metrics_specs)))
def _serialize_eval_run(eval_config: config.EvalConfig, data_location: Text,
file_format: Text, model_locations: Dict[Text,
Text]) -> Text:
return json_format.MessageToJson(
config_pb2.EvalRun(
eval_config=eval_config,
version=tfma_version.VERSION_STRING,
data_location=data_location,
file_format=file_format,
model_locations=model_locations))
def _load_eval_run(
output_path: Text
) -> Tuple[config.EvalConfig, Text, Text, Dict[Text, Text]]:
path = os.path.join(output_path, _EVAL_CONFIG_FILE)
if tf.io.gfile.exists(path):
with tf.io.gfile.GFile(path, 'r') as f:
pb = json_format.Parse(f.read(), config_pb2.EvalRun())
_check_version(pb.version, output_path)
return (pb.eval_config, pb.data_location, pb.file_format,
pb.model_locations)
else:
path = os.path.splitext(path)[0]
serialized_record = six.next(
tf.compat.v1.python_io.tf_record_iterator(path))
final_dict = pickle.loads(serialized_record)
_check_version(final_dict, output_path)
old_config = final_dict['eval_config']
slicing_specs = None
if old_config.slice_spec:
slicing_specs = [s.to_proto() for s in old_config.slice_spec]
options = config.Options()
options.compute_confidence_intervals.value = (
old_config.compute_confidence_intervals)
options.k_anonymization_count.value = old_config.k_anonymization_count
return (config.EvalConfig(slicing_specs=slicing_specs,
options=options), old_config.data_location, '', {
'': old_config.model_location
})
EvalResult = NamedTuple(
'EvalResult',
[('slicing_metrics',
List[Tuple[slicer.SliceKeyType,
Dict[Text, Dict[Text, Dict[Text, Dict[Text, Dict[Text,
Any]]]]]]]),
('plots', List[Tuple[slicer.SliceKeyType, Dict[Text, Any]]]),
('config', config.EvalConfig), ('data_location', Text),
('file_format', Text), ('model_location', Text)])
ValidationResult = validation_result_pb2.ValidationResult
def load_validation_result(
validations_file: Text) -> Optional[ValidationResult]:
validation_records = []
for record in tf.compat.v1.python_io.tf_record_iterator(validations_file):
validation_records.append(ValidationResult.FromString(record))
if validation_records:
assert len(validation_records) == 1
return validation_records[0]
class EvalResults(object):
def __init__(self,
results: List[EvalResult],
mode: Text = constants.UNKNOWN_EVAL_MODE):
supported_modes = [
constants.DATA_CENTRIC_MODE,
constants.MODEL_CENTRIC_MODE,
]
if mode not in supported_modes:
      raise ValueError('Mode ' + mode + ' must be one of ' +
                       str(supported_modes))
self._results = results
self._mode = mode
def get_results(self) -> List[EvalResult]:
return self._results
def get_mode(self) -> Text:
return self._mode
def make_eval_results(results: List[EvalResult], mode: Text) -> EvalResults:
return EvalResults(results, mode)
def load_eval_results(output_paths: List[Text],
mode: Text,
model_name: Optional[Text] = None) -> EvalResults:
results = [
load_eval_result(output_path, model_name=model_name)
for output_path in output_paths
]
return make_eval_results(results, mode)
def load_eval_result(output_path: Text,
model_name: Optional[Text] = None) -> EvalResult:
eval_config, data_location, file_format, model_locations = (
_load_eval_run(output_path))
metrics_proto_list = (
metrics_and_plots_serialization.load_and_deserialize_metrics(
path=os.path.join(output_path, constants.METRICS_KEY),
model_name=model_name))
plots_proto_list = (
metrics_and_plots_serialization.load_and_deserialize_plots(
path=os.path.join(output_path, constants.PLOTS_KEY)))
if model_name is None:
model_location = list(model_locations.values())[0]
else:
model_location = model_locations[model_name]
return EvalResult(
slicing_metrics=metrics_proto_list,
plots=plots_proto_list,
config=eval_config,
data_location=data_location,
file_format=file_format,
model_location=model_location)
def default_eval_shared_model(
eval_saved_model_path: Text,
add_metrics_callbacks: Optional[List[types.AddMetricsCallbackType]] = None,
include_default_metrics: Optional[bool] = True,
example_weight_key: Optional[Union[Text, Dict[Text, Text]]] = None,
additional_fetches: Optional[List[Text]] = None,
blacklist_feature_fetches: Optional[List[Text]] = None,
tags: Optional[List[Text]] = None,
eval_config: Optional[config.EvalConfig] = None) -> types.EvalSharedModel:
if tags is None:
if eval_config:
signatures = [s.signature_name for s in eval_config.model_specs]
if eval_constants.EVAL_TAG in signatures:
if not all(s == eval_constants.EVAL_TAG for s in signatures):
tf.compat.v1.logging.warning(
'mixture of eval and non-eval signatures used: '
'eval_config={}'.format(eval_config))
tags = [eval_constants.EVAL_TAG]
else:
tags = [tf.saved_model.SERVING]
else:
tags = [eval_constants.EVAL_TAG]
if tags == [eval_constants.EVAL_TAG]:
if not add_metrics_callbacks:
add_metrics_callbacks = []
example_count_callback = post_export_metrics.example_count()
add_metrics_callbacks.append(example_count_callback)
if example_weight_key:
if isinstance(example_weight_key, dict):
for output_name, key in example_weight_key.items():
example_weight_callback = post_export_metrics.example_weight(
key, metric_tag=output_name)
add_metrics_callbacks.append(example_weight_callback)
else:
example_weight_callback = post_export_metrics.example_weight(
example_weight_key)
add_metrics_callbacks.append(example_weight_callback)
return types.EvalSharedModel(
model_path=eval_saved_model_path,
add_metrics_callbacks=add_metrics_callbacks,
include_default_metrics=include_default_metrics,
example_weight_key=example_weight_key,
additional_fetches=additional_fetches,
model_loader=types.ModelLoader(
tags=tags,
construct_fn=model_util.model_construct_fn(
eval_saved_model_path=eval_saved_model_path,
add_metrics_callbacks=add_metrics_callbacks,
include_default_metrics=include_default_metrics,
additional_fetches=additional_fetches,
blacklist_feature_fetches=blacklist_feature_fetches,
tags=tags)))
def default_extractors(
eval_shared_model: Union[types.EvalSharedModel,
Dict[Text, types.EvalSharedModel]] = None,
eval_config: config.EvalConfig = None,
slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
desired_batch_size: Optional[int] = None,
materialize: Optional[bool] = True) -> List[extractor.Extractor]:
if eval_config is not None:
eval_config = config.update_eval_config_with_defaults(eval_config)
slice_spec = [
slicer.SingleSliceSpec(spec=spec) for spec in eval_config.slicing_specs
]
if _is_legacy_eval(eval_shared_model, eval_config):
return [
predict_extractor.PredictExtractor(
eval_shared_model, desired_batch_size, materialize=materialize),
slice_key_extractor.SliceKeyExtractor(
slice_spec, materialize=materialize)
]
elif eval_shared_model:
model_types = model_util.get_model_types(eval_config)
if not model_types.issubset(constants.VALID_MODEL_TYPES):
raise NotImplementedError(
'model type must be one of: {}. evalconfig={}'.format(
str(constants.VALID_MODEL_TYPES), eval_config))
if model_types == set([constants.TF_LITE]):
return [
input_extractor.InputExtractor(eval_config=eval_config),
tflite_predict_extractor.TFLitePredictExtractor(
eval_config=eval_config,
eval_shared_model=eval_shared_model,
desired_batch_size=desired_batch_size),
slice_key_extractor.SliceKeyExtractor(
slice_spec, materialize=materialize)
]
elif constants.TF_LITE in model_types:
raise NotImplementedError(
'support for mixing tf_lite and non-tf_lite models is not '
'implemented: eval_config={}'.format(eval_config))
elif (eval_config and all(s.signature_name == eval_constants.EVAL_TAG
for s in eval_config.model_specs)):
return [
predict_extractor.PredictExtractor(
eval_shared_model,
desired_batch_size,
materialize=materialize,
eval_config=eval_config),
slice_key_extractor.SliceKeyExtractor(
slice_spec, materialize=materialize)
]
elif (eval_config and any(s.signature_name == eval_constants.EVAL_TAG
for s in eval_config.model_specs)):
raise NotImplementedError(
'support for mixing eval and non-eval models is not implemented: '
'eval_config={}'.format(eval_config))
else:
return [
input_extractor.InputExtractor(eval_config=eval_config),
predict_extractor_v2.PredictExtractor(
eval_config=eval_config,
eval_shared_model=eval_shared_model,
desired_batch_size=desired_batch_size),
slice_key_extractor.SliceKeyExtractor(
slice_spec, materialize=materialize)
]
else:
return [
input_extractor.InputExtractor(eval_config=eval_config),
slice_key_extractor.SliceKeyExtractor(
slice_spec, materialize=materialize)
]
def default_evaluators(
eval_shared_model: Optional[Union[types.EvalSharedModel,
Dict[Text,
types.EvalSharedModel]]] = None,
eval_config: config.EvalConfig = None,
compute_confidence_intervals: Optional[bool] = False,
k_anonymization_count: int = 1,
desired_batch_size: Optional[int] = None,
serialize: bool = False,
random_seed_for_testing: Optional[int] = None) -> List[evaluator.Evaluator]:
disabled_outputs = []
if eval_config:
eval_config = config.update_eval_config_with_defaults(eval_config)
disabled_outputs = eval_config.options.disabled_outputs.values
if model_util.get_model_types(eval_config) == set([constants.TF_LITE]):
if eval_shared_model:
if isinstance(eval_shared_model, dict):
eval_shared_model = {
k: v._replace(include_default_metrics=False)
for k, v in eval_shared_model.items()
}
else:
eval_shared_model = eval_shared_model._replace(
include_default_metrics=False)
if (constants.METRICS_KEY in disabled_outputs and
constants.PLOTS_KEY in disabled_outputs):
return []
if _is_legacy_eval(eval_shared_model, eval_config):
if eval_config is not None:
if eval_config.options.HasField('compute_confidence_intervals'):
compute_confidence_intervals = (
eval_config.options.compute_confidence_intervals.value)
if eval_config.options.HasField('k_anonymization_count'):
k_anonymization_count = eval_config.options.k_anonymization_count.value
return [
metrics_and_plots_evaluator.MetricsAndPlotsEvaluator(
eval_shared_model,
compute_confidence_intervals=compute_confidence_intervals,
k_anonymization_count=k_anonymization_count,
desired_batch_size=desired_batch_size,
serialize=serialize,
random_seed_for_testing=random_seed_for_testing)
]
else:
return [
metrics_and_plots_evaluator_v2.MetricsAndPlotsEvaluator(
eval_config=eval_config, eval_shared_model=eval_shared_model)
]
def default_writers(
output_path: Optional[Text],
eval_shared_model: Optional[Union[types.EvalSharedModel,
Dict[Text, types.EvalSharedModel]]] = None
) -> List[writer.Writer]:
add_metric_callbacks = []
if eval_shared_model and not isinstance(eval_shared_model, dict):
add_metric_callbacks = eval_shared_model.add_metrics_callbacks
output_paths = {
constants.METRICS_KEY:
os.path.join(output_path, constants.METRICS_KEY),
constants.PLOTS_KEY:
os.path.join(output_path, constants.PLOTS_KEY),
constants.VALIDATIONS_KEY:
os.path.join(output_path, constants.VALIDATIONS_KEY)
}
return [
metrics_plots_and_validations_writer.MetricsPlotsAndValidationsWriter(
output_paths=output_paths,
add_metrics_callbacks=add_metric_callbacks),
]
@beam.ptransform_fn
@beam.typehints.with_input_types(bytes)
@beam.typehints.with_output_types(types.Extracts)
def InputsToExtracts(
inputs: beam.pvalue.PCollection):
return inputs | beam.Map(lambda x: {constants.INPUT_KEY: x})
@beam.ptransform_fn
@beam.typehints.with_input_types(types.Extracts)
@beam.typehints.with_output_types(evaluator.Evaluation)
def ExtractAndEvaluate(
extracts: beam.pvalue.PCollection, extractors: List[extractor.Extractor],
evaluators: List[evaluator.Evaluator]):
evaluation = {}
def update(evaluation: Dict[Text, Any], new_evaluation: Dict[Text, Any]):
for k, v in new_evaluation.items():
if k not in evaluation:
evaluation[k] = []
evaluation[k].append(v)
return evaluation
for v in evaluators:
if not v.run_after:
update(evaluation, extracts | v.stage_name >> v.ptransform)
for x in extractors:
extracts = (extracts | x.stage_name >> x.ptransform)
for v in evaluators:
if v.run_after == x.stage_name:
update(evaluation, extracts | v.stage_name >> v.ptransform)
for v in evaluators:
if v.run_after == extractor.LAST_EXTRACTOR_STAGE_NAME:
update(evaluation, extracts | v.stage_name >> v.ptransform)
result = {}
for k, v in evaluation.items():
if len(v) == 1:
result[k] = v[0]
continue
result[k] = (
v
| 'FlattenEvaluationOutput(%s)' % k >> beam.Flatten()
| 'CombineEvaluationOutput(%s)' % k >> beam.CombinePerKey(
_CombineEvaluationDictionariesFn()))
return result
class _CombineEvaluationDictionariesFn(beam.CombineFn):
def create_accumulator(self) -> Dict[Text, Any]:
return {}
def _merge(self, accumulator: Dict[Text, Any],
output_dict: Dict[Text, Any]) -> None:
intersection = set(accumulator) & set(output_dict)
if intersection:
raise ValueError(
'Dictionaries generated by different evaluators should have '
'different keys, but keys %s appeared in the output of multiple '
'evaluators' % intersection)
accumulator.update(output_dict)
def add_input(self, accumulator: Dict[Text, Any],
output_dict: Dict[Text, Any]) -> Dict[Text, Any]:
if not isinstance(output_dict, dict):
raise TypeError(
'for outputs written to by multiple evaluators, the outputs must all '
'be dictionaries, but got output of type %s, value %s' %
(type(output_dict), str(output_dict)))
self._merge(accumulator, output_dict)
return accumulator
def merge_accumulators(
self, accumulators: List[Dict[Text, Any]]) -> Dict[Text, Any]:
result = self.create_accumulator()
for acc in accumulators:
self._merge(result, acc)
return result
def extract_output(self, accumulator: Dict[Text, Any]) -> Dict[Text, Any]:
return accumulator
@beam.ptransform_fn
@beam.typehints.with_input_types(Union[evaluator.Evaluation,
validator.Validation])
@beam.typehints.with_output_types(beam.pvalue.PDone)
def WriteResults(
evaluation_or_validation: Union[evaluator.Evaluation, validator.Validation],
writers: List[writer.Writer]):
if not evaluation_or_validation:
raise ValueError('Evaluations and Validations cannot be empty')
for w in writers:
_ = evaluation_or_validation | w.stage_name >> w.ptransform
return beam.pvalue.PDone(list(evaluation_or_validation.values())[0].pipeline)
@beam.ptransform_fn
@beam.typehints.with_input_types(beam.Pipeline)
@beam.typehints.with_output_types(beam.pvalue.PDone)
def WriteEvalConfig(
pipeline: beam.Pipeline,
eval_config: config.EvalConfig,
output_path: Text,
data_location: Optional[Text] = '',
file_format: Optional[Text] = '',
model_locations: Optional[Dict[Text, Text]] = None):
return (
pipeline
| 'CreateEvalConfig' >> beam.Create([
_serialize_eval_run(eval_config, data_location, file_format,
model_locations)
])
| 'WriteEvalConfig' >> beam.io.WriteToText(
os.path.join(output_path, _EVAL_CONFIG_FILE), shard_name_template=''))
@beam.ptransform_fn
@beam.typehints.with_output_types(beam.pvalue.PDone)
def ExtractEvaluateAndWriteResults(
examples: beam.pvalue.PCollection,
eval_shared_model: Optional[Union[types.EvalSharedModel,
Dict[Text,
types.EvalSharedModel]]] = None,
eval_config: config.EvalConfig = None,
extractors: Optional[List[extractor.Extractor]] = None,
evaluators: Optional[List[evaluator.Evaluator]] = None,
writers: Optional[List[writer.Writer]] = None,
output_path: Optional[Text] = None,
display_only_data_location: Optional[Text] = None,
display_only_file_format: Optional[Text] = None,
slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
write_config: Optional[bool] = True,
compute_confidence_intervals: Optional[bool] = False,
k_anonymization_count: int = 1,
desired_batch_size: Optional[int] = None,
random_seed_for_testing: Optional[int] = None) -> beam.pvalue.PDone:
eval_shared_models = eval_shared_model
if not isinstance(eval_shared_model, dict):
eval_shared_models = {'': eval_shared_model}
if eval_config is None:
model_specs = []
for model_name, shared_model in eval_shared_models.items():
example_weight_key = shared_model.example_weight_key
example_weight_keys = {}
if example_weight_key and isinstance(example_weight_key, dict):
example_weight_keys = example_weight_key
example_weight_key = ''
model_specs.append(
config.ModelSpec(
name=model_name,
example_weight_key=example_weight_key,
example_weight_keys=example_weight_keys))
slicing_specs = None
if slice_spec:
slicing_specs = [s.to_proto() for s in slice_spec]
options = config.Options()
options.compute_confidence_intervals.value = compute_confidence_intervals
options.k_anonymization_count.value = k_anonymization_count
if not write_config:
options.disabled_outputs.values.append(_EVAL_CONFIG_FILE)
eval_config = config.EvalConfig(
model_specs=model_specs, slicing_specs=slicing_specs, options=options)
else:
eval_config = config.update_eval_config_with_defaults(eval_config)
config.verify_eval_config(eval_config)
if not extractors:
extractors = default_extractors(
eval_config=eval_config,
eval_shared_model=eval_shared_model,
materialize=False,
desired_batch_size=desired_batch_size)
if not evaluators:
evaluators = default_evaluators(
eval_config=eval_config,
eval_shared_model=eval_shared_model,
random_seed_for_testing=random_seed_for_testing)
for v in evaluators:
evaluator.verify_evaluator(v, extractors)
if not writers:
writers = default_writers(
output_path=output_path, eval_shared_model=eval_shared_model)
_ = (
examples
| 'InputsToExtracts' >> InputsToExtracts()
| 'ExtractAndEvaluate' >> ExtractAndEvaluate(
extractors=extractors, evaluators=evaluators)
| 'WriteResults' >> WriteResults(writers=writers))
if _EVAL_CONFIG_FILE not in eval_config.options.disabled_outputs.values:
data_location = '<user provided PCollection>'
if display_only_data_location is not None:
data_location = display_only_data_location
file_format = '<unknown>'
if display_only_file_format is not None:
file_format = display_only_file_format
model_locations = {}
for k, v in eval_shared_models.items():
model_locations[k] = ('<unknown>' if v is None or v.model_path is None
else v.model_path)
_ = (
examples.pipeline
| WriteEvalConfig(eval_config, output_path, data_location, file_format,
model_locations))
return beam.pvalue.PDone(examples.pipeline)
def run_model_analysis(
eval_shared_model: Optional[Union[types.EvalSharedModel,
Dict[Text,
types.EvalSharedModel]]] = None,
eval_config: config.EvalConfig = None,
data_location: Text = '',
file_format: Text = 'tfrecords',
output_path: Optional[Text] = None,
extractors: Optional[List[extractor.Extractor]] = None,
evaluators: Optional[List[evaluator.Evaluator]] = None,
writers: Optional[List[writer.Writer]] = None,
pipeline_options: Optional[Any] = None,
slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
write_config: Optional[bool] = True,
compute_confidence_intervals: Optional[bool] = False,
k_anonymization_count: int = 1,
desired_batch_size: Optional[int] = None,
random_seed_for_testing: Optional[int] = None
) -> Union[EvalResult, EvalResults]:
_assert_tensorflow_version()
if output_path is None:
output_path = tempfile.mkdtemp()
if not tf.io.gfile.exists(output_path):
tf.io.gfile.makedirs(output_path)
if eval_config is None:
model_specs = []
eval_shared_models = eval_shared_model
if not isinstance(eval_shared_model, dict):
eval_shared_models = {'': eval_shared_model}
for model_name, shared_model in eval_shared_models.items():
example_weight_key = shared_model.example_weight_key
example_weight_keys = {}
if example_weight_key and isinstance(example_weight_key, dict):
example_weight_keys = example_weight_key
example_weight_key = ''
model_specs.append(
config.ModelSpec(
name=model_name,
example_weight_key=example_weight_key,
example_weight_keys=example_weight_keys))
slicing_specs = None
if slice_spec:
slicing_specs = [s.to_proto() for s in slice_spec]
options = config.Options()
options.compute_confidence_intervals.value = compute_confidence_intervals
options.k_anonymization_count.value = k_anonymization_count
if not write_config:
options.disabled_outputs.values.append(_EVAL_CONFIG_FILE)
eval_config = config.EvalConfig(
model_specs=model_specs, slicing_specs=slicing_specs, options=options)
with beam.Pipeline(options=pipeline_options) as p:
if file_format == 'tfrecords':
data = p | 'ReadFromTFRecord' >> beam.io.ReadFromTFRecord(
file_pattern=data_location,
compression_type=beam.io.filesystem.CompressionTypes.AUTO)
elif file_format == 'text':
data = p | 'ReadFromText' >> beam.io.textio.ReadFromText(data_location)
else:
raise ValueError('unknown file_format: {}'.format(file_format))
_ = (
data
| 'ExtractEvaluateAndWriteResults' >> ExtractEvaluateAndWriteResults(
eval_config=eval_config,
eval_shared_model=eval_shared_model,
display_only_data_location=data_location,
display_only_file_format=file_format,
output_path=output_path,
extractors=extractors,
evaluators=evaluators,
writers=writers,
desired_batch_size=desired_batch_size,
random_seed_for_testing=random_seed_for_testing))
if len(eval_config.model_specs) <= 1:
return load_eval_result(output_path)
else:
results = []
for spec in eval_config.model_specs:
results.append(load_eval_result(output_path, model_name=spec.name))
return EvalResults(results, constants.MODEL_CENTRIC_MODE)
def single_model_analysis(
model_location: Text,
data_location: Text,
    output_path: Optional[Text] = None,
slice_spec: Optional[List[slicer.SingleSliceSpec]] = None) -> EvalResult:
if output_path is None:
output_path = tempfile.mkdtemp()
if not tf.io.gfile.exists(output_path):
tf.io.gfile.makedirs(output_path)
  eval_config = config.EvalConfig(
      slicing_specs=[s.to_proto() for s in slice_spec] if slice_spec else None)
return run_model_analysis(
eval_config=eval_config,
eval_shared_model=default_eval_shared_model(
eval_saved_model_path=model_location),
data_location=data_location,
output_path=output_path)
def multiple_model_analysis(model_locations: List[Text], data_location: Text,
**kwargs) -> EvalResults:
results = []
for m in model_locations:
results.append(single_model_analysis(m, data_location, **kwargs))
return EvalResults(results, constants.MODEL_CENTRIC_MODE)
def multiple_data_analysis(model_location: Text, data_locations: List[Text],
**kwargs) -> EvalResults:
results = []
for d in data_locations:
results.append(single_model_analysis(model_location, d, **kwargs))
return EvalResults(results, constants.DATA_CENTRIC_MODE)
| true
| true
|
790b35051ca753b6a976f562d7c99939df28b5f2
| 269
|
py
|
Python
|
src/hdcp_source.py
|
imamotts/hdcp_test
|
0830829037a8d8e107f75e9a306179bf3f164d10
|
[
"MIT"
] | 3
|
2017-01-06T08:24:44.000Z
|
2020-07-13T03:19:58.000Z
|
src/hdcp_source.py
|
imamotts/hdcp_test
|
0830829037a8d8e107f75e9a306179bf3f164d10
|
[
"MIT"
] | null | null | null |
src/hdcp_source.py
|
imamotts/hdcp_test
|
0830829037a8d8e107f75e9a306179bf3f164d10
|
[
"MIT"
] | 1
|
2021-10-13T08:55:47.000Z
|
2021-10-13T08:55:47.000Z
|
import yaml
class HdcpSource:
    def __init__(self, conf_yaml):
        # Load the receiver configuration from YAML; safe_load avoids
        # executing arbitrary tags, unlike the deprecated bare yaml.load.
        with open(conf_yaml, "r") as f:
            self.conf = yaml.safe_load(f)
    def process_request(self, req):
        # Each request is a (message type, message body) pair.
        msg_type, msg = req
if __name__ == "__main__":
    HdcpSource("yaml/rx1.yaml")
| 14.157895
| 34
| 0.583643
|
import yaml
class HdcpSource:
    def __init__(self, conf_yaml):
        with open(conf_yaml, "r") as f:
            self.conf = yaml.safe_load(f)
    def process_request(self, req):
        msg_type, msg = req
if __name__ == "__main__":
    HdcpSource("yaml/rx1.yaml")
| true
| true
|
790b3807375d1144379c018e82b0ad95968c4ace
| 205
|
py
|
Python
|
examples/get_file_name_demo.py
|
smilelight/lightUtils
|
e9b7ed35ed50cf6b7c6284fe60918ce4dc71beac
|
[
"MIT"
] | 2
|
2020-01-23T02:03:19.000Z
|
2020-12-13T09:05:45.000Z
|
examples/get_file_name_demo.py
|
smilelight/lightUtils
|
e9b7ed35ed50cf6b7c6284fe60918ce4dc71beac
|
[
"MIT"
] | null | null | null |
examples/get_file_name_demo.py
|
smilelight/lightUtils
|
e9b7ed35ed50cf6b7c6284fe60918ce4dc71beac
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# @Time : 2020/7/7 9:11
# @Author : lightsmile
# @Software: PyCharm
from lightutils import get_file_name
if __name__ == '__main__':
print(get_file_name("hello_world.py"))
| 20.5
| 42
| 0.663415
|
from lightutils import get_file_name
if __name__ == '__main__':
print(get_file_name("hello_world.py"))
| true
| true
|
790b398ee916546032454898d539b421420123e1
| 329
|
py
|
Python
|
examples/crashing.py
|
diconart/zerogw
|
6bd124a470424fbf32b3a2fb51b6fbd8c4ca2f13
|
[
"MIT"
] | 83
|
2015-01-11T06:08:43.000Z
|
2021-03-31T00:41:11.000Z
|
examples/crashing.py
|
asvetlov/zerogw
|
b8728862ba28368b8d306c1b674c9c264a25b575
|
[
"MIT"
] | 7
|
2015-05-26T12:03:35.000Z
|
2020-02-25T11:28:33.000Z
|
examples/crashing.py
|
asvetlov/zerogw
|
b8728862ba28368b8d306c1b674c9c264a25b575
|
[
"MIT"
] | 12
|
2015-04-20T16:29:25.000Z
|
2019-03-15T14:04:58.000Z
|
import zmq
import random
ctx = zmq.Context(1)
sock = ctx.socket(zmq.REP)
sock.connect('tcp://127.0.0.1:7001')
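# Echo backend that randomly SIGKILLs itself (about 1 request in 3), so
# zerogw's handling of crashing workers can be observed.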
while True:
parts = sock.recv_multipart()
print("GOT", parts)
if random.randrange(3) == 0:
import os, signal
os.kill(os.getpid(), signal.SIGKILL)
sock.send(b"Echo: " + b' '.join(parts))
| 23.5
| 44
| 0.62614
|
import zmq
import random
ctx = zmq.Context(1)
sock = ctx.socket(zmq.REP)
sock.connect('tcp://127.0.0.1:7001')
while True:
parts = sock.recv_multipart()
print("GOT", parts)
if random.randrange(3) == 0:
import os, signal
os.kill(os.getpid(), signal.SIGKILL)
sock.send(b"Echo: " + b' '.join(parts))
| true
| true
|
790b3b06a9335fb7a81c645f577f8954748f0c35
| 4,030
|
py
|
Python
|
Raw_Socket_Protos/rawIPV4.py
|
UsamaMehboob/RawSocketsPython
|
04277efed77a75653cc0f006c787659bd8beac32
|
[
"MIT"
] | 1
|
2020-04-08T09:29:03.000Z
|
2020-04-08T09:29:03.000Z
|
Raw_Socket_Protos/rawIPV4.py
|
UsamaMehboob/RawSocketsPython
|
04277efed77a75653cc0f006c787659bd8beac32
|
[
"MIT"
] | null | null | null |
Raw_Socket_Protos/rawIPV4.py
|
UsamaMehboob/RawSocketsPython
|
04277efed77a75653cc0f006c787659bd8beac32
|
[
"MIT"
] | 1
|
2020-04-08T09:29:12.000Z
|
2020-04-08T09:29:12.000Z
|
import struct
import socket
import ipaddress
from .utils import calculate_checksum
IPV4_HEAD_FMT = "!BBHHHBBHII"  # ! = network byte order (big-endian); B = unsigned char (1 byte), H = unsigned short (2 bytes), I = unsigned int (4 bytes)
class IPV4Datagram:
"""
This class contains 20 bytes IPV4 Datagram
https://en.wikipedia.org/wiki/IPv4
|0|1|2|3|4|5|6|7|8|9|10|11|12|13|14|15|16|17|18|19|20|21|22|23|24|25|26|27|28|29|30|31|
---------------------------------------------------------------------------------------
|version| IHL | DSCP | ECN | Total Length |
---------------------------------------------------------------------------------------
    |             identification              | flags  |         Fragment Offset          |
---------------------------------------------------------------------------------------
| TTL | Protocol | Header Checksum |
---------------------------------------------------------------------------------------
| Source Ip Address |
---------------------------------------------------------------------------------------
| Destination Ip Address |
---------------------------------------------------------------------------------------
"""
def __init__(self, source_ip="1.1.1.1",destination_ip="1.1.1.1" , version=4, ihl=5, tos=0,identification=54321,fragment_offset = 0,
ttl=253,protocol = socket.IPPROTO_UDP,data='', checksum=0):
self.version = version
self.ihl = ihl
self.version_ihl = (self.version << 4) + self.ihl
self.tos = tos
self.identification=identification
self.fragment_offset = fragment_offset
self.ttl = ttl
self.protocol = protocol
self.checksum = checksum
self.source_ip =int(ipaddress.IPv4Address( source_ip )) # convert into integer
self.destination_ip = int(ipaddress.IPv4Address(destination_ip ))
self.data = data
self.length= 4 * self.ihl + len(self.data)
    def __repr__(self):
        return 'IPV4Datagram({},{},({},{}))'.format(
            self.source_ip, self.destination_ip, self.checksum, self.data)
def pack(self):
ipv4_header = struct.pack(IPV4_HEAD_FMT, self.version_ihl,self.tos,self.length, self.identification,
self.fragment_offset, self.ttl, self.protocol, self.checksum, self.source_ip, self.destination_ip)
self.checksum = calculate_checksum(ipv4_header)
ipv4_header = struct.pack(IPV4_HEAD_FMT, self.version_ihl,self.tos,self.length, self.identification,
self.fragment_offset, self.ttl, self.protocol, self.checksum, self.source_ip, self.destination_ip)
return ipv4_header
def unpack(self, buffer):
ipv4_header_size = struct.calcsize(IPV4_HEAD_FMT)
ipv4_header_packed = buffer[:ipv4_header_size]
ipv4_header_unpacked = struct.unpack(IPV4_HEAD_FMT,ipv4_header_packed)
self.version_ihl = ipv4_header_unpacked[0]
self.ihl = self.version_ihl & 0xf
self.version = self.version_ihl >> 4
self.tos = ipv4_header_unpacked[1]
self.length = ipv4_header_unpacked[2]
self.identification = ipv4_header_unpacked[3]
self.fragment_offset = ipv4_header_unpacked[4]
self.ttl = ipv4_header_unpacked[5]
self.protocol = ipv4_header_unpacked[6]
self.checksum = ipv4_header_unpacked[7]
self.source_ip = str(ipaddress.IPv4Address(ipv4_header_unpacked[8] ))
self.destination_ip= str(ipaddress.IPv4Address(ipv4_header_unpacked[9] ))
self.data = buffer[ipv4_header_size:]
#print ("source ip == " + str( ipaddress.IPv4Address(self.source_ip)))
#print ("destination ip == " + str( ipaddress.IPv4Address(self.destination_ip)))
#print ("checksum = "+ str(self.checksum))
#print ("ttl == " + str(self.ttl))
| 48.554217
| 135
| 0.531762
|
import struct
import socket
import ipaddress
from .utils import calculate_checksum
IPV4_HEAD_FMT="!BBHHHBBHII"
class IPV4Datagram:
def __init__(self, source_ip="1.1.1.1",destination_ip="1.1.1.1" , version=4, ihl=5, tos=0,identification=54321,fragment_offset = 0,
ttl=253,protocol = socket.IPPROTO_UDP,data='', checksum=0):
self.version = version
self.ihl = ihl
self.version_ihl = (self.version << 4) + self.ihl
self.tos = tos
self.identification=identification
self.fragment_offset = fragment_offset
self.ttl = ttl
self.protocol = protocol
self.checksum = checksum
self.source_ip =int(ipaddress.IPv4Address( source_ip ))
self.destination_ip = int(ipaddress.IPv4Address(destination_ip ))
self.data = data
self.length= 4 * self.ihl + len(self.data)
    def __repr__(self):
        return 'IPV4Datagram({},{},({},{}))'.format(
            self.source_ip, self.destination_ip, self.checksum, self.data)
def pack(self):
ipv4_header = struct.pack(IPV4_HEAD_FMT, self.version_ihl,self.tos,self.length, self.identification,
self.fragment_offset, self.ttl, self.protocol, self.checksum, self.source_ip, self.destination_ip)
self.checksum = calculate_checksum(ipv4_header)
ipv4_header = struct.pack(IPV4_HEAD_FMT, self.version_ihl,self.tos,self.length, self.identification,
self.fragment_offset, self.ttl, self.protocol, self.checksum, self.source_ip, self.destination_ip)
return ipv4_header
def unpack(self, buffer):
ipv4_header_size = struct.calcsize(IPV4_HEAD_FMT)
ipv4_header_packed = buffer[:ipv4_header_size]
ipv4_header_unpacked = struct.unpack(IPV4_HEAD_FMT,ipv4_header_packed)
self.version_ihl = ipv4_header_unpacked[0]
self.ihl = self.version_ihl & 0xf
self.version = self.version_ihl >> 4
self.tos = ipv4_header_unpacked[1]
self.length = ipv4_header_unpacked[2]
self.identification = ipv4_header_unpacked[3]
self.fragment_offset = ipv4_header_unpacked[4]
self.ttl = ipv4_header_unpacked[5]
self.protocol = ipv4_header_unpacked[6]
self.checksum = ipv4_header_unpacked[7]
self.source_ip = str(ipaddress.IPv4Address(ipv4_header_unpacked[8] ))
self.destination_ip= str(ipaddress.IPv4Address(ipv4_header_unpacked[9] ))
self.data = buffer[ipv4_header_size:]
| true
| true
|
790b3dcead80e5e0b06be7a487cb1e8db6cf319d
| 10,880
|
py
|
Python
|
bot.py
|
XMYSTERlOUSX/ssh-creator-bot
|
68452adac6dfcc81629cd8d23d7631827e12c6b6
|
[
"Apache-2.0"
] | null | null | null |
bot.py
|
XMYSTERlOUSX/ssh-creator-bot
|
68452adac6dfcc81629cd8d23d7631827e12c6b6
|
[
"Apache-2.0"
] | null | null | null |
bot.py
|
XMYSTERlOUSX/ssh-creator-bot
|
68452adac6dfcc81629cd8d23d7631827e12c6b6
|
[
"Apache-2.0"
] | null | null | null |
import random
import string
import time
import os
try:
    import telepot
    from telepot.loop import MessageLoop
except ImportError:
    # Best-effort auto-install, then retry the import so the names are
    # actually bound.
    os.system('pip install telepot --user')
    import telepot
    from telepot.loop import MessageLoop
try:
    import requests
except ImportError:
    os.system('pip install requests --user')
    import requests
class host:
def __init__(self, host):
        h = host.replace('http://', '').replace('https://', '')
self.host = host
self.h = h
x = requests.get(url='https://api.hackertarget.com/dnslookup/?q='+self.h)
dns = x.text.split("\n")[0].split(":")[1].strip()
self.dns = dns
def port(self, chat):
x = requests.get(url='https://api.hackertarget.com/nmap/?q='+self.dns)
bot.sendMessage(chat, x.text)
def lookup(self, chat):
bot.sendMessage(chat, self.dns)
def header(self, chat):
xx = requests.get(url='https://api.hackertarget.com/httpheaders/?q='+self.host)
bot.sendMessage(chat, xx.text)
def links(self, chat):
zz = requests.get(url='https://api.hackertarget.com/pagelinks/?q='+self.h)
bot.sendMessage(chat, zz.text)
#print(host('https://vodafone.com.eg').links('asd'))
class ssh:
def __init__(self, ids_list):
self.session = requests.Session()
self.username = "".join(random.choices(string.ascii_lowercase + string.digits, k=random.randint(10, 12)))
self.password = "sshDieProfis"
self.servers_id = ids_list
def main(self, chat):
current_id = random.choice(self.servers_id)
url = "https://www.speedssh.com/"
req = self.session.get(url)
cookies = dict(req.cookies)
url = "https://www.speedssh.com/create-account-ssh.php"
headers = {
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'Host': 'www.speedssh.com',
'Origin': 'https://www.speedssh.com',
'Referer': 'https://www.speedssh.com/create-ssh-account-server/30/ssh-server-united-states-1',
'Sec-Fetch-Dest': 'empty',
'Sec-Fetch-Mode': 'cors',
'Sec-Fetch-Site': 'same-origin',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.104 Safari/537.36 OPR/74.0.3911.75',
'X-Requested-With': 'XMLHttpRequest',
}
data = f"serverid={current_id}&username={self.username}&password={self.password}"
req = self.session.post(url, headers=headers, data=data)
if "Your Account has been successfully created" in req.text:
host_ip = req.text.split("<br>")[6].split(":")[1].strip()
all_info = f"{host_ip}:443@speedssh.com-{self.username}:{self.password}"
ex = req.text.split("<br>")[8]
alls=f"host : {host_ip} \nusername : speedssh.com-{self.username}\npass : {self.password}\nhttp_custom : {host_ip}:443@speedssh.com-{self.username}:{self.password}\n{ex}"
bot.sendMessage(chat, alls)
return alls
elif "has reached Account maximum" in req.text:
self.servers_id.remove(current_id)
self.main(chat)
else:
self.servers_id.remove(current_id)
self.main(chat)
class ssl:
def __init__(self, ids_list):
self.session = requests.Session()
self.username = "".join(random.choices(string.ascii_lowercase + string.digits, k=random.randint(10, 12)))
self.password = "sslDieProfis"
self.servers_id = ids_list
def main(self, chat):
current_id = random.choice(self.servers_id)
url = "https://www.speedssh.com/"
req = self.session.get(url)
cookies = dict(req.cookies)
url = "https://www.speedssh.com/create-account-ssl.php"
headers = {
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'Host': 'www.speedssh.com',
'Origin': 'https://www.speedssh.com',
'Referer': 'https://www.speedssh.com/create-ssl-account-server/230/server-us-ssl/tls-1',
'Sec-Fetch-Dest': 'empty',
'Sec-Fetch-Mode': 'cors',
'Sec-Fetch-Site': 'same-origin',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.104 Safari/537.36 OPR/74.0.3911.75',
'X-Requested-With': 'XMLHttpRequest',
}
data = f"serverid={current_id}&username={self.username}&password={self.password}"
req = self.session.post(url, headers=headers, data=data)
if "Your Account has been successfully created" in req.text:
host_ip = req.text.split("<br>")[4].split(":")[1].strip()
all_info = f"{host_ip}:443@speedssh.com-{self.username}:{self.password}"
ex = req.text.split("<br>")[6]
alls=f"host : {host_ip} \nusername : speedssh.com-{self.username}\npass : {self.password}\nhttp_custom : {host_ip}:443@speedssh.com-{self.username}:{self.password}\n{ex}"
bot.sendMessage(chat, alls)
return alls
elif "has reached Account maximum" in req.text:
self.servers_id.remove(current_id)
self.main(chat)
else:
self.servers_id.remove(current_id)
self.main(chat)
serope = ["44", "46", "48", "50"]
sasia = ["36", "38", "40", "42"]
samrica = ["30", "32", "34"]
lerope = ["256", "252", "254", "256", "252"]
lasia = ["244", "238", "240", "242", "246", "248"]
lamrica = ["230", "234", "236"]
def substr(string, start, length = None):
if start < 0:
start = start + len(string)
if not length:
return string[start:]
elif length > 0:
return string[start:start + length]
else:
return string[start:length]
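# e.g. substr('abcdef', 1, 3) -> 'bcd'; substr('abcdef', -3) -> 'def';
# substr('abcdef', 1, -1) -> 'bcde'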
def bot_msg(msg):
chat_id = msg['chat']['id']
command = msg['text']
a=command
    if command.startswith('/ssl'):
one = a.find('-num ')+5
one2 = a.find('-plc') - one - 1
one3 = substr(a, one, one2)
two = a.find('-plc ')+5
two2 = a.find(';') - two
two3 = substr(a, two, two2)
string = '/ssl -num '+one3+' -plc '+two3+';'
if string in a:
if two3 == 'er':
bot.sendMessage(chat_id, 'wait 15s please')
for i in range(int(one3)):
creator = ssl(lerope)
x = creator.main(chat_id)
elif two3 == 'ar':
bot.sendMessage(chat_id, 'wait 15s please')
for i in range(int(one3)):
creator = ssl(lamrica)
x = creator.main(chat_id)
elif two3 == 'as':
bot.sendMessage(chat_id, 'wait 15s please')
for i in range(int(one3)):
creator = ssl(lasia)
x = creator.main(chat_id)
else:
                bot.sendMessage(chat_id, 'choose available place please')
else:
bot.sendMessage(chat_id, 'wrong syntax')
    elif command.startswith('/ssh'):
one = a.find('-num ')+5
one2 = a.find('-plc') - one - 1
one3 = substr(a, one, one2)
two = a.find('-plc ')+5
two2 = a.find(';') - two
two3 = substr(a, two, two2)
string = '/ssh -num '+one3+' -plc '+two3+';'
if string in a:
if two3 == 'er':
bot.sendMessage(chat_id, 'wait 5s please')
for i in range(int(one3)):
creator = ssh(serope)
x = creator.main(chat_id)
elif two3 == 'ar':
bot.sendMessage(chat_id, 'wait 5s please')
for i in range(int(one3)):
creator = ssh(samrica)
x = creator.main(chat_id)
elif two3 == 'as':
bot.sendMessage(chat_id, 'wait 5s please')
for i in range(int(one3)):
creator = ssh(sasia)
x = creator.main(chat_id)
else:
                bot.sendMessage(chat_id, 'choose available place please')
else:
bot.sendMessage(chat_id, 'wrong syntax')
    elif command.startswith('/host'):
one = a.find('-t ')+3
one2 = a.find('-h') - one - 1
one3 = substr(a, one, one2)
two = a.find('-h ')+3
two2 = a.find(';') - two
two3 = substr(a, two, two2)
string = '/host -t '+one3+' -h '+two3+';'
if string in a:
if one3 == 'port':
bot.sendMessage(chat_id, 'wait 5s please')
host(two3).port(chat_id)
elif one3 == 'lookup':
bot.sendMessage(chat_id, 'wait 5s please')
host(two3).lookup(chat_id)
elif one3 == 'header':
bot.sendMessage(chat_id, 'wait 5s please')
host(two3).header(chat_id)
elif one3 == 'links':
bot.sendMessage(chat_id, 'wait 5s please')
host(two3).links(chat_id)
else:
                bot.sendMessage(chat_id, 'choose available type please')
else:
bot.sendMessage(chat_id, 'wrong syntax')
    elif command.startswith('/help'):
        helps = 'welcome to Die Profis bot\nlist of classes:\n host\n ssh\n ssl\n trojan (coming soon in new update 24/2/2021)\n proxy (coming soon in new update 24/2/2021)\n create dns server (coming soon in new update 24/2/2021)\nclass host:\n syntax:\n /host -t <select type> -h <host>;\n list of options (types):\n -port -> check open ports in host\n -header -> get headers from host\n -lookup -> get ip from host (dns)\n -links -> show other links for host\n -test -> for test inject (coming soon in new update 24/2/2021)\n test:\n /host -t port -h vodafone.com.eg;\n\nclass ssh:\n syntax:\n /ssh -num <num of account> -plc <place>\n list of places:\n -er\n -as\n -ar\n -num -> number of accounts\n test:\n /ssh -num 5 -plc er;\n \nclass ssl:\n syntax:\n /ssl -num <num of account> -plc <place>\n list of places:\n -er\n -as\n -ar\n -num -> number of accounts\n test:\n /ssl -num 5 -plc er;\n\n '
bot.sendMessage(chat_id, helps)
    elif command.startswith('/start'):
        bot.sendMessage(chat_id, 'welcome mr:.... (what\'s your name?)')
bot = telepot.Bot('1871071012:AAF4U-vLrGSitG_qJVBjyc6bPBes-gozMOc')
MessageLoop(bot, bot_msg).run_as_thread()
while True:
time.sleep(1)
| 44.048583
| 1,093
| 0.536581
|
import random
import string
import time
import os
try:
    import telepot
    from telepot.loop import MessageLoop
except ImportError:
    os.system('pip install telepot --user')
    import telepot
    from telepot.loop import MessageLoop
try:
    import requests
except ImportError:
    os.system('pip install requests --user')
    import requests
class host:
def __init__(self, host):
        h = host.replace('http://', '').replace('https://', '')
self.host = host
self.h = h
x = requests.get(url='https://api.hackertarget.com/dnslookup/?q='+self.h)
dns = x.text.split("\n")[0].split(":")[1].strip()
self.dns = dns
def port(self, chat):
x = requests.get(url='https://api.hackertarget.com/nmap/?q='+self.dns)
bot.sendMessage(chat, x.text)
def lookup(self, chat):
bot.sendMessage(chat, self.dns)
def header(self, chat):
xx = requests.get(url='https://api.hackertarget.com/httpheaders/?q='+self.host)
bot.sendMessage(chat, xx.text)
def links(self, chat):
zz = requests.get(url='https://api.hackertarget.com/pagelinks/?q='+self.h)
bot.sendMessage(chat, zz.text)
class ssh:
def __init__(self, ids_list):
self.session = requests.Session()
self.username = "".join(random.choices(string.ascii_lowercase + string.digits, k=random.randint(10, 12)))
self.password = "sshDieProfis"
self.servers_id = ids_list
def main(self, chat):
current_id = random.choice(self.servers_id)
url = "https://www.speedssh.com/"
req = self.session.get(url)
cookies = dict(req.cookies)
url = "https://www.speedssh.com/create-account-ssh.php"
headers = {
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'Host': 'www.speedssh.com',
'Origin': 'https://www.speedssh.com',
'Referer': 'https://www.speedssh.com/create-ssh-account-server/30/ssh-server-united-states-1',
'Sec-Fetch-Dest': 'empty',
'Sec-Fetch-Mode': 'cors',
'Sec-Fetch-Site': 'same-origin',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.104 Safari/537.36 OPR/74.0.3911.75',
'X-Requested-With': 'XMLHttpRequest',
}
data = f"serverid={current_id}&username={self.username}&password={self.password}"
req = self.session.post(url, headers=headers, data=data)
if "Your Account has been successfully created" in req.text:
host_ip = req.text.split("<br>")[6].split(":")[1].strip()
all_info = f"{host_ip}:443@speedssh.com-{self.username}:{self.password}"
ex = req.text.split("<br>")[8]
alls=f"host : {host_ip} \nusername : speedssh.com-{self.username}\npass : {self.password}\nhttp_custom : {host_ip}:443@speedssh.com-{self.username}:{self.password}\n{ex}"
bot.sendMessage(chat, alls)
return alls
elif "has reached Account maximum" in req.text:
self.servers_id.remove(current_id)
self.main(chat)
else:
self.servers_id.remove(current_id)
self.main(chat)
class ssl:
def __init__(self, ids_list):
self.session = requests.Session()
self.username = "".join(random.choices(string.ascii_lowercase + string.digits, k=random.randint(10, 12)))
self.password = "sslDieProfis"
self.servers_id = ids_list
def main(self, chat):
current_id = random.choice(self.servers_id)
url = "https://www.speedssh.com/"
req = self.session.get(url)
cookies = dict(req.cookies)
url = "https://www.speedssh.com/create-account-ssl.php"
headers = {
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'Host': 'www.speedssh.com',
'Origin': 'https://www.speedssh.com',
'Referer': 'https://www.speedssh.com/create-ssl-account-server/230/server-us-ssl/tls-1',
'Sec-Fetch-Dest': 'empty',
'Sec-Fetch-Mode': 'cors',
'Sec-Fetch-Site': 'same-origin',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.104 Safari/537.36 OPR/74.0.3911.75',
'X-Requested-With': 'XMLHttpRequest',
}
data = f"serverid={current_id}&username={self.username}&password={self.password}"
req = self.session.post(url, headers=headers, data=data)
if "Your Account has been successfully created" in req.text:
host_ip = req.text.split("<br>")[4].split(":")[1].strip()
all_info = f"{host_ip}:443@speedssh.com-{self.username}:{self.password}"
ex = req.text.split("<br>")[6]
alls=f"host : {host_ip} \nusername : speedssh.com-{self.username}\npass : {self.password}\nhttp_custom : {host_ip}:443@speedssh.com-{self.username}:{self.password}\n{ex}"
bot.sendMessage(chat, alls)
return alls
elif "has reached Account maximum" in req.text:
self.servers_id.remove(current_id)
self.main(chat)
else:
self.servers_id.remove(current_id)
self.main(chat)
serope = ["44", "46", "48", "50"]
sasia = ["36", "38", "40", "42"]
samrica = ["30", "32", "34"]
lerope = ["256", "252", "254", "256", "252"]
lasia = ["244", "238", "240", "242", "246", "248"]
lamrica = ["230", "234", "236"]
def substr(string, start, length=None):
    # PHP-style substr: a negative start counts from the end of the string,
    # and length=None means "take everything to the end".
    if start < 0:
        start = start + len(string)
    if length is None:
        return string[start:]
    elif length > 0:
        return string[start:start + length]
    else:
        return string[start:length]
def bot_msg(msg):
chat_id = msg['chat']['id']
command = msg['text']
a=command
    if command.startswith('/ssl'):
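        # Hand-rolled parsing of '/ssl -num <N> -plc <place>;': slice out the
        # value after '-num ' and the value between '-plc ' and the ';'.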
one = a.find('-num ')+5
one2 = a.find('-plc') - one - 1
one3 = substr(a, one, one2)
two = a.find('-plc ')+5
two2 = a.find(';') - two
two3 = substr(a, two, two2)
string = '/ssl -num '+one3+' -plc '+two3+';'
if string in a:
if two3 == 'er':
bot.sendMessage(chat_id, 'wait 15s please')
for i in range(int(one3)):
creator = ssl(lerope)
x = creator.main(chat_id)
elif two3 == 'ar':
bot.sendMessage(chat_id, 'wait 15s please')
for i in range(int(one3)):
creator = ssl(lamrica)
x = creator.main(chat_id)
elif two3 == 'as':
bot.sendMessage(chat_id, 'wait 15s please')
for i in range(int(one3)):
creator = ssl(lasia)
x = creator.main(chat_id)
else:
                bot.sendMessage(chat_id, 'choose an available place please')
else:
bot.sendMessage(chat_id, 'wrong syntax')
    elif command.startswith('/ssh'):
one = a.find('-num ')+5
one2 = a.find('-plc') - one - 1
one3 = substr(a, one, one2)
two = a.find('-plc ')+5
two2 = a.find(';') - two
two3 = substr(a, two, two2)
string = '/ssh -num '+one3+' -plc '+two3+';'
if string in a:
if two3 == 'er':
bot.sendMessage(chat_id, 'wait 5s please')
for i in range(int(one3)):
creator = ssh(serope)
x = creator.main(chat_id)
elif two3 == 'ar':
bot.sendMessage(chat_id, 'wait 5s please')
for i in range(int(one3)):
creator = ssh(samrica)
x = creator.main(chat_id)
elif two3 == 'as':
bot.sendMessage(chat_id, 'wait 5s please')
for i in range(int(one3)):
creator = ssh(sasia)
x = creator.main(chat_id)
else:
                bot.sendMessage(chat_id, 'choose an available place please')
else:
bot.sendMessage(chat_id, 'wrong syntax')
    elif command.startswith('/host'):
one = a.find('-t ')+3
one2 = a.find('-h') - one - 1
one3 = substr(a, one, one2)
two = a.find('-h ')+3
two2 = a.find(';') - two
two3 = substr(a, two, two2)
string = '/host -t '+one3+' -h '+two3+';'
if string in a:
if one3 == 'port':
bot.sendMessage(chat_id, 'wait 5s please')
host(two3).port(chat_id)
elif one3 == 'lookup':
bot.sendMessage(chat_id, 'wait 5s please')
host(two3).lookup(chat_id)
elif one3 == 'header':
bot.sendMessage(chat_id, 'wait 5s please')
host(two3).header(chat_id)
elif one3 == 'links':
bot.sendMessage(chat_id, 'wait 5s please')
host(two3).links(chat_id)
else:
                bot.sendMessage(chat_id, 'choose an available type please')
else:
bot.sendMessage(chat_id, 'wrong syntax')
    elif command.startswith('/help'):
        helps = 'welcome to Die Profis bot\nlist of classes:\n host\n ssh\n ssl\n trojan (coming soon in new update 24/2/2021)\n proxy (coming soon in new update 24/2/2021)\n create dns server (coming soon in new update 24/2/2021)\nclass host:\n syntax:\n /host -t <select type> -h <host>;\n list of options (types):\n -port -> check open ports in host\n -header -> get headers from host\n -lookup -> get ip from host (dns)\n -links -> show other links for host\n -test -> for test inject (coming soon in new update 24/2/2021)\n test:\n /host -t port -h vodafone.com.eg;\n\nclass ssh:\n syntax:\n /ssh -num <number of accounts> -plc <place>;\n list of places:\n -er\n -as\n -ar\n -num -> number of accounts\n test:\n /ssh -num 5 -plc er;\n\nclass ssl:\n syntax:\n /ssl -num <number of accounts> -plc <place>;\n list of places:\n -er\n -as\n -ar\n -num -> number of accounts\n test:\n /ssl -num 5 -plc er;\n'
bot.sendMessage(chat_id, helps)
    elif command.startswith('/start'):
        bot.sendMessage(chat_id, "welcome mr:.... (what's your name?)")
bot = telepot.Bot('1871071012:AAF4U-vLrGSitG_qJVBjyc6bPBes-gozMOc')
MessageLoop(bot, bot_msg).run_as_thread()
while 1:
time.sleep(1)
| true
| true
|
790b3e5fb14d6e8d518bcda3bda455a223e93fe5
| 6,679
|
py
|
Python
|
adafruit_bus_device/i2c_device.py
|
rhthomas/Adafruit_CircuitPython_NRF24L01
|
a8507fca5585b1f2d1258a24a51f360be336142a
|
[
"MIT"
] | null | null | null |
adafruit_bus_device/i2c_device.py
|
rhthomas/Adafruit_CircuitPython_NRF24L01
|
a8507fca5585b1f2d1258a24a51f360be336142a
|
[
"MIT"
] | null | null | null |
adafruit_bus_device/i2c_device.py
|
rhthomas/Adafruit_CircuitPython_NRF24L01
|
a8507fca5585b1f2d1258a24a51f360be336142a
|
[
"MIT"
] | 4
|
2019-06-21T00:01:04.000Z
|
2021-11-17T08:49:51.000Z
|
# The MIT License (MIT)
#
# Copyright (c) 2016 Scott Shawcroft for Adafruit Industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`adafruit_bus_device.i2c_device` - I2C Bus Device
====================================================
"""
__version__ = "3.0.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_BusDevice.git"
class I2CDevice:
"""
Represents a single I2C device and manages locking the bus and the device
address.
:param ~busio.I2C i2c: The I2C bus the device is on
    :param int device_address: The 7-bit device address
.. note:: This class is **NOT** built into CircuitPython. See
:ref:`here for install instructions <bus_device_installation>`.
Example:
.. code-block:: python
import busio
from board import *
from adafruit_bus_device.i2c_device import I2CDevice
with busio.I2C(SCL, SDA) as i2c:
device = I2CDevice(i2c, 0x70)
bytes_read = bytearray(4)
with device:
device.readinto(bytes_read)
# A second transaction
with device:
device.write(bytes_read)
"""
def __init__(self, i2c, device_address):
"""
Try to read a byte from an address,
if you get an OSError it means the device is not there
"""
while not i2c.try_lock():
pass
try:
i2c.writeto(device_address, b'')
except OSError:
            # Some ports reject a zero-length write; fall back to reading a
            # single byte instead.
try:
result = bytearray(1)
i2c.readfrom_into(device_address, result)
except OSError:
raise ValueError("No I2C device at address: %x" % device_address)
finally:
i2c.unlock()
self.i2c = i2c
self.device_address = device_address
def readinto(self, buf, **kwargs):
"""
Read into ``buf`` from the device. The number of bytes read will be the
length of ``buf``.
        If ``start`` or ``end`` is provided, then the buffer will be sliced
        as if ``buf[start:end]`` were passed, but without the allocation
        that slicing would incur.
        :param bytearray buf: buffer to read into
        :param int start: index to start writing at
        :param int end: index to write up to but not including
"""
self.i2c.readfrom_into(self.device_address, buf, **kwargs)
def write(self, buf, **kwargs):
"""
        Write the bytes from ``buf`` to the device, transmitting a stop
        condition if ``stop`` is set.
        If ``start`` or ``end`` is provided, then the buffer will be sliced
        as if ``buf[start:end]`` were passed, but without the allocation
        that slicing would incur.
        :param bytearray buf: buffer containing the bytes to write
        :param int start: index to start writing from
        :param int end: index to write up to but not including
        :param bool stop: if true, output an I2C stop condition after the buffer is written
"""
self.i2c.writeto(self.device_address, buf, **kwargs)
#pylint: disable-msg=too-many-arguments
def write_then_readinto(self, out_buffer, in_buffer, *,
out_start=0, out_end=None, in_start=0, in_end=None, stop=True):
"""
        Write the bytes from ``out_buffer`` to the device, then immediately
        read into ``in_buffer`` from the device. The number of bytes read
        will be the length of ``in_buffer``.
        A stop condition is transmitted after the write if ``stop`` is set.
        If ``out_start`` or ``out_end`` is provided, then the output buffer
        will be sliced as if ``out_buffer[out_start:out_end]`` were passed,
        but without the allocation that slicing would incur.
        If ``in_start`` or ``in_end`` is provided, then the input buffer
        will be sliced as if ``in_buffer[in_start:in_end]`` were passed,
        but without the allocation that slicing would incur.
        :param bytearray out_buffer: buffer containing the bytes to write
        :param bytearray in_buffer: buffer to read into
        :param int out_start: index to start writing from
        :param int out_end: index to write up to but not including
        :param int in_start: index to start reading into at
        :param int in_end: index to read into up to but not including
        :param bool stop: if true, output an I2C stop condition after the write
"""
if out_end is None:
out_end = len(out_buffer)
if in_end is None:
in_end = len(in_buffer)
if hasattr(self.i2c, 'writeto_then_readfrom'):
            # On Linux, at least, this maps to a single combined kernel transaction
self.i2c.writeto_then_readfrom(self.device_address, out_buffer, in_buffer,
out_start=out_start, out_end=out_end,
in_start=in_start, in_end=in_end, stop=stop)
else:
# If we don't have a special implementation, we can fake it with two calls
self.write(out_buffer, start=out_start, end=out_end, stop=stop)
self.readinto(in_buffer, start=in_start, end=in_end)
#pylint: enable-msg=too-many-arguments
def __enter__(self):
while not self.i2c.try_lock():
pass
return self
def __exit__(self, *exc):
self.i2c.unlock()
return False
| 40.23494
| 91
| 0.636473
|
__version__ = "3.0.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_BusDevice.git"
class I2CDevice:
def __init__(self, i2c, device_address):
while not i2c.try_lock():
pass
try:
i2c.writeto(device_address, b'')
except OSError:
# Retry by reading a byte
try:
result = bytearray(1)
i2c.readfrom_into(device_address, result)
except OSError:
raise ValueError("No I2C device at address: %x" % device_address)
finally:
i2c.unlock()
self.i2c = i2c
self.device_address = device_address
def readinto(self, buf, **kwargs):
self.i2c.readfrom_into(self.device_address, buf, **kwargs)
def write(self, buf, **kwargs):
self.i2c.writeto(self.device_address, buf, **kwargs)
#pylint: disable-msg=too-many-arguments
def write_then_readinto(self, out_buffer, in_buffer, *,
out_start=0, out_end=None, in_start=0, in_end=None, stop=True):
if out_end is None:
out_end = len(out_buffer)
if in_end is None:
in_end = len(in_buffer)
if hasattr(self.i2c, 'writeto_then_readfrom'):
            # On Linux, at least, this maps to a single combined kernel transaction
self.i2c.writeto_then_readfrom(self.device_address, out_buffer, in_buffer,
out_start=out_start, out_end=out_end,
in_start=in_start, in_end=in_end, stop=stop)
else:
# If we don't have a special implementation, we can fake it with two calls
self.write(out_buffer, start=out_start, end=out_end, stop=stop)
self.readinto(in_buffer, start=in_start, end=in_end)
def __enter__(self):
while not self.i2c.try_lock():
pass
return self
def __exit__(self, *exc):
self.i2c.unlock()
return False
| true
| true
|
790b3f0878cde69c567ac20a3f2110095bd45e49
| 50,944
|
py
|
Python
|
src/sage/functions/piecewise.py
|
hsm207/sage
|
020bd59ec28717bfab9af44d2231c53da1ff99f1
|
[
"BSL-1.0"
] | 4
|
2020-07-17T04:49:44.000Z
|
2020-07-29T06:33:51.000Z
|
src/sage/functions/piecewise.py
|
Ivo-Maffei/sage
|
467fbc70a08b552b3de33d9065204ee9cbfb02c7
|
[
"BSL-1.0"
] | 1
|
2020-04-18T16:30:43.000Z
|
2020-04-18T16:30:43.000Z
|
src/sage/functions/piecewise.py
|
dimpase/sage
|
468f23815ade42a2192b0a9cd378de8fdc594dcd
|
[
"BSL-1.0"
] | null | null | null |
r"""
Piecewise-defined Functions
This module implements piecewise functions in a single variable. See
:mod:`sage.sets.real_set` for more information about how to construct
subsets of the real line for the domains.
EXAMPLES::
sage: f = piecewise([((0,1), x^3), ([-1,0], -x^2)]); f
piecewise(x|-->x^3 on (0, 1), x|-->-x^2 on [-1, 0]; x)
sage: 2*f
2*piecewise(x|-->x^3 on (0, 1), x|-->-x^2 on [-1, 0]; x)
sage: f(x=1/2)
1/8
sage: plot(f) # not tested
.. TODO::
    Implement max/min location and values.
AUTHORS:
- David Joyner (2006-04): initial version
- David Joyner (2006-09): added __eq__, extend_by_zero_to, unextend,
convolution, trapezoid, trapezoid_integral_approximation,
riemann_sum, riemann_sum_integral_approximation, tangent_line fixed
bugs in __mul__, __add__
- David Joyner (2007-03): adding Hann filter for FS, added general FS
filter methods for computing and plotting, added options to plotting
of FS (eg, specifying rgb values are now allowed). Fixed bug in
documentation reported by Pablo De Napoli.
- David Joyner (2007-09): bug fixes due to behaviour of
SymbolicArithmetic
- David Joyner (2008-04): fixed docstring bugs reported by J Morrow; added
support for Laplace transform of functions with infinite support.
- David Joyner (2008-07): fixed a left multiplication bug reported by
C. Boncelet (by defining __rmul__ = __mul__).
- Paul Butler (2009-01): added indefinite integration and default_variable
- Volker Braun (2013): Complete rewrite
- Ralf Stephan (2015): Rewrite of convolution() and other calculus
functions; many doctest adaptations
- Eric Gourgoulhon (2017): Improve documentation and user interface of
Fourier series
TESTS::
sage: fast_callable(f, vars=[x])(0.5)
0.125000000000...
"""
# ****************************************************************************
# Copyright (C) 2006 William Stein <wstein@gmail.com>
# 2006 David Joyner <wdjoyner@gmail.com>
# 2013 Volker Braun <vbraun.name@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# https://www.gnu.org/licenses/
# ****************************************************************************
from __future__ import absolute_import, division, print_function
from sage.symbolic.function import BuiltinFunction
from sage.sets.real_set import RealSet
from sage.symbolic.ring import SR
from sage.rings.infinity import minus_infinity, infinity
class PiecewiseFunction(BuiltinFunction):
def __init__(self):
"""
Piecewise function
EXAMPLES::
sage: var('x, y')
(x, y)
sage: f = piecewise([((0,1), x^2*y), ([-1,0], -x*y^2)], var=x); f
piecewise(x|-->x^2*y on (0, 1), x|-->-x*y^2 on [-1, 0]; x)
sage: f(1/2)
1/4*y
sage: f(-1/2)
1/2*y^2
"""
BuiltinFunction.__init__(self, "piecewise",
latex_name="piecewise",
conversions=dict(), nargs=2)
def __call__(self, function_pieces, **kwds):
r"""
Piecewise functions
INPUT:
- ``function_pieces`` -- a list of pairs consisting of a
domain and a symbolic function.
        - ``var=x`` -- a symbolic variable or ``None`` (default). The
          real variable in which the function is piecewise.
OUTPUT:
A piecewise-defined function. A ``ValueError`` will be raised
if the domains of the pieces are not pairwise disjoint.
EXAMPLES::
sage: my_abs = piecewise([((-1, 0), -x), ([0, 1], x)], var=x); my_abs
piecewise(x|-->-x on (-1, 0), x|-->x on [0, 1]; x)
sage: [ my_abs(i/5) for i in range(-4, 5)]
[4/5, 3/5, 2/5, 1/5, 0, 1/5, 2/5, 3/5, 4/5]
TESTS::
sage: piecewise([([-1, 0], -x), ([0, 1], x)], var=x)
Traceback (most recent call last):
...
ValueError: domains must be pairwise disjoint
sage: step = piecewise([((-1, 0), -1), ([0, 0], 0), ((0, 1), 1)], var=x); step
piecewise(x|-->-1 on (-1, 0), x|-->0 on {0}, x|-->1 on (0, 1); x)
sage: step(-1/2), step(0), step(1/2)
(-1, 0, 1)
"""
from types import FunctionType
var = kwds.pop('var', None)
parameters = []
domain_list = []
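        # Normalize each piece: coerce the domain to a RealSet, call plain
        # Python functions to get symbolic expressions, and infer the
        # piecewise variable when it was not supplied.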
for piece in function_pieces:
domain, function = piece
if not isinstance(domain, RealSet):
domain = RealSet(domain)
if domain.is_empty():
continue
if isinstance(function, FunctionType):
if var is None:
var = SR.var('x')
if function.__code__.co_argcount == 0:
function = function()
else:
function = function(var)
function = SR(function)
if var is None and len(function.variables()) > 0:
var = function.variables()[0]
parameters.append((domain, function))
domain_list.append(domain)
if not RealSet.are_pairwise_disjoint(*domain_list):
raise ValueError('domains must be pairwise disjoint')
if var is None:
var = self.default_variable()
parameters = SR._force_pyobject(tuple(parameters), recursive=False)
return BuiltinFunction.__call__(self, parameters, var, **kwds)
def _print_(self, parameters, variable):
"""
Return a string representation
OUTPUT:
String.
EXAMPLES::
sage: p = piecewise([((-2, 0), -x), ([0, 4], x)], var=x)
sage: str(p) # indirect doctest
'piecewise(x|-->-x on (-2, 0), x|-->x on [0, 4]; x)'
"""
s = 'piecewise('
args = []
for domain, func in parameters:
args.append('{0}|-->{1} on {2}'.format(str(variable), str(func), str(domain)))
s += ', '.join(args) + '; {0})'.format(str(variable))
return s
def _subs_(self, subs_map, options, parameters, x):
"""
Callback from Pynac `subs()`
EXAMPLES:
If the substitution changes the piecewise variable, it must
evaluate to a number so that we know which component we are
on::
sage: p = piecewise([((-2, 0), -x), ([0, 4], x)], var=x)
sage: p.subs(x=-1)
1
sage: (10+p).subs(x=-1)
11
sage: p.subs(x=pi)
pi
Auxiliary variables can be substituted arbitrarily::
sage: var('x,y')
(x, y)
sage: p = piecewise([((-2, 0), -x^y), ([0, 2], x-y)], var=x); p
piecewise(x|-->-x^y on (-2, 0), x|-->x - y on [0, 2]; x)
sage: p.subs(y=sin(y))
piecewise(x|-->-x^sin(y) on (-2, 0), x|-->x - sin(y) on [0, 2]; x)
"""
point = subs_map.apply_to(x, 0)
if point == x:
# substitution only in auxiliary variables
new_params = []
for domain, func in parameters:
new_params.append((domain, subs_map.apply_to(func, 0)))
return piecewise(new_params, var=x)
if ((point.is_numeric() or point.is_constant())
and (point.is_real())):
if hasattr(point, 'pyobject'):
# unwrap any numeric values
point = point.pyobject()
else:
raise ValueError('substituting the piecewise variable must result in real number')
for domain, func in parameters:
if domain.contains(point):
return subs_map.apply_to(func, 0)
raise ValueError('point {} is not in the domain'.format(point))
@staticmethod
def in_operands(ex):
"""
Return whether a symbolic expression contains a piecewise
        function as an operand
INPUT:
- ``ex`` -- a symbolic expression.
OUTPUT:
Boolean
EXAMPLES::
sage: f = piecewise([([0,0], sin(x)), ((0,2), cos(x))]); f
piecewise(x|-->sin(x) on {0}, x|-->cos(x) on (0, 2); x)
sage: piecewise.in_operands(f)
True
sage: piecewise.in_operands(1+sin(f))
True
sage: piecewise.in_operands(1+sin(0*f))
False
"""
def is_piecewise(ex):
result = ex.operator() is piecewise
for op in ex.operands():
result = result or is_piecewise(op)
return result
return is_piecewise(ex)
@staticmethod
def simplify(ex):
"""
Combine piecewise operands into single piecewise function
OUTPUT:
        A piecewise function whose operands are not piecewise if
        possible, that is, as long as the piecewise variable is the same.
EXAMPLES::
sage: f = piecewise([([0,0], sin(x)), ((0,2), cos(x))])
sage: piecewise.simplify(f)
Traceback (most recent call last):
...
NotImplementedError
"""
raise NotImplementedError
class EvaluationMethods(object):
def __pow__(self, parameters, variable, n):
"""
Return the `n`-th power of the piecewise function by applying the
operation to each piece.
INPUT:
- ``n`` -- number or symbolic expression
EXAMPLES::
sage: f1(x) = -abs(x) + 1; f2(x) = abs(x - 2) - 1
sage: f = piecewise([ [(-1,1), f1], [(1,3), f2]])
sage: (f^2).integral(definite=True)
4/3
"""
return piecewise(zip(self.domains(),
[ex**n for ex in self.expressions()]),
var=variable)
def expression_at(self, parameters, variable, point):
"""
        Return the expression defining the piecewise function at
        ``point``
INPUT:
- ``point`` -- a real number.
OUTPUT:
The symbolic expression defining the function value at the
given ``point``.
EXAMPLES::
sage: f = piecewise([([0,0], sin(x)), ((0,2), cos(x))]); f
piecewise(x|-->sin(x) on {0}, x|-->cos(x) on (0, 2); x)
sage: f.expression_at(0)
sin(x)
sage: f.expression_at(1)
cos(x)
sage: f.expression_at(2)
Traceback (most recent call last):
...
ValueError: point is not in the domain
"""
for domain, func in parameters:
if domain.contains(point):
return func
raise ValueError('point is not in the domain')
which_function = expression_at
def domains(self, parameters, variable):
"""
Return the individual domains
See also :meth:`~expressions`.
OUTPUT:
The collection of domains of the component functions as a
tuple of :class:`~sage.sets.real_set.RealSet`.
EXAMPLES::
sage: f = piecewise([([0,0], sin(x)), ((0,2), cos(x))]); f
piecewise(x|-->sin(x) on {0}, x|-->cos(x) on (0, 2); x)
sage: f.domains()
({0}, (0, 2))
"""
return tuple(dom for dom, fun in parameters)
def domain(self, parameters, variable):
"""
Return the domain
OUTPUT:
The union of the domains of the individual pieces as a
:class:`~sage.sets.real_set.RealSet`.
EXAMPLES::
sage: f = piecewise([([0,0], sin(x)), ((0,2), cos(x))]); f
piecewise(x|-->sin(x) on {0}, x|-->cos(x) on (0, 2); x)
sage: f.domain()
[0, 2)
"""
intervals = []
for domain, func in parameters:
intervals += list(domain)
return RealSet(*intervals)
def __len__(self, parameters, variable):
"""
Return the number of "pieces"
OUTPUT:
Integer.
EXAMPLES::
sage: f = piecewise([([0,0], sin(x)), ((0,2), cos(x))]); f
piecewise(x|-->sin(x) on {0}, x|-->cos(x) on (0, 2); x)
sage: len(f)
2
"""
return len(parameters)
def expressions(self, parameters, variable):
"""
        Return the individual expressions
See also :meth:`~domains`.
OUTPUT:
The collection of expressions of the component functions.
EXAMPLES::
sage: f = piecewise([([0,0], sin(x)), ((0,2), cos(x))]); f
piecewise(x|-->sin(x) on {0}, x|-->cos(x) on (0, 2); x)
sage: f.expressions()
(sin(x), cos(x))
"""
return tuple(fun for dom, fun in parameters)
def items(self, parameters, variable):
"""
Iterate over the pieces of the piecewise function
.. NOTE::
You should probably use :meth:`pieces` instead, which
offers a nicer interface.
OUTPUT:
This method iterates over pieces of the piecewise
function, each represented by a pair. The first element is
the support, and the second the function over that
support.
EXAMPLES::
sage: f = piecewise([([0,0], sin(x)), ((0,2), cos(x))])
sage: for support, function in f.items():
....: print('support is {0}, function is {1}'.format(support, function))
support is {0}, function is sin(x)
support is (0, 2), function is cos(x)
"""
for pair in parameters:
yield pair
def __call__(self, parameters, variable, value=None, **kwds):
"""
Call the piecewise function
EXAMPLES::
sage: f = piecewise([([0,0], sin(x)), ((0,2), cos(x))]); f
piecewise(x|-->sin(x) on {0}, x|-->cos(x) on (0, 2); x)
sage: f(0)
0
sage: f(1)
cos(1)
sage: f(2)
Traceback (most recent call last):
...
ValueError: point 2 is not in the domain
"""
self = piecewise(parameters, var=variable)
substitution = dict()
for k, v in kwds.items():
substitution[SR.var(k)] = v
if value is not None:
substitution[variable] = value
return self.subs(substitution)
def _fast_float_(self, *args):
"""
Do not support the old ``fast_float``
OUTPUT:
This method raises ``NotImplementedError`` so that
plotting uses the newer `fast_callable` implementation.
EXAMPLES::
sage: f = piecewise([([0,0], sin(x)), ((0,2), cos(x))])
sage: f._fast_float_()
Traceback (most recent call last):
...
NotImplementedError
"""
raise NotImplementedError
def _fast_callable_(self, parameters, variable, etb):
"""
Override the ``fast_callable``
OUTPUT:
A :class:`~sage.ext.fast_callable.ExpressionCall`
representing the piecewise function in the expression
tree.
EXAMPLES::
sage: p = piecewise([((-1, 0), -x), ([0, 1], x)], var=x)
sage: from sage.ext.fast_callable import ExpressionTreeBuilder
sage: etb = ExpressionTreeBuilder(vars=['x'])
sage: p._fast_callable_(etb)
{piecewise(x|-->-x on (-1, 0), x|-->x on [0, 1]; x)}(v_0)
"""
self = piecewise(parameters, var=variable)
return etb.call(self, variable)
def restriction(self, parameters, variable, restricted_domain):
"""
Restrict the domain
INPUT:
- ``restricted_domain`` -- a
:class:`~sage.sets.real_set.RealSet` or something that
defines one.
OUTPUT:
A new piecewise function obtained by restricting the domain.
EXAMPLES::
sage: f = piecewise([((-oo, oo), x)]); f
piecewise(x|-->x on (-oo, +oo); x)
sage: f.restriction([[-1,1], [3,3]])
piecewise(x|-->x on [-1, 1] + {3}; x)
"""
restricted_domain = RealSet(*restricted_domain)
new_param = []
for domain, func in parameters:
domain = domain.intersection(restricted_domain)
new_param.append((domain, func))
return piecewise(new_param, var=variable)
def extension(self, parameters, variable, extension, extension_domain=None):
"""
Extend the function
INPUT:
- ``extension`` -- a symbolic expression
- ``extension_domain`` -- a
:class:`~sage.sets.real_set.RealSet` or ``None``
(default). The domain of the extension. By default, the
entire complement of the current domain.
EXAMPLES::
sage: f = piecewise([((-1,1), x)]); f
piecewise(x|-->x on (-1, 1); x)
sage: f(3)
Traceback (most recent call last):
...
ValueError: point 3 is not in the domain
sage: g = f.extension(0); g
piecewise(x|-->x on (-1, 1), x|-->0 on (-oo, -1] + [1, +oo); x)
sage: g(3)
0
sage: h = f.extension(1, RealSet.unbounded_above_closed(1)); h
piecewise(x|-->x on (-1, 1), x|-->1 on [1, +oo); x)
sage: h(3)
1
"""
self = piecewise(parameters, var=variable)
if extension_domain is None:
extension_domain = self.domain().complement()
ext = ((extension_domain, SR(extension)),)
return piecewise(parameters + ext, var=variable)
def unextend_zero(self, parameters, variable):
"""
Remove zero pieces.
EXAMPLES::
sage: f = piecewise([((-1,1), x)]); f
piecewise(x|-->x on (-1, 1); x)
sage: g = f.extension(0); g
piecewise(x|-->x on (-1, 1), x|-->0 on (-oo, -1] + [1, +oo); x)
sage: g(3)
0
sage: h = g.unextend_zero()
sage: bool(h == f)
True
"""
result = [(domain, func) for domain,func in parameters
if func != 0]
return piecewise(result, var=variable)
def pieces(self, parameters, variable):
"""
Return the "pieces".
OUTPUT:
A tuple of piecewise functions, each having only a single
expression.
EXAMPLES::
sage: p = piecewise([((-1, 0), -x), ([0, 1], x)], var=x)
sage: p.pieces()
(piecewise(x|-->-x on (-1, 0); x),
piecewise(x|-->x on [0, 1]; x))
"""
result = []
for domain, func in parameters:
result.append(piecewise([(domain, func)], var=variable))
return tuple(result)
def end_points(self, parameters, variable):
"""
Return a list of all interval endpoints for this function.
EXAMPLES::
sage: f1(x) = 1
sage: f2(x) = 1-x
sage: f3(x) = x^2-5
sage: f = piecewise([[(0,1),f1],[(1,2),f2],[(2,3),f3]])
sage: f.end_points()
[0, 1, 2, 3]
sage: f = piecewise([([0,0], sin(x)), ((0,2), cos(x))]); f
piecewise(x|-->sin(x) on {0}, x|-->cos(x) on (0, 2); x)
sage: f.end_points()
[0, 2]
"""
s = set()
for domain, func in parameters:
for interval in domain:
s.add(interval.lower())
s.add(interval.upper())
s.discard(minus_infinity)
s.discard(infinity)
return sorted(s)
def piecewise_add(self, parameters, variable, other):
"""
        Return a new piecewise function whose domain is the union of the
        original domains and whose pieces are the sums of the two
        functions. Undefined intervals in the union domain get function
        value `0`.
EXAMPLES::
sage: f = piecewise([([0,1], 1), ((2,3), x)])
sage: g = piecewise([((1/2, 2), x)])
sage: f.piecewise_add(g).unextend_zero()
piecewise(x|-->1 on (0, 1/2], x|-->x + 1 on (1/2, 1], x|-->x on (1, 2) + (2, 3); x)
"""
points = ([minus_infinity] +
sorted(set(self.end_points() + other.end_points())) +
[infinity])
domain = []
funcs = []
contains_lower = False
contains_upper = False
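        # Sweep consecutive breakpoints: classify each subinterval as open or
        # closed at either end depending on which summand's domain contains
        # the endpoint, then sample a point inside it to pick the two
        # defining expressions.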
for i in range(len(points)-1):
try:
contains_lower = (self.domain().contains(points[i]) or
other.domain().contains(points[i])) and not contains_upper
contains_upper = (self.domain().contains(points[i+1]) or
other.domain().contains(points[i+1]))
if contains_lower:
if contains_upper:
rs = RealSet.closed(points[i],points[i+1])
else:
rs = RealSet.closed_open(points[i],points[i+1])
else:
if contains_upper:
rs = RealSet.open_closed(points[i],points[i+1])
else:
rs = RealSet.open(points[i],points[i+1])
point = (points[i+1] + points[i])/2
except ValueError:
if points[i] == minus_infinity and points[i+1] == infinity:
rs = RealSet.open(minus_infinity, infinity)
point = 0
elif points[i] == minus_infinity:
if contains_lower:
rs = RealSet.unbounded_below_closed(points[i+1])
else:
rs = RealSet.unbounded_below_open(points[i+1])
point = points[i+1]-1
elif points[i+1] == infinity:
if contains_upper:
rs = RealSet.unbounded_above_closed(points[i])
else:
rs = RealSet.unbounded_above_open(points[i])
point = points[i]+1
else:
raise
try:
ex1 = self.expression_at(point)
except ValueError:
ex1 = 0
try:
ex2 = other.expression_at(point)
except ValueError:
ex2 = 0
ex = ex1 + ex2
if i>0 and funcs[-1] == ex:
# extend the previous domain
rs += domain[-1]
domain[-1] = rs
else:
domain += rs
funcs.append(ex)
return piecewise(zip(domain, funcs))
def integral(self, parameters, variable, x=None, a=None, b=None, definite=False):
r"""
        By default, return the indefinite integral of the function.
        If ``definite=True`` is given, return the definite integral instead.
AUTHOR:
- Paul Butler
EXAMPLES::
sage: f1(x) = 1-x
sage: f = piecewise([((0,1),1), ((1,2),f1)])
sage: f.integral(definite=True)
1/2
::
sage: f1(x) = -1
sage: f2(x) = 2
sage: f = piecewise([((0,pi/2),f1), ((pi/2,pi),f2)])
sage: f.integral(definite=True)
1/2*pi
sage: f1(x) = 2
sage: f2(x) = 3 - x
sage: f = piecewise([[(-2, 0), f1], [(0, 3), f2]])
sage: f.integral()
piecewise(x|-->2*x + 4 on (-2, 0), x|-->-1/2*x^2 + 3*x + 4 on (0, 3); x)
sage: f1(y) = -1
sage: f2(y) = y + 3
sage: f3(y) = -y - 1
sage: f4(y) = y^2 - 1
sage: f5(y) = 3
sage: f = piecewise([[[-4,-3],f1],[(-3,-2),f2],[[-2,0],f3],[(0,2),f4],[[2,3],f5]])
sage: F = f.integral(y)
sage: F
piecewise(y|-->-y - 4 on [-4, -3], y|-->1/2*y^2 + 3*y + 7/2 on (-3, -2), y|-->-1/2*y^2 - y - 1/2 on [-2, 0], y|-->1/3*y^3 - y - 1/2 on (0, 2), y|-->3*y - 35/6 on [2, 3]; y)
Ensure results are consistent with FTC::
sage: F(-3) - F(-4)
-1
sage: F(-1) - F(-3)
1
sage: F(2) - F(0)
2/3
sage: f.integral(y, 0, 2)
2/3
sage: F(3) - F(-4)
19/6
sage: f.integral(y, -4, 3)
19/6
sage: f.integral(definite=True)
19/6
::
sage: f1(y) = (y+3)^2
sage: f2(y) = y+3
sage: f3(y) = 3
sage: f = piecewise([[(-infinity, -3), f1], [(-3, 0), f2], [(0, infinity), f3]])
sage: f.integral()
piecewise(y|-->1/3*y^3 + 3*y^2 + 9*y + 9 on (-oo, -3), y|-->1/2*y^2 + 3*y + 9/2 on (-3, 0), y|-->3*y + 9/2 on (0, +oo); y)
::
sage: f1(x) = e^(-abs(x))
sage: f = piecewise([[(-infinity, infinity), f1]])
sage: f.integral(definite=True)
2
sage: f.integral()
piecewise(x|-->-integrate(e^(-abs(x)), x, x, +Infinity) on (-oo, +oo); x)
::
sage: f = piecewise([((0, 5), cos(x))])
sage: f.integral()
piecewise(x|-->sin(x) on (0, 5); x)
TESTS:
Verify that piecewise integrals of zero work (:trac:`10841`)::
sage: f0(x) = 0
sage: f = piecewise([[[0,1],f0]])
sage: f.integral(x,0,1)
0
sage: f = piecewise([[[0,1], 0]])
sage: f.integral(x,0,1)
0
sage: f = piecewise([[[0,1], SR(0)]])
sage: f.integral(x,0,1)
0
"""
if a is not None and b is not None:
F = self.integral(x)
return F(b) - F(a)
if a is not None or b is not None:
raise TypeError('only one endpoint given')
area = 0
new_pieces = []
if x is None:
x = self.default_variable()
# The integral is computed by iterating over the pieces in order.
# The definite integral for each piece is calculated and accumulated in `area`.
# The indefinite integral of each piece is also calculated,
# and the `area` before each piece is added to the piece.
#
# If a definite integral is requested, `area` is returned.
# Otherwise, a piecewise function is constructed from the indefinite integrals
# and returned.
#
# An exception is made if integral is called on a piecewise function
# that starts at -infinity. In this case, we do not try to calculate the
# definite integral of the first piece, and the value of `area` remains 0
# after the first piece.
from sage.symbolic.assumptions import assume, forget
for domain, fun in parameters:
for interval in domain:
start = interval.lower()
end = interval.upper()
if start == -infinity and not definite:
fun_integrated = fun.integral(x, end, x)
else:
try:
assume(start < x)
except ValueError: # Assumption is redundant
pass
fun_integrated = fun.integral(x, start, x) + area
forget(start < x)
if definite or end != infinity:
area += fun.integral(x, start, end)
new_pieces.append([interval, SR(fun_integrated).function(x)])
if definite:
return SR(area)
else:
return piecewise(new_pieces)
def critical_points(self, parameters, variable):
"""
Return the critical points of this piecewise function.
EXAMPLES::
sage: R.<x> = QQ[]
sage: f1 = x^0
sage: f2 = 10*x - x^2
sage: f3 = 3*x^4 - 156*x^3 + 3036*x^2 - 26208*x
sage: f = piecewise([[(0,3),f1],[(3,10),f2],[(10,20),f3]])
sage: expected = [5, 12, 13, 14]
sage: all(abs(e-a) < 0.001 for e,a in zip(expected, f.critical_points()))
True
TESTS:
Use variables other than x (:trac:`13836`)::
sage: R.<y> = QQ[]
sage: f1 = y^0
sage: f2 = 10*y - y^2
sage: f3 = 3*y^4 - 156*y^3 + 3036*y^2 - 26208*y
sage: f = piecewise([[(0,3),f1],[(3,10),f2],[(10,20),f3]])
sage: expected = [5, 12, 13, 14]
sage: all(abs(e-a) < 0.001 for e,a in zip(expected, f.critical_points()))
True
"""
from sage.calculus.calculus import maxima
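        # Collect the interior roots of f'(x) on each piece via Maxima's
        # allroots().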
x = self.default_variable()
crit_pts = []
for domain, f in parameters:
for interval in domain:
a = interval.lower()
b = interval.upper()
for root in maxima.allroots(SR(f).diff(x)==0):
root = float(root.rhs())
if a < root < b:
crit_pts.append(root)
return crit_pts
def convolution(self, parameters, variable, other):
r"""
Return the convolution function,
`f*g(t)=\int_{-\infty}^\infty f(u)g(t-u)du`, for compactly
supported `f,g`.
EXAMPLES::
sage: x = PolynomialRing(QQ,'x').gen()
sage: f = piecewise([[[0,1],1]]) ## example 0
sage: g = f.convolution(f); g
piecewise(x|-->x on (0, 1], x|-->-x + 2 on (1, 2]; x)
sage: h = f.convolution(g); h
piecewise(x|-->1/2*x^2 on (0, 1], x|-->-x^2 + 3*x - 3/2 on (1, 2], x|-->1/2*x^2 - 3*x + 9/2 on (2, 3]; x)
sage: f = piecewise([[(0,1),1],[(1,2),2],[(2,3),1]]) ## example 1
sage: g = f.convolution(f)
sage: h = f.convolution(g); h
piecewise(x|-->1/2*x^2 on (0, 1], x|-->2*x^2 - 3*x + 3/2 on (1, 3], x|-->-2*x^2 + 21*x - 69/2 on (3, 4], x|-->-5*x^2 + 45*x - 165/2 on (4, 5], x|-->-2*x^2 + 15*x - 15/2 on (5, 6], x|-->2*x^2 - 33*x + 273/2 on (6, 8], x|-->1/2*x^2 - 9*x + 81/2 on (8, 9]; x)
sage: f = piecewise([[(-1,1),1]]) ## example 2
sage: g = piecewise([[(0,3),x]])
sage: f.convolution(g)
piecewise(x|-->1/2*x^2 + x + 1/2 on (-1, 1], x|-->2*x on (1, 2], x|-->-1/2*x^2 + x + 4 on (2, 4]; x)
sage: g = piecewise([[(0,3),1],[(3,4),2]])
sage: f.convolution(g)
piecewise(x|-->x + 1 on (-1, 1], x|-->2 on (1, 2], x|-->x on (2, 3], x|-->-x + 6 on (3, 4], x|-->-2*x + 10 on (4, 5]; x)
Check that the bugs raised in :trac:`12123` are fixed::
sage: f = piecewise([[(-2, 2), 2]])
sage: g = piecewise([[(0, 2), 3/4]])
sage: f.convolution(g)
piecewise(x|-->3/2*x + 3 on (-2, 0], x|-->3 on (0, 2], x|-->-3/2*x + 6 on (2, 4]; x)
sage: f = piecewise([[(-1, 1), 1]])
sage: g = piecewise([[(0, 1), x], [(1, 2), -x + 2]])
sage: f.convolution(g)
piecewise(x|-->1/2*x^2 + x + 1/2 on (-1, 0], x|-->-1/2*x^2 + x + 1/2 on (0, 2], x|-->1/2*x^2 - 3*x + 9/2 on (2, 3]; x)
"""
from sage.symbolic.integration.integral import definite_integral
f = self
g = other
if len(f.end_points())*len(g.end_points()) == 0:
raise ValueError('one of the piecewise functions is nowhere defined')
tt = SR.var('tt')
uu = SR.var('uu')
fd, f0 = parameters[0]
gd, g0 = next(other.items())
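        # Single-piece case: integrate f(u)*g(t-u) over the overlap of the
        # two supports; fg1..fg4 below cover the ways the sliding window
        # [t-b2, t-b1] can intersect [a1, a2].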
if len(f)==1 and len(g)==1:
f = f.unextend_zero()
g = g.unextend_zero()
a1 = fd[0].lower()
a2 = fd[0].upper()
b1 = gd[0].lower()
b2 = gd[0].upper()
i1 = f0.subs({variable: uu})
i2 = g0.subs({variable: tt-uu})
fg1 = definite_integral(i1*i2, uu, a1, tt-b1).subs(tt = variable)
fg2 = definite_integral(i1*i2, uu, tt-b2, tt-b1).subs(tt = variable)
fg3 = definite_integral(i1*i2, uu, tt-b2, a2).subs(tt = variable)
fg4 = definite_integral(i1*i2, uu, a1, a2).subs(tt = variable)
if a1-b1<a2-b2:
if a2+b1!=a1+b2:
h = piecewise([[(a1+b1,a1+b2),fg1],[(a1+b2,a2+b1),fg2],[(a2+b1,a2+b2),fg3]])
else:
h = piecewise([[(a1+b1,a1+b2),fg1],[(a1+b2,a2+b2),fg3]])
else:
if a1+b2!=a2+b1:
h = piecewise([[(a1+b1,a2+b1),fg1],[(a2+b1,a1+b2),fg4],[(a1+b2,a2+b2),fg3]])
else:
h = piecewise([[(a1+b1,a2+b1),fg1],[(a2+b1,a2+b2),fg3]])
return (piecewise([[(minus_infinity,infinity),0]]).piecewise_add(h)).unextend_zero()
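        # Multi-piece case: convolve the pieces pairwise and accumulate the
        # results with piecewise_add().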
if len(f)>1 or len(g)>1:
z = piecewise([[(0,0),0]])
for fpiece in f.pieces():
for gpiece in g.pieces():
h = gpiece.convolution(fpiece)
z = z.piecewise_add(h)
return z.unextend_zero()
def trapezoid(self, parameters, variable, N):
"""
        Return the piecewise linear function defined by the trapezoid rule
for numerical integration based on a subdivision of each domain
interval into N subintervals.
EXAMPLES::
sage: f = piecewise([[[0,1], x^2], [RealSet.open_closed(1,2), 5-x^2]])
sage: f.trapezoid(2)
piecewise(x|-->1/2*x on (0, 1/2), x|-->3/2*x - 1/2 on (1/2, 1), x|-->7/2*x - 5/2 on (1, 3/2), x|-->-7/2*x + 8 on (3/2, 2); x)
sage: f = piecewise([[[-1,1], 1-x^2]])
sage: f.trapezoid(4).integral(definite=True)
5/4
sage: f = piecewise([[[-1,1], 1/2+x-x^3]]) ## example 3
sage: f.trapezoid(6).integral(definite=True)
1
TESTS:
Use variables or rings other than x (:trac:`13836`)::
sage: R.<y> = QQ[]
sage: f1 = y^2
sage: f2 = 5-y^2
sage: f = piecewise([[[0,1],f1], [RealSet.open_closed(1,2),f2]])
sage: f.trapezoid(2)
piecewise(y|-->1/2*y on (0, 1/2), y|-->3/2*y - 1/2 on (1/2, 1), y|-->7/2*y - 5/2 on (1, 3/2), y|-->-7/2*y + 8 on (3/2, 2); y)
"""
def func(x0, x1):
f0, f1 = self(x0), self(x1)
return [[(x0,x1), f0 + (f1-f0) * (x1-x0)**(-1)
* (self.default_variable()-x0)]]
rsum = []
for domain, f in parameters:
for interval in domain:
a = interval.lower()
b = interval.upper()
h = (b-a)/N
for i in range(N):
x0 = a+i*h
x1 = a+(i+1)*h
rsum += func(x0, x1)
return piecewise(rsum)
def laplace(self, parameters, variable, x='x', s='t'):
r"""
        Return the Laplace transform of ``self`` with respect to the
        variable ``x``.
        INPUT:
        - ``x`` -- independent variable of ``self``
        - ``s`` -- variable of the Laplace transform
We assume that a piecewise function is 0 outside of its domain and
that the left-most endpoint of the domain is 0.
EXAMPLES::
sage: x, s, w = var('x, s, w')
sage: f = piecewise([[(0,1),1],[[1,2], 1-x]])
sage: f.laplace(x, s)
-e^(-s)/s + (s + 1)*e^(-2*s)/s^2 + 1/s - e^(-s)/s^2
sage: f.laplace(x, w)
-e^(-w)/w + (w + 1)*e^(-2*w)/w^2 + 1/w - e^(-w)/w^2
::
sage: y, t = var('y, t')
sage: f = piecewise([[[1,2], 1-y]])
sage: f.laplace(y, t)
(t + 1)*e^(-2*t)/t^2 - e^(-t)/t^2
::
sage: s = var('s')
sage: t = var('t')
sage: f1(t) = -t
sage: f2(t) = 2
sage: f = piecewise([[[0,1],f1],[(1,infinity),f2]])
sage: f.laplace(t,s)
(s + 1)*e^(-s)/s^2 + 2*e^(-s)/s - 1/s^2
"""
from sage.all import assume, exp, forget
x = SR.var(x)
s = SR.var(s)
assume(s>0)
result = 0
for domain, f in parameters:
for interval in domain:
a = interval.lower()
b = interval.upper()
result += (SR(f)*exp(-s*x)).integral(x,a,b)
forget(s>0)
return result
def fourier_series_cosine_coefficient(self, parameters,
variable, n, L=None):
r"""
Return the `n`-th cosine coefficient of the Fourier series of
the periodic function `f` extending the piecewise-defined
function ``self``.
Given an integer `n\geq 0`, the `n`-th cosine coefficient of
the Fourier series of `f` is defined by
.. MATH::
a_n = \frac{1}{L}\int_{-L}^L
f(x)\cos\left(\frac{n\pi x}{L}\right) dx,
where `L` is the half-period of `f`. For `n\geq 1`, `a_n` is
the coefficient of `\cos(n\pi x/L)` in the Fourier series of
`f`, while `a_0` is twice the coefficient of the constant
term `\cos(0 x)`, i.e. twice the mean value of `f` over one
period (cf. :meth:`fourier_series_partial_sum`).
INPUT:
- ``n`` -- a non-negative integer
- ``L`` -- (default: ``None``) the half-period of `f`; if none
is provided, `L` is assumed to be the half-width of the domain
of ``self``
OUTPUT:
- the Fourier coefficient `a_n`, as defined above
EXAMPLES:
A triangle wave function of period 2::
sage: f = piecewise([((0,1), x), ((1,2), 2-x)])
sage: f.fourier_series_cosine_coefficient(0)
1
sage: f.fourier_series_cosine_coefficient(3)
-4/9/pi^2
If the domain of the piecewise-defined function encompasses
more than one period, the half-period must be passed as the
second argument; for instance::
sage: f2 = piecewise([((0,1), x), ((1,2), 2-x),
....: ((2,3), x-2), ((3,4), 2-(x-2))])
sage: bool(f2.restriction((0,2)) == f) # f2 extends f on (0,4)
True
sage: f2.fourier_series_cosine_coefficient(3, 1) # half-period = 1
-4/9/pi^2
The default half-period is 2 and one has::
sage: f2.fourier_series_cosine_coefficient(3) # half-period = 2
0
The Fourier coefficient `-4/(9\pi^2)` obtained above is actually
recovered for `n=6`::
sage: f2.fourier_series_cosine_coefficient(6)
-4/9/pi^2
Other examples::
sage: f(x) = x^2
sage: f = piecewise([[(-1,1),f]])
sage: f.fourier_series_cosine_coefficient(2)
pi^(-2)
sage: f1(x) = -1
sage: f2(x) = 2
sage: f = piecewise([[(-pi,pi/2),f1],[(pi/2,pi),f2]])
sage: f.fourier_series_cosine_coefficient(5,pi)
-3/5/pi
"""
from sage.all import cos, pi
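        # L0 is the half-width of the domain; a user-supplied half-period L
        # must divide it an integral number of times.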
L0 = (self.domain().sup() - self.domain().inf()) / 2
if not L:
L = L0
else:
m = L0 / L
if not (m.is_integer() and m > 0):
raise ValueError("the width of the domain of " +
"{} is not a multiple ".format(self) +
"of the given period")
x = SR.var('x')
result = 0
for domain, f in parameters:
for interval in domain:
a = interval.lower()
b = interval.upper()
result += (f*cos(pi*x*n/L)).integrate(x, a, b)
return SR(result/L0).simplify_trig()
def fourier_series_sine_coefficient(self, parameters, variable,
n, L=None):
r"""
Return the `n`-th sine coefficient of the Fourier series of
the periodic function `f` extending the piecewise-defined
function ``self``.
Given an integer `n\geq 0`, the `n`-th sine coefficient of
the Fourier series of `f` is defined by
.. MATH::
b_n = \frac{1}{L}\int_{-L}^L
f(x)\sin\left(\frac{n\pi x}{L}\right) dx,
where `L` is the half-period of `f`. The number `b_n` is
the coefficient of `\sin(n\pi x/L)` in the Fourier
series of `f` (cf. :meth:`fourier_series_partial_sum`).
INPUT:
- ``n`` -- a non-negative integer
- ``L`` -- (default: ``None``) the half-period of `f`; if none
is provided, `L` is assumed to be the half-width of the domain
of ``self``
OUTPUT:
- the Fourier coefficient `b_n`, as defined above
EXAMPLES:
A square wave function of period 2::
sage: f = piecewise([((-1,0), -1), ((0,1), 1)])
sage: f.fourier_series_sine_coefficient(1)
4/pi
sage: f.fourier_series_sine_coefficient(2)
0
sage: f.fourier_series_sine_coefficient(3)
4/3/pi
If the domain of the piecewise-defined function encompasses
more than one period, the half-period must be passed as the
second argument; for instance::
sage: f2 = piecewise([((-1,0), -1), ((0,1), 1),
....: ((1,2), -1), ((2,3), 1)])
sage: bool(f2.restriction((-1,1)) == f) # f2 extends f on (-1,3)
True
sage: f2.fourier_series_sine_coefficient(1, 1) # half-period = 1
4/pi
sage: f2.fourier_series_sine_coefficient(3, 1) # half-period = 1
4/3/pi
The default half-period is 2 and one has::
sage: f2.fourier_series_sine_coefficient(1) # half-period = 2
0
sage: f2.fourier_series_sine_coefficient(3) # half-period = 2
0
The Fourier coefficients obtained from ``f`` are actually
recovered for `n=2` and `n=6` respectively::
sage: f2.fourier_series_sine_coefficient(2)
4/pi
sage: f2.fourier_series_sine_coefficient(6)
4/3/pi
"""
from sage.all import sin, pi
L0 = (self.domain().sup() - self.domain().inf()) / 2
if not L:
L = L0
else:
m = L0 / L
if not (m.is_integer() and m > 0):
raise ValueError("the width of the domain of " +
"{} is not a multiple ".format(self) +
"of the given period")
x = SR.var('x')
result = 0
for domain, f in parameters:
for interval in domain:
a = interval.lower()
b = interval.upper()
result += (f*sin(pi*x*n/L)).integrate(x, a, b)
return SR(result/L0).simplify_trig()
def fourier_series_partial_sum(self, parameters, variable, N,
L=None):
r"""
        Return the partial sum up to a given order of the Fourier series
of the periodic function `f` extending the piecewise-defined
function ``self``.
The Fourier partial sum of order `N` is defined as
.. MATH::
S_{N}(x) = \frac{a_0}{2} + \sum_{n=1}^{N} \left[
a_n\cos\left(\frac{n\pi x}{L}\right)
+ b_n\sin\left(\frac{n\pi x}{L}\right)\right],
where `L` is the half-period of `f` and the `a_n`'s and `b_n`'s
are respectively the cosine coefficients and sine coefficients
of the Fourier series of `f` (cf.
:meth:`fourier_series_cosine_coefficient` and
:meth:`fourier_series_sine_coefficient`).
INPUT:
- ``N`` -- a positive integer; the order of the partial sum
- ``L`` -- (default: ``None``) the half-period of `f`; if none
is provided, `L` is assumed to be the half-width of the domain
of ``self``
OUTPUT:
- the partial sum `S_{N}(x)`, as a symbolic expression
EXAMPLES:
A square wave function of period 2::
sage: f = piecewise([((-1,0), -1), ((0,1), 1)])
sage: f.fourier_series_partial_sum(5)
4/5*sin(5*pi*x)/pi + 4/3*sin(3*pi*x)/pi + 4*sin(pi*x)/pi
If the domain of the piecewise-defined function encompasses
more than one period, the half-period must be passed as the
second argument; for instance::
sage: f2 = piecewise([((-1,0), -1), ((0,1), 1),
....: ((1,2), -1), ((2,3), 1)])
sage: bool(f2.restriction((-1,1)) == f) # f2 extends f on (-1,3)
True
sage: f2.fourier_series_partial_sum(5, 1) # half-period = 1
4/5*sin(5*pi*x)/pi + 4/3*sin(3*pi*x)/pi + 4*sin(pi*x)/pi
sage: bool(f2.fourier_series_partial_sum(5, 1) ==
....: f.fourier_series_partial_sum(5))
True
The default half-period is 2, so that skipping the second
argument yields a different result::
sage: f2.fourier_series_partial_sum(5) # half-period = 2
4*sin(pi*x)/pi
An example of partial sum involving both cosine and sine terms::
sage: f = piecewise([((-1,0), 0), ((0,1/2), 2*x),
....: ((1/2,1), 2*(1-x))])
sage: f.fourier_series_partial_sum(5)
-2*cos(2*pi*x)/pi^2 + 4/25*sin(5*pi*x)/pi^2
- 4/9*sin(3*pi*x)/pi^2 + 4*sin(pi*x)/pi^2 + 1/4
"""
from sage.all import pi, sin, cos, srange
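        # Assemble S_N(x) = a0/2 + sum_{n=1}^{N} [a_n*cos(n*pi*x/L) +
        # b_n*sin(n*pi*x/L)] from the coefficient methods above.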
if not L:
L = (self.domain().sup() - self.domain().inf()) / 2
x = self.default_variable()
a0 = self.fourier_series_cosine_coefficient(0, L)
result = a0/2 + sum([(self.fourier_series_cosine_coefficient(n, L)*cos(n*pi*x/L) +
self.fourier_series_sine_coefficient(n, L)*sin(n*pi*x/L))
for n in srange(1, N+1)])
return SR(result).expand()
def _sympy_(self, parameters, variable):
"""
Convert this piecewise expression to its SymPy equivalent.
EXAMPLES::
sage: ex = piecewise([((0, 1), pi), ([1, 2], x)])
sage: f = ex._sympy_(); f
Piecewise((pi, (x > 0) & (x < 1)), (x, (x >= 1) & (x <= 2)))
sage: f.diff()
Piecewise((0, (x > 0) & (x < 1)), (1, (x >= 1) & (x <= 2)))
sage: ex = piecewise([((-100, -2), 1/x), ((1, +oo), cos(x))])
sage: g = ex._sympy_(); g
Piecewise((1/x, (x > -100) & (x < -2)), (cos(x), x > 1))
sage: g.diff()
Piecewise((-1/x**2, (x > -100) & (x < -2)), (-sin(x), x > 1))
"""
from sympy import Piecewise as pw
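        # Each piece maps to an (expression, condition) pair understood by
        # SymPy's Piecewise.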
args = [(func._sympy_(),
domain._sympy_condition_(variable))
for domain, func in parameters]
return pw(*args)
piecewise = PiecewiseFunction()
| 36.942712
| 272
| 0.457816
|
from __future__ import absolute_import, division, print_function
from sage.symbolic.function import BuiltinFunction
from sage.sets.real_set import RealSet
from sage.symbolic.ring import SR
from sage.rings.infinity import minus_infinity, infinity
class PiecewiseFunction(BuiltinFunction):
def __init__(self):
BuiltinFunction.__init__(self, "piecewise",
latex_name="piecewise",
conversions=dict(), nargs=2)
def __call__(self, function_pieces, **kwds):
from types import FunctionType
var = kwds.pop('var', None)
parameters = []
domain_list = []
for piece in function_pieces:
domain, function = piece
if not isinstance(domain, RealSet):
domain = RealSet(domain)
if domain.is_empty():
continue
if isinstance(function, FunctionType):
if var is None:
var = SR.var('x')
if function.__code__.co_argcount == 0:
function = function()
else:
function = function(var)
function = SR(function)
if var is None and len(function.variables()) > 0:
var = function.variables()[0]
parameters.append((domain, function))
domain_list.append(domain)
if not RealSet.are_pairwise_disjoint(*domain_list):
raise ValueError('domains must be pairwise disjoint')
if var is None:
var = self.default_variable()
parameters = SR._force_pyobject(tuple(parameters), recursive=False)
return BuiltinFunction.__call__(self, parameters, var, **kwds)
def _print_(self, parameters, variable):
s = 'piecewise('
args = []
for domain, func in parameters:
args.append('{0}|-->{1} on {2}'.format(str(variable), str(func), str(domain)))
s += ', '.join(args) + '; {0})'.format(str(variable))
return s
def _subs_(self, subs_map, options, parameters, x):
point = subs_map.apply_to(x, 0)
if point == x:
new_params = []
for domain, func in parameters:
new_params.append((domain, subs_map.apply_to(func, 0)))
return piecewise(new_params, var=x)
if ((point.is_numeric() or point.is_constant())
and (point.is_real())):
if hasattr(point, 'pyobject'):
point = point.pyobject()
else:
raise ValueError('substituting the piecewise variable must result in real number')
for domain, func in parameters:
if domain.contains(point):
return subs_map.apply_to(func, 0)
raise ValueError('point {} is not in the domain'.format(point))
@staticmethod
def in_operands(ex):
def is_piecewise(ex):
result = ex.operator() is piecewise
for op in ex.operands():
result = result or is_piecewise(op)
return result
return is_piecewise(ex)
@staticmethod
def simplify(ex):
raise NotImplementedError
class EvaluationMethods(object):
def __pow__(self, parameters, variable, n):
return piecewise(zip(self.domains(),
[ex**n for ex in self.expressions()]),
var=variable)
def expression_at(self, parameters, variable, point):
for domain, func in parameters:
if domain.contains(point):
return func
raise ValueError('point is not in the domain')
which_function = expression_at
def domains(self, parameters, variable):
return tuple(dom for dom, fun in parameters)
def domain(self, parameters, variable):
intervals = []
for domain, func in parameters:
intervals += list(domain)
return RealSet(*intervals)
def __len__(self, parameters, variable):
return len(parameters)
def expressions(self, parameters, variable):
return tuple(fun for dom, fun in parameters)
def items(self, parameters, variable):
for pair in parameters:
yield pair
def __call__(self, parameters, variable, value=None, **kwds):
self = piecewise(parameters, var=variable)
substitution = dict()
for k, v in kwds.items():
substitution[SR.var(k)] = v
if value is not None:
substitution[variable] = value
return self.subs(substitution)
def _fast_float_(self, *args):
raise NotImplementedError
def _fast_callable_(self, parameters, variable, etb):
self = piecewise(parameters, var=variable)
return etb.call(self, variable)
def restriction(self, parameters, variable, restricted_domain):
restricted_domain = RealSet(*restricted_domain)
new_param = []
for domain, func in parameters:
domain = domain.intersection(restricted_domain)
new_param.append((domain, func))
return piecewise(new_param, var=variable)
def extension(self, parameters, variable, extension, extension_domain=None):
self = piecewise(parameters, var=variable)
if extension_domain is None:
extension_domain = self.domain().complement()
ext = ((extension_domain, SR(extension)),)
return piecewise(parameters + ext, var=variable)
def unextend_zero(self, parameters, variable):
result = [(domain, func) for domain,func in parameters
if func != 0]
return piecewise(result, var=variable)
def pieces(self, parameters, variable):
result = []
for domain, func in parameters:
result.append(piecewise([(domain, func)], var=variable))
return tuple(result)
def end_points(self, parameters, variable):
s = set()
for domain, func in parameters:
for interval in domain:
s.add(interval.lower())
s.add(interval.upper())
s.discard(minus_infinity)
s.discard(infinity)
return sorted(s)
def piecewise_add(self, parameters, variable, other):
points = ([minus_infinity] +
sorted(set(self.end_points() + other.end_points())) +
[infinity])
domain = []
funcs = []
contains_lower = False
contains_upper = False
for i in range(len(points)-1):
try:
contains_lower = (self.domain().contains(points[i]) or
other.domain().contains(points[i])) and not contains_upper
contains_upper = (self.domain().contains(points[i+1]) or
other.domain().contains(points[i+1]))
if contains_lower:
if contains_upper:
rs = RealSet.closed(points[i],points[i+1])
else:
rs = RealSet.closed_open(points[i],points[i+1])
else:
if contains_upper:
rs = RealSet.open_closed(points[i],points[i+1])
else:
rs = RealSet.open(points[i],points[i+1])
point = (points[i+1] + points[i])/2
except ValueError:
if points[i] == minus_infinity and points[i+1] == infinity:
rs = RealSet.open(minus_infinity, infinity)
point = 0
elif points[i] == minus_infinity:
if contains_lower:
rs = RealSet.unbounded_below_closed(points[i+1])
else:
rs = RealSet.unbounded_below_open(points[i+1])
point = points[i+1]-1
elif points[i+1] == infinity:
if contains_upper:
rs = RealSet.unbounded_above_closed(points[i])
else:
rs = RealSet.unbounded_above_open(points[i])
point = points[i]+1
else:
raise
try:
ex1 = self.expression_at(point)
except ValueError:
ex1 = 0
try:
ex2 = other.expression_at(point)
except ValueError:
ex2 = 0
ex = ex1 + ex2
if i>0 and funcs[-1] == ex:
rs += domain[-1]
domain[-1] = rs
else:
domain += rs
funcs.append(ex)
return piecewise(zip(domain, funcs))
def integral(self, parameters, variable, x=None, a=None, b=None, definite=False):
if a is not None and b is not None:
F = self.integral(x)
return F(b) - F(a)
if a is not None or b is not None:
raise TypeError('only one endpoint given')
area = 0
new_pieces = []
if x is None:
x = self.default_variable()
from sage.symbolic.assumptions import assume, forget
for domain, fun in parameters:
for interval in domain:
start = interval.lower()
end = interval.upper()
if start == -infinity and not definite:
fun_integrated = fun.integral(x, end, x)
else:
try:
assume(start < x)
except ValueError:
pass
fun_integrated = fun.integral(x, start, x) + area
forget(start < x)
if definite or end != infinity:
area += fun.integral(x, start, end)
new_pieces.append([interval, SR(fun_integrated).function(x)])
if definite:
return SR(area)
else:
return piecewise(new_pieces)
def critical_points(self, parameters, variable):
from sage.calculus.calculus import maxima
x = self.default_variable()
crit_pts = []
for domain, f in parameters:
for interval in domain:
a = interval.lower()
b = interval.upper()
for root in maxima.allroots(SR(f).diff(x)==0):
root = float(root.rhs())
if a < root < b:
crit_pts.append(root)
return crit_pts
def convolution(self, parameters, variable, other):
from sage.symbolic.integration.integral import definite_integral
f = self
g = other
if len(f.end_points())*len(g.end_points()) == 0:
raise ValueError('one of the piecewise functions is nowhere defined')
tt = SR.var('tt')
uu = SR.var('uu')
fd, f0 = parameters[0]
gd, g0 = next(other.items())
if len(f)==1 and len(g)==1:
f = f.unextend_zero()
g = g.unextend_zero()
a1 = fd[0].lower()
a2 = fd[0].upper()
b1 = gd[0].lower()
b2 = gd[0].upper()
i1 = f0.subs({variable: uu})
i2 = g0.subs({variable: tt-uu})
fg1 = definite_integral(i1*i2, uu, a1, tt-b1).subs(tt = variable)
fg2 = definite_integral(i1*i2, uu, tt-b2, tt-b1).subs(tt = variable)
fg3 = definite_integral(i1*i2, uu, tt-b2, a2).subs(tt = variable)
fg4 = definite_integral(i1*i2, uu, a1, a2).subs(tt = variable)
if a1-b1<a2-b2:
if a2+b1!=a1+b2:
h = piecewise([[(a1+b1,a1+b2),fg1],[(a1+b2,a2+b1),fg2],[(a2+b1,a2+b2),fg3]])
else:
h = piecewise([[(a1+b1,a1+b2),fg1],[(a1+b2,a2+b2),fg3]])
else:
if a1+b2!=a2+b1:
h = piecewise([[(a1+b1,a2+b1),fg1],[(a2+b1,a1+b2),fg4],[(a1+b2,a2+b2),fg3]])
else:
h = piecewise([[(a1+b1,a2+b1),fg1],[(a2+b1,a2+b2),fg3]])
return (piecewise([[(minus_infinity,infinity),0]]).piecewise_add(h)).unextend_zero()
if len(f)>1 or len(g)>1:
z = piecewise([[(0,0),0]])
for fpiece in f.pieces():
for gpiece in g.pieces():
h = gpiece.convolution(fpiece)
z = z.piecewise_add(h)
return z.unextend_zero()
def trapezoid(self, parameters, variable, N):
def func(x0, x1):
f0, f1 = self(x0), self(x1)
return [[(x0,x1), f0 + (f1-f0) * (x1-x0)**(-1)
* (self.default_variable()-x0)]]
rsum = []
for domain, f in parameters:
for interval in domain:
a = interval.lower()
b = interval.upper()
h = (b-a)/N
for i in range(N):
x0 = a+i*h
x1 = a+(i+1)*h
rsum += func(x0, x1)
return piecewise(rsum)
def laplace(self, parameters, variable, x='x', s='t'):
from sage.all import assume, exp, forget
x = SR.var(x)
s = SR.var(s)
assume(s>0)
result = 0
for domain, f in parameters:
for interval in domain:
a = interval.lower()
b = interval.upper()
result += (SR(f)*exp(-s*x)).integral(x,a,b)
forget(s>0)
return result
def fourier_series_cosine_coefficient(self, parameters,
variable, n, L=None):
from sage.all import cos, pi
L0 = (self.domain().sup() - self.domain().inf()) / 2
if not L:
L = L0
else:
m = L0 / L
if not (m.is_integer() and m > 0):
raise ValueError("the width of the domain of " +
"{} is not a multiple ".format(self) +
"of the given period")
x = SR.var('x')
result = 0
for domain, f in parameters:
for interval in domain:
a = interval.lower()
b = interval.upper()
result += (f*cos(pi*x*n/L)).integrate(x, a, b)
return SR(result/L0).simplify_trig()
def fourier_series_sine_coefficient(self, parameters, variable,
n, L=None):
from sage.all import sin, pi
L0 = (self.domain().sup() - self.domain().inf()) / 2
if not L:
L = L0
else:
m = L0 / L
if not (m.is_integer() and m > 0):
raise ValueError("the width of the domain of " +
"{} is not a multiple ".format(self) +
"of the given period")
x = SR.var('x')
result = 0
for domain, f in parameters:
for interval in domain:
a = interval.lower()
b = interval.upper()
result += (f*sin(pi*x*n/L)).integrate(x, a, b)
return SR(result/L0).simplify_trig()
def fourier_series_partial_sum(self, parameters, variable, N,
L=None):
from sage.all import pi, sin, cos, srange
if not L:
L = (self.domain().sup() - self.domain().inf()) / 2
x = self.default_variable()
a0 = self.fourier_series_cosine_coefficient(0, L)
result = a0/2 + sum([(self.fourier_series_cosine_coefficient(n, L)*cos(n*pi*x/L) +
self.fourier_series_sine_coefficient(n, L)*sin(n*pi*x/L))
for n in srange(1, N+1)])
return SR(result).expand()
def _sympy_(self, parameters, variable):
from sympy import Piecewise as pw
args = [(func._sympy_(),
domain._sympy_condition_(variable))
for domain, func in parameters]
return pw(*args)
piecewise = PiecewiseFunction()
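# A minimal usage sketch of the interface above (assumption: run inside a
# Sage session, where SR and the symbolic machinery are available; the
# function below is illustrative, not part of the original file):
#
#     x = SR.var('x')
#     f = piecewise([[(0, 1), x], [(1, 2), 2 - x]])
#     f.integral(x, definite=True)       # definite integral over the support
#     f.fourier_series_partial_sum(3)    # low-order Fourier approximation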
| true
| true
|
790b40cd1d4b459ce34cb5c965af23b74dee9a12
| 444
|
py
|
Python
|
accounts/migrations/0004_auto_20201019_1200.py
|
Dev-Mehta/AskaDev
|
4514383cb1f94178e8082f0b710c7efbdd3225a7
|
[
"MIT"
] | 7
|
2020-08-26T12:32:50.000Z
|
2020-09-20T09:17:12.000Z
|
accounts/migrations/0004_auto_20201019_1200.py
|
Dev-Mehta/AskaDev
|
4514383cb1f94178e8082f0b710c7efbdd3225a7
|
[
"MIT"
] | null | null | null |
accounts/migrations/0004_auto_20201019_1200.py
|
Dev-Mehta/AskaDev
|
4514383cb1f94178e8082f0b710c7efbdd3225a7
|
[
"MIT"
] | 3
|
2020-08-27T06:06:43.000Z
|
2020-10-10T15:53:26.000Z
|
# Generated by Django 3.0 on 2020-10-19 06:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0003_auto_20200922_1738'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='id',
field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]
| 23.368421
| 108
| 0.628378
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0003_auto_20200922_1738'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='id',
field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]
| true
| true
|
790b41b3dd8834829b1b5ea6c01ae8d77974220d
| 25,381
|
py
|
Python
|
elasticsearch_dsl/search.py
|
cfpb/elasticsearch-dsl-py
|
8abbeca9c000074eb1d627272790a97233848f8e
|
[
"Apache-2.0"
] | null | null | null |
elasticsearch_dsl/search.py
|
cfpb/elasticsearch-dsl-py
|
8abbeca9c000074eb1d627272790a97233848f8e
|
[
"Apache-2.0"
] | null | null | null |
elasticsearch_dsl/search.py
|
cfpb/elasticsearch-dsl-py
|
8abbeca9c000074eb1d627272790a97233848f8e
|
[
"Apache-2.0"
] | 1
|
2020-10-01T14:52:27.000Z
|
2020-10-01T14:52:27.000Z
|
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import copy
try:
import collections.abc as collections_abc # only works on python 3.3+
except ImportError:
import collections as collections_abc
from six import iteritems, string_types
from elasticsearch7.helpers import scan
from elasticsearch7.exceptions import TransportError
from .query import Q, Bool
from .aggs import A, AggBase
from .utils import DslBase, AttrDict
from .response import Response, Hit
from .connections import get_connection
from .exceptions import IllegalOperation
class QueryProxy(object):
"""
Simple proxy around DSL objects (queries) that can be called
(to add query/post_filter) and also allows attribute access which is proxied to
the wrapped query.
"""
def __init__(self, search, attr_name):
self._search = search
self._proxied = None
self._attr_name = attr_name
def __nonzero__(self):
return self._proxied is not None
__bool__ = __nonzero__
def __call__(self, *args, **kwargs):
s = self._search._clone()
# we cannot use self._proxied since we just cloned self._search and
# need to access the new self on the clone
proxied = getattr(s, self._attr_name)
if proxied._proxied is None:
proxied._proxied = Q(*args, **kwargs)
else:
proxied._proxied &= Q(*args, **kwargs)
# always return search to be chainable
return s
def __getattr__(self, attr_name):
return getattr(self._proxied, attr_name)
def __setattr__(self, attr_name, value):
if not attr_name.startswith("_"):
self._proxied = Q(self._proxied.to_dict())
setattr(self._proxied, attr_name, value)
super(QueryProxy, self).__setattr__(attr_name, value)
def __getstate__(self):
return self._search, self._proxied, self._attr_name
def __setstate__(self, state):
self._search, self._proxied, self._attr_name = state
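# A minimal sketch of the proxy semantics above (hypothetical field names):
#
#     s = Search()
#     s = s.query('match', title='python')    # first call sets the query
#     s = s.query('match', body='search')     # later calls are AND-ed (&) in
#     s.to_dict()  # -> {'query': {'bool': {'must': [..., ...]}}}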
class ProxyDescriptor(object):
"""
Simple descriptor to enable setting of queries and filters as:
s = Search()
s.query = Q(...)
"""
def __init__(self, name):
self._attr_name = "_%s_proxy" % name
def __get__(self, instance, owner):
return getattr(instance, self._attr_name)
def __set__(self, instance, value):
proxy = getattr(instance, self._attr_name)
proxy._proxied = Q(value)
class AggsProxy(AggBase, DslBase):
name = "aggs"
def __init__(self, search):
self._base = self
self._search = search
self._params = {"aggs": {}}
def to_dict(self):
return super(AggsProxy, self).to_dict().get("aggs", {})
class Request(object):
def __init__(self, using="default", index=None, doc_type=None, extra=None):
self._using = using
self._index = None
if isinstance(index, (tuple, list)):
self._index = list(index)
elif index:
self._index = [index]
self._doc_type = []
self._doc_type_map = {}
if isinstance(doc_type, (tuple, list)):
self._doc_type.extend(doc_type)
elif isinstance(doc_type, collections_abc.Mapping):
self._doc_type.extend(doc_type.keys())
self._doc_type_map.update(doc_type)
elif doc_type:
self._doc_type.append(doc_type)
self._params = {}
self._extra = extra or {}
def __eq__(self, other):
return (
isinstance(other, Request)
and other._params == self._params
and other._index == self._index
and other._doc_type == self._doc_type
and other.to_dict() == self.to_dict()
)
def __copy__(self):
return self._clone()
def params(self, **kwargs):
"""
Specify query params to be used when executing the search. All the
keyword arguments will override the current values. See
https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.search
for all available parameters.
Example::
s = Search()
s = s.params(routing='user-1', preference='local')
"""
s = self._clone()
s._params.update(kwargs)
return s
def index(self, *index):
"""
        Set the index for the search. If called without arguments it will
        remove all index information.
        Example::
s = Search()
s = s.index('twitter-2015.01.01', 'twitter-2015.01.02')
s = s.index(['twitter-2015.01.01', 'twitter-2015.01.02'])
"""
# .index() resets
s = self._clone()
if not index:
s._index = None
else:
indexes = []
for i in index:
if isinstance(i, string_types):
indexes.append(i)
elif isinstance(i, list):
indexes += i
elif isinstance(i, tuple):
indexes += list(i)
s._index = (self._index or []) + indexes
return s
def _resolve_field(self, path):
for dt in self._doc_type:
if not hasattr(dt, "_index"):
continue
field = dt._index.resolve_field(path)
if field is not None:
return field
def _resolve_nested(self, hit, parent_class=None):
doc_class = Hit
nested_path = []
nesting = hit["_nested"]
while nesting and "field" in nesting:
nested_path.append(nesting["field"])
nesting = nesting.get("_nested")
nested_path = ".".join(nested_path)
if hasattr(parent_class, "_index"):
nested_field = parent_class._index.resolve_field(nested_path)
else:
nested_field = self._resolve_field(nested_path)
if nested_field is not None:
return nested_field._doc_class
return doc_class
def _get_result(self, hit, parent_class=None):
doc_class = Hit
dt = hit.get("_type")
if "_nested" in hit:
doc_class = self._resolve_nested(hit, parent_class)
elif dt in self._doc_type_map:
doc_class = self._doc_type_map[dt]
else:
for doc_type in self._doc_type:
if hasattr(doc_type, "_matches") and doc_type._matches(hit):
doc_class = doc_type
break
for t in hit.get("inner_hits", ()):
hit["inner_hits"][t] = Response(
self, hit["inner_hits"][t], doc_class=doc_class
)
callback = getattr(doc_class, "from_es", doc_class)
return callback(hit)
def doc_type(self, *doc_type, **kwargs):
"""
Set the type to search through. You can supply a single value or
multiple. Values can be strings or subclasses of ``Document``.
You can also pass in any keyword arguments, mapping a doc_type to a
callback that should be used instead of the Hit class.
If no doc_type is supplied any information stored on the instance will
be erased.
        Example::
s = Search().doc_type('product', 'store', User, custom=my_callback)
"""
# .doc_type() resets
s = self._clone()
if not doc_type and not kwargs:
s._doc_type = []
s._doc_type_map = {}
else:
s._doc_type.extend(doc_type)
s._doc_type.extend(kwargs.keys())
s._doc_type_map.update(kwargs)
return s
def using(self, client):
"""
Associate the search request with an elasticsearch client. A fresh copy
        will be returned with the current instance remaining unchanged.
:arg client: an instance of ``elasticsearch.Elasticsearch`` to use or
an alias to look up in ``elasticsearch_dsl.connections``
"""
s = self._clone()
s._using = client
return s
def extra(self, **kwargs):
"""
Add extra keys to the request body. Mostly here for backwards
compatibility.
"""
s = self._clone()
if "from_" in kwargs:
kwargs["from"] = kwargs.pop("from_")
s._extra.update(kwargs)
return s
def _clone(self):
s = self.__class__(
using=self._using, index=self._index, doc_type=self._doc_type
)
s._doc_type_map = self._doc_type_map.copy()
s._extra = self._extra.copy()
s._params = self._params.copy()
return s
class Search(Request):
query = ProxyDescriptor("query")
post_filter = ProxyDescriptor("post_filter")
def __init__(self, **kwargs):
"""
Search request to elasticsearch.
:arg using: `Elasticsearch` instance to use
:arg index: limit the search to index
:arg doc_type: only query this type.
        All the parameters supplied (or omitted) at creation time can be later
overridden by methods (`using`, `index` and `doc_type` respectively).
"""
super(Search, self).__init__(**kwargs)
self.aggs = AggsProxy(self)
self._sort = []
self._source = None
self._highlight = {}
self._highlight_opts = {}
self._suggest = {}
self._script_fields = {}
self._response_class = Response
self._query_proxy = QueryProxy(self, "query")
self._post_filter_proxy = QueryProxy(self, "post_filter")
def filter(self, *args, **kwargs):
return self.query(Bool(filter=[Q(*args, **kwargs)]))
def exclude(self, *args, **kwargs):
return self.query(Bool(filter=[~Q(*args, **kwargs)]))
def __iter__(self):
"""
Iterate over the hits.
"""
return iter(self.execute())
def __getitem__(self, n):
"""
Support slicing the `Search` instance for pagination.
Slicing equates to the from/size parameters. E.g.::
s = Search().query(...)[0:25]
is equivalent to::
s = Search().query(...).extra(from_=0, size=25)
"""
s = self._clone()
if isinstance(n, slice):
# If negative slicing, abort.
if n.start and n.start < 0 or n.stop and n.stop < 0:
raise ValueError("Search does not support negative slicing.")
# Elasticsearch won't get all results so we default to size: 10 if
# stop not given.
s._extra["from"] = n.start or 0
s._extra["size"] = max(
0, n.stop - (n.start or 0) if n.stop is not None else 10
)
return s
else: # This is an index lookup, equivalent to slicing by [n:n+1].
# If negative index, abort.
if n < 0:
raise ValueError("Search does not support negative indexing.")
s._extra["from"] = n
s._extra["size"] = 1
return s
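    # Sketch of the slicing translation above (illustrative):
    #
    #     Search()[10:20].to_dict()  # -> {'from': 10, 'size': 10}
    #     Search()[3].to_dict()      # -> {'from': 3, 'size': 1}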
@classmethod
def from_dict(cls, d):
"""
Construct a new `Search` instance from a raw dict containing the search
body. Useful when migrating from raw dictionaries.
Example::
s = Search.from_dict({
"query": {
"bool": {
"must": [...]
}
},
"aggs": {...}
})
s = s.filter('term', published=True)
"""
s = cls()
s.update_from_dict(d)
return s
def _clone(self):
"""
Return a clone of the current search request. Performs a shallow copy
of all the underlying objects. Used internally by most state modifying
APIs.
"""
s = super(Search, self)._clone()
s._response_class = self._response_class
s._sort = self._sort[:]
s._source = copy.copy(self._source) if self._source is not None else None
s._highlight = self._highlight.copy()
s._highlight_opts = self._highlight_opts.copy()
s._suggest = self._suggest.copy()
s._script_fields = self._script_fields.copy()
for x in ("query", "post_filter"):
getattr(s, x)._proxied = getattr(self, x)._proxied
# copy top-level bucket definitions
if self.aggs._params.get("aggs"):
s.aggs._params = {"aggs": self.aggs._params["aggs"].copy()}
return s
def response_class(self, cls):
"""
Override the default wrapper used for the response.
"""
s = self._clone()
s._response_class = cls
return s
def update_from_dict(self, d):
"""
Apply options from a serialized body to the current instance. Modifies
the object in-place. Used mostly by ``from_dict``.
"""
d = d.copy()
if "query" in d:
self.query._proxied = Q(d.pop("query"))
if "post_filter" in d:
self.post_filter._proxied = Q(d.pop("post_filter"))
aggs = d.pop("aggs", d.pop("aggregations", {}))
if aggs:
self.aggs._params = {
"aggs": {name: A(value) for (name, value) in iteritems(aggs)}
}
if "sort" in d:
self._sort = d.pop("sort")
if "_source" in d:
self._source = d.pop("_source")
if "highlight" in d:
high = d.pop("highlight").copy()
self._highlight = high.pop("fields")
self._highlight_opts = high
if "suggest" in d:
self._suggest = d.pop("suggest")
if "text" in self._suggest:
text = self._suggest.pop("text")
for s in self._suggest.values():
s.setdefault("text", text)
if "script_fields" in d:
self._script_fields = d.pop("script_fields")
self._extra.update(d)
return self
def script_fields(self, **kwargs):
"""
Define script fields to be calculated on hits. See
https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-script-fields.html
for more details.
Example::
s = Search()
s = s.script_fields(times_two="doc['field'].value * 2")
s = s.script_fields(
times_three={
'script': {
'inline': "doc['field'].value * params.n",
'params': {'n': 3}
}
}
)
"""
s = self._clone()
for name in kwargs:
if isinstance(kwargs[name], string_types):
kwargs[name] = {"script": kwargs[name]}
s._script_fields.update(kwargs)
return s
def source(self, fields=None, **kwargs):
"""
Selectively control how the _source field is returned.
:arg fields: wildcard string, array of wildcards, or dictionary of includes and excludes
If ``fields`` is None, the entire document will be returned for
each hit. If fields is a dictionary with keys of 'includes' and/or
'excludes' the fields will be either included or excluded appropriately.
Calling this multiple times with the same named parameter will override the
previous values with the new ones.
Example::
s = Search()
s = s.source(includes=['obj1.*'], excludes=["*.description"])
s = Search()
s = s.source(includes=['obj1.*']).source(excludes=["*.description"])
"""
s = self._clone()
if fields and kwargs:
raise ValueError("You cannot specify fields and kwargs at the same time.")
if fields is not None:
s._source = fields
return s
if kwargs and not isinstance(s._source, dict):
s._source = {}
for key, value in kwargs.items():
if value is None:
try:
del s._source[key]
except KeyError:
pass
else:
s._source[key] = value
return s
def sort(self, *keys):
"""
Add sorting information to the search request. If called without
arguments it will remove all sort requirements. Otherwise it will
replace them. Acceptable arguments are::
'some.field'
'-some.other.field'
{'different.field': {'any': 'dict'}}
so for example::
s = Search().sort(
'category',
'-title',
{"price" : {"order" : "asc", "mode" : "avg"}}
)
will sort by ``category``, ``title`` (in descending order) and
``price`` in ascending order using the ``avg`` mode.
The API returns a copy of the Search object and can thus be chained.
"""
s = self._clone()
s._sort = []
for k in keys:
if isinstance(k, string_types) and k.startswith("-"):
if k[1:] == "_score":
raise IllegalOperation("Sorting by `-_score` is not allowed.")
k = {k[1:]: {"order": "desc"}}
s._sort.append(k)
return s
def highlight_options(self, **kwargs):
"""
Update the global highlighting options used for this request. For
example::
s = Search()
s = s.highlight_options(order='score')
"""
s = self._clone()
s._highlight_opts.update(kwargs)
return s
def highlight(self, *fields, **kwargs):
"""
Request highlighting of some fields. All keyword arguments passed in will be
used as parameters for all the fields in the ``fields`` parameter. Example::
Search().highlight('title', 'body', fragment_size=50)
will produce the equivalent of::
{
"highlight": {
"fields": {
"body": {"fragment_size": 50},
"title": {"fragment_size": 50}
}
}
}
If you want to have different options for different fields
you can call ``highlight`` twice::
Search().highlight('title', fragment_size=50).highlight('body', fragment_size=100)
which will produce::
{
"highlight": {
"fields": {
"body": {"fragment_size": 100},
"title": {"fragment_size": 50}
}
}
}
"""
s = self._clone()
for f in fields:
s._highlight[f] = kwargs
return s
def suggest(self, name, text, **kwargs):
"""
Add a suggestions request to the search.
:arg name: name of the suggestion
:arg text: text to suggest on
All keyword arguments will be added to the suggestions body. For example::
s = Search()
s = s.suggest('suggestion-1', 'Elasticsearch', term={'field': 'body'})
"""
s = self._clone()
s._suggest[name] = {"text": text}
s._suggest[name].update(kwargs)
return s
def to_dict(self, count=False, **kwargs):
"""
Serialize the search into the dictionary that will be sent over as the
request's body.
:arg count: a flag to specify if we are interested in a body for count -
no aggregations, no pagination bounds etc.
All additional keyword arguments will be included into the dictionary.
"""
d = {}
if self.query:
d["query"] = self.query.to_dict()
# count request doesn't care for sorting and other things
if not count:
if self.post_filter:
d["post_filter"] = self.post_filter.to_dict()
if self.aggs.aggs:
d.update(self.aggs.to_dict())
if self._sort:
d["sort"] = self._sort
d.update(self._extra)
if self._source not in (None, {}):
d["_source"] = self._source
if self._highlight:
d["highlight"] = {"fields": self._highlight}
d["highlight"].update(self._highlight_opts)
if self._suggest:
d["suggest"] = self._suggest
if self._script_fields:
d["script_fields"] = self._script_fields
d.update(kwargs)
return d
def count(self):
"""
Return the number of hits matching the query and filters. Note that
only the actual number is returned.
"""
if hasattr(self, "_response") and self._response.hits.total.relation == "eq":
return self._response.hits.total.value
es = get_connection(self._using)
d = self.to_dict(count=True)
# TODO: failed shards detection
return es.count(index=self._index, body=d, **self._params)["count"]
def execute(self, ignore_cache=False):
"""
Execute the search and return an instance of ``Response`` wrapping all
the data.
:arg ignore_cache: if set to ``True``, consecutive calls will hit
        ES, while the cached result will be ignored. Defaults to ``False``
"""
if ignore_cache or not hasattr(self, "_response"):
es = get_connection(self._using)
self._response = self._response_class(
self, es.search(index=self._index, body=self.to_dict(), **self._params)
)
return self._response
def scan(self):
"""
Turn the search into a scan search and return a generator that will
iterate over all the documents matching the query.
        Use the ``params`` method to specify any additional arguments you wish to
pass to the underlying ``scan`` helper from ``elasticsearch-py`` -
https://elasticsearch-py.readthedocs.io/en/master/helpers.html#elasticsearch.helpers.scan
"""
es = get_connection(self._using)
for hit in scan(es, query=self.to_dict(), index=self._index, **self._params):
yield self._get_result(hit)
def delete(self):
"""
delete() executes the query by delegating to delete_by_query()
"""
es = get_connection(self._using)
return AttrDict(
es.delete_by_query(index=self._index, body=self.to_dict(), **self._params)
)
class MultiSearch(Request):
"""
Combine multiple :class:`~elasticsearch_dsl.Search` objects into a single
request.
"""
def __init__(self, **kwargs):
super(MultiSearch, self).__init__(**kwargs)
self._searches = []
def __getitem__(self, key):
return self._searches[key]
def __iter__(self):
return iter(self._searches)
def _clone(self):
ms = super(MultiSearch, self)._clone()
ms._searches = self._searches[:]
return ms
def add(self, search):
"""
Adds a new :class:`~elasticsearch_dsl.Search` object to the request::
ms = MultiSearch(index='my-index')
ms = ms.add(Search(doc_type=Category).filter('term', category='python'))
ms = ms.add(Search(doc_type=Blog))
"""
ms = self._clone()
ms._searches.append(search)
return ms
def to_dict(self):
out = []
for s in self._searches:
meta = {}
if s._index:
meta["index"] = s._index
meta.update(s._params)
out.append(meta)
out.append(s.to_dict())
return out
def execute(self, ignore_cache=False, raise_on_error=True):
"""
Execute the multi search request and return a list of search results.
"""
if ignore_cache or not hasattr(self, "_response"):
es = get_connection(self._using)
responses = es.msearch(
index=self._index, body=self.to_dict(), **self._params
)
out = []
for s, r in zip(self._searches, responses["responses"]):
if r.get("error", False):
if raise_on_error:
raise TransportError("N/A", r["error"]["type"], r["error"])
r = None
else:
r = Response(s, r)
out.append(r)
self._response = out
return self._response
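# A minimal usage sketch (hypothetical index and field names; assumes a
# default connection registered via elasticsearch_dsl.connections):
#
#     ms = MultiSearch(index='blog')
#     ms = ms.add(Search().filter('term', category='python'))
#     ms = ms.add(Search().filter('term', category='search'))
#     responses = ms.execute()  # one Response (or None) per added Search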
| 31.104167
| 105
| 0.560065
|
import copy
try:
import collections.abc as collections_abc
except ImportError:
import collections as collections_abc
from six import iteritems, string_types
from elasticsearch7.helpers import scan
from elasticsearch7.exceptions import TransportError
from .query import Q, Bool
from .aggs import A, AggBase
from .utils import DslBase, AttrDict
from .response import Response, Hit
from .connections import get_connection
from .exceptions import IllegalOperation
class QueryProxy(object):
def __init__(self, search, attr_name):
self._search = search
self._proxied = None
self._attr_name = attr_name
def __nonzero__(self):
return self._proxied is not None
__bool__ = __nonzero__
def __call__(self, *args, **kwargs):
s = self._search._clone()
proxied = getattr(s, self._attr_name)
if proxied._proxied is None:
proxied._proxied = Q(*args, **kwargs)
else:
proxied._proxied &= Q(*args, **kwargs)
return s
def __getattr__(self, attr_name):
return getattr(self._proxied, attr_name)
def __setattr__(self, attr_name, value):
if not attr_name.startswith("_"):
self._proxied = Q(self._proxied.to_dict())
setattr(self._proxied, attr_name, value)
super(QueryProxy, self).__setattr__(attr_name, value)
def __getstate__(self):
return self._search, self._proxied, self._attr_name
def __setstate__(self, state):
self._search, self._proxied, self._attr_name = state
class ProxyDescriptor(object):
def __init__(self, name):
self._attr_name = "_%s_proxy" % name
def __get__(self, instance, owner):
return getattr(instance, self._attr_name)
def __set__(self, instance, value):
proxy = getattr(instance, self._attr_name)
proxy._proxied = Q(value)
class AggsProxy(AggBase, DslBase):
name = "aggs"
def __init__(self, search):
self._base = self
self._search = search
self._params = {"aggs": {}}
def to_dict(self):
return super(AggsProxy, self).to_dict().get("aggs", {})
class Request(object):
def __init__(self, using="default", index=None, doc_type=None, extra=None):
self._using = using
self._index = None
if isinstance(index, (tuple, list)):
self._index = list(index)
elif index:
self._index = [index]
self._doc_type = []
self._doc_type_map = {}
if isinstance(doc_type, (tuple, list)):
self._doc_type.extend(doc_type)
elif isinstance(doc_type, collections_abc.Mapping):
self._doc_type.extend(doc_type.keys())
self._doc_type_map.update(doc_type)
elif doc_type:
self._doc_type.append(doc_type)
self._params = {}
self._extra = extra or {}
def __eq__(self, other):
return (
isinstance(other, Request)
and other._params == self._params
and other._index == self._index
and other._doc_type == self._doc_type
and other.to_dict() == self.to_dict()
)
def __copy__(self):
return self._clone()
def params(self, **kwargs):
s = self._clone()
s._params.update(kwargs)
return s
def index(self, *index):
s = self._clone()
if not index:
s._index = None
else:
indexes = []
for i in index:
if isinstance(i, string_types):
indexes.append(i)
elif isinstance(i, list):
indexes += i
elif isinstance(i, tuple):
indexes += list(i)
s._index = (self._index or []) + indexes
return s
def _resolve_field(self, path):
for dt in self._doc_type:
if not hasattr(dt, "_index"):
continue
field = dt._index.resolve_field(path)
if field is not None:
return field
def _resolve_nested(self, hit, parent_class=None):
doc_class = Hit
nested_path = []
nesting = hit["_nested"]
while nesting and "field" in nesting:
nested_path.append(nesting["field"])
nesting = nesting.get("_nested")
nested_path = ".".join(nested_path)
if hasattr(parent_class, "_index"):
nested_field = parent_class._index.resolve_field(nested_path)
else:
nested_field = self._resolve_field(nested_path)
if nested_field is not None:
return nested_field._doc_class
return doc_class
def _get_result(self, hit, parent_class=None):
doc_class = Hit
dt = hit.get("_type")
if "_nested" in hit:
doc_class = self._resolve_nested(hit, parent_class)
elif dt in self._doc_type_map:
doc_class = self._doc_type_map[dt]
else:
for doc_type in self._doc_type:
if hasattr(doc_type, "_matches") and doc_type._matches(hit):
doc_class = doc_type
break
for t in hit.get("inner_hits", ()):
hit["inner_hits"][t] = Response(
self, hit["inner_hits"][t], doc_class=doc_class
)
callback = getattr(doc_class, "from_es", doc_class)
return callback(hit)
def doc_type(self, *doc_type, **kwargs):
s = self._clone()
if not doc_type and not kwargs:
s._doc_type = []
s._doc_type_map = {}
else:
s._doc_type.extend(doc_type)
s._doc_type.extend(kwargs.keys())
s._doc_type_map.update(kwargs)
return s
def using(self, client):
s = self._clone()
s._using = client
return s
def extra(self, **kwargs):
s = self._clone()
if "from_" in kwargs:
kwargs["from"] = kwargs.pop("from_")
s._extra.update(kwargs)
return s
def _clone(self):
s = self.__class__(
using=self._using, index=self._index, doc_type=self._doc_type
)
s._doc_type_map = self._doc_type_map.copy()
s._extra = self._extra.copy()
s._params = self._params.copy()
return s
class Search(Request):
query = ProxyDescriptor("query")
post_filter = ProxyDescriptor("post_filter")
def __init__(self, **kwargs):
super(Search, self).__init__(**kwargs)
self.aggs = AggsProxy(self)
self._sort = []
self._source = None
self._highlight = {}
self._highlight_opts = {}
self._suggest = {}
self._script_fields = {}
self._response_class = Response
self._query_proxy = QueryProxy(self, "query")
self._post_filter_proxy = QueryProxy(self, "post_filter")
def filter(self, *args, **kwargs):
return self.query(Bool(filter=[Q(*args, **kwargs)]))
def exclude(self, *args, **kwargs):
return self.query(Bool(filter=[~Q(*args, **kwargs)]))
def __iter__(self):
return iter(self.execute())
def __getitem__(self, n):
s = self._clone()
if isinstance(n, slice):
if n.start and n.start < 0 or n.stop and n.stop < 0:
raise ValueError("Search does not support negative slicing.")
# stop not given.
s._extra["from"] = n.start or 0
s._extra["size"] = max(
0, n.stop - (n.start or 0) if n.stop is not None else 10
)
return s
else: # This is an index lookup, equivalent to slicing by [n:n+1].
# If negative index, abort.
if n < 0:
raise ValueError("Search does not support negative indexing.")
s._extra["from"] = n
s._extra["size"] = 1
return s
@classmethod
def from_dict(cls, d):
s = cls()
s.update_from_dict(d)
return s
def _clone(self):
s = super(Search, self)._clone()
s._response_class = self._response_class
s._sort = self._sort[:]
s._source = copy.copy(self._source) if self._source is not None else None
s._highlight = self._highlight.copy()
s._highlight_opts = self._highlight_opts.copy()
s._suggest = self._suggest.copy()
s._script_fields = self._script_fields.copy()
for x in ("query", "post_filter"):
getattr(s, x)._proxied = getattr(self, x)._proxied
# copy top-level bucket definitions
if self.aggs._params.get("aggs"):
s.aggs._params = {"aggs": self.aggs._params["aggs"].copy()}
return s
def response_class(self, cls):
s = self._clone()
s._response_class = cls
return s
def update_from_dict(self, d):
d = d.copy()
if "query" in d:
self.query._proxied = Q(d.pop("query"))
if "post_filter" in d:
self.post_filter._proxied = Q(d.pop("post_filter"))
aggs = d.pop("aggs", d.pop("aggregations", {}))
if aggs:
self.aggs._params = {
"aggs": {name: A(value) for (name, value) in iteritems(aggs)}
}
if "sort" in d:
self._sort = d.pop("sort")
if "_source" in d:
self._source = d.pop("_source")
if "highlight" in d:
high = d.pop("highlight").copy()
self._highlight = high.pop("fields")
self._highlight_opts = high
if "suggest" in d:
self._suggest = d.pop("suggest")
if "text" in self._suggest:
text = self._suggest.pop("text")
for s in self._suggest.values():
s.setdefault("text", text)
if "script_fields" in d:
self._script_fields = d.pop("script_fields")
self._extra.update(d)
return self
def script_fields(self, **kwargs):
s = self._clone()
for name in kwargs:
if isinstance(kwargs[name], string_types):
kwargs[name] = {"script": kwargs[name]}
s._script_fields.update(kwargs)
return s
def source(self, fields=None, **kwargs):
s = self._clone()
if fields and kwargs:
raise ValueError("You cannot specify fields and kwargs at the same time.")
if fields is not None:
s._source = fields
return s
if kwargs and not isinstance(s._source, dict):
s._source = {}
for key, value in kwargs.items():
if value is None:
try:
del s._source[key]
except KeyError:
pass
else:
s._source[key] = value
return s
def sort(self, *keys):
s = self._clone()
s._sort = []
for k in keys:
if isinstance(k, string_types) and k.startswith("-"):
if k[1:] == "_score":
raise IllegalOperation("Sorting by `-_score` is not allowed.")
k = {k[1:]: {"order": "desc"}}
s._sort.append(k)
return s
def highlight_options(self, **kwargs):
s = self._clone()
s._highlight_opts.update(kwargs)
return s
def highlight(self, *fields, **kwargs):
s = self._clone()
for f in fields:
s._highlight[f] = kwargs
return s
def suggest(self, name, text, **kwargs):
s = self._clone()
s._suggest[name] = {"text": text}
s._suggest[name].update(kwargs)
return s
def to_dict(self, count=False, **kwargs):
d = {}
if self.query:
d["query"] = self.query.to_dict()
# count request doesn't care for sorting and other things
if not count:
if self.post_filter:
d["post_filter"] = self.post_filter.to_dict()
if self.aggs.aggs:
d.update(self.aggs.to_dict())
if self._sort:
d["sort"] = self._sort
d.update(self._extra)
if self._source not in (None, {}):
d["_source"] = self._source
if self._highlight:
d["highlight"] = {"fields": self._highlight}
d["highlight"].update(self._highlight_opts)
if self._suggest:
d["suggest"] = self._suggest
if self._script_fields:
d["script_fields"] = self._script_fields
d.update(kwargs)
return d
def count(self):
if hasattr(self, "_response") and self._response.hits.total.relation == "eq":
return self._response.hits.total.value
es = get_connection(self._using)
d = self.to_dict(count=True)
return es.count(index=self._index, body=d, **self._params)["count"]
def execute(self, ignore_cache=False):
if ignore_cache or not hasattr(self, "_response"):
es = get_connection(self._using)
self._response = self._response_class(
self, es.search(index=self._index, body=self.to_dict(), **self._params)
)
return self._response
def scan(self):
es = get_connection(self._using)
for hit in scan(es, query=self.to_dict(), index=self._index, **self._params):
yield self._get_result(hit)
def delete(self):
es = get_connection(self._using)
return AttrDict(
es.delete_by_query(index=self._index, body=self.to_dict(), **self._params)
)
class MultiSearch(Request):
def __init__(self, **kwargs):
super(MultiSearch, self).__init__(**kwargs)
self._searches = []
def __getitem__(self, key):
return self._searches[key]
def __iter__(self):
return iter(self._searches)
def _clone(self):
ms = super(MultiSearch, self)._clone()
ms._searches = self._searches[:]
return ms
def add(self, search):
ms = self._clone()
ms._searches.append(search)
return ms
def to_dict(self):
out = []
for s in self._searches:
meta = {}
if s._index:
meta["index"] = s._index
meta.update(s._params)
out.append(meta)
out.append(s.to_dict())
return out
def execute(self, ignore_cache=False, raise_on_error=True):
if ignore_cache or not hasattr(self, "_response"):
es = get_connection(self._using)
responses = es.msearch(
index=self._index, body=self.to_dict(), **self._params
)
out = []
for s, r in zip(self._searches, responses["responses"]):
if r.get("error", False):
if raise_on_error:
raise TransportError("N/A", r["error"]["type"], r["error"])
r = None
else:
r = Response(s, r)
out.append(r)
self._response = out
return self._response
| true
| true
|
790b42c839725716a3e3214198a6178998c23ccf
| 7,887
|
py
|
Python
|
examples/wmt/tools/align/extract_bilingual_vocabulary.py
|
godweiyang/ParaGen
|
9665d1244ea38a41fc06b4e0a7f6411985e2221f
|
[
"Apache-2.0"
] | 50
|
2022-01-18T07:25:46.000Z
|
2022-03-14T13:06:18.000Z
|
examples/wmt/tools/align/extract_bilingual_vocabulary.py
|
JiangtaoFeng/ParaGen
|
509334bf16e3674e009bb9dc37ecc38ae3b5c977
|
[
"Apache-2.0"
] | 2
|
2022-01-19T09:36:42.000Z
|
2022-02-23T07:16:02.000Z
|
examples/wmt/tools/align/extract_bilingual_vocabulary.py
|
JiangtaoFeng/ParaGen
|
509334bf16e3674e009bb9dc37ecc38ae3b5c977
|
[
"Apache-2.0"
] | 6
|
2022-01-19T09:28:53.000Z
|
2022-03-10T10:20:08.000Z
|
import argparse
import json
import os
from collections import Counter, defaultdict
from helper import _is_token_alnum
THRESHOLD = 0.01
GAP = 10
def get_full_mapping(src_filename, trg_filename, align_filename,
mapping_filename, reverse_src2trg=False, lowercase=True):
""" Get full mapping give align.
Args:
src_filename:
trg_filename:
align_filename:
mapping_filename:
reverse_src2trg:
lowercase:
Returns:
"""
print('src: {}, trg: {}, align: {}, mapping: {}, reverse: {}'.format(
src_filename, trg_filename, align_filename, mapping_filename,
reverse_src2trg))
src2trg_mapping = defaultdict(lambda: defaultdict(int))
processed_line = 0
with open(src_filename) as fs, open(trg_filename) as ft, open(
align_filename) as fa:
for ls, lt, la in zip(fs, ft, fa):
if lowercase:
ls = ls.lower()
lt = lt.lower()
processed_line += 1
ls_words = ls.split()
lt_words = lt.split()
la_aligns = la.split()
src_pos_counter = Counter()
trg_pos_counter = Counter()
valid_src_pos = set()
valid_trg_pos = set()
for align in la_aligns:
# only consider one-to-one mapping
src_pos, trg_pos = align.split('-')
src_pos = int(src_pos)
trg_pos = int(trg_pos)
# only consider alpha number token
if _is_token_alnum(ls_words[src_pos]):
src_pos_counter[src_pos] += 1
if _is_token_alnum(lt_words[trg_pos]):
trg_pos_counter[trg_pos] += 1
# ignore token that aligned twice
for pos, c in src_pos_counter.items():
if c == 1:
valid_src_pos.add(pos)
for pos, c in trg_pos_counter.items():
if c == 1:
valid_trg_pos.add(pos)
for align in la_aligns:
src_pos, trg_pos = align.split('-')
src_pos = int(src_pos)
trg_pos = int(trg_pos)
if _is_token_alnum(ls_words[src_pos]) and _is_token_alnum(
lt_words[trg_pos]) and (src_pos in valid_src_pos) and (
trg_pos in valid_trg_pos):
if reverse_src2trg:
src2trg_mapping[lt_words[trg_pos]][
ls_words[src_pos]] += 1
else:
src2trg_mapping[ls_words[src_pos]][
lt_words[trg_pos]] += 1
if processed_line % 1000000 == 0:
print('{} done.'.format(processed_line))
with open(mapping_filename, 'w') as fw:
print('dump to {} ...'.format(mapping_filename))
json.dump(src2trg_mapping, fw)
return src2trg_mapping
def refine_dict(full_mapping, clean_dict_filename, threshold, ignore_gap):
""" Clean dictionary based on frequency and gap of frequency.
For example,
{'s1': ['t1': 999, 't2': 199, 't3':1],
's2': ['m1': 2000, 'm2': 100]}
=>
{'s1': ['t1': 999, 't2': 199],
's2': ['m1': 2000]}
Args:
full_mapping:
clean_dict_filename:
threshold:
ignore_gap:
Returns:
"""
print('Refine dict to {}, threshold: {}, ignore_gap: {} ...'.format(
clean_dict_filename, threshold, ignore_gap))
full_mapping = sorted(
full_mapping.items(),
key=lambda x: sum(x[1].values()),
reverse=True)
with open(clean_dict_filename, 'w') as fw:
for idx, src2trg in enumerate(full_mapping):
src = src2trg[0]
trg = sorted(src2trg[1].items(), key=lambda x: x[1], reverse=True)
total_count = sum(c[1] for c in trg)
clean_trg = dict()
p = trg[0][1]
for w, c in trg:
if c / total_count < threshold:
# too rare
break
if (p / c > ignore_gap) and (c / total_count < THRESHOLD * 5):
# large gap
break
p = c
clean_trg.update({w: round(c / total_count, 3)})
fw.write('{}\n'.format(json.dumps({src: clean_trg}, ensure_ascii=False)))
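# Worked illustration of the two pruning rules above, using the docstring's
# numbers (threshold=0.01, ignore_gap=10):
#   's1': {'t1': 999, 't2': 199, 't3': 1} -> 't3' dropped: 1/1199 < 0.01 (too rare)
#   's2': {'m1': 2000, 'm2': 100} -> 'm2' dropped: 2000/100 = 20 > 10 and
#         100/2100 ~= 0.048 < 5 * THRESHOLD (large gap on an already rare tail)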
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Process alignments and do filter')
parser.add_argument('--src_filename',
help='Origin src file name before bsp',
type=str,
required=True)
parser.add_argument('--trg_filename',
help='Origin trg file name before bsp',
type=str,
required=True)
parser.add_argument('--align_filename',
help='align file name by atools',
type=str,
required=True)
parser.add_argument('--dict_filename',
help='clean dict file name',
type=str,
required=True)
parser.add_argument('--threshold',
help='threshold of ignore frequency',
type=float,
default=THRESHOLD)
parser.add_argument('--ignore_gap',
help='gap of ignore frequency',
type=float,
default=GAP)
parser.add_argument(
'--overwrite', dest='overwrite',
action='store_true', help='Overwrite existing output files')
args = parser.parse_args()
if args.overwrite:
print('Overwrite existing file')
src2trg_mapping_filename = '{}.{}'.format(args.align_filename,
'src2trg_mapping')
trg2src_mapping_filename = '{}.{}'.format(args.align_filename,
'trg2src_mapping')
if os.path.isfile(src2trg_mapping_filename) and (not args.overwrite):
print('loading mapping: {}'.format(src2trg_mapping_filename))
with open(src2trg_mapping_filename) as f:
full_src2trg_mapping = json.load(f)
else:
print('creating mapping: {}'.format(src2trg_mapping_filename))
full_src2trg_mapping = get_full_mapping(args.src_filename,
args.trg_filename,
args.align_filename,
src2trg_mapping_filename,
False)
if os.path.isfile(trg2src_mapping_filename) and (not args.overwrite):
print('loading mapping: {}'.format(trg2src_mapping_filename))
with open(trg2src_mapping_filename) as f:
full_trg2src_mapping = json.load(f)
else:
print('creating mapping: {}'.format(trg2src_mapping_filename))
full_trg2src_mapping = get_full_mapping(args.src_filename,
args.trg_filename,
args.align_filename,
trg2src_mapping_filename,
True)
src2trg_clean_dict_filename = '{}.{}'.format(args.dict_filename,
'src2trg')
refine_dict(full_src2trg_mapping, src2trg_clean_dict_filename,
args.threshold, args.ignore_gap)
trg2src_clean_dict_filename = '{}.{}'.format(args.dict_filename,
'trg2src')
refine_dict(full_trg2src_mapping, trg2src_clean_dict_filename,
args.threshold, args.ignore_gap)
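# Example invocation (hypothetical file names; the alignment file is expected
# to hold one list of "i-j" pairs per sentence, as produced by atools):
#
#     python extract_bilingual_vocabulary.py \
#         --src_filename corpus.de --trg_filename corpus.en \
#         --align_filename corpus.align --dict_filename corpus.dict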
| 37.918269
| 85
| 0.52035
|
import argparse
import json
import os
from collections import Counter, defaultdict
from helper import _is_token_alnum
THRESHOLD = 0.01
GAP = 10
def get_full_mapping(src_filename, trg_filename, align_filename,
mapping_filename, reverse_src2trg=False, lowercase=True):
print('src: {}, trg: {}, align: {}, mapping: {}, reverse: {}'.format(
src_filename, trg_filename, align_filename, mapping_filename,
reverse_src2trg))
src2trg_mapping = defaultdict(lambda: defaultdict(int))
processed_line = 0
with open(src_filename) as fs, open(trg_filename) as ft, open(
align_filename) as fa:
for ls, lt, la in zip(fs, ft, fa):
if lowercase:
ls = ls.lower()
lt = lt.lower()
processed_line += 1
ls_words = ls.split()
lt_words = lt.split()
la_aligns = la.split()
src_pos_counter = Counter()
trg_pos_counter = Counter()
valid_src_pos = set()
valid_trg_pos = set()
for align in la_aligns:
src_pos, trg_pos = align.split('-')
src_pos = int(src_pos)
trg_pos = int(trg_pos)
if _is_token_alnum(ls_words[src_pos]):
src_pos_counter[src_pos] += 1
if _is_token_alnum(lt_words[trg_pos]):
trg_pos_counter[trg_pos] += 1
for pos, c in src_pos_counter.items():
if c == 1:
valid_src_pos.add(pos)
for pos, c in trg_pos_counter.items():
if c == 1:
valid_trg_pos.add(pos)
for align in la_aligns:
src_pos, trg_pos = align.split('-')
src_pos = int(src_pos)
trg_pos = int(trg_pos)
if _is_token_alnum(ls_words[src_pos]) and _is_token_alnum(
lt_words[trg_pos]) and (src_pos in valid_src_pos) and (
trg_pos in valid_trg_pos):
if reverse_src2trg:
src2trg_mapping[lt_words[trg_pos]][
ls_words[src_pos]] += 1
else:
src2trg_mapping[ls_words[src_pos]][
lt_words[trg_pos]] += 1
if processed_line % 1000000 == 0:
print('{} done.'.format(processed_line))
with open(mapping_filename, 'w') as fw:
print('dump to {} ...'.format(mapping_filename))
json.dump(src2trg_mapping, fw)
return src2trg_mapping
def refine_dict(full_mapping, clean_dict_filename, threshold, ignore_gap):
print('Refine dict to {}, threshold: {}, ignore_gap: {} ...'.format(
clean_dict_filename, threshold, ignore_gap))
full_mapping = sorted(
full_mapping.items(),
key=lambda x: sum(x[1].values()),
reverse=True)
with open(clean_dict_filename, 'w') as fw:
for idx, src2trg in enumerate(full_mapping):
src = src2trg[0]
trg = sorted(src2trg[1].items(), key=lambda x: x[1], reverse=True)
total_count = sum(c[1] for c in trg)
clean_trg = dict()
p = trg[0][1]
for w, c in trg:
if c / total_count < threshold:
break
if (p / c > ignore_gap) and (c / total_count < THRESHOLD * 5):
break
p = c
clean_trg.update({w: round(c / total_count, 3)})
fw.write('{}\n'.format(json.dumps({src: clean_trg}, ensure_ascii=False)))
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Process alignments and do filter')
parser.add_argument('--src_filename',
help='Origin src file name before bsp',
type=str,
required=True)
parser.add_argument('--trg_filename',
help='Origin trg file name before bsp',
type=str,
required=True)
parser.add_argument('--align_filename',
help='align file name by atools',
type=str,
required=True)
parser.add_argument('--dict_filename',
help='clean dict file name',
type=str,
required=True)
parser.add_argument('--threshold',
help='threshold of ignore frequency',
type=float,
default=THRESHOLD)
parser.add_argument('--ignore_gap',
help='gap of ignore frequency',
type=float,
default=GAP)
parser.add_argument(
'--overwrite', dest='overwrite',
action='store_true', help='Overwrite existing output files')
args = parser.parse_args()
if args.overwrite:
print('Overwrite existing file')
src2trg_mapping_filename = '{}.{}'.format(args.align_filename,
'src2trg_mapping')
trg2src_mapping_filename = '{}.{}'.format(args.align_filename,
'trg2src_mapping')
if os.path.isfile(src2trg_mapping_filename) and (not args.overwrite):
print('loading mapping: {}'.format(src2trg_mapping_filename))
with open(src2trg_mapping_filename) as f:
full_src2trg_mapping = json.load(f)
else:
print('creating mapping: {}'.format(src2trg_mapping_filename))
full_src2trg_mapping = get_full_mapping(args.src_filename,
args.trg_filename,
args.align_filename,
src2trg_mapping_filename,
False)
if os.path.isfile(trg2src_mapping_filename) and (not args.overwrite):
print('loading mapping: {}'.format(trg2src_mapping_filename))
with open(trg2src_mapping_filename) as f:
full_trg2src_mapping = json.load(f)
else:
print('creating mapping: {}'.format(trg2src_mapping_filename))
full_trg2src_mapping = get_full_mapping(args.src_filename,
args.trg_filename,
args.align_filename,
trg2src_mapping_filename,
True)
src2trg_clean_dict_filename = '{}.{}'.format(args.dict_filename,
'src2trg')
refine_dict(full_src2trg_mapping, src2trg_clean_dict_filename,
args.threshold, args.ignore_gap)
trg2src_clean_dict_filename = '{}.{}'.format(args.dict_filename,
'trg2src')
refine_dict(full_trg2src_mapping, trg2src_clean_dict_filename,
args.threshold, args.ignore_gap)
| true
| true
|
790b43070866a446cdd819e9d236dc0f36072554
| 10,302
|
py
|
Python
|
JIT_Baseline/baseline.py
|
ZZR0/ISSTA21-JIT-DP
|
c2916f7c3b1d235ff2858220886d6a7da068bf8a
|
[
"MIT"
] | 14
|
2021-07-12T07:29:57.000Z
|
2022-01-18T07:01:46.000Z
|
JIT_Baseline/baseline.py
|
ZZR0/ISSTA21-JIT-DP
|
c2916f7c3b1d235ff2858220886d6a7da068bf8a
|
[
"MIT"
] | null | null | null |
JIT_Baseline/baseline.py
|
ZZR0/ISSTA21-JIT-DP
|
c2916f7c3b1d235ff2858220886d6a7da068bf8a
|
[
"MIT"
] | 7
|
2021-05-19T21:51:36.000Z
|
2022-03-29T13:57:54.000Z
|
import math
import random
import time
import argparse
from sklearn import preprocessing
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, precision_score, recall_score, roc_curve, auc
import pandas as pd
import numpy as np
import torch.nn as nn
import torch
from LR import LR
from DBN import DBN
parser = argparse.ArgumentParser()
parser.add_argument('-project', type=str,
default='qt')
parser.add_argument('-data', type=str,
default='k')
parser.add_argument('-algorithm', type=str,
default='lr')
parser.add_argument('-drop', type=str,
default='')
parser.add_argument('-only', nargs='+',
default=[])
def evaluation_metrics(y_true, y_pred):
fpr, tpr, thresholds = roc_curve(y_true=y_true, y_score=y_pred, pos_label=1)
auc_ = auc(fpr, tpr)
y_pred = [1 if p >= 0.5 else 0 for p in y_pred]
acc = accuracy_score(y_true=y_true, y_pred=y_pred)
prc = precision_score(y_true=y_true, y_pred=y_pred)
rc = recall_score(y_true=y_true, y_pred=y_pred)
    # f1 = 2 * prc * rc / (prc + rc)
    f1 = 0  # F1 intentionally zeroed; the formula above divides by zero when prc + rc == 0
return acc, prc, rc, f1, auc_
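# Quick sanity check for the metrics above (values verified against sklearn's
# documented ROC example; note f1 is hard-coded to 0 in this pipeline):
#
#     acc, prc, rc, f1, auc_ = evaluation_metrics(
#         y_true=[0, 0, 1, 1], y_pred=[0.1, 0.4, 0.35, 0.8])
#     # auc_ == 0.75; after binarizing at 0.5: acc == 0.75, prc == 1.0, rc == 0.5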
def replace_value_dataframe(df):
df = df.replace({True: 1, False: 0})
df = df.fillna(df.mean())
if args.drop:
df = df.drop(columns=[args.drop])
elif args.only:
df = df[['Unnamed: 0','_id','date','bug','__'] + args.only]
return df.values
def get_features(data):
# return the features of yasu data
return data[:, 5:]
def get_ids(data):
# return the labels of yasu data
return data[:, 1:2].flatten().tolist()
def get_label(data):
data = data[:, 3:4].flatten().tolist()
data = [1 if int(d) > 0 else 0 for d in data]
return data
def load_df_yasu_data(path_data):
data = pd.read_csv(path_data)
data = replace_value_dataframe(df=data)
ids, labels, features = get_ids(data=data), get_label(data=data), get_features(data=data)
indexes = list()
cnt_noexits = 0
for i in range(0, len(ids)):
try:
indexes.append(i)
        except FileNotFoundError:  # effectively dead: list.append cannot raise FileNotFoundError
            print('File commit id does not exist', ids[i], cnt_noexits)
cnt_noexits += 1
ids = [ids[i] for i in indexes]
labels = [labels[i] for i in indexes]
features = features[indexes]
return (ids, np.array(labels), features)
def load_yasu_data(args):
train_path_data = 'data/{}/{}_train.csv'.format(args.project, args.data)
test_path_data = 'data/{}/{}_test.csv'.format(args.project, args.data)
train, test = load_df_yasu_data(train_path_data), load_df_yasu_data(test_path_data)
return train, test
def train_and_evl(data, label, args):
size = int(label.shape[0]*0.2)
auc_ = []
for i in range(5):
idx = size * i
X_e = data[idx:idx+size]
y_e = label[idx:idx+size]
X_t = np.vstack((data[:idx], data[idx+size:]))
y_t = np.hstack((label[:idx], label[idx+size:]))
model = LogisticRegression(max_iter=7000).fit(X_t, y_t)
y_pred = model.predict_proba(X_e)[:, 1]
fpr, tpr, thresholds = roc_curve(y_true=y_e, y_score=y_pred, pos_label=1)
auc_.append(auc(fpr, tpr))
return np.mean(auc_)
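# Note: train_and_evl is a sequential (unshuffled) 5-fold validation over the
# training split; each fold holds out a contiguous 20% slice and the mean
# hold-out AUC is returned.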
def mini_batches_update(X, Y, mini_batch_size=64, seed=0):
m = X.shape[0] # number of training examples
mini_batches = list()
np.random.seed(seed)
# Step 1: No shuffle (X, Y)
shuffled_X, shuffled_Y = X, Y
Y = Y.tolist()
Y_pos = [i for i in range(len(Y)) if Y[i] == 1]
Y_neg = [i for i in range(len(Y)) if Y[i] == 0]
# Step 2: Randomly pick mini_batch_size / 2 from each of positive and negative labels
num_complete_minibatches = int(math.floor(m / float(mini_batch_size))) + 1
for k in range(0, num_complete_minibatches):
indexes = sorted(
random.sample(Y_pos, int(mini_batch_size / 2)) + random.sample(Y_neg, int(mini_batch_size / 2)))
mini_batch_X, mini_batch_Y = shuffled_X[indexes], shuffled_Y[indexes]
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
return mini_batches
def mini_batches(X, Y, mini_batch_size=64, seed=0):
m = X.shape[0] # number of training examples
mini_batches = list()
np.random.seed(seed)
# Step 1: No shuffle (X, Y)
shuffled_X, shuffled_Y = X, Y
# Step 2: Partition (X, Y). Minus the end case.
# number of mini batches of size mini_batch_size in your partitioning
num_complete_minibatches = int(math.floor(m / float(mini_batch_size)))
for k in range(0, num_complete_minibatches):
mini_batch_X = shuffled_X[k * mini_batch_size: k * mini_batch_size + mini_batch_size, :]
if len(Y.shape) == 1:
mini_batch_Y = shuffled_Y[k * mini_batch_size: k * mini_batch_size + mini_batch_size]
else:
mini_batch_Y = shuffled_Y[k * mini_batch_size: k * mini_batch_size + mini_batch_size, :]
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
# Handling the end case (last mini-batch < mini_batch_size)
if m % mini_batch_size != 0:
mini_batch_X = shuffled_X[num_complete_minibatches * mini_batch_size: m, :]
if len(Y.shape) == 1:
mini_batch_Y = shuffled_Y[num_complete_minibatches * mini_batch_size: m]
else:
mini_batch_Y = shuffled_Y[num_complete_minibatches * mini_batch_size: m, :]
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
return mini_batches
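# Partitioning sketch for mini_batches (illustrative): with m = 130 rows and
# mini_batch_size = 64 the loop emits two full batches and the tail branch
# adds one of size 2:
#
#     X, Y = np.zeros((130, 14)), np.zeros(130)
#     [len(b[1]) for b in mini_batches(X, Y)]  # -> [64, 64, 2]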
def DBN_JIT(train_features, train_labels, test_features, test_labels, hidden_units=[20, 12, 12], num_epochs_LR=200):
# training DBN model
#################################################################################################
starttime = time.time()
dbn_model = DBN(visible_units=train_features.shape[1],
hidden_units=hidden_units,
use_gpu=False)
dbn_model.train_static(train_features, train_labels, num_epochs=10)
# Finishing the training DBN model
# print('---------------------Finishing the training DBN model---------------------')
# using DBN model to construct features
DBN_train_features, _ = dbn_model.forward(train_features)
DBN_test_features, _ = dbn_model.forward(test_features)
DBN_train_features = DBN_train_features.numpy()
DBN_test_features = DBN_test_features.numpy()
train_features = np.hstack((train_features, DBN_train_features))
test_features = np.hstack((test_features, DBN_test_features))
if len(train_labels.shape) == 1:
num_classes = 1
else:
num_classes = train_labels.shape[1]
# lr_model = LR(input_size=hidden_units, num_classes=num_classes)
lr_model = LR(input_size=train_features.shape[1], num_classes=num_classes)
optimizer = torch.optim.Adam(lr_model.parameters(), lr=0.00001)
steps = 0
batches_test = mini_batches(X=test_features, Y=test_labels)
for epoch in range(1, num_epochs_LR + 1):
# building batches for training model
batches_train = mini_batches_update(X=train_features, Y=train_labels)
for batch in batches_train:
x_batch, y_batch = batch
x_batch, y_batch = torch.tensor(x_batch).float(), torch.tensor(y_batch).float()
optimizer.zero_grad()
predict = lr_model.forward(x_batch)
loss = nn.BCELoss()
loss = loss(predict, y_batch)
loss.backward()
optimizer.step()
# steps += 1
# if steps % 100 == 0:
# print('\rEpoch: {} step: {} - loss: {:.6f}'.format(epoch, steps, loss.item()))
endtime = time.time()
dtime = endtime - starttime
print("Train Time: %.8s s" % dtime) #显示到微秒
starttime = time.time()
y_pred, lables = lr_model.predict(data=batches_test)
endtime = time.time()
dtime = endtime - starttime
print("Eval Time: %.8s s" % dtime) #显示到微秒
return y_pred
def baseline_algorithm(train, test, algorithm, only=False):
_, y_train, X_train = train
_, y_test, X_test = test
X_train, X_test = preprocessing.scale(X_train), preprocessing.scale(X_test)
acc, prc, rc, f1, auc_ = 0, 0, 0, 0, 0
if algorithm == 'lr':
starttime = time.time()
model = LogisticRegression(max_iter=7000).fit(X_train, y_train)
endtime = time.time()
dtime = endtime - starttime
print("Train Time: %.8s s" % dtime) #显示到微秒
starttime = time.time()
y_pred = model.predict_proba(X_test)[:, 1]
endtime = time.time()
dtime = endtime - starttime
print("Eval Time: %.8s s" % dtime) #显示到微秒
acc, prc, rc, f1, auc_ = evaluation_metrics(y_true=y_test, y_pred=y_pred)
if only and not "cross" in args.data:
auc_ = train_and_evl(X_train, y_train, args)
print('Accuracy: %f -- Precision: %f -- Recall: %f -- F1: %f -- AUC: %f' % (acc, prc, rc, f1, auc_))
elif algorithm =='dbn':
y_pred = DBN_JIT(X_train, y_train, X_test, y_test)
acc, prc, rc, f1, auc_ = evaluation_metrics(y_true=y_test, y_pred=y_pred)
acc, prc, rc, f1 = 0, 0, 0, 0
print('Accuracy: %f -- Precision: %f -- Recall: %f -- F1: %f -- AUC: %f' % (acc, prc, rc, f1, auc_))
else:
print('You need to give the correct algorithm name')
return
return y_test, y_pred
def save_result(labels, predicts, path):
results = []
for lable, predict in zip(labels, predicts):
results.append('{}\t{}\n'.format(lable, predict))
with open(path, 'w', encoding='utf-8') as f:
f.writelines(results)
if __name__ == '__main__':
args = parser.parse_args()
save_path = 'result/{}/{}_{}_{}.result'.format(args.project, args.project, args.algorithm, args.data.replace("/","_"))
only = True if args.only else False
if args.algorithm == 'la':
args.algorithm = 'lr'
args.only = ['la']
if "all" in args.only:
args.only.remove("all")
train, test = load_yasu_data(args)
labels, predicts = baseline_algorithm(train=train, test=test, algorithm=args.algorithm, only=only)
if not only:
save_result(labels, predicts, save_path)
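# Example invocations (using the parser defaults above; expects CSVs under
# data/<project>/ and writes results under result/<project>/):
#
#     python baseline.py -project qt -data k -algorithm lr
#     python baseline.py -project qt -data k -algorithm dbn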
| 35.402062
| 122
| 0.631722
|
import math
import random
import time
import argparse
from sklearn import preprocessing
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, precision_score, recall_score, roc_curve, auc
import pandas as pd
import numpy as np
import torch.nn as nn
import torch
from LR import LR
from DBN import DBN
parser = argparse.ArgumentParser()
parser.add_argument('-project', type=str,
default='qt')
parser.add_argument('-data', type=str,
default='k')
parser.add_argument('-algorithm', type=str,
default='lr')
parser.add_argument('-drop', type=str,
default='')
parser.add_argument('-only', nargs='+',
default=[])
def evaluation_metrics(y_true, y_pred):
fpr, tpr, thresholds = roc_curve(y_true=y_true, y_score=y_pred, pos_label=1)
auc_ = auc(fpr, tpr)
y_pred = [1 if p >= 0.5 else 0 for p in y_pred]
acc = accuracy_score(y_true=y_true, y_pred=y_pred)
prc = precision_score(y_true=y_true, y_pred=y_pred)
rc = recall_score(y_true=y_true, y_pred=y_pred)
f1 = 0
return acc, prc, rc, f1, auc_
def replace_value_dataframe(df):
df = df.replace({True: 1, False: 0})
df = df.fillna(df.mean())
if args.drop:
df = df.drop(columns=[args.drop])
elif args.only:
df = df[['Unnamed: 0','_id','date','bug','__'] + args.only]
return df.values
def get_features(data):
return data[:, 5:]
def get_ids(data):
return data[:, 1:2].flatten().tolist()
def get_label(data):
data = data[:, 3:4].flatten().tolist()
data = [1 if int(d) > 0 else 0 for d in data]
return data
def load_df_yasu_data(path_data):
data = pd.read_csv(path_data)
data = replace_value_dataframe(df=data)
ids, labels, features = get_ids(data=data), get_label(data=data), get_features(data=data)
indexes = list()
cnt_noexits = 0
for i in range(0, len(ids)):
try:
indexes.append(i)
except FileNotFoundError:
            print('File commit id does not exist', ids[i], cnt_noexits)
cnt_noexits += 1
ids = [ids[i] for i in indexes]
labels = [labels[i] for i in indexes]
features = features[indexes]
return (ids, np.array(labels), features)
def load_yasu_data(args):
train_path_data = 'data/{}/{}_train.csv'.format(args.project, args.data)
test_path_data = 'data/{}/{}_test.csv'.format(args.project, args.data)
train, test = load_df_yasu_data(train_path_data), load_df_yasu_data(test_path_data)
return train, test
def train_and_evl(data, label, args):
size = int(label.shape[0]*0.2)
auc_ = []
for i in range(5):
idx = size * i
X_e = data[idx:idx+size]
y_e = label[idx:idx+size]
X_t = np.vstack((data[:idx], data[idx+size:]))
y_t = np.hstack((label[:idx], label[idx+size:]))
model = LogisticRegression(max_iter=7000).fit(X_t, y_t)
y_pred = model.predict_proba(X_e)[:, 1]
fpr, tpr, thresholds = roc_curve(y_true=y_e, y_score=y_pred, pos_label=1)
auc_.append(auc(fpr, tpr))
return np.mean(auc_)
def mini_batches_update(X, Y, mini_batch_size=64, seed=0):
m = X.shape[0]
mini_batches = list()
np.random.seed(seed)
shuffled_X, shuffled_Y = X, Y
Y = Y.tolist()
Y_pos = [i for i in range(len(Y)) if Y[i] == 1]
Y_neg = [i for i in range(len(Y)) if Y[i] == 0]
num_complete_minibatches = int(math.floor(m / float(mini_batch_size))) + 1
for k in range(0, num_complete_minibatches):
indexes = sorted(
random.sample(Y_pos, int(mini_batch_size / 2)) + random.sample(Y_neg, int(mini_batch_size / 2)))
mini_batch_X, mini_batch_Y = shuffled_X[indexes], shuffled_Y[indexes]
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
return mini_batches
def mini_batches(X, Y, mini_batch_size=64, seed=0):
m = X.shape[0]
mini_batches = list()
np.random.seed(seed)
shuffled_X, shuffled_Y = X, Y
num_complete_minibatches = int(math.floor(m / float(mini_batch_size)))
for k in range(0, num_complete_minibatches):
mini_batch_X = shuffled_X[k * mini_batch_size: k * mini_batch_size + mini_batch_size, :]
if len(Y.shape) == 1:
mini_batch_Y = shuffled_Y[k * mini_batch_size: k * mini_batch_size + mini_batch_size]
else:
mini_batch_Y = shuffled_Y[k * mini_batch_size: k * mini_batch_size + mini_batch_size, :]
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
if m % mini_batch_size != 0:
mini_batch_X = shuffled_X[num_complete_minibatches * mini_batch_size: m, :]
if len(Y.shape) == 1:
mini_batch_Y = shuffled_Y[num_complete_minibatches * mini_batch_size: m]
else:
mini_batch_Y = shuffled_Y[num_complete_minibatches * mini_batch_size: m, :]
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
return mini_batches
def DBN_JIT(train_features, train_labels, test_features, test_labels, hidden_units=[20, 12, 12], num_epochs_LR=200):
| true
| true
|
790b43c41ba693b53623a612b0afe868c0fa7c66
| 2,958
|
py
|
Python
|
pennylane_cirq/cirq_operation.py
|
tonybruguier/pennylane-cirq
|
48db8bbdfd234516c8e4704e097a37a8ea45e8fd
|
[
"Apache-2.0"
] | 21
|
2020-08-06T16:16:07.000Z
|
2022-03-24T06:25:28.000Z
|
pennylane_cirq/cirq_operation.py
|
tonybruguier/pennylane-cirq
|
48db8bbdfd234516c8e4704e097a37a8ea45e8fd
|
[
"Apache-2.0"
] | 67
|
2020-07-28T07:42:58.000Z
|
2022-03-24T14:32:59.000Z
|
pennylane_cirq/cirq_operation.py
|
tonybruguier/pennylane-cirq
|
48db8bbdfd234516c8e4704e097a37a8ea45e8fd
|
[
"Apache-2.0"
] | 10
|
2020-10-14T15:13:13.000Z
|
2022-02-20T10:51:38.000Z
|
# Copyright 2019-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Cirq Operation class
====================
**Module name:** :mod:`pennylane_cirq.cirq_operation`
.. currentmodule:: pennylane_cirq.cirq_operation
A helper class that wraps the native Cirq operations and provides an interface for PennyLane.
Classes
-------
.. autosummary::
CirqOperation
Code details
~~~~~~~~~~~~
"""
from collections.abc import Sequence
import cirq
import pennylane as qml
class CirqOperation:
"""A helper class that wraps the native Cirq operations and provides an
interface for parametrization and application."""
def __init__(self, parametrization):
"""Initializes the CirqOperation
Args:
parametrization (Tuple[float] -> Union[Cirq:Qid, List[Cirq:Qid]]): Converts the
PennyLane gate parameters to an ordered list of gates that are to be applied.
"""
self.parametrization = parametrization
self.parametrized_cirq_gates = None
self.is_inverse = False
def parametrize(self, *args):
"""Parametrizes the CirqOperation.
Args:
*args (float): the parameters for the operations
"""
self.parametrized_cirq_gates = self.parametrization(*args)
if not isinstance(self.parametrized_cirq_gates, Sequence):
self.parametrized_cirq_gates = [self.parametrized_cirq_gates]
if self.is_inverse:
# Cirq automatically reverses the order if it gets an iterable
self.parametrized_cirq_gates = cirq.inverse(self.parametrized_cirq_gates)
def apply(self, *qubits):
"""Applies the CirqOperation.
Args:
*qubits (Cirq:Qid): the qubits on which the Cirq gates should be performed.
"""
if not self.parametrized_cirq_gates:
raise qml.DeviceError("CirqOperation must be parametrized before it can be applied.")
return (parametrized_gate(*qubits) for parametrized_gate in self.parametrized_cirq_gates)
def inv(self):
"""Inverses the CirqOperation."""
# We can also support inversion after parametrization, but this is not necessary for the
# PennyLane-Cirq codebase at the moment.
if self.parametrized_cirq_gates:
raise qml.DeviceError("CirqOperation can't be inverted after it was parametrized.")
self.is_inverse = not self.is_inverse
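# A minimal usage sketch (the rz/X gate choice below is illustrative only; any
# parametrization returning Cirq gates works the same way):
def _usage_sketch():
    op = CirqOperation(lambda phi: [cirq.rz(phi), cirq.X])
    op.parametrize(0.5)  # build the concrete gate list
    qubit = cirq.LineQubit(0)
    # apply() yields one Cirq operation per parametrized gate
    return cirq.Circuit(op.apply(qubit))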
| 33.235955
| 97
| 0.694388
|
from collections.abc import Sequence
import cirq
import pennylane as qml
class CirqOperation:
def __init__(self, parametrization):
self.parametrization = parametrization
self.parametrized_cirq_gates = None
self.is_inverse = False
def parametrize(self, *args):
self.parametrized_cirq_gates = self.parametrization(*args)
if not isinstance(self.parametrized_cirq_gates, Sequence):
self.parametrized_cirq_gates = [self.parametrized_cirq_gates]
if self.is_inverse:
self.parametrized_cirq_gates = cirq.inverse(self.parametrized_cirq_gates)
def apply(self, *qubits):
if not self.parametrized_cirq_gates:
raise qml.DeviceError("CirqOperation must be parametrized before it can be applied.")
return (parametrized_gate(*qubits) for parametrized_gate in self.parametrized_cirq_gates)
def inv(self):
if self.parametrized_cirq_gates:
raise qml.DeviceError("CirqOperation can't be inverted after it was parametrized.")
self.is_inverse = not self.is_inverse
| true
| true
|
790b43fed1d81433a5aa762856647e244de191b7
| 8,505
|
py
|
Python
|
src/model/encoder_decoder_module.py
|
saridormi/commit_message_generation
|
c25db61a5f41accfb566caaea5feb0d275751293
|
[
"MIT"
] | 1
|
2021-01-15T13:17:33.000Z
|
2021-01-15T13:17:33.000Z
|
src/model/encoder_decoder_module.py
|
saridormi/commit_message_generation
|
c25db61a5f41accfb566caaea5feb0d275751293
|
[
"MIT"
] | 1
|
2020-11-10T13:44:26.000Z
|
2020-11-26T15:20:42.000Z
|
src/model/encoder_decoder_module.py
|
saridormi/commit_message_generation
|
c25db61a5f41accfb566caaea5feb0d275751293
|
[
"MIT"
] | null | null | null |
from copy import copy
from typing import Optional
import torch
import pytorch_lightning as pl
from transformers import (
EncoderDecoderModel,
RobertaModel,
RobertaConfig,
GPT2LMHeadModel,
GPT2Config,
RobertaTokenizer,
GPT2Tokenizer,
AdamW,
get_linear_schedule_with_warmup,
)
import nltk
nltk.download("wordnet")
class EncoderDecoderModule(pl.LightningModule):
def __init__(
self,
learning_rate: float,
src_tokenizer: RobertaTokenizer,
trg_tokenizer: GPT2Tokenizer,
num_epochs: int,
num_batches: int,
num_gpus: int,
num_layers_encoder: Optional[int] = None,
num_layers_decoder: Optional[int] = None,
encoder_name_or_path: Optional[str] = None,
decoder_name_or_path: Optional[str] = None,
**kwargs,
):
super().__init__()
self._src_tokenizer = src_tokenizer
self._trg_tokenizer = trg_tokenizer
self._num_epochs = num_epochs
self._num_batches = num_batches
self._num_gpus = num_gpus
self.learning_rate = learning_rate
self.save_hyperparameters()
if encoder_name_or_path is not None and decoder_name_or_path is not None:
# use pretrained RoBERTa as encoder
encoder = RobertaModel.from_pretrained(encoder_name_or_path)
# resize embeddings to match vocabulary size
encoder.resize_token_embeddings(len(self._src_tokenizer))
# remove layers if necessary
if num_layers_encoder is not None and num_layers_encoder < encoder.config.num_hidden_layers:
encoder = EncoderDecoderModule.remove_layers_from_model(encoder, num_layers_encoder, is_gpt=False)
# use pretrained GPT-2 as decoder
config = GPT2Config.from_pretrained(decoder_name_or_path)
config.is_decoder = True
config.add_cross_attention = True
decoder = GPT2LMHeadModel.from_pretrained(decoder_name_or_path, config=config)
# remove layers if necessary
if num_layers_decoder is not None and num_layers_decoder < decoder.config.n_layer:
decoder = EncoderDecoderModule.remove_layers_from_model(decoder, num_layers_decoder, is_gpt=True)
elif num_layers_decoder is not None and num_layers_encoder is not None:
# use randomly initialized RoBERTa as encoder
encoder_config = RobertaConfig()
encoder_config.num_hidden_layers = num_layers_encoder
encoder = RobertaModel(config=encoder_config)
# resize embeddings to match vocabulary size
encoder.resize_token_embeddings(len(self._src_tokenizer))
# use randomly initialized GPT-2 as decoder
decoder_config = GPT2Config()
decoder_config.n_layer = num_layers_decoder
decoder_config.is_decoder = True
decoder_config.add_cross_attention = True
decoder = GPT2LMHeadModel(config=decoder_config)
else:
raise ValueError(
"You have to specify either num_layers for training from scratch \
or paths for loading pretrained models"
)
self.model = EncoderDecoderModel(encoder=encoder, decoder=decoder)
# cache is currently not supported by EncoderDecoder framework
self.model.decoder.config.use_cache = False
# do not tie output embeddings to input embeddings
self.model.config.tie_word_embeddings = False
# to make logs for different batch sizes prettier
self.examples_count = 0
def forward(self, batch):
return self.model(
input_ids=batch["diff_input_ids"],
attention_mask=batch["diff_attention_mask"],
decoder_input_ids=batch["msg_input_ids"],
decoder_attention_mask=batch["msg_attention_mask"],
labels=batch["msg_labels"],
)
def training_step(self, batch, batch_idx):
self.examples_count += len(batch["diff_input_ids"])
loss, logits = self(batch)[:2]
self.logger.experiment.log({"train_loss_step": loss}, step=self.examples_count)
return {"loss": loss}
def training_epoch_end(self, outputs):
train_loss_mean = torch.stack([x["loss"] for x in outputs]).mean()
self.logger.experiment.log({"train_loss_epoch": train_loss_mean}, step=self.examples_count)
def next_token_metrics_step(self, batch):
loss, scores = self(batch)[:2]
return {"loss": loss}
def next_token_metrics_epoch_end(self, outputs, stage):
"""
Logic for validation & testing epoch end:
1) Calculate accuracy@1, accuracy@5, MRR@5
2) (in val stage only) Aggregate loss and log metric(s) for ModelCheckpoint
3) Log everything to wandb
"""
loss = torch.stack([x["loss"] for x in outputs]).mean()
metrics = {f"{stage}_loss_epoch": loss}
if stage == "val":
self.log("val_loss_epoch", metrics["val_loss_epoch"], on_step=False, on_epoch=True, prog_bar=True, logger=False)
self.logger.experiment.log(metrics, step=self.examples_count)
def validation_step(self, batch, batch_idx, dataloader_idx=0):
return self.next_token_metrics_step(batch)
def validation_epoch_end(self, outputs):
self.next_token_metrics_epoch_end(outputs, stage="val")
def test_step(self, batch, batch_idx):
return self.next_token_metrics_step(batch)
def test_epoch_end(self, outputs):
self.next_token_metrics_epoch_end(outputs, stage="test")
def configure_optimizers(self):
optimizer = AdamW(self.parameters(), lr=self.learning_rate)
scheduler = {
"scheduler": get_linear_schedule_with_warmup(
optimizer, 4000 // self._num_gpus, self._num_epochs * self._num_batches
),
"interval": "step",
"frequency": 1,
}
return [optimizer], [scheduler]
@staticmethod
def remove_layers_from_model(teacher, num_layers, is_gpt):
if not is_gpt:
teacher_config = teacher.config
student_config = copy(teacher.config)
student_config.num_hidden_layers = num_layers
student = RobertaModel(config=student_config)
# copy all embeddings
student.embeddings.word_embeddings = teacher.embeddings.word_embeddings
student.embeddings.position_embeddings = teacher.embeddings.position_embeddings
student.embeddings.token_type_embeddings = teacher.embeddings.token_type_embeddings
student.embeddings.LayerNorm = teacher.embeddings.LayerNorm
student.embeddings.dropout = teacher.embeddings.dropout
# uniformly pick from middle layers from teacher
# it is basically np.linspace(0, teacher_config.num_hidden_layers,
# num=student_config.num_hidden_layers, endpoint=True)
step = (teacher_config.num_hidden_layers - 1) / (student_config.num_hidden_layers - 1)
for student_layer, teacher_layer in enumerate(
int(i * step) for i in range(student_config.num_hidden_layers)
):
student.encoder.layer[student_layer] = teacher.encoder.layer[teacher_layer]
else:
teacher_config = teacher.config
student_config = copy(teacher.config)
student_config.n_layer = num_layers
student = GPT2LMHeadModel(config=student_config)
# Copying all embeddings
student.transformer.wte = teacher.transformer.wte
student.transformer.wpe = teacher.transformer.wpe
student.transformer.drop = teacher.transformer.drop
            # Maybe there is something else in BERT that needs to be copied!
# Specific thing for GPT2LMHead. Not necessary for BERT
student.tie_weights()
# Uniformly pick from middle layers from teacher
# It is basically np.linspace(0, teacher_config.n_layer, num=student_config.n_layer, endpoint=True)
step = (teacher_config.n_layer - 1) / (student_config.n_layer - 1)
for student_layer, teacher_layer in enumerate(int(i * step) for i in range(student_config.n_layer)):
student.transformer.h[student_layer] = teacher.transformer.h[teacher_layer]
return student
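    # Worked example of the layer-picking arithmetic above (a pure-Python
    # sketch): distilling a 12-layer teacher into a 4-layer student keeps
    # teacher layers [0, 3, 7, 11], i.e. an approximate np.linspace over depth.
    @staticmethod
    def _layer_indices_demo(teacher_layers=12, student_layers=4):
        step = (teacher_layers - 1) / (student_layers - 1)
        return [int(i * step) for i in range(student_layers)]  # -> [0, 3, 7, 11]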
| 42.313433
| 124
| 0.663139
|
from copy import copy
from typing import Optional
import torch
import pytorch_lightning as pl
from transformers import (
EncoderDecoderModel,
RobertaModel,
RobertaConfig,
GPT2LMHeadModel,
GPT2Config,
RobertaTokenizer,
GPT2Tokenizer,
AdamW,
get_linear_schedule_with_warmup,
)
import nltk
nltk.download("wordnet")
class EncoderDecoderModule(pl.LightningModule):
def __init__(
self,
learning_rate: float,
src_tokenizer: RobertaTokenizer,
trg_tokenizer: GPT2Tokenizer,
num_epochs: int,
num_batches: int,
num_gpus: int,
num_layers_encoder: Optional[int] = None,
num_layers_decoder: Optional[int] = None,
encoder_name_or_path: Optional[str] = None,
decoder_name_or_path: Optional[str] = None,
**kwargs,
):
super().__init__()
self._src_tokenizer = src_tokenizer
self._trg_tokenizer = trg_tokenizer
self._num_epochs = num_epochs
self._num_batches = num_batches
self._num_gpus = num_gpus
self.learning_rate = learning_rate
self.save_hyperparameters()
if encoder_name_or_path is not None and decoder_name_or_path is not None:
encoder = RobertaModel.from_pretrained(encoder_name_or_path)
encoder.resize_token_embeddings(len(self._src_tokenizer))
if num_layers_encoder is not None and num_layers_encoder < encoder.config.num_hidden_layers:
encoder = EncoderDecoderModule.remove_layers_from_model(encoder, num_layers_encoder, is_gpt=False)
config = GPT2Config.from_pretrained(decoder_name_or_path)
config.is_decoder = True
config.add_cross_attention = True
decoder = GPT2LMHeadModel.from_pretrained(decoder_name_or_path, config=config)
if num_layers_decoder is not None and num_layers_decoder < decoder.config.n_layer:
decoder = EncoderDecoderModule.remove_layers_from_model(decoder, num_layers_decoder, is_gpt=True)
elif num_layers_decoder is not None and num_layers_encoder is not None:
encoder_config = RobertaConfig()
encoder_config.num_hidden_layers = num_layers_encoder
encoder = RobertaModel(config=encoder_config)
encoder.resize_token_embeddings(len(self._src_tokenizer))
decoder_config = GPT2Config()
decoder_config.n_layer = num_layers_decoder
decoder_config.is_decoder = True
decoder_config.add_cross_attention = True
decoder = GPT2LMHeadModel(config=decoder_config)
else:
raise ValueError(
"You have to specify either num_layers for training from scratch \
or paths for loading pretrained models"
)
self.model = EncoderDecoderModel(encoder=encoder, decoder=decoder)
self.model.decoder.config.use_cache = False
self.model.config.tie_word_embeddings = False
self.examples_count = 0
def forward(self, batch):
return self.model(
input_ids=batch["diff_input_ids"],
attention_mask=batch["diff_attention_mask"],
decoder_input_ids=batch["msg_input_ids"],
decoder_attention_mask=batch["msg_attention_mask"],
labels=batch["msg_labels"],
)
def training_step(self, batch, batch_idx):
self.examples_count += len(batch["diff_input_ids"])
loss, logits = self(batch)[:2]
self.logger.experiment.log({"train_loss_step": loss}, step=self.examples_count)
return {"loss": loss}
def training_epoch_end(self, outputs):
train_loss_mean = torch.stack([x["loss"] for x in outputs]).mean()
self.logger.experiment.log({"train_loss_epoch": train_loss_mean}, step=self.examples_count)
def next_token_metrics_step(self, batch):
loss, scores = self(batch)[:2]
return {"loss": loss}
def next_token_metrics_epoch_end(self, outputs, stage):
loss = torch.stack([x["loss"] for x in outputs]).mean()
metrics = {f"{stage}_loss_epoch": loss}
if stage == "val":
self.log("val_loss_epoch", metrics["val_loss_epoch"], on_step=False, on_epoch=True, prog_bar=True, logger=False)
self.logger.experiment.log(metrics, step=self.examples_count)
def validation_step(self, batch, batch_idx, dataloader_idx=0):
return self.next_token_metrics_step(batch)
def validation_epoch_end(self, outputs):
self.next_token_metrics_epoch_end(outputs, stage="val")
def test_step(self, batch, batch_idx):
return self.next_token_metrics_step(batch)
def test_epoch_end(self, outputs):
self.next_token_metrics_epoch_end(outputs, stage="test")
def configure_optimizers(self):
optimizer = AdamW(self.parameters(), lr=self.learning_rate)
scheduler = {
"scheduler": get_linear_schedule_with_warmup(
optimizer, 4000 // self._num_gpus, self._num_epochs * self._num_batches
),
"interval": "step",
"frequency": 1,
}
return [optimizer], [scheduler]
@staticmethod
def remove_layers_from_model(teacher, num_layers, is_gpt):
if not is_gpt:
teacher_config = teacher.config
student_config = copy(teacher.config)
student_config.num_hidden_layers = num_layers
student = RobertaModel(config=student_config)
student.embeddings.word_embeddings = teacher.embeddings.word_embeddings
student.embeddings.position_embeddings = teacher.embeddings.position_embeddings
student.embeddings.token_type_embeddings = teacher.embeddings.token_type_embeddings
student.embeddings.LayerNorm = teacher.embeddings.LayerNorm
student.embeddings.dropout = teacher.embeddings.dropout
step = (teacher_config.num_hidden_layers - 1) / (student_config.num_hidden_layers - 1)
for student_layer, teacher_layer in enumerate(
int(i * step) for i in range(student_config.num_hidden_layers)
):
student.encoder.layer[student_layer] = teacher.encoder.layer[teacher_layer]
else:
teacher_config = teacher.config
student_config = copy(teacher.config)
student_config.n_layer = num_layers
student = GPT2LMHeadModel(config=student_config)
student.transformer.wte = teacher.transformer.wte
student.transformer.wpe = teacher.transformer.wpe
student.transformer.drop = teacher.transformer.drop
student.tie_weights()
step = (teacher_config.n_layer - 1) / (student_config.n_layer - 1)
for student_layer, teacher_layer in enumerate(int(i * step) for i in range(student_config.n_layer)):
student.transformer.h[student_layer] = teacher.transformer.h[teacher_layer]
return student
| true
| true
|
790b4408c840cdd98f96427833a0a90fcceb1cf2
| 3,887
|
py
|
Python
|
test/data/test_common_data_multiprocess.py
|
jihuacao/Putil
|
b753fc94bea4cbda00f483681c55f0e9f54adef2
|
[
"Apache-2.0"
] | 1
|
2018-12-09T06:09:29.000Z
|
2018-12-09T06:09:29.000Z
|
test/data/test_common_data_multiprocess.py
|
jihuacao/Putil
|
b753fc94bea4cbda00f483681c55f0e9f54adef2
|
[
"Apache-2.0"
] | null | null | null |
test/data/test_common_data_multiprocess.py
|
jihuacao/Putil
|
b753fc94bea4cbda00f483681c55f0e9f54adef2
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
import sys
import traceback
import numpy as np
import os
import Putil.base.logger as plog
plog.PutilLogConfig.config_file_handler(filename='./test/data/_log_test_common_data_multiprocess.log', mode='w')
plog.PutilLogConfig.config_log_level(stream=plog.INFO, file=plog.DEBUG)
plog.PutilLogConfig.config_format(plog.FormatRecommend)
plog.PutilLogConfig.config_handler(plog.stream_method | plog.file_method)
logger = plog.PutilLogConfig('TesCommonData').logger()
logger.setLevel(plog.DEBUG)
MainLogger = logger.getChild('Main')
MainLogger.setLevel(plog.DEBUG)
import Putil.test.data.test_common_data_unit as tbase
import Putil.data.common_data as pcd
import multiprocessing
pcd.DataPutProcess.set_running_mode(pcd.DataPutProcess.RunningMode.Debug)
if __name__ == '__main__':
manager_common_data = pcd.CommonDataManager()
manager_common_data.start()
data = manager_common_data.TestCommonData()
manager = multiprocessing.Manager()
pool = multiprocessing.Pool()
dpq = pcd.DataPutProcess(data, manager, pool)
pool.close()
dq = dpq.DataQueue()
restart_param = dict()
restart_param['critical_process'] = 'random_fill'
dpq.restart(**restart_param)
# pool.join()
# print(dpq.queue_process_ret.get())
count = 0
while dpq.has_next():
data = dq.get()
assert len(data) == 1
for k, v in enumerate(data[0]):
assert v.datas().shape[0] == 1
pass
count += 1
pass
assert count == 100
restart_param['device_batch'] = [1]
restart_param['critical_process'] = 'random_fill'
dpq.restart(**restart_param)
count = 0
while dpq.has_next():
dq.get()
count += 1
pass
assert count == 100
restart_param['device_batch'] = [1]
restart_param['critical_process'] = 'allow_low'
dpq.restart(**restart_param)
dpq.pause_queue()
now_size = dpq.DataQueue().qsize()
count = 0
while dpq.paused_and_has_next():
dq.get()
count += 1
pass
assert count == now_size
dpq.continue_queue()
while dpq.has_next():
dq.get()
count += 1
pass
assert count == 100
restart_param['device_batch'] = [1]
restart_param['critical_process'] = 'allow_low'
dpq.restart(**restart_param)
count = 0
while count < 50 and dpq.has_next():
get = dq.get()
assert len(get) == 1
for k, v in enumerate(get[0]):
assert v.datas().shape == (1, 1), print(v.datas().shape)
pass
count += 1
pass
dpq.inject_operation({'recycle': True}, device_batch=[2])
while count < 60 and dpq.has_next():
get = dq.get()
assert len(get) == 1
for k, v in enumerate(get[0]):
assert v.datas().shape == (2, 1), print(v.datas().shape)
pass
count += 1
pass
old_size = dpq.inject_operation({'recycle': False}, device_batch=[1])
while count < 60 + old_size and dpq.has_next():
get = dq.get()
assert len(get) == 1
for k, v in enumerate(get[0]):
assert v.datas().shape == (2, 1), print(get[0].datas().shape)
count += 1
pass
assert count == 60 + old_size, print(count)
remain_count = 100 - (50 + (10 + old_size) * 2)
truck_count = count
while (count - truck_count) < remain_count and dpq.has_next():
get = dq.get()
assert len(get) == 1
for k, v in enumerate(get[0]):
assert v.datas().shape == (1, 1), print(get[0].datas().shape)
count += 1
pass
assert count == old_size + remain_count + 60, print(count)
dpq.stop_generation()
pool.join()
print(dpq.queue_process_ret().get())
# while dq.empty() is False or dpq.EpochDoneFlag.value is False:
# print('get')
# print(dq.get())
pass
| 29.225564
| 112
| 0.621045
|
import sys
import traceback
import numpy as np
import os
import Putil.base.logger as plog
plog.PutilLogConfig.config_file_handler(filename='./test/data/_log_test_common_data_multiprocess.log', mode='w')
plog.PutilLogConfig.config_log_level(stream=plog.INFO, file=plog.DEBUG)
plog.PutilLogConfig.config_format(plog.FormatRecommend)
plog.PutilLogConfig.config_handler(plog.stream_method | plog.file_method)
logger = plog.PutilLogConfig('TesCommonData').logger()
logger.setLevel(plog.DEBUG)
MainLogger = logger.getChild('Main')
MainLogger.setLevel(plog.DEBUG)
import Putil.test.data.test_common_data_unit as tbase
import Putil.data.common_data as pcd
import multiprocessing
pcd.DataPutProcess.set_running_mode(pcd.DataPutProcess.RunningMode.Debug)
if __name__ == '__main__':
manager_common_data = pcd.CommonDataManager()
manager_common_data.start()
data = manager_common_data.TestCommonData()
manager = multiprocessing.Manager()
pool = multiprocessing.Pool()
dpq = pcd.DataPutProcess(data, manager, pool)
pool.close()
dq = dpq.DataQueue()
restart_param = dict()
restart_param['critical_process'] = 'random_fill'
dpq.restart(**restart_param)
count = 0
while dpq.has_next():
data = dq.get()
assert len(data) == 1
for k, v in enumerate(data[0]):
assert v.datas().shape[0] == 1
pass
count += 1
pass
assert count == 100
restart_param['device_batch'] = [1]
restart_param['critical_process'] = 'random_fill'
dpq.restart(**restart_param)
count = 0
while dpq.has_next():
dq.get()
count += 1
pass
assert count == 100
restart_param['device_batch'] = [1]
restart_param['critical_process'] = 'allow_low'
dpq.restart(**restart_param)
dpq.pause_queue()
now_size = dpq.DataQueue().qsize()
count = 0
while dpq.paused_and_has_next():
dq.get()
count += 1
pass
assert count == now_size
dpq.continue_queue()
while dpq.has_next():
dq.get()
count += 1
pass
assert count == 100
restart_param['device_batch'] = [1]
restart_param['critical_process'] = 'allow_low'
dpq.restart(**restart_param)
count = 0
while count < 50 and dpq.has_next():
get = dq.get()
assert len(get) == 1
for k, v in enumerate(get[0]):
assert v.datas().shape == (1, 1), print(v.datas().shape)
pass
count += 1
pass
dpq.inject_operation({'recycle': True}, device_batch=[2])
while count < 60 and dpq.has_next():
get = dq.get()
assert len(get) == 1
for k, v in enumerate(get[0]):
assert v.datas().shape == (2, 1), print(v.datas().shape)
pass
count += 1
pass
old_size = dpq.inject_operation({'recycle': False}, device_batch=[1])
while count < 60 + old_size and dpq.has_next():
get = dq.get()
assert len(get) == 1
for k, v in enumerate(get[0]):
assert v.datas().shape == (2, 1), print(get[0].datas().shape)
count += 1
pass
assert count == 60 + old_size, print(count)
remain_count = 100 - (50 + (10 + old_size) * 2)
truck_count = count
while (count - truck_count) < remain_count and dpq.has_next():
get = dq.get()
assert len(get) == 1
for k, v in enumerate(get[0]):
assert v.datas().shape == (1, 1), print(get[0].datas().shape)
count += 1
pass
assert count == old_size + remain_count + 60, print(count)
dpq.stop_generation()
pool.join()
print(dpq.queue_process_ret().get())
pass
| true
| true
|
790b4442fff9865e295ebbcbe378b193f872cc15
| 299
|
py
|
Python
|
pypv/scripts/sun_path.py
|
TomLXXVI/pypv
|
df3dfba586bdec171e3fa9795b7bae48f76f83f2
|
[
"MIT"
] | null | null | null |
pypv/scripts/sun_path.py
|
TomLXXVI/pypv
|
df3dfba586bdec171e3fa9795b7bae48f76f83f2
|
[
"MIT"
] | null | null | null |
pypv/scripts/sun_path.py
|
TomLXXVI/pypv
|
df3dfba586bdec171e3fa9795b7bae48f76f83f2
|
[
"MIT"
] | 1
|
2022-03-09T06:26:28.000Z
|
2022-03-09T06:26:28.000Z
|
from sun.geometry import Location, SunPath
from date_time import Date
loc = Location(
name='Ghent',
region='Belgium',
latitude=51.07,
longitude=3.69,
timezone='Europe/Brussels',
altitude=9.0
)
date = Date(year=2019, month=7, day=29)
sp = SunPath(loc, date)
sp.print_table()
| 19.933333
| 42
| 0.682274
|
from sun.geometry import Location, SunPath
from date_time import Date
loc = Location(
name='Ghent',
region='Belgium',
latitude=51.07,
longitude=3.69,
timezone='Europe/Brussels',
altitude=9.0
)
date = Date(year=2019, month=7, day=29)
sp = SunPath(loc, date)
sp.print_table()
| true
| true
|
790b448853419d178189e69c3ab9a004b01917ac
| 2,231
|
py
|
Python
|
jupyter_notebook_config.py
|
colinjbrown/dfext-dockerstack
|
9496793162c67707fd8bb0b0207392b3d4f651b9
|
[
"BSD-3-Clause"
] | null | null | null |
jupyter_notebook_config.py
|
colinjbrown/dfext-dockerstack
|
9496793162c67707fd8bb0b0207392b3d4f651b9
|
[
"BSD-3-Clause"
] | null | null | null |
jupyter_notebook_config.py
|
colinjbrown/dfext-dockerstack
|
9496793162c67707fd8bb0b0207392b3d4f651b9
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from jupyter_core.paths import jupyter_data_dir
import subprocess
import os
import errno
import stat
c = get_config() # noqa: F821
c.NotebookApp.ip = "0.0.0.0"
c.NotebookApp.port = 8888
c.NotebookApp.open_browser = False
c.Spawner.args = ['--NotebookApp.tornado_settings={"headers":{"Content-Security-Policy": "frame-ancestors * \'self\' colinjbrown.com:*"}}']
c.NotebookApp.tornado_settings = { 'headers': { 'Content-Security-Policy': "frame-ancestors * \'self\' colinjbrown.com:*"} }
c.JupyterHub.tornado_settings = { 'headers': { 'Content-Security-Policy': "frame-ancestors * \'self\' colinjbrown.com:*"} }
# https://github.com/jupyter/notebook/issues/3130
c.FileContentsManager.delete_to_trash = False
# Generate a self-signed certificate
if "GEN_CERT" in os.environ:
dir_name = jupyter_data_dir()
pem_file = os.path.join(dir_name, "notebook.pem")
try:
os.makedirs(dir_name)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(dir_name):
pass
else:
raise
# Generate an openssl.cnf file to set the distinguished name
cnf_file = os.path.join(os.getenv("CONDA_DIR", "/usr/lib"), "ssl", "openssl.cnf")
if not os.path.isfile(cnf_file):
with open(cnf_file, "w") as fh:
fh.write(
"""\
[req]
distinguished_name = req_distinguished_name
[req_distinguished_name]
"""
)
# Generate a certificate if one doesn't exist on disk
subprocess.check_call(
[
"openssl",
"req",
"-new",
"-newkey=rsa:2048",
"-days=365",
"-nodes",
"-x509",
"-subj=/C=XX/ST=XX/L=XX/O=generated/CN=generated",
f"-keyout={pem_file}",
f"-out={pem_file}",
]
)
# Restrict access to the file
os.chmod(pem_file, stat.S_IRUSR | stat.S_IWUSR)
c.NotebookApp.certfile = pem_file
# Change default umask for all subprocesses of the notebook server if set in
# the environment
if "NB_UMASK" in os.environ:
os.umask(int(os.environ["NB_UMASK"], 8))
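# Worked example of the octal parsing above, with a hypothetical value:
# NB_UMASK="002" parses as int("002", 8) == 2, so files the server creates
# default to rw-rw-r-- (0o666 & ~0o002). The assertion below is side-effect free.
assert int("002", 8) == 0o002 == 2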
| 31.422535
| 142
| 0.636934
|
from jupyter_core.paths import jupyter_data_dir
import subprocess
import os
import errno
import stat
c = get_config()
c.NotebookApp.ip = "0.0.0.0"
c.NotebookApp.port = 8888
c.NotebookApp.open_browser = False
c.Spawner.args = ['--NotebookApp.tornado_settings={"headers":{"Content-Security-Policy": "frame-ancestors * \'self\' colinjbrown.com:*"}}']
c.NotebookApp.tornado_settings = { 'headers': { 'Content-Security-Policy': "frame-ancestors * \'self\' colinjbrown.com:*"} }
c.JupyterHub.tornado_settings = { 'headers': { 'Content-Security-Policy': "frame-ancestors * \'self\' colinjbrown.com:*"} }
c.FileContentsManager.delete_to_trash = False
if "GEN_CERT" in os.environ:
dir_name = jupyter_data_dir()
pem_file = os.path.join(dir_name, "notebook.pem")
try:
os.makedirs(dir_name)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(dir_name):
pass
else:
raise
cnf_file = os.path.join(os.getenv("CONDA_DIR", "/usr/lib"), "ssl", "openssl.cnf")
if not os.path.isfile(cnf_file):
with open(cnf_file, "w") as fh:
fh.write(
"""\
[req]
distinguished_name = req_distinguished_name
[req_distinguished_name]
"""
)
subprocess.check_call(
[
"openssl",
"req",
"-new",
"-newkey=rsa:2048",
"-days=365",
"-nodes",
"-x509",
"-subj=/C=XX/ST=XX/L=XX/O=generated/CN=generated",
f"-keyout={pem_file}",
f"-out={pem_file}",
]
)
# Restrict access to the file
os.chmod(pem_file, stat.S_IRUSR | stat.S_IWUSR)
c.NotebookApp.certfile = pem_file
# Change default umask for all subprocesses of the notebook server if set in
# the environment
if "NB_UMASK" in os.environ:
os.umask(int(os.environ["NB_UMASK"], 8))
| true
| true
|
790b44aca3f687434ba362d7b3c00873f8ece524
| 10,602
|
py
|
Python
|
example_client/multi_inputs.py
|
Xaenalt/model_server
|
f977dbf1246ebf85e960ca058e814deac7c6a16c
|
[
"Apache-2.0"
] | 305
|
2018-10-01T12:41:28.000Z
|
2020-04-24T10:36:08.000Z
|
example_client/multi_inputs.py
|
Xaenalt/model_server
|
f977dbf1246ebf85e960ca058e814deac7c6a16c
|
[
"Apache-2.0"
] | 199
|
2020-04-29T08:43:21.000Z
|
2022-03-29T09:05:52.000Z
|
example_client/multi_inputs.py
|
Xaenalt/model_server
|
f977dbf1246ebf85e960ca058e814deac7c6a16c
|
[
"Apache-2.0"
] | 80
|
2020-04-29T14:54:41.000Z
|
2022-03-30T14:50:29.000Z
|
#
# Copyright (c) 2019-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Multi-threaded sample to run a RMNet & SSDMobilenet v2 that will
# detect only person, bike and vehicle (change the output parsing
# for more classes)
#
# Example usage:
# RMNet: python3.6 multi_inputs.py -n "RMNet" -l "data" -o "detection_out"
# -d 1024 -i 127.0.0.1 -p 9001 -c 1
# -f /var/repos/github/sample-videos/person-bicycle-car-detection.mp4
# SSDMobileNet: python3.6 multi_inputs.py -n "SSDMobileNet" -l "image_tensor"
# -o "DetectionOutput" -d 300 -i 127.0.0.1 -p 9001 -c 1
# -f /var/repos/github/sample-videos/person-bicycle-car-detection.mp4
from __future__ import print_function
from argparse import ArgumentParser, SUPPRESS
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc
from time import time, sleep
import sys
import os
import cv2
import grpc
import threading
import logging as log
from tensorflow import make_tensor_proto, make_ndarray
# global data (shared between threads & main)
CLASSES = ["None", "Pedestrian", "Vehicle", "Bike", "Other"]
COLORS = [(255, 255, 255), (255, 0, 0), (0, 255, 0), (0, 0, 255), (128, 128, 128)]
SRC_TYPE = ["Camera", "Video"]
exit_ok = False # manage thread loop
CAM_WIDTH = 640 # camera width
CAM_HEIGHT = 480 # camera height
CAM_FPS = 30 # camera speed
CONFIDENCE_THRESHOLD = 0.75 # detection confidence
#####################################################################################
def build_argparser():
parser = ArgumentParser(add_help=False)
args = parser.add_argument_group('Options')
args.add_argument('-h', '--help', action='help', default=SUPPRESS,
help='Show this help message and exit.')
args.add_argument('-n', '--network_name', required=True,
type=str, help='Network name')
args.add_argument('-l', '--input_layer', required=True,
type=str, help='Input layer name')
args.add_argument('-o', '--output_layer', required=True,
type=str, help='Output layer name')
args.add_argument('-d', '--frame_size', required=True,
type=int, help='Input frame width and height that matches used model')
args.add_argument('-c', '--num_cameras', help='Number of cameras to be used',
required=False, type=int, default=1)
args.add_argument('-f', '--file', help='Path to the video file',
required=False, type=str)
args.add_argument('-i', '--ip', help='ip address of the ovms', required=True)
args.add_argument('-p', '--port', help='port of the ovms', required=True)
return parser
# Decoding idea based on the link below. Not very accurate, so please implement your own:
# https://github.com/opencv/open_model_zoo/blob/master/intel_models/\
# person-vehicle-bike-detection-crossroad-0078/\
# description/person-vehicle-bike-detection-crossroad-0078.md
def parse_output(thr_id, res, frame):
for batch, data in enumerate(res):
pred = data[0]
for values in enumerate(pred):
# tuple
index = values[0]
l_pred = values[1]
# actual predictions
img_id = l_pred[0]
label = l_pred[1]
conf = l_pred[2]
x_min = l_pred[3]
y_min = l_pred[4]
x_max = l_pred[5]
y_max = l_pred[6]
# preventing any wrong array indexing (for RMNet)
if label > 4:
# Unsupported class label detected. Change to `other`.
label = 4
# Do you want confidence level to be passed from command line?
if img_id != -1 and conf >= CONFIDENCE_THRESHOLD:
# draw the bounding boxes on the frame
height, width = frame.shape[:2]
cv2.rectangle(frame, (int(width * x_min), int(height * y_min)),
(int(width * x_max), int(height * y_max)), COLORS[int(label)], 2)
cv2.putText(frame, str(CLASSES[int(label)]), (int(width * x_min)-10,
int(height * y_min)-10), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
COLORS[int(label)], 2)
return frame
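# A toy check of the decoder above: one synthetic row in the
# [img_id, label, conf, x_min, y_min, x_max, y_max] layout that parse_output
# expects, drawn onto a blank frame (illustrative values, not real model output).
def _parse_output_demo():
    import numpy as np
    fake_res = np.array([[[[0., 1., 0.9, 0.1, 0.1, 0.4, 0.6]]]])  # one "Pedestrian" box
    blank = np.zeros((480, 640, 3), dtype=np.uint8)
    return parse_output(0, fake_res, blank)  # frame with one labelled rectangle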
# This is common for both the camera & video files
def thread_function(thr_id, network_name, input_layer, output_layer, input_dimension,
ip, port, disp_buf, src_type, src_name):
if src_type == "Camera":
# UVC camera init - camera threads always come first and we use it
# to generate the camera indexes
cam = cv2.VideoCapture(thr_id)
if not (cam.isOpened()):
log.error("Failed to open the UVC camera {}".format(thr_id))
return
cam.set(cv2.CAP_PROP_FRAME_WIDTH, CAM_WIDTH)
cam.set(cv2.CAP_PROP_FRAME_HEIGHT, CAM_HEIGHT)
# not all UVC cameras honor below request
cam.set(cv2.CAP_PROP_FPS, CAM_FPS)
# If your camera sends other than MJPEG, change below
cam.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*"MJPG"))
elif src_type == "Video":
# Assumption: src_name will be valid
cam = cv2.VideoCapture(src_name)
# inference stats
fps = 0 # camera fps
inf_fps = 0 # inference fps
dropped_fps = 0 # dropped frame fps
cam_start_time = time()
# ovms connection
channel = grpc.insecure_channel("{}:{}".format(ip, port))
stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
request = predict_pb2.PredictRequest()
    # Note: please use the same model name that the OVMS docker container was launched with
request.model_spec.name = network_name
global exit_ok
while exit_ok == False:
ret, frame = cam.read()
if src_type == "Video":
# restart the video file when it reaches the end
if not ret:
cam.set(cv2.CAP_PROP_POS_FRAMES, 0)
continue
# normalize the video frame dimension to that of the camera
else:
            # to keep frame-inferencing parity with the cameras, let's sleep
            # here to match the CAM_FPS rate
sleep((1000 / CAM_FPS) / 1000)
# enable below line to keep video file & camera output window dimensions the same
# frame = cv2.resize(frame, (CAM_WIDTH, CAM_HEIGHT))
fps = fps + 1
if (time() - cam_start_time) * 1000 >= 1000:
log.warning('{}{} fps: {}, Inf fps: {}, dropped fps: {}'
.format(src_type, thr_id, fps, inf_fps, dropped_fps))
fps = 0
inf_fps = 0
dropped_fps = 0
cam_start_time = time()
# resize the frame to what network input layer expects it to be
image = cv2.resize(frame, (input_dimension, input_dimension))
image = image.transpose(2, 0, 1).reshape(1, 3, input_dimension, input_dimension)
image = image.astype('float32')
inf_time = time()
# send the input as protobuf
request.inputs[input_layer].CopyFrom(
make_tensor_proto(image, shape=None))
try:
result = stub.Predict(request, 10.0)
except Exception as e:
log.error('Caught exception {}'.format(e))
cam.release()
return
duration = time() - inf_time
# decode the received output as protobuf
res = make_ndarray(result.outputs[output_layer])
if not res.any():
log.error('Thr{}: Predictions came back with wrong output layer name'.format(thr_id))
dropped_fps = dropped_fps + 1
disp_buf[thr_id] = frame
else:
log.debug('Predictions came back fine')
inf_fps = inf_fps + 1
disp_buf[thr_id] = parse_output(thr_id, res, frame)
# while exit_ok == False
cam.release()
log.warning('Exiting thread {}'.format(thr_id))
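# Minimal round-trip sketch of the protobuf conversion used in the loop above:
# make_tensor_proto wraps a numpy array for the gRPC request and make_ndarray
# unwraps the response (the array shape/dtype here are illustrative only).
def _tensor_proto_roundtrip_demo():
    import numpy as np
    arr = np.zeros((1, 3, 300, 300), dtype='float32')
    proto = make_tensor_proto(arr, shape=None)  # numpy -> TensorProto
    assert make_ndarray(proto).shape == arr.shape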
#####################################################################################
def main():
    log.basicConfig(format="[%(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
args = build_argparser().parse_args()
num_cam = args.num_cameras if (args.num_cameras) else 0
vid_src = args.file
network_name = args.network_name
input_layer = args.input_layer
output_layer = args.output_layer
input_dimension = args.frame_size
ip_addr = args.ip
port_no = args.port
if not args.file and not args.num_cameras:
log.error('Please supply either the camera or the video file. Try -f for options')
return
if not ip_addr or not port_no:
log.error('Please supply valid IP and/or port number of OVMS server')
return
video_files = []
if vid_src:
if os.path.isdir(vid_src):
for r, d, f in os.walk(vid_src):
for f_ in f:
# only mp4 files supported as of now
if '.mp4' in f_:
video_files.append(r + f_)
elif os.path.isfile(vid_src):
if '.mp4' in vid_src:
video_files.append(vid_src)
# thread management
thr = [None] * (num_cam + len(video_files))
# display buffers shared between camera threads
disp_buf = {}
    # Known issue: depending on the USB enumeration, camera nodes need not be
    # in sequence. Please pass the device node info through a file or the command
    # line if that happens on your system.
for i in range(num_cam):
disp_buf[i] = None
thr[i] = threading.Thread(target=thread_function,
args=(i, network_name, input_layer, output_layer, input_dimension,
ip_addr, port_no, disp_buf, SRC_TYPE[0], None))
thr[i].start()
for i in range(num_cam, num_cam + len(video_files)):
disp_buf[i] = None
thr[i] = threading.Thread(target=thread_function,
args=(i, network_name, input_layer, output_layer, input_dimension,
ip_addr, port_no, disp_buf, SRC_TYPE[1], video_files[i - num_cam]))
thr[i].start()
    # For whatever reason, cv2.imshow() doesn't work from threads. Hence we shove the
    # inferred data to the main thread to display.
global exit_ok
while exit_ok == False:
for i in range(num_cam + len(video_files)):
if disp_buf[i] is not None:
cv2.imshow('Predictions {}'.format(i), disp_buf[i])
disp_buf[i] = None
# exit the program if 'q' is pressed on any window
if cv2.waitKey(1) == ord('q'):
exit_ok = True
break
# wait for all the threads to join
for i in range(num_cam):
thr[i].join()
# close all open windows
cv2.destroyAllWindows()
log.warning('Good Bye!')
if __name__ == '__main__':
sys.exit(main() or 0)
| 36.43299
| 91
| 0.658083
|
from __future__ import print_function
from argparse import ArgumentParser, SUPPRESS
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc
from time import time, sleep
import sys
import os
import cv2
import grpc
import threading
import logging as log
from tensorflow import make_tensor_proto, make_ndarray
CLASSES = ["None", "Pedestrian", "Vehicle", "Bike", "Other"]
COLORS = [(255, 255, 255), (255, 0, 0), (0, 255, 0), (0, 0, 255), (128, 128, 128)]
SRC_TYPE = ["Camera", "Video"]
exit_ok = False
CAM_WIDTH = 640
CAM_HEIGHT = 480
CAM_FPS = 30
CONFIDENCE_THRESHOLD = 0.75
        image = image.transpose(2, 0, 1).reshape(1, 3, input_dimension, input_dimension)
image = image.astype('float32')
inf_time = time()
request.inputs[input_layer].CopyFrom(
make_tensor_proto(image, shape=None))
try:
result = stub.Predict(request, 10.0)
except Exception as e:
log.error('Caught exception {}'.format(e))
cam.release()
return
duration = time() - inf_time
res = make_ndarray(result.outputs[output_layer])
if not res.any():
log.error('Thr{}: Predictions came back with wrong output layer name'.format(thr_id))
dropped_fps = dropped_fps + 1
disp_buf[thr_id] = frame
else:
log.debug('Predictions came back fine')
inf_fps = inf_fps + 1
disp_buf[thr_id] = parse_output(thr_id, res, frame)
cam.release()
log.warning('Exiting thread {}'.format(thr_id))
| true
| true
|
790b457c2b67f172a92496547778a9b74a81f161
| 5,460
|
py
|
Python
|
improver_tests/ensemble_copula_coupling/ensemble_copula_coupling/test_ConvertLocationAndScaleParameters.py
|
LaurenceBeard/improver
|
b7cfe44f3a802d2a3d65f76a325215033c9de074
|
[
"BSD-3-Clause"
] | null | null | null |
improver_tests/ensemble_copula_coupling/ensemble_copula_coupling/test_ConvertLocationAndScaleParameters.py
|
LaurenceBeard/improver
|
b7cfe44f3a802d2a3d65f76a325215033c9de074
|
[
"BSD-3-Clause"
] | 2
|
2020-03-30T17:25:18.000Z
|
2021-06-25T15:30:29.000Z
|
improver_tests/ensemble_copula_coupling/ensemble_copula_coupling/test_ConvertLocationAndScaleParameters.py
|
LaurenceBeard/improver
|
b7cfe44f3a802d2a3d65f76a325215033c9de074
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2019 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Unit tests for ConvertLocationAndScaleParameters
"""
import unittest
import numpy as np
from scipy import stats
from iris.tests import IrisTest
from improver.ensemble_copula_coupling.ensemble_copula_coupling import (
ConvertLocationAndScaleParameters as Plugin)
class Test__init__(IrisTest):
"""Test the __init__ method."""
def test_valid_distribution(self):
"""Test for a valid distribution."""
plugin = Plugin(distribution="norm")
self.assertEqual(plugin.distribution, stats.norm)
self.assertEqual(plugin.shape_parameters, [])
def test_valid_distribution_with_shape_parameters(self):
"""Test for a valid distribution with shape parameters."""
plugin = Plugin(distribution="truncnorm", shape_parameters=[0, np.inf])
self.assertEqual(plugin.distribution, stats.truncnorm)
self.assertEqual(plugin.shape_parameters, [0, np.inf])
def test_invalid_distribution(self):
"""Test for an invalid distribution."""
msg = "The distribution requested"
with self.assertRaisesRegex(AttributeError, msg):
Plugin(distribution="elephant")
class Test__repr__(IrisTest):
"""Test string representation of plugin."""
def test_basic(self):
"""Test string representation"""
expected_string = ("<ConvertLocationAndScaleParameters: "
"distribution: norm; shape_parameters: []>")
result = str(Plugin())
self.assertEqual(result, expected_string)
class Test__rescale_shape_parameters(IrisTest):
"""Test the _rescale_shape_parameters"""
def setUp(self):
"""Set up values for testing."""
self.location_parameter = np.array([-1, 0, 1])
self.scale_parameter = np.array([1, 1.5, 2])
def test_truncated_at_zero(self):
"""Test scaling shape parameters implying a truncation at zero."""
expected = [np.array([1., 0, -0.5]),
np.array([np.inf, np.inf, np.inf])]
shape_parameters = [0, np.inf]
plugin = Plugin(distribution="truncnorm",
shape_parameters=shape_parameters)
plugin._rescale_shape_parameters(
self.location_parameter, self.scale_parameter)
self.assertArrayAlmostEqual(plugin.shape_parameters, expected)
def test_discrete_shape_parameters(self):
"""Test scaling discrete shape parameters."""
expected = [np.array([-3, -2.666667, -2.5]), np.array([7, 4, 2.5])]
shape_parameters = [-4, 6]
plugin = Plugin(distribution="truncnorm",
shape_parameters=shape_parameters)
plugin._rescale_shape_parameters(
self.location_parameter, self.scale_parameter)
self.assertArrayAlmostEqual(plugin.shape_parameters, expected)
def test_alternative_distribution(self):
"""Test specifying a distribution other than truncated normal. In
this instance, no rescaling is applied."""
shape_parameters = [0, np.inf]
plugin = Plugin(distribution="norm",
shape_parameters=shape_parameters)
plugin._rescale_shape_parameters(
self.location_parameter, self.scale_parameter)
self.assertArrayEqual(plugin.shape_parameters, shape_parameters)
def test_no_shape_parameters_exception(self):
"""Test raising an exception when shape parameters are not specified
for the truncated normal distribution."""
plugin = Plugin(distribution="truncnorm")
msg = "For the truncated normal distribution"
with self.assertRaisesRegex(ValueError, msg):
plugin._rescale_shape_parameters(
self.location_parameter, self.scale_parameter)
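# Worked example of the rescaling these tests exercise (a sketch): scipy's
# truncnorm takes its bounds in standard-deviation units, so the plugin maps
# a = (lower - loc) / scale and b = (upper - loc) / scale.
def _rescale_demo():
    location = np.array([-1, 0, 1])
    scale = np.array([1, 1.5, 2])
    a = (0 - location) / scale  # -> [1.0, 0.0, -0.5], as in test_truncated_at_zero
    b = (np.inf - location) / scale  # -> [inf, inf, inf]
    return a, b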
if __name__ == '__main__':
unittest.main()
| 41.679389
| 79
| 0.694139
|
import unittest
import numpy as np
from scipy import stats
from iris.tests import IrisTest
from improver.ensemble_copula_coupling.ensemble_copula_coupling import (
ConvertLocationAndScaleParameters as Plugin)
class Test__init__(IrisTest):
def test_valid_distribution(self):
plugin = Plugin(distribution="norm")
self.assertEqual(plugin.distribution, stats.norm)
self.assertEqual(plugin.shape_parameters, [])
def test_valid_distribution_with_shape_parameters(self):
plugin = Plugin(distribution="truncnorm", shape_parameters=[0, np.inf])
self.assertEqual(plugin.distribution, stats.truncnorm)
self.assertEqual(plugin.shape_parameters, [0, np.inf])
def test_invalid_distribution(self):
msg = "The distribution requested"
with self.assertRaisesRegex(AttributeError, msg):
Plugin(distribution="elephant")
class Test__repr__(IrisTest):
def test_basic(self):
expected_string = ("<ConvertLocationAndScaleParameters: "
"distribution: norm; shape_parameters: []>")
result = str(Plugin())
self.assertEqual(result, expected_string)
class Test__rescale_shape_parameters(IrisTest):
def setUp(self):
self.location_parameter = np.array([-1, 0, 1])
self.scale_parameter = np.array([1, 1.5, 2])
def test_truncated_at_zero(self):
expected = [np.array([1., 0, -0.5]),
np.array([np.inf, np.inf, np.inf])]
shape_parameters = [0, np.inf]
plugin = Plugin(distribution="truncnorm",
shape_parameters=shape_parameters)
plugin._rescale_shape_parameters(
self.location_parameter, self.scale_parameter)
self.assertArrayAlmostEqual(plugin.shape_parameters, expected)
def test_discrete_shape_parameters(self):
expected = [np.array([-3, -2.666667, -2.5]), np.array([7, 4, 2.5])]
shape_parameters = [-4, 6]
plugin = Plugin(distribution="truncnorm",
shape_parameters=shape_parameters)
plugin._rescale_shape_parameters(
self.location_parameter, self.scale_parameter)
self.assertArrayAlmostEqual(plugin.shape_parameters, expected)
def test_alternative_distribution(self):
shape_parameters = [0, np.inf]
plugin = Plugin(distribution="norm",
shape_parameters=shape_parameters)
plugin._rescale_shape_parameters(
self.location_parameter, self.scale_parameter)
self.assertArrayEqual(plugin.shape_parameters, shape_parameters)
def test_no_shape_parameters_exception(self):
plugin = Plugin(distribution="truncnorm")
msg = "For the truncated normal distribution"
with self.assertRaisesRegex(ValueError, msg):
plugin._rescale_shape_parameters(
self.location_parameter, self.scale_parameter)
if __name__ == '__main__':
unittest.main()
| true
| true
|
790b4699dd5dd31c9f37723a99fcf6ed5a48c298
| 2,002
|
py
|
Python
|
tests/unittests/completion/complete.py
|
wilzbach/storyscript-sls
|
d71d74a53852ebae54bdaab341678b04f2775411
|
[
"Apache-2.0"
] | null | null | null |
tests/unittests/completion/complete.py
|
wilzbach/storyscript-sls
|
d71d74a53852ebae54bdaab341678b04f2775411
|
[
"Apache-2.0"
] | null | null | null |
tests/unittests/completion/complete.py
|
wilzbach/storyscript-sls
|
d71d74a53852ebae54bdaab341678b04f2775411
|
[
"Apache-2.0"
] | null | null | null |
from unittest.mock import call
from sls.completion.complete import Completion
from sls.completion.context import CompletionContext
from sls.document import Document
import sls.sentry as sentry
def test_complete(magic, patch):
patch.init(Document)
patch.many(Document, ["line_to_cursor", "word_to_cursor"])
patch.many(CompletionContext, ["_blocks"])
cache = magic()
c = Completion(plugins=[], context_cache=cache)
doc = Document()
ws = magic()
pos = magic()
result = c.complete(ws, doc, pos)
assert isinstance(cache.update.call_args[0][0], CompletionContext)
assert result == {
"isIncomplete": False,
"items": [],
}
def test_complete_plugin(magic, patch):
patch.init(Document)
patch.many(Document, ["line_to_cursor", "word_to_cursor"])
patch.many(CompletionContext, ["_blocks"])
my_plugin = magic()
i1 = {"label": "i1"}
i2 = {"label": "i2"}
cache = magic()
my_plugin.complete.return_value = [i1, i2]
c = Completion(plugins=[my_plugin], context_cache=cache)
doc = Document()
ws = magic()
pos = magic()
result = c.complete(ws, doc, pos)
assert isinstance(my_plugin.complete.call_args[0][0], CompletionContext)
assert result == {
"isIncomplete": False,
"items": [i1, i2],
}
def test_complete_exec(magic, patch):
patch.init(Document)
patch.many(Document, ["line_to_cursor", "word_to_cursor"])
patch.many(CompletionContext, ["_blocks"])
patch.object(sentry, "handle_exception")
cache = magic()
plugin = magic()
ex = Exception("e")
plugin.complete.side_effect = ex
c = Completion(plugins=[plugin], context_cache=cache)
doc = Document()
ws = magic()
pos = magic()
result = c.complete(ws, doc, pos)
assert isinstance(cache.update.call_args[0][0], CompletionContext)
assert result == {
"isIncomplete": False,
"items": [],
}
assert sentry.handle_exception.call_args == call(ex)
| 29.880597
| 76
| 0.653846
|
from unittest.mock import call
from sls.completion.complete import Completion
from sls.completion.context import CompletionContext
from sls.document import Document
import sls.sentry as sentry
def test_complete(magic, patch):
patch.init(Document)
patch.many(Document, ["line_to_cursor", "word_to_cursor"])
patch.many(CompletionContext, ["_blocks"])
cache = magic()
c = Completion(plugins=[], context_cache=cache)
doc = Document()
ws = magic()
pos = magic()
result = c.complete(ws, doc, pos)
assert isinstance(cache.update.call_args[0][0], CompletionContext)
assert result == {
"isIncomplete": False,
"items": [],
}
def test_complete_plugin(magic, patch):
patch.init(Document)
patch.many(Document, ["line_to_cursor", "word_to_cursor"])
patch.many(CompletionContext, ["_blocks"])
my_plugin = magic()
i1 = {"label": "i1"}
i2 = {"label": "i2"}
cache = magic()
my_plugin.complete.return_value = [i1, i2]
c = Completion(plugins=[my_plugin], context_cache=cache)
doc = Document()
ws = magic()
pos = magic()
result = c.complete(ws, doc, pos)
assert isinstance(my_plugin.complete.call_args[0][0], CompletionContext)
assert result == {
"isIncomplete": False,
"items": [i1, i2],
}
def test_complete_exec(magic, patch):
patch.init(Document)
patch.many(Document, ["line_to_cursor", "word_to_cursor"])
patch.many(CompletionContext, ["_blocks"])
patch.object(sentry, "handle_exception")
cache = magic()
plugin = magic()
ex = Exception("e")
plugin.complete.side_effect = ex
c = Completion(plugins=[plugin], context_cache=cache)
doc = Document()
ws = magic()
pos = magic()
result = c.complete(ws, doc, pos)
assert isinstance(cache.update.call_args[0][0], CompletionContext)
assert result == {
"isIncomplete": False,
"items": [],
}
assert sentry.handle_exception.call_args == call(ex)
| true
| true
|
790b47d371223d86c745da820c22453f2e1f195c
| 14,205
|
py
|
Python
|
google/cloud/vision/v1p3beta1/vision-v1p3beta1-py/google/cloud/vision_v1p3beta1/services/image_annotator/transports/grpc.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 7
|
2021-02-21T10:39:41.000Z
|
2021-12-07T07:31:28.000Z
|
google/cloud/vision/v1p3beta1/vision-v1p3beta1-py/google/cloud/vision_v1p3beta1/services/image_annotator/transports/grpc.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 6
|
2021-02-02T23:46:11.000Z
|
2021-11-15T01:46:02.000Z
|
google/cloud/vision/v1p3beta1/vision-v1p3beta1-py/google/cloud/vision_v1p3beta1/services/image_annotator/transports/grpc.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 4
|
2021-01-28T23:25:45.000Z
|
2021-08-30T01:55:16.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.vision_v1p3beta1.types import image_annotator
from google.longrunning import operations_pb2 # type: ignore
from .base import ImageAnnotatorTransport, DEFAULT_CLIENT_INFO
class ImageAnnotatorGrpcTransport(ImageAnnotatorTransport):
"""gRPC backend transport for ImageAnnotator.
Service that performs Google Cloud Vision API detection tasks
over client images, such as face, landmark, logo, label, and
text detection. The ImageAnnotator service returns detected
entities from the images.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(self, *,
host: str = 'vision.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(cls,
host: str = 'vision.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Sanity check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsClient(
self.grpc_channel
)
# Return the client from cache.
return self._operations_client
@property
def batch_annotate_images(self) -> Callable[
[image_annotator.BatchAnnotateImagesRequest],
image_annotator.BatchAnnotateImagesResponse]:
r"""Return a callable for the batch annotate images method over gRPC.
Run image detection and annotation for a batch of
images.
Returns:
Callable[[~.BatchAnnotateImagesRequest],
~.BatchAnnotateImagesResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'batch_annotate_images' not in self._stubs:
self._stubs['batch_annotate_images'] = self.grpc_channel.unary_unary(
'/google.cloud.vision.v1p3beta1.ImageAnnotator/BatchAnnotateImages',
request_serializer=image_annotator.BatchAnnotateImagesRequest.serialize,
response_deserializer=image_annotator.BatchAnnotateImagesResponse.deserialize,
)
return self._stubs['batch_annotate_images']
@property
def async_batch_annotate_files(self) -> Callable[
[image_annotator.AsyncBatchAnnotateFilesRequest],
operations_pb2.Operation]:
r"""Return a callable for the async batch annotate files method over gRPC.
Run asynchronous image detection and annotation for a list of
generic files, such as PDF files, which may contain multiple
pages and multiple images per page. Progress and results can be
retrieved through the ``google.longrunning.Operations``
interface. ``Operation.metadata`` contains ``OperationMetadata``
(metadata). ``Operation.response`` contains
``AsyncBatchAnnotateFilesResponse`` (results).
Returns:
Callable[[~.AsyncBatchAnnotateFilesRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'async_batch_annotate_files' not in self._stubs:
self._stubs['async_batch_annotate_files'] = self.grpc_channel.unary_unary(
'/google.cloud.vision.v1p3beta1.ImageAnnotator/AsyncBatchAnnotateFiles',
request_serializer=image_annotator.AsyncBatchAnnotateFilesRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['async_batch_annotate_files']
def close(self):
self.grpc_channel.close()
__all__ = (
'ImageAnnotatorGrpcTransport',
)
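# A minimal usage sketch, assuming the companion ImageAnnotatorClient in
# this package accepts a pre-built transport instance; a real run needs
# valid application default credentials and network access.
if __name__ == '__main__':
    from google.cloud.vision_v1p3beta1 import ImageAnnotatorClient

    transport = ImageAnnotatorGrpcTransport(host='vision.googleapis.com')
    client = ImageAnnotatorClient(transport=transport)
    # The client reuses the channel owned by our transport.
    print(client.transport is transport)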
| 45.822581
| 94
| 0.638226
|
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers
from google.api_core import operations_v1
from google.api_core import gapic_v1
import google.auth
from google.auth import credentials as ga_credentials
from google.auth.transport.grpc import SslCredentials
import grpc
from google.cloud.vision_v1p3beta1.types import image_annotator
from google.longrunning import operations_pb2
from .base import ImageAnnotatorTransport, DEFAULT_CLIENT_INFO
class ImageAnnotatorGrpcTransport(ImageAnnotatorTransport):
_stubs: Dict[str, Callable]
def __init__(self, *,
host: str = 'vision.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
credentials = False
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(cls,
host: str = 'vision.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs) -> grpc.Channel:
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs
)
@property
def grpc_channel(self) -> grpc.Channel:
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsClient:
if self._operations_client is None:
self._operations_client = operations_v1.OperationsClient(
self.grpc_channel
)
return self._operations_client
@property
def batch_annotate_images(self) -> Callable[
[image_annotator.BatchAnnotateImagesRequest],
image_annotator.BatchAnnotateImagesResponse]:
if 'batch_annotate_images' not in self._stubs:
self._stubs['batch_annotate_images'] = self.grpc_channel.unary_unary(
'/google.cloud.vision.v1p3beta1.ImageAnnotator/BatchAnnotateImages',
request_serializer=image_annotator.BatchAnnotateImagesRequest.serialize,
response_deserializer=image_annotator.BatchAnnotateImagesResponse.deserialize,
)
return self._stubs['batch_annotate_images']
@property
def async_batch_annotate_files(self) -> Callable[
[image_annotator.AsyncBatchAnnotateFilesRequest],
operations_pb2.Operation]:
if 'async_batch_annotate_files' not in self._stubs:
self._stubs['async_batch_annotate_files'] = self.grpc_channel.unary_unary(
'/google.cloud.vision.v1p3beta1.ImageAnnotator/AsyncBatchAnnotateFiles',
request_serializer=image_annotator.AsyncBatchAnnotateFilesRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['async_batch_annotate_files']
def close(self):
self.grpc_channel.close()
__all__ = (
'ImageAnnotatorGrpcTransport',
)
| true
| true
|
790b483c1a649cac7ac504a8b759324430b31b1f
| 8,241
|
py
|
Python
|
cumulusci/robotframework/CumulusCI.py
|
jdominiczak/CumulusCI
|
f706c1906f9eb6d604c571a9dd16f5d0ed38599f
|
[
"BSD-3-Clause"
] | null | null | null |
cumulusci/robotframework/CumulusCI.py
|
jdominiczak/CumulusCI
|
f706c1906f9eb6d604c571a9dd16f5d0ed38599f
|
[
"BSD-3-Clause"
] | null | null | null |
cumulusci/robotframework/CumulusCI.py
|
jdominiczak/CumulusCI
|
f706c1906f9eb6d604c571a9dd16f5d0ed38599f
|
[
"BSD-3-Clause"
] | null | null | null |
import logging
from robot.api import logger
from robot.libraries.BuiltIn import BuiltIn
from simple_salesforce import Salesforce
from cumulusci.cli.config import CliRuntime
from cumulusci.core.config import TaskConfig
from cumulusci.core.exceptions import TaskOptionsError
from cumulusci.core.tasks import CURRENT_TASK
from cumulusci.core.utils import import_global
from cumulusci.robotframework.utils import set_pdb_trace
from cumulusci.tasks.robotframework.robotframework import Robot
class CumulusCI(object):
""" Library for accessing CumulusCI for the local git project
This library allows Robot Framework tests to access credentials to a
Salesforce org created by CumulusCI, including Scratch Orgs. It also
exposes the core logic of CumulusCI including interactions with the
Salesforce API's and project specific configuration including custom
and customized tasks and flows.
Initialization requires a single argument, the org name for the target
CumulusCI org. If running your tests via cci's robot task (recommended),
you can initialize the library in your tests taking advantage of the
variable set by the robot task:
| ``*** Settings ***``
|
    | Library    cumulusci.robotframework.CumulusCI    ${ORG}
"""
ROBOT_LIBRARY_SCOPE = "GLOBAL"
def __init__(self, org_name=None):
if not org_name:
org_name = "dev"
self.org_name = org_name
self._project_config = None
self._org = None
self._sf = None
self._tooling = None
# Turn off info logging of all http requests
logging.getLogger("requests.packages.urllib3.connectionpool").setLevel(
logging.WARN
)
@property
def project_config(self):
if self._project_config is None:
if CURRENT_TASK.stack and isinstance(CURRENT_TASK.stack[0], Robot):
# If CumulusCI is running a task, use that task's config
return CURRENT_TASK.stack[0].project_config
else:
logger.console("Initializing CumulusCI config\n")
self._project_config = CliRuntime().project_config
return self._project_config
def set_project_config(self, project_config):
logger.console("\n")
self._project_config = project_config
@property
def keychain(self):
return self.project_config.keychain
@property
def org(self):
if self._org is None:
if CURRENT_TASK.stack and isinstance(CURRENT_TASK.stack[0], Robot):
# If CumulusCI is running a task, use that task's org
return CURRENT_TASK.stack[0].org_config
else:
self._org = self.keychain.get_org(self.org_name)
return self._org
@property
def sf(self):
if self._sf is None:
self._sf = self._init_api()
return self._sf
@property
def tooling(self):
if self._tooling is None:
self._tooling = self._init_api("tooling/")
return self._tooling
def set_login_url(self):
""" Sets the LOGIN_URL variable in the suite scope which will
automatically log into the target Salesforce org.
Typically, this is run during Suite Setup
"""
BuiltIn().set_suite_variable("${LOGIN_URL}", self.org.start_url)
def get_org_info(self):
""" Returns a dictionary of the org information for the current target
Salesforce org
"""
return self.org.config
def login_url(self, org=None):
""" Returns the login url which will automatically log into the target
Salesforce org. By default, the org_name passed to the library
constructor is used but this can be overridden with the org option
to log into a different org.
"""
if org is None:
org = self.org
else:
org = self.keychain.get_org(org)
return org.start_url
def get_namespace_prefix(self, package=None):
""" Returns the namespace prefix (including __) for the specified package name.
(Defaults to project__package__name_managed from the current project config.)
Returns an empty string if the package is not installed as a managed package.
"""
result = ""
if package is None:
package = self.project_config.project__package__name_managed
packages = self.tooling.query(
"SELECT SubscriberPackage.NamespacePrefix, SubscriberPackage.Name "
"FROM InstalledSubscriberPackage"
)
match = [
p for p in packages["records"] if p["SubscriberPackage"]["Name"] == package
]
if match:
result = match[0]["SubscriberPackage"]["NamespacePrefix"] + "__"
return result
def run_task(self, task_name, **options):
""" Runs a named CumulusCI task for the current project with optional
support for overriding task options via kwargs.
Examples:
| =Keyword= | =task_name= | =task_options= | =comment= |
| Run Task | deploy | | Run deploy with standard options |
| Run Task | deploy | path=path/to/some/metadata | Run deploy with custom path |
"""
task_config = self.project_config.get_task(task_name)
class_path = task_config.class_path
logger.console("\n")
task_class, task_config = self._init_task(class_path, options, task_config)
return self._run_task(task_class, task_config)
def run_task_class(self, class_path, **options):
""" Runs a CumulusCI task class with task options via kwargs.
Use this keyword to run logic from CumulusCI tasks which have not
been configured in the project's cumulusci.yml file. This is
most useful in cases where a test needs to use task logic for
logic unique to the test and thus not worth making into a named
task for the project
Examples:
| =Keyword= | =task_class= | =task_options= |
| Run Task Class | cumulusci.task.utils.DownloadZip | url=http://test.com/test.zip dir=test_zip |
"""
logger.console("\n")
task_class, task_config = self._init_task(class_path, options, TaskConfig())
return self._run_task(task_class, task_config)
def _init_api(self, base_url=None):
api_version = self.project_config.project__package__api_version
rv = Salesforce(
instance=self.org.instance_url.replace("https://", ""),
session_id=self.org.access_token,
version=api_version,
)
if base_url is not None:
rv.base_url += base_url
return rv
def _init_task(self, class_path, options, task_config):
task_class = import_global(class_path)
task_config = self._parse_task_options(options, task_class, task_config)
return task_class, task_config
def _parse_task_options(self, options, task_class, task_config):
if "options" not in task_config.config:
task_config.config["options"] = {}
# Parse options and add to task config
if options:
for name, value in options.items():
# Validate the option
if name not in task_class.task_options:
raise TaskOptionsError(
'Option "{}" is not available for task {}'.format(
name, task_class
)
)
# Override the option in the task config
task_config.config["options"][name] = value
return task_config
def _run_task(self, task_class, task_config):
task = task_class(self.project_config, task_config, org_config=self.org)
task()
return task.return_values
def debug(self):
"""Pauses execution and enters the Python debugger."""
set_pdb_trace()
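# A usage sketch for driving this library from plain Python rather than
# Robot Framework; assumes a CumulusCI project with a configured "dev" org
# and a standard "deploy" task.
if __name__ == '__main__':
    cci = CumulusCI(org_name='dev')
    print(cci.login_url())   # start URL that logs into the target org
    cci.run_task('deploy')   # run a named project task with default options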
| 38.509346
| 109
| 0.62638
|
import logging
from robot.api import logger
from robot.libraries.BuiltIn import BuiltIn
from simple_salesforce import Salesforce
from cumulusci.cli.config import CliRuntime
from cumulusci.core.config import TaskConfig
from cumulusci.core.exceptions import TaskOptionsError
from cumulusci.core.tasks import CURRENT_TASK
from cumulusci.core.utils import import_global
from cumulusci.robotframework.utils import set_pdb_trace
from cumulusci.tasks.robotframework.robotframework import Robot
class CumulusCI(object):
ROBOT_LIBRARY_SCOPE = "GLOBAL"
def __init__(self, org_name=None):
if not org_name:
org_name = "dev"
self.org_name = org_name
self._project_config = None
self._org = None
self._sf = None
self._tooling = None
logging.getLogger("requests.packages.urllib3.connectionpool").setLevel(
logging.WARN
)
@property
def project_config(self):
if self._project_config is None:
if CURRENT_TASK.stack and isinstance(CURRENT_TASK.stack[0], Robot):
return CURRENT_TASK.stack[0].project_config
else:
logger.console("Initializing CumulusCI config\n")
self._project_config = CliRuntime().project_config
return self._project_config
def set_project_config(self, project_config):
logger.console("\n")
self._project_config = project_config
@property
def keychain(self):
return self.project_config.keychain
@property
def org(self):
if self._org is None:
if CURRENT_TASK.stack and isinstance(CURRENT_TASK.stack[0], Robot):
# If CumulusCI is running a task, use that task's org
return CURRENT_TASK.stack[0].org_config
else:
self._org = self.keychain.get_org(self.org_name)
return self._org
@property
def sf(self):
if self._sf is None:
self._sf = self._init_api()
return self._sf
@property
def tooling(self):
if self._tooling is None:
self._tooling = self._init_api("tooling/")
return self._tooling
def set_login_url(self):
BuiltIn().set_suite_variable("${LOGIN_URL}", self.org.start_url)
def get_org_info(self):
return self.org.config
def login_url(self, org=None):
if org is None:
org = self.org
else:
org = self.keychain.get_org(org)
return org.start_url
def get_namespace_prefix(self, package=None):
result = ""
if package is None:
package = self.project_config.project__package__name_managed
packages = self.tooling.query(
"SELECT SubscriberPackage.NamespacePrefix, SubscriberPackage.Name "
"FROM InstalledSubscriberPackage"
)
match = [
p for p in packages["records"] if p["SubscriberPackage"]["Name"] == package
]
if match:
result = match[0]["SubscriberPackage"]["NamespacePrefix"] + "__"
return result
def run_task(self, task_name, **options):
task_config = self.project_config.get_task(task_name)
class_path = task_config.class_path
logger.console("\n")
task_class, task_config = self._init_task(class_path, options, task_config)
return self._run_task(task_class, task_config)
def run_task_class(self, class_path, **options):
logger.console("\n")
task_class, task_config = self._init_task(class_path, options, TaskConfig())
return self._run_task(task_class, task_config)
def _init_api(self, base_url=None):
api_version = self.project_config.project__package__api_version
rv = Salesforce(
instance=self.org.instance_url.replace("https://", ""),
session_id=self.org.access_token,
version=api_version,
)
if base_url is not None:
rv.base_url += base_url
return rv
def _init_task(self, class_path, options, task_config):
task_class = import_global(class_path)
task_config = self._parse_task_options(options, task_class, task_config)
return task_class, task_config
def _parse_task_options(self, options, task_class, task_config):
if "options" not in task_config.config:
task_config.config["options"] = {}
if options:
for name, value in options.items():
if name not in task_class.task_options:
raise TaskOptionsError(
'Option "{}" is not available for task {}'.format(
name, task_class
)
)
task_config.config["options"][name] = value
return task_config
def _run_task(self, task_class, task_config):
task = task_class(self.project_config, task_config, org_config=self.org)
task()
return task.return_values
def debug(self):
set_pdb_trace()
| true
| true
|
790b491e6f3712b5dc5c12f2a5c793aae2a5f057
| 2,718
|
py
|
Python
|
tests/api/test_contacts_endpoint.py
|
mathematicalmichael/cannlytics
|
acc2b8c73fd3689283b41bd275a1885fe37153a6
|
[
"MIT"
] | null | null | null |
tests/api/test_contacts_endpoint.py
|
mathematicalmichael/cannlytics
|
acc2b8c73fd3689283b41bd275a1885fe37153a6
|
[
"MIT"
] | null | null | null |
tests/api/test_contacts_endpoint.py
|
mathematicalmichael/cannlytics
|
acc2b8c73fd3689283b41bd275a1885fe37153a6
|
[
"MIT"
] | null | null | null |
"""
Test Contacts API Endpoint | Cannlytics API
Author: Keegan Skeate
Contact: <keegan@cannlytics.com>
Created: 7/19/2021
Updated: 7/19/2021
License: MIT License <https://opensource.org/licenses/MIT>
"""
import os
import requests
from dotenv import load_dotenv
# Test using development server.
BASE = 'http://127.0.0.1:8000/api'
# Uncomment to test with production server.
# BASE = 'https://console.cannlytics.com/api'
# Load your API key.
load_dotenv('../../.env')
API_KEY = os.getenv('CANNLYTICS_API_KEY')
# Pass your API key through the authorization header as a bearer token.
HEADERS = {
'Authorization': 'Bearer %s' % API_KEY,
'Content-type': 'application/json',
}
# Identify the organization that you are working with.
ORG_ID = 'test-company'
# Define the endpoint.
ENDPOINT = 'contacts'
#------------------------------------------------------------------------------
# Create a contact.
#------------------------------------------------------------------------------
data = {
'address': '',
'city': '',
'contact_id': 'TEST',
'county': '',
'email': '',
'latitude': '',
'longitude': '',
'organization': 'Cannlytics Test Contact',
'phone': '',
'state': '',
'street': '',
'website': '',
'zip_code': ''
}
url = f'{BASE}/{ENDPOINT}?organization_id={ORG_ID}'
response = requests.post(url, json=data, headers=HEADERS)
assert response.status_code == 200
print('Created:', response.json()['data'])
#------------------------------------------------------------------------------
# Get contacts.
#------------------------------------------------------------------------------
url = f'{BASE}/{ENDPOINT}?organization_id={ORG_ID}'
response = requests.get(url, headers=HEADERS)
assert response.status_code == 200
data = response.json()['data']
print('Found:', len(data))
#------------------------------------------------------------------------------
# Update a contact.
#------------------------------------------------------------------------------
data = {
'contact_id': 'TEST',
'city': 'Tulsa',
'state': 'OK',
}
url = f'{BASE}/{ENDPOINT}?organization_id={ORG_ID}'
response = requests.post(url, json=data, headers=HEADERS)
assert response.status_code == 200
print('Updated:', response.json()['data'])
#------------------------------------------------------------------------------
# Delete a contact.
#------------------------------------------------------------------------------
data = {
'contact_id': 'TEST',
}
url = f'{BASE}/{ENDPOINT}?organization_id={ORG_ID}'
response = requests.delete(url, json=data, headers=HEADERS)
assert response.status_code == 200
print('Deleted:', response.json()['data'])
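#------------------------------------------------------------------------------
# Helper sketch: the four calls above share one request pattern, which could
# be consolidated as follows (illustrative; not required by the API).
#------------------------------------------------------------------------------
def api_request(method, data=None):
    """Send an authenticated request to the contacts endpoint."""
    url = f'{BASE}/{ENDPOINT}?organization_id={ORG_ID}'
    response = requests.request(method, url, json=data, headers=HEADERS)
    assert response.status_code == 200
    return response.json()['data']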
| 29.543478
| 79
| 0.513245
|
import os
import requests
from dotenv import load_dotenv
BASE = 'http://127.0.0.1:8000/api'
load_dotenv('../../.env')
API_KEY = os.getenv('CANNLYTICS_API_KEY')
HEADERS = {
'Authorization': 'Bearer %s' % API_KEY,
'Content-type': 'application/json',
}
ORG_ID = 'test-company'
ENDPOINT = 'contacts'
data = {
'address': '',
'city': '',
'contact_id': 'TEST',
'county': '',
'email': '',
'latitude': '',
'longitude': '',
'organization': 'Cannlytics Test Contact',
'phone': '',
'state': '',
'street': '',
'website': '',
'zip_code': ''
}
url = f'{BASE}/{ENDPOINT}?organization_id={ORG_ID}'
response = requests.post(url, json=data, headers=HEADERS)
assert response.status_code == 200
print('Created:', response.json()['data'])
url = f'{BASE}/{ENDPOINT}?organization_id={ORG_ID}'
response = requests.get(url, headers=HEADERS)
assert response.status_code == 200
data = response.json()['data']
print('Found:', len(data))
data = {
'contact_id': 'TEST',
'city': 'Tulsa',
'state': 'OK',
}
url = f'{BASE}/{ENDPOINT}?organization_id={ORG_ID}'
response = requests.post(url, json=data, headers=HEADERS)
assert response.status_code == 200
print('Updated:', response.json()['data'])
data = {
'contact_id': 'TEST',
}
url = f'{BASE}/{ENDPOINT}?organization_id={ORG_ID}'
response = requests.delete(url, json=data, headers=HEADERS)
assert response.status_code == 200
print('Deleted:', response.json()['data'])
| true
| true
|
790b49b567e9535f1f3902bdb1b6cb997fb43092
| 9,907
|
py
|
Python
|
tests/system/test_dbapi.py
|
jpburbank/python-spanner-tmp
|
02114181a2baf4b178d4e67e672b45b1d49e835d
|
[
"Apache-2.0"
] | null | null | null |
tests/system/test_dbapi.py
|
jpburbank/python-spanner-tmp
|
02114181a2baf4b178d4e67e672b45b1d49e835d
|
[
"Apache-2.0"
] | null | null | null |
tests/system/test_dbapi.py
|
jpburbank/python-spanner-tmp
|
02114181a2baf4b178d4e67e672b45b1d49e835d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import pickle
import pytest
from google.cloud import spanner_v1
from google.cloud.spanner_dbapi.connection import Connection
from . import _helpers
DATABASE_NAME = "dbapi-txn"
DDL_STATEMENTS = (
"""CREATE TABLE contacts (
contact_id INT64,
first_name STRING(1024),
last_name STRING(1024),
email STRING(1024)
)
PRIMARY KEY (contact_id)""",
)
@pytest.fixture(scope="session")
def raw_database(shared_instance, database_operation_timeout):
    database_id = _helpers.unique_id("dbapi-txn")
pool = spanner_v1.BurstyPool(labels={"testcase": "database_api"})
database = shared_instance.database(
        database_id, ddl_statements=DDL_STATEMENTS, pool=pool,
)
op = database.create()
op.result(database_operation_timeout) # raises on failure / timeout.
yield database
database.drop()
def clear_table(transaction):
transaction.execute_update("DELETE FROM contacts WHERE true")
@pytest.fixture(scope="function")
def dbapi_database(raw_database):
raw_database.run_in_transaction(clear_table)
yield raw_database
raw_database.run_in_transaction(clear_table)
def test_commit(shared_instance, dbapi_database):
"""Test committing a transaction with several statements."""
want_row = (
1,
"updated-first-name",
"last-name",
"test.email_updated@domen.ru",
)
# connect to the test database
conn = Connection(shared_instance, dbapi_database)
cursor = conn.cursor()
# execute several DML statements within one transaction
cursor.execute(
"""
INSERT INTO contacts (contact_id, first_name, last_name, email)
VALUES (1, 'first-name', 'last-name', 'test.email@domen.ru')
"""
)
cursor.execute(
"""
UPDATE contacts
SET first_name = 'updated-first-name'
WHERE first_name = 'first-name'
"""
)
cursor.execute(
"""
UPDATE contacts
SET email = 'test.email_updated@domen.ru'
WHERE email = 'test.email@domen.ru'
"""
)
conn.commit()
# read the resulting data from the database
cursor.execute("SELECT * FROM contacts")
got_rows = cursor.fetchall()
conn.commit()
assert got_rows == [want_row]
cursor.close()
conn.close()
def test_rollback(shared_instance, dbapi_database):
"""Test rollbacking a transaction with several statements."""
want_row = (2, "first-name", "last-name", "test.email@domen.ru")
# connect to the test database
conn = Connection(shared_instance, dbapi_database)
cursor = conn.cursor()
cursor.execute(
"""
INSERT INTO contacts (contact_id, first_name, last_name, email)
VALUES (2, 'first-name', 'last-name', 'test.email@domen.ru')
"""
)
conn.commit()
# execute several DMLs with one transaction
cursor.execute(
"""
UPDATE contacts
SET first_name = 'updated-first-name'
WHERE first_name = 'first-name'
"""
)
cursor.execute(
"""
UPDATE contacts
SET email = 'test.email_updated@domen.ru'
WHERE email = 'test.email@domen.ru'
"""
)
conn.rollback()
# read the resulting data from the database
cursor.execute("SELECT * FROM contacts")
got_rows = cursor.fetchall()
conn.commit()
assert got_rows == [want_row]
cursor.close()
conn.close()
def test_autocommit_mode_change(shared_instance, dbapi_database):
"""Test auto committing a transaction on `autocommit` mode change."""
want_row = (
2,
"updated-first-name",
"last-name",
"test.email@domen.ru",
)
# connect to the test database
conn = Connection(shared_instance, dbapi_database)
cursor = conn.cursor()
cursor.execute(
"""
INSERT INTO contacts (contact_id, first_name, last_name, email)
VALUES (2, 'first-name', 'last-name', 'test.email@domen.ru')
"""
)
cursor.execute(
"""
UPDATE contacts
SET first_name = 'updated-first-name'
WHERE first_name = 'first-name'
"""
)
conn.autocommit = True
# read the resulting data from the database
cursor.execute("SELECT * FROM contacts")
got_rows = cursor.fetchall()
assert got_rows == [want_row]
cursor.close()
conn.close()
def test_rollback_on_connection_closing(shared_instance, dbapi_database):
"""
    When a connection is closed, all pending transactions
    must be rolled back. Verify that this is what happens.
"""
want_row = (1, "first-name", "last-name", "test.email@domen.ru")
# connect to the test database
conn = Connection(shared_instance, dbapi_database)
cursor = conn.cursor()
cursor.execute(
"""
INSERT INTO contacts (contact_id, first_name, last_name, email)
VALUES (1, 'first-name', 'last-name', 'test.email@domen.ru')
"""
)
conn.commit()
cursor.execute(
"""
UPDATE contacts
SET first_name = 'updated-first-name'
WHERE first_name = 'first-name'
"""
)
conn.close()
# connect again, as the previous connection is no-op after closing
conn = Connection(shared_instance, dbapi_database)
cursor = conn.cursor()
# read the resulting data from the database
cursor.execute("SELECT * FROM contacts")
got_rows = cursor.fetchall()
conn.commit()
assert got_rows == [want_row]
cursor.close()
conn.close()
def test_results_checksum(shared_instance, dbapi_database):
"""Test that results checksum is calculated properly."""
conn = Connection(shared_instance, dbapi_database)
cursor = conn.cursor()
cursor.execute(
"""
INSERT INTO contacts (contact_id, first_name, last_name, email)
VALUES
(1, 'first-name', 'last-name', 'test.email@domen.ru'),
(2, 'first-name2', 'last-name2', 'test.email2@domen.ru')
"""
)
assert len(conn._statements) == 1
conn.commit()
cursor.execute("SELECT * FROM contacts")
got_rows = cursor.fetchall()
assert len(conn._statements) == 1
conn.commit()
checksum = hashlib.sha256()
checksum.update(pickle.dumps(got_rows[0]))
checksum.update(pickle.dumps(got_rows[1]))
assert cursor._checksum.checksum.digest() == checksum.digest()
def test_execute_many(shared_instance, dbapi_database):
# connect to the test database
conn = Connection(shared_instance, dbapi_database)
cursor = conn.cursor()
row_data = [
(1, "first-name", "last-name", "test.email@example.com"),
(2, "first-name2", "last-name2", "test.email2@example.com"),
]
cursor.executemany(
"""
INSERT INTO contacts (contact_id, first_name, last_name, email)
VALUES (%s, %s, %s, %s)
""",
row_data,
)
conn.commit()
cursor.executemany(
"""SELECT * FROM contacts WHERE contact_id = @a1""", ({"a1": 1}, {"a1": 2}),
)
res = cursor.fetchall()
conn.commit()
assert len(res) == len(row_data)
for found, expected in zip(res, row_data):
assert found[0] == expected[0]
# checking that execute() and executemany()
# results are not mixed together
cursor.execute(
"""
SELECT * FROM contacts WHERE contact_id = 1
""",
)
res = cursor.fetchone()
conn.commit()
assert res[0] == 1
conn.close()
def test_DDL_autocommit(shared_instance, dbapi_database):
"""Check that DDLs in autocommit mode are immediately executed."""
conn = Connection(shared_instance, dbapi_database)
conn.autocommit = True
cur = conn.cursor()
cur.execute(
"""
CREATE TABLE Singers (
SingerId INT64 NOT NULL,
Name STRING(1024),
) PRIMARY KEY (SingerId)
"""
)
conn.close()
# if previous DDL wasn't committed, the next DROP TABLE
# statement will fail with a ProgrammingError
conn = Connection(shared_instance, dbapi_database)
cur = conn.cursor()
cur.execute("DROP TABLE Singers")
conn.commit()
def test_DDL_commit(shared_instance, dbapi_database):
"""Check that DDLs in commit mode are executed on calling `commit()`."""
conn = Connection(shared_instance, dbapi_database)
cur = conn.cursor()
cur.execute(
"""
CREATE TABLE Singers (
SingerId INT64 NOT NULL,
Name STRING(1024),
) PRIMARY KEY (SingerId)
"""
)
conn.commit()
conn.close()
# if previous DDL wasn't committed, the next DROP TABLE
# statement will fail with a ProgrammingError
conn = Connection(shared_instance, dbapi_database)
cur = conn.cursor()
cur.execute("DROP TABLE Singers")
conn.commit()
def test_ping(shared_instance, dbapi_database):
"""Check connection validation method."""
conn = Connection(shared_instance, dbapi_database)
conn.validate()
conn.close()
def test_update_non_autocommit(shared_instance, dbapi_database):
setup_rows = """
INSERT INTO contacts (contact_id, first_name, last_name, email)
VALUES
(1, 'first-name', 'last-name', 'get@domen.ru'),
(2, 'first-name', 'last-name', 'get@domen.ru'),
(3, 'first-name', 'last-name', 'ignore@domen.ru')
"""
conn = Connection(shared_instance, dbapi_database)
cursor = conn.cursor()
cursor.execute(setup_rows)
conn.commit()
cursor.execute(
"UPDATE contacts SET first_name='changed' WHERE email='get@domen.ru'"
)
conn.commit()
assert cursor.rowcount == 2
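def rows_checksum(rows):
    """Standalone sketch of the checksum scheme exercised by
    test_results_checksum: the cursor is expected to fold each fetched row
    into a SHA-256 digest of its pickled form, in fetch order. Relies on the
    hashlib and pickle imports at the top of this module."""
    digest = hashlib.sha256()
    for row in rows:
        digest.update(pickle.dumps(row))
    return digest.digest()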
| 26.071053
| 84
| 0.66559
|
import hashlib
import pickle
import pytest
from google.cloud import spanner_v1
from google.cloud.spanner_dbapi.connection import Connection
from . import _helpers
DATABASE_NAME = "dbapi-txn"
DDL_STATEMENTS = (
"""CREATE TABLE contacts (
contact_id INT64,
first_name STRING(1024),
last_name STRING(1024),
email STRING(1024)
)
PRIMARY KEY (contact_id)""",
)
@pytest.fixture(scope="session")
def raw_database(shared_instance, database_operation_timeout):
    database_id = _helpers.unique_id("dbapi-txn")
pool = spanner_v1.BurstyPool(labels={"testcase": "database_api"})
database = shared_instance.database(
        database_id, ddl_statements=DDL_STATEMENTS, pool=pool,
)
op = database.create()
op.result(database_operation_timeout)
yield database
database.drop()
def clear_table(transaction):
transaction.execute_update("DELETE FROM contacts WHERE true")
@pytest.fixture(scope="function")
def dbapi_database(raw_database):
raw_database.run_in_transaction(clear_table)
yield raw_database
raw_database.run_in_transaction(clear_table)
def test_commit(shared_instance, dbapi_database):
want_row = (
1,
"updated-first-name",
"last-name",
"test.email_updated@domen.ru",
)
conn = Connection(shared_instance, dbapi_database)
cursor = conn.cursor()
cursor.execute(
"""
INSERT INTO contacts (contact_id, first_name, last_name, email)
VALUES (1, 'first-name', 'last-name', 'test.email@domen.ru')
"""
)
cursor.execute(
"""
UPDATE contacts
SET first_name = 'updated-first-name'
WHERE first_name = 'first-name'
"""
)
cursor.execute(
"""
UPDATE contacts
SET email = 'test.email_updated@domen.ru'
WHERE email = 'test.email@domen.ru'
"""
)
conn.commit()
cursor.execute("SELECT * FROM contacts")
got_rows = cursor.fetchall()
conn.commit()
assert got_rows == [want_row]
cursor.close()
conn.close()
def test_rollback(shared_instance, dbapi_database):
want_row = (2, "first-name", "last-name", "test.email@domen.ru")
conn = Connection(shared_instance, dbapi_database)
cursor = conn.cursor()
cursor.execute(
"""
INSERT INTO contacts (contact_id, first_name, last_name, email)
VALUES (2, 'first-name', 'last-name', 'test.email@domen.ru')
"""
)
conn.commit()
cursor.execute(
"""
UPDATE contacts
SET first_name = 'updated-first-name'
WHERE first_name = 'first-name'
"""
)
cursor.execute(
"""
UPDATE contacts
SET email = 'test.email_updated@domen.ru'
WHERE email = 'test.email@domen.ru'
"""
)
conn.rollback()
cursor.execute("SELECT * FROM contacts")
got_rows = cursor.fetchall()
conn.commit()
assert got_rows == [want_row]
cursor.close()
conn.close()
def test_autocommit_mode_change(shared_instance, dbapi_database):
want_row = (
2,
"updated-first-name",
"last-name",
"test.email@domen.ru",
)
conn = Connection(shared_instance, dbapi_database)
cursor = conn.cursor()
cursor.execute(
"""
INSERT INTO contacts (contact_id, first_name, last_name, email)
VALUES (2, 'first-name', 'last-name', 'test.email@domen.ru')
"""
)
cursor.execute(
"""
UPDATE contacts
SET first_name = 'updated-first-name'
WHERE first_name = 'first-name'
"""
)
conn.autocommit = True
cursor.execute("SELECT * FROM contacts")
got_rows = cursor.fetchall()
assert got_rows == [want_row]
cursor.close()
conn.close()
def test_rollback_on_connection_closing(shared_instance, dbapi_database):
want_row = (1, "first-name", "last-name", "test.email@domen.ru")
conn = Connection(shared_instance, dbapi_database)
cursor = conn.cursor()
cursor.execute(
"""
INSERT INTO contacts (contact_id, first_name, last_name, email)
VALUES (1, 'first-name', 'last-name', 'test.email@domen.ru')
"""
)
conn.commit()
cursor.execute(
"""
UPDATE contacts
SET first_name = 'updated-first-name'
WHERE first_name = 'first-name'
"""
)
conn.close()
conn = Connection(shared_instance, dbapi_database)
cursor = conn.cursor()
cursor.execute("SELECT * FROM contacts")
got_rows = cursor.fetchall()
conn.commit()
assert got_rows == [want_row]
cursor.close()
conn.close()
def test_results_checksum(shared_instance, dbapi_database):
conn = Connection(shared_instance, dbapi_database)
cursor = conn.cursor()
cursor.execute(
"""
INSERT INTO contacts (contact_id, first_name, last_name, email)
VALUES
(1, 'first-name', 'last-name', 'test.email@domen.ru'),
(2, 'first-name2', 'last-name2', 'test.email2@domen.ru')
"""
)
assert len(conn._statements) == 1
conn.commit()
cursor.execute("SELECT * FROM contacts")
got_rows = cursor.fetchall()
assert len(conn._statements) == 1
conn.commit()
checksum = hashlib.sha256()
checksum.update(pickle.dumps(got_rows[0]))
checksum.update(pickle.dumps(got_rows[1]))
assert cursor._checksum.checksum.digest() == checksum.digest()
def test_execute_many(shared_instance, dbapi_database):
conn = Connection(shared_instance, dbapi_database)
cursor = conn.cursor()
row_data = [
(1, "first-name", "last-name", "test.email@example.com"),
(2, "first-name2", "last-name2", "test.email2@example.com"),
]
cursor.executemany(
"""
INSERT INTO contacts (contact_id, first_name, last_name, email)
VALUES (%s, %s, %s, %s)
""",
row_data,
)
conn.commit()
cursor.executemany(
"""SELECT * FROM contacts WHERE contact_id = @a1""", ({"a1": 1}, {"a1": 2}),
)
res = cursor.fetchall()
conn.commit()
assert len(res) == len(row_data)
for found, expected in zip(res, row_data):
assert found[0] == expected[0]
cursor.execute(
"""
SELECT * FROM contacts WHERE contact_id = 1
""",
)
res = cursor.fetchone()
conn.commit()
assert res[0] == 1
conn.close()
def test_DDL_autocommit(shared_instance, dbapi_database):
conn = Connection(shared_instance, dbapi_database)
conn.autocommit = True
cur = conn.cursor()
cur.execute(
"""
CREATE TABLE Singers (
SingerId INT64 NOT NULL,
Name STRING(1024),
) PRIMARY KEY (SingerId)
"""
)
conn.close()
# statement will fail with a ProgrammingError
conn = Connection(shared_instance, dbapi_database)
cur = conn.cursor()
cur.execute("DROP TABLE Singers")
conn.commit()
def test_DDL_commit(shared_instance, dbapi_database):
conn = Connection(shared_instance, dbapi_database)
cur = conn.cursor()
cur.execute(
"""
CREATE TABLE Singers (
SingerId INT64 NOT NULL,
Name STRING(1024),
) PRIMARY KEY (SingerId)
"""
)
conn.commit()
conn.close()
# if previous DDL wasn't committed, the next DROP TABLE
conn = Connection(shared_instance, dbapi_database)
cur = conn.cursor()
cur.execute("DROP TABLE Singers")
conn.commit()
def test_ping(shared_instance, dbapi_database):
conn = Connection(shared_instance, dbapi_database)
conn.validate()
conn.close()
def test_update_non_autocommit(shared_instance, dbapi_database):
setup_rows = """
INSERT INTO contacts (contact_id, first_name, last_name, email)
VALUES
(1, 'first-name', 'last-name', 'get@domen.ru'),
(2, 'first-name', 'last-name', 'get@domen.ru'),
(3, 'first-name', 'last-name', 'ignore@domen.ru')
"""
conn = Connection(shared_instance, dbapi_database)
cursor = conn.cursor()
cursor.execute(setup_rows)
conn.commit()
cursor.execute(
"UPDATE contacts SET first_name='changed' WHERE email='get@domen.ru'"
)
conn.commit()
assert cursor.rowcount == 2
| true
| true
|
790b49c6aa358e86089bdfefcaaf2050a0f02eb6
| 6,830
|
py
|
Python
|
contract_api/testcases/unit_testcases/consumers/test_service_event_consumer.py
|
vinthedark/snet-marketplace-service
|
66ed9d093b00f09d3e28ef4d86c4e4c125037d06
|
[
"MIT"
] | null | null | null |
contract_api/testcases/unit_testcases/consumers/test_service_event_consumer.py
|
vinthedark/snet-marketplace-service
|
66ed9d093b00f09d3e28ef4d86c4e4c125037d06
|
[
"MIT"
] | null | null | null |
contract_api/testcases/unit_testcases/consumers/test_service_event_consumer.py
|
vinthedark/snet-marketplace-service
|
66ed9d093b00f09d3e28ef4d86c4e4c125037d06
|
[
"MIT"
] | null | null | null |
import unittest
from datetime import datetime
from unittest.mock import patch
from common.repository import Repository
from contract_api.config import NETWORKS, NETWORK_ID
from contract_api.consumers.service_event_consumer import ServiceCreatedEventConsumer
from contract_api.dao.service_repository import ServiceRepository
class TestOrganizationEventConsumer(unittest.TestCase):
def setUp(self):
pass
@patch('common.s3_util.S3Util.push_io_bytes_to_s3')
@patch('common.ipfs_util.IPFSUtil.read_file_from_ipfs')
@patch('common.ipfs_util.IPFSUtil.read_bytesio_from_ipfs')
@patch('contract_api.consumers.service_event_consumer.ServiceEventConsumer._fetch_tags')
    def test_on_service_created_event(self, mock_fetch_tags, mock_read_bytesio_from_ipfs, mock_ipfs_read, mock_s3_push):
event = {"data": {'row_id': 202, 'block_no': 6325625, 'event': 'ServiceCreated',
'json_str': "{'orgId': b'snet\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00', 'serviceId': b'gene-annotation-service\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00', 'metadataURI': b'ipfs://QmdGjaVYPMSGpC1qT3LDALSNCCu7JPf7j51H1GQirvQJYf\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00'}",
'processed': b'\x00',
'transactionHash': 'b"\\xa7P*\\xaf\\xfd\\xd5.E\\x8c\\x0bKAF\'\\x15\\x03\\xef\\xdaO\'\\x86/<\\xfb\\xc4\\xf0@\\xf0\\xc1P\\x8c\\xc7"',
'logIndex': '0', 'error_code': 1, 'error_msg': '',
'row_updated': datetime(2019, 10, 21, 9, 59, 37),
'row_created': datetime(2019, 10, 21, 9, 59, 37)}, "name": "ServiceCreated"}
connection = Repository(NETWORK_ID, NETWORKS=NETWORKS)
service_repository = ServiceRepository(connection)
service_repository.delete_service(org_id='snet', service_id='gene-annotation-service')
service_repository.delete_service_dependents(org_id='snet', service_id='gene-annotation-service')
        mock_read_bytesio_from_ipfs.return_value = "some_value_to_be_pushed_to_s3_which_is_mocked"
mock_ipfs_read.return_value = {
"version": 1,
"display_name": "Annotation Service",
"encoding": "proto",
"service_type": "grpc",
"model_ipfs_hash": "QmXqonxB9EvNBe11J8oCYXMQAtPKAb2x8CyFLmQpkvVaLf",
"mpe_address": "0x8FB1dC8df86b388C7e00689d1eCb533A160B4D0C",
"groups": [
{
"group_name": "default_group",
"pricing": [
{
"price_model": "fixed_price",
"price_in_cogs": 1,
"default": True
}
],
"endpoints": [
"https://mozi.ai:8000"
],
"group_id": "m5FKWq4hW0foGW5qSbzGSjgZRuKs7A1ZwbIrJ9e96rc="
}
],
"assets": {
"hero_image": "QmVcE6fEDP764ibadXTjZHk251Lmt5xAxdc4P9mPA4kksk/hero_gene-annotation-2b.png"
},
"service_description": {
"url": "https://mozi-ai.github.io/annotation-service/",
"description": "Use this service to annotate a humane genome with uniform terms, Reactome pathway memberships, and BioGrid protein interactions.",
"short_description": "short description"
},
"contributors": [
{
"name": "dummy dummy",
"email_id": "dummy@dummy.io"
}
]
}
mock_fetch_tags.return_value = ["test", "", "", [b'\x61\x74\x6F\x6D\x65\x73\x65',
b'\x62\x69\x6F\x69\x6E\x66\x6F\x72\x6D\x61\x74\x69\x63\x73']]
mock_s3_push.return_value = "https://test-s3-push"
org_event_consumer = ServiceCreatedEventConsumer("wss://ropsten.infura.io/ws", "http://ipfs.singularitynet.io",
80)
org_event_consumer.on_event(event=event)
service = service_repository.get_service(org_id='snet', service_id='gene-annotation-service')
service_metadata = service_repository.get_service_metadata(org_id='snet', service_id='gene-annotation-service')
service_endpoints = service_repository.get_service_endpoints(org_id='snet',
service_id='gene-annotation-service')
service_tags = service_repository.get_service_tags(org_id='snet', service_id='gene-annotation-service')
assert service == {'org_id': 'snet', 'service_id': 'gene-annotation-service', 'service_path': None,
'ipfs_hash': 'QmdGjaVYPMSGpC1qT3LDALSNCCu7JPf7j51H1GQirvQJYf', 'is_curated': 0}
assert service_metadata == {'org_id': 'snet', 'service_id': 'gene-annotation-service',
'display_name': 'Annotation Service',
                                    'description': 'Use this service to annotate a human genome with uniform terms, Reactome pathway memberships, and BioGrid protein interactions.',
'short_description': 'short description',
'url': 'https://mozi-ai.github.io/annotation-service/', 'json': '',
'model_ipfs_hash': 'QmXqonxB9EvNBe11J8oCYXMQAtPKAb2x8CyFLmQpkvVaLf',
'encoding': 'proto', 'type': 'grpc',
'mpe_address': '0x8FB1dC8df86b388C7e00689d1eCb533A160B4D0C',
'assets_url': '{"hero_image": "https://test-s3-push"}',
'assets_hash': '{"hero_image": "QmVcE6fEDP764ibadXTjZHk251Lmt5xAxdc4P9mPA4kksk/hero_gene-annotation-2b.png"}',
'service_rating': '{"rating": 0.0, "total_users_rated": 0}', 'ranking': 1,
'contributors': '[{"name": "dummy dummy", "email_id": "dummy@dummy.io"}]'}
assert service_endpoints == [{'org_id': 'snet', 'service_id': 'gene-annotation-service',
'group_id': 'm5FKWq4hW0foGW5qSbzGSjgZRuKs7A1ZwbIrJ9e96rc=',
'endpoint': 'https://mozi.ai:8000'}]
assert service_tags == [{'org_id': 'snet', 'service_id': 'gene-annotation-service', 'tag_name': 'atomese'},
{'org_id': 'snet', 'service_id': 'gene-annotation-service',
'tag_name': 'bioinformatics'}]
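# Decoding note: the mocked _fetch_tags payload above is raw bytes; decoded,
# the two tags read "atomese" and "bioinformatics", which is exactly what
# the final service_tags assertion expects.
assert b'\x61\x74\x6F\x6D\x65\x73\x65'.decode('ascii') == 'atomese'
assert b'\x62\x69\x6F\x69\x6E\x66\x6F\x72\x6D\x61\x74\x69\x63\x73'.decode('ascii') == 'bioinformatics'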
| 65.047619
| 413
| 0.569985
|
import unittest
from datetime import datetime
from unittest.mock import patch
from common.repository import Repository
from contract_api.config import NETWORKS, NETWORK_ID
from contract_api.consumers.service_event_consumer import ServiceCreatedEventConsumer
from contract_api.dao.service_repository import ServiceRepository
class TestOrganizationEventConsumer(unittest.TestCase):
def setUp(self):
pass
@patch('common.s3_util.S3Util.push_io_bytes_to_s3')
@patch('common.ipfs_util.IPFSUtil.read_file_from_ipfs')
@patch('common.ipfs_util.IPFSUtil.read_bytesio_from_ipfs')
@patch('contract_api.consumers.service_event_consumer.ServiceEventConsumer._fetch_tags')
    def test_on_service_created_event(self, mock_fetch_tags, mock_read_bytesio_from_ipfs, mock_ipfs_read, mock_s3_push):
event = {"data": {'row_id': 202, 'block_no': 6325625, 'event': 'ServiceCreated',
'json_str': "{'orgId': b'snet\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00', 'serviceId': b'gene-annotation-service\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00', 'metadataURI': b'ipfs://QmdGjaVYPMSGpC1qT3LDALSNCCu7JPf7j51H1GQirvQJYf\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00'}",
'processed': b'\x00',
'transactionHash': 'b"\\xa7P*\\xaf\\xfd\\xd5.E\\x8c\\x0bKAF\'\\x15\\x03\\xef\\xdaO\'\\x86/<\\xfb\\xc4\\xf0@\\xf0\\xc1P\\x8c\\xc7"',
'logIndex': '0', 'error_code': 1, 'error_msg': '',
'row_updated': datetime(2019, 10, 21, 9, 59, 37),
'row_created': datetime(2019, 10, 21, 9, 59, 37)}, "name": "ServiceCreated"}
connection = Repository(NETWORK_ID, NETWORKS=NETWORKS)
service_repository = ServiceRepository(connection)
service_repository.delete_service(org_id='snet', service_id='gene-annotation-service')
service_repository.delete_service_dependents(org_id='snet', service_id='gene-annotation-service')
        mock_read_bytesio_from_ipfs.return_value = "some_value_to_be_pushed_to_s3_which_is_mocked"
mock_ipfs_read.return_value = {
"version": 1,
"display_name": "Annotation Service",
"encoding": "proto",
"service_type": "grpc",
"model_ipfs_hash": "QmXqonxB9EvNBe11J8oCYXMQAtPKAb2x8CyFLmQpkvVaLf",
"mpe_address": "0x8FB1dC8df86b388C7e00689d1eCb533A160B4D0C",
"groups": [
{
"group_name": "default_group",
"pricing": [
{
"price_model": "fixed_price",
"price_in_cogs": 1,
"default": True
}
],
"endpoints": [
"https://mozi.ai:8000"
],
"group_id": "m5FKWq4hW0foGW5qSbzGSjgZRuKs7A1ZwbIrJ9e96rc="
}
],
"assets": {
"hero_image": "QmVcE6fEDP764ibadXTjZHk251Lmt5xAxdc4P9mPA4kksk/hero_gene-annotation-2b.png"
},
"service_description": {
"url": "https://mozi-ai.github.io/annotation-service/",
"description": "Use this service to annotate a humane genome with uniform terms, Reactome pathway memberships, and BioGrid protein interactions.",
"short_description": "short description"
},
"contributors": [
{
"name": "dummy dummy",
"email_id": "dummy@dummy.io"
}
]
}
mock_fetch_tags.return_value = ["test", "", "", [b'\x61\x74\x6F\x6D\x65\x73\x65',
b'\x62\x69\x6F\x69\x6E\x66\x6F\x72\x6D\x61\x74\x69\x63\x73']]
mock_s3_push.return_value = "https://test-s3-push"
org_event_consumer = ServiceCreatedEventConsumer("wss://ropsten.infura.io/ws", "http://ipfs.singularitynet.io",
80)
org_event_consumer.on_event(event=event)
service = service_repository.get_service(org_id='snet', service_id='gene-annotation-service')
service_metadata = service_repository.get_service_metadata(org_id='snet', service_id='gene-annotation-service')
service_endpoints = service_repository.get_service_endpoints(org_id='snet',
service_id='gene-annotation-service')
service_tags = service_repository.get_service_tags(org_id='snet', service_id='gene-annotation-service')
assert service == {'org_id': 'snet', 'service_id': 'gene-annotation-service', 'service_path': None,
'ipfs_hash': 'QmdGjaVYPMSGpC1qT3LDALSNCCu7JPf7j51H1GQirvQJYf', 'is_curated': 0}
assert service_metadata == {'org_id': 'snet', 'service_id': 'gene-annotation-service',
'display_name': 'Annotation Service',
                                    'description': 'Use this service to annotate a human genome with uniform terms, Reactome pathway memberships, and BioGrid protein interactions.',
'short_description': 'short description',
'url': 'https://mozi-ai.github.io/annotation-service/', 'json': '',
'model_ipfs_hash': 'QmXqonxB9EvNBe11J8oCYXMQAtPKAb2x8CyFLmQpkvVaLf',
'encoding': 'proto', 'type': 'grpc',
'mpe_address': '0x8FB1dC8df86b388C7e00689d1eCb533A160B4D0C',
'assets_url': '{"hero_image": "https://test-s3-push"}',
'assets_hash': '{"hero_image": "QmVcE6fEDP764ibadXTjZHk251Lmt5xAxdc4P9mPA4kksk/hero_gene-annotation-2b.png"}',
'service_rating': '{"rating": 0.0, "total_users_rated": 0}', 'ranking': 1,
'contributors': '[{"name": "dummy dummy", "email_id": "dummy@dummy.io"}]'}
assert service_endpoints == [{'org_id': 'snet', 'service_id': 'gene-annotation-service',
'group_id': 'm5FKWq4hW0foGW5qSbzGSjgZRuKs7A1ZwbIrJ9e96rc=',
'endpoint': 'https://mozi.ai:8000'}]
assert service_tags == [{'org_id': 'snet', 'service_id': 'gene-annotation-service', 'tag_name': 'atomese'},
{'org_id': 'snet', 'service_id': 'gene-annotation-service',
'tag_name': 'bioinformatics'}]
| true
| true
|
790b4a221e2df9c50e46d7aa03425102110463da
| 1,337
|
py
|
Python
|
simplestatistics/statistics/harmonic_mean.py
|
tmcw-up-for-adoption/simple-statistics-py
|
aea5de1c4b853e746c67b44734027464650abe1c
|
[
"MIT"
] | 92
|
2016-05-12T20:16:29.000Z
|
2022-03-11T22:24:07.000Z
|
simplestatistics/statistics/harmonic_mean.py
|
sheriferson/simplestatistics
|
aea5de1c4b853e746c67b44734027464650abe1c
|
[
"MIT"
] | 6
|
2016-05-12T19:14:28.000Z
|
2017-11-17T02:07:58.000Z
|
simplestatistics/statistics/harmonic_mean.py
|
sheriferson/simple-statistics-py
|
aea5de1c4b853e746c67b44734027464650abe1c
|
[
"MIT"
] | 18
|
2016-05-13T16:30:23.000Z
|
2021-06-05T04:45:41.000Z
|
"""
Implements harmonic_mean() function.
"""
from .mean import mean
def harmonic_mean(x):
"""
The `harmonic mean`_ is a kind of average that is calculated as
the reciprocal_ of the arithmetic mean of the reciprocals.
It is appropriate when calculating averages of rates_.
.. _`harmonic mean`: https://en.wikipedia.org/wiki/Harmonic_mean
.. _reciprocal: https://en.wikipedia.org/wiki/Multiplicative_inverse
.. _rates: https://en.wikipedia.org/wiki/Rate_(mathematics)
Equation:
.. math::
H = \\frac{n}{\\frac{1}{x_1}+\\frac{1}{x_2}+\\ldots+\\frac{1}{x_n}} =
\\frac{n}{\\sum\\limits_{i=1}^n \\frac{1}{x_i}}
Args:
x: A list or tuple of numerical objects.
Returns:
A numerical object.
Raises:
TypeError: If the user passes something other than list or tuple.
Examples:
>>> harmonic_mean([1, 2, 4])
1.7142857142857142
>>> harmonic_mean(7)
Traceback (most recent call last):
...
TypeError: harmonic_mean() expects a list or a tuple.
"""
if type(x) not in [list, tuple]:
raise TypeError('harmonic_mean() expects a list or a tuple.')
    reciprocals = [1 / float(num) for num in x]
    return 1 / mean(reciprocals)
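# --- A minimal usage sketch; the sample data and the __main__ guard are
# illustrative additions, not part of the module's published API. It
# cross-checks harmonic_mean() against the closed form H = n / sum(1/x_i).
if __name__ == '__main__':
    data = [1, 2, 4]
    closed_form = len(data) / sum(1.0 / v for v in data)
    assert abs(harmonic_mean(data) - closed_form) < 1e-12
    print(harmonic_mean(data))  # 1.7142857142857142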
| 28.446809
| 81
| 0.617801
|
from .mean import mean
def harmonic_mean(x):
if type(x) not in [list, tuple]:
raise TypeError('harmonic_mean() expects a list or a tuple.')
reciprocals = [1 / float(num) for num in x]
    return 1 / mean(reciprocals)
| true
| true
|
790b4a497c425262730b0b32075cdb46dedd2792
| 4,154
|
py
|
Python
|
exodus/exodus.py
|
UAMS-DBMI/PosdaTools
|
7d33605da1b88e4787a1368dbecaffda1df95e5b
|
[
"Apache-2.0"
] | 6
|
2019-01-17T15:47:44.000Z
|
2022-02-02T16:47:25.000Z
|
exodus/exodus.py
|
UAMS-DBMI/PosdaTools
|
7d33605da1b88e4787a1368dbecaffda1df95e5b
|
[
"Apache-2.0"
] | 23
|
2016-06-08T21:51:36.000Z
|
2022-03-02T08:11:44.000Z
|
exodus/exodus.py
|
UAMS-DBMI/PosdaTools
|
7d33605da1b88e4787a1368dbecaffda1df95e5b
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3 -u
import json
import time
import os
import hashlib
import redis
from typing import NamedTuple
import requests
import psycopg2
USER=os.environ['EXODUS_USER']
PASS=os.environ['EXODUS_PASS']
RETRY_COUNT=int(os.environ['EXODUS_RETRY_COUNT'])
PSQL_DB_NAME=os.environ['EXODUS_PSQL_DB_NAME']
REDIS_HOST=os.environ['POSDA_REDIS_HOST']
class SubmitFailedError(RuntimeError): pass
class File(NamedTuple):
export_event_id: int
import_event_id: int
file_id: int
file_path: str
base_url: str
apikey: str
delete_after_transfer: int
def main_loop(redis_db, psql_db):
while True:
sr = redis_db.brpop("posda_to_posda_transfer", 5)
if sr is None:
continue
_, value = sr
file = File(*json.loads(value))
try:
submit_file(file)
update_success(psql_db, file.file_id, file.export_event_id)
except SubmitFailedError as e:
# probably should put this onto a failed-file list now?
print(e)
insert_errors(psql_db, file.file_id, file.export_event_id, e)
def update_success(psql_db, file_id, export_event_id):
try:
psql_db.execute("""
update file_export set
when_transferred = now(),
transfer_status = 'success'
where export_event_id = %s
and file_id = %s
""", [export_event_id, file_id])
except Exception as e:
print(e)
def insert_errors(psql_db, file_id, export_event_id, errors):
transfer_status_id = None
try:
psql_db.execute("""
insert into transfer_status
values (default, %s)
returning transfer_status_id
""", [str(errors)])
transfer_status_id, = psql_db.fetchone()
except psycopg2.IntegrityError:
psql_db.execute("""
select transfer_status_id
from transfer_status
where transfer_status_message = %s
""", [str(errors)])
transfer_status_id, = psql_db.fetchone()
if transfer_status_id is None:
print("Unable to create or get transfer_status_id for following error")
print(str(errors))
try:
psql_db.execute("""
update file_export set
when_transferred = now(),
transfer_status = 'failed permanent',
transfer_status_id = %s
where export_event_id = %s
and file_id = %s
""", [transfer_status_id, export_event_id, file_id])
except Exception as e:
print(e)
def md5sum(filename):
md5 = hashlib.md5()
with open(filename, 'rb') as f:
for chunk in iter(lambda: f.read(128 * md5.block_size), b''):
md5.update(chunk)
return md5.hexdigest()
def submit_file(file):
try:
params = {'import_event_id': file.import_event_id,
'digest': md5sum(file.file_path)}
headers = {}
        if file.apikey:
headers['apikey'] = file.apikey
with open(file.file_path, "rb") as infile:
req = requests.put(file.base_url + "/v1/import/file",
headers=headers,
params=params,
data=infile)
if req.status_code == 200:
print(file.file_id)
            if file.delete_after_transfer:
os.remove(file.file_path)
return
else:
raise SubmitFailedError((req.status_code, req.content))
except SubmitFailedError as e:
raise SubmitFailedError(("Failed to submit the file; error details follow", file, e))
except IOError as e:
raise SubmitFailedError(("Failed to open the file; error details follow", file, e))
def main():
print("exodus, starting up...")
redis_db = redis.StrictRedis(host=REDIS_HOST, db=0)
print("connected to redis")
psql_db_conn = psycopg2.connect(dbname=PSQL_DB_NAME)
psql_db_conn.autocommit = True
psql_db_cur = psql_db_conn.cursor()
print("connected to postgres")
main_loop(redis_db, psql_db_cur)
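# --- Minimal producer sketch (an illustrative assumption, not part of this
# service): exodus pops JSON arrays whose element order must match the File
# NamedTuple above; every value below is a placeholder.
def enqueue_example(redis_db):
    message = [
        1,                        # export_event_id (placeholder)
        2,                        # import_event_id (placeholder)
        42,                       # file_id (placeholder)
        "/tmp/example.dcm",       # file_path (hypothetical)
        "https://posda.example",  # base_url (hypothetical)
        "",                       # apikey (falsy, so no apikey header is sent)
        0,                        # delete_after_transfer (keep the local file)
    ]
    # brpop consumes from the right end of the list, so lpush gives FIFO order.
    redis_db.lpush("posda_to_posda_transfer", json.dumps(message))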
if __name__ == "__main__":
main()
| 29.884892
| 93
| 0.613866
|
import json
import time
import os
import hashlib
import redis
from typing import NamedTuple
import requests
import psycopg2
USER=os.environ['EXODUS_USER']
PASS=os.environ['EXODUS_PASS']
RETRY_COUNT=int(os.environ['EXODUS_RETRY_COUNT'])
PSQL_DB_NAME=os.environ['EXODUS_PSQL_DB_NAME']
REDIS_HOST=os.environ['POSDA_REDIS_HOST']
class SubmitFailedError(RuntimeError): pass
class File(NamedTuple):
export_event_id: int
import_event_id: int
file_id: int
file_path: str
base_url: str
apikey: str
delete_after_transfer: int
def main_loop(redis_db, psql_db):
while True:
sr = redis_db.brpop("posda_to_posda_transfer", 5)
if sr is None:
continue
_, value = sr
file = File(*json.loads(value))
try:
submit_file(file)
update_success(psql_db, file.file_id, file.export_event_id)
except SubmitFailedError as e:
print(e)
insert_errors(psql_db, file.file_id, file.export_event_id, e)
def update_success(psql_db, file_id, export_event_id):
try:
psql_db.execute("""
update file_export set
when_transferred = now(),
transfer_status = 'success'
where export_event_id = %s
and file_id = %s
""", [export_event_id, file_id])
except Exception as e:
print(e)
def insert_errors(psql_db, file_id, export_event_id, errors):
transfer_status_id = None
try:
psql_db.execute("""
insert into transfer_status
values (default, %s)
returning transfer_status_id
""", [str(errors)])
transfer_status_id, = psql_db.fetchone()
except psycopg2.IntegrityError:
psql_db.execute("""
select transfer_status_id
from transfer_status
where transfer_status_message = %s
""", [str(errors)])
transfer_status_id, = psql_db.fetchone()
if transfer_status_id is None:
print("Unable to create or get transfer_status_id for following error")
print(str(errors))
try:
psql_db.execute("""
update file_export set
when_transferred = now(),
transfer_status = 'failed permanent',
transfer_status_id = %s
where export_event_id = %s
and file_id = %s
""", [transfer_status_id, export_event_id, file_id])
except Exception as e:
print(e)
def md5sum(filename):
md5 = hashlib.md5()
with open(filename, 'rb') as f:
for chunk in iter(lambda: f.read(128 * md5.block_size), b''):
md5.update(chunk)
return md5.hexdigest()
def submit_file(file):
try:
params = {'import_event_id': file.import_event_id,
'digest': md5sum(file.file_path)}
headers = {}
        if file.apikey:
headers['apikey'] = file.apikey
with open(file.file_path, "rb") as infile:
req = requests.put(file.base_url + "/v1/import/file",
headers=headers,
params=params,
data=infile)
if req.status_code == 200:
print(file.file_id)
            if file.delete_after_transfer:
os.remove(file.file_path)
return
else:
raise SubmitFailedError((req.status_code, req.content))
except SubmitFailedError as e:
raise SubmitFailedError(("Failed to submit the file; error details follow", file, e))
except IOError as e:
raise SubmitFailedError(("Failed to open the file; error details follow", file, e))
def main():
print("exodus, starting up...")
redis_db = redis.StrictRedis(host=REDIS_HOST, db=0)
print("connected to redis")
psql_db_conn = psycopg2.connect(dbname=PSQL_DB_NAME)
psql_db_conn.autocommit = True
psql_db_cur = psql_db_conn.cursor()
print("connected to postgres")
main_loop(redis_db, psql_db_cur)
if __name__ == "__main__":
main()
| true
| true
|
790b4abfcb0e688d48be5e849c857e886ddcb0bb
| 2,003
|
py
|
Python
|
Automatic extractive Text Summarization using RoBERTa/Deploy Flask app/app.py
|
ramachandra742/Text-Summarization-projects
|
80ff101ed399a2bbbbbcecf3316a1aa8cce3b516
|
[
"MIT"
] | null | null | null |
Automatic extractive Text Summarization using RoBERTa/Deploy Flask app/app.py
|
ramachandra742/Text-Summarization-projects
|
80ff101ed399a2bbbbbcecf3316a1aa8cce3b516
|
[
"MIT"
] | null | null | null |
Automatic extractive Text Summarization using RoBERTa/Deploy Flask app/app.py
|
ramachandra742/Text-Summarization-projects
|
80ff101ed399a2bbbbbcecf3316a1aa8cce3b516
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
from flask import Flask,render_template,url_for,request
from text_summarization import text_summarizer
import time
import spacy
nlp = spacy.load('en_core_web_sm')
app = Flask(__name__)
# Web Scraping Pkg
from bs4 import BeautifulSoup
from urllib.request import urlopen
# Reading Time
def readingTime(mytext):
total_words = len([ token.text for token in nlp(mytext)])
estimatedTime = total_words/200.0
return estimatedTime
# Fetch Text From Url
def get_text(url):
page = urlopen(url)
    soup = BeautifulSoup(page, 'html.parser')
fetched_text = ' '.join(map(lambda p:p.text,soup.find_all('p')))
return fetched_text
@app.route('/')
def index():
return render_template('index.html')
@app.route('/analyze',methods=['GET','POST'])
def analyze():
start = time.time()
if request.method == 'POST':
rawtext = request.form['rawtext']
final_reading_time = readingTime(rawtext)
final_summary = text_summarizer(rawtext)
summary_reading_time = readingTime(final_summary)
end = time.time()
final_time = end-start
return render_template('index.html',ctext=rawtext,final_summary=final_summary,final_time=final_time,final_reading_time=final_reading_time,summary_reading_time=summary_reading_time)
@app.route('/analyze_url',methods=['GET','POST'])
def analyze_url():
start = time.time()
if request.method == 'POST':
raw_url = request.form['raw_url']
rawtext = get_text(raw_url)
final_reading_time = readingTime(rawtext)
final_summary = text_summarizer(rawtext)
summary_reading_time = readingTime(final_summary)
end = time.time()
final_time = end-start
return render_template('index.html',ctext=rawtext,final_summary=final_summary,final_time=final_time,final_reading_time=final_reading_time,summary_reading_time=summary_reading_time)
@app.route('/about')
def about():
return render_template('index.html')
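# --- A minimal smoke-test sketch (an illustrative assumption): it uses
# Flask's built-in test client, so no running server or network is needed.
# The 'rawtext' form field matches the /analyze route above.
def _smoke_test():
    with app.test_client() as client:
        resp = client.post('/analyze', data={'rawtext': 'Some long article text.'})
        assert resp.status_code == 200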
if __name__ == '__main__':
app.run(debug=True)
| 31.793651
| 182
| 0.751872
|
from __future__ import unicode_literals
from flask import Flask,render_template,url_for,request
from text_summarization import text_summarizer
import time
import spacy
nlp = spacy.load('en_core_web_sm')
app = Flask(__name__)
from bs4 import BeautifulSoup
from urllib.request import urlopen
def readingTime(mytext):
total_words = len([ token.text for token in nlp(mytext)])
estimatedTime = total_words/200.0
return estimatedTime
def get_text(url):
page = urlopen(url)
    soup = BeautifulSoup(page, 'html.parser')
fetched_text = ' '.join(map(lambda p:p.text,soup.find_all('p')))
return fetched_text
@app.route('/')
def index():
return render_template('index.html')
@app.route('/analyze',methods=['GET','POST'])
def analyze():
start = time.time()
if request.method == 'POST':
rawtext = request.form['rawtext']
final_reading_time = readingTime(rawtext)
final_summary = text_summarizer(rawtext)
summary_reading_time = readingTime(final_summary)
end = time.time()
final_time = end-start
return render_template('index.html',ctext=rawtext,final_summary=final_summary,final_time=final_time,final_reading_time=final_reading_time,summary_reading_time=summary_reading_time)
@app.route('/analyze_url',methods=['GET','POST'])
def analyze_url():
start = time.time()
if request.method == 'POST':
raw_url = request.form['raw_url']
rawtext = get_text(raw_url)
final_reading_time = readingTime(rawtext)
final_summary = text_summarizer(rawtext)
summary_reading_time = readingTime(final_summary)
end = time.time()
final_time = end-start
return render_template('index.html',ctext=rawtext,final_summary=final_summary,final_time=final_time,final_reading_time=final_reading_time,summary_reading_time=summary_reading_time)
@app.route('/about')
def about():
return render_template('index.html')
if __name__ == '__main__':
app.run(debug=True)
| true
| true
|
790b4b2cde73f57c55c82e3db02ef2939283d73a
| 1,586
|
py
|
Python
|
2021/day4_part1.py
|
rogall-e/advent_of_code
|
a8c41fb63478dd7b99c88c4fa99b7ba0bab3842d
|
[
"MIT"
] | null | null | null |
2021/day4_part1.py
|
rogall-e/advent_of_code
|
a8c41fb63478dd7b99c88c4fa99b7ba0bab3842d
|
[
"MIT"
] | null | null | null |
2021/day4_part1.py
|
rogall-e/advent_of_code
|
a8c41fb63478dd7b99c88c4fa99b7ba0bab3842d
|
[
"MIT"
] | null | null | null |
import numpy as np
with open("data/day4.txt") as f:
drawing_numbers = f.readline()
board_lst = []
board_line = []
counter = 0
for line in f:
if line != '\n':
board_line.append(line.strip())
if len(board_line) == 5:
board_lst.append(board_line)
board_line = []
drawing_numbers = drawing_numbers.strip().split(',')
def create_board(board_rows):
    board_array = []
    for row in board_rows:
        board = [x for x in row.split(' ') if x.strip() != '']
board_array.append(board)
board_array = np.array(board_array)
board_array = board_array.astype(float)
return board_array
def check_winning(board_lst, number_lst):
winning_condition = {
'Answer': 0,
'counter': 625
}
for item in board_lst:
board = create_board(item)
counter=0
for number in number_lst:
number = float(number)
counter += 1
if number in board:
result = np.where(board == number)
board[int(result[0])][int(result[1])] = np.nan
if np.all(np.isnan(board), axis=1).any() or np.all(np.isnan(board), axis=0).any():
if counter < winning_condition['counter']:
winning_condition['counter'] = counter
winning_condition['Answer'] = number * np.nansum(board)
print('The Answer is:', winning_condition)
check_winning(board_lst, drawing_numbers)
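# --- A minimal sketch of the win check on a 2x2 toy board (sizes are
# illustrative; the puzzle uses 5x5 boards). Marked numbers become NaN,
# and a fully-NaN row or column signals a winning board.
def _demo_win_check():
    toy = np.array([[np.nan, np.nan],
                    [3.0, np.nan]])
    return np.all(np.isnan(toy), axis=1).any() or np.all(np.isnan(toy), axis=0).any()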
| 31.72
| 94
| 0.552333
|
import numpy as np
with open("data/day4.txt") as f:
drawing_numbers = f.readline()
board_lst = []
board_line = []
counter = 0
for line in f:
if line != '\n':
board_line.append(line.strip())
if len(board_line) == 5:
board_lst.append(board_line)
board_line = []
drawing_numbers = drawing_numbers.strip().split(',')
def create_board(board_rows):
    board_array = []
    for row in board_rows:
        board = [x for x in row.split(' ') if x.strip() != '']
board_array.append(board)
board_array = np.array(board_array)
board_array = board_array.astype(float)
return board_array
def check_winning(board_lst, number_lst):
winning_condition = {
'Answer': 0,
'counter': 625
}
for item in board_lst:
board = create_board(item)
counter=0
for number in number_lst:
number = float(number)
counter += 1
if number in board:
result = np.where(board == number)
board[int(result[0])][int(result[1])] = np.nan
if np.all(np.isnan(board), axis=1).any() or np.all(np.isnan(board), axis=0).any():
if counter < winning_condition['counter']:
winning_condition['counter'] = counter
winning_condition['Answer'] = number * np.nansum(board)
print('The Answer is:', winning_condition)
check_winning(board_lst, drawing_numbers)
| true
| true
|
790b4ba261fe601b64808fc5eebd5c2badfc475e
| 5,824
|
py
|
Python
|
cron/email_receive.py
|
dotskapes/dotSkapes
|
50228926f42a9c3bbc050b08922342a83a974755
|
[
"MIT"
] | 1
|
2016-01-01T12:22:48.000Z
|
2016-01-01T12:22:48.000Z
|
cron/email_receive.py
|
ptressel/sahana-eden-madpub
|
b16418b36d0fb781fd045f7e7edd1a30259a1f35
|
[
"MIT"
] | 1
|
2016-03-11T06:05:39.000Z
|
2016-03-11T06:05:39.000Z
|
cron/email_receive.py
|
dotskapes/dotSkapes
|
50228926f42a9c3bbc050b08922342a83a974755
|
[
"MIT"
] | 1
|
2020-04-29T13:58:31.000Z
|
2020-04-29T13:58:31.000Z
|
# -*- coding: utf-8 -*-
# This is a simple mailbox polling script for the Sahana Messaging Module
# If there is a need to collect from non-compliant mailers then suggest using the robust Fetchmail to collect & store in a more compliant mailer!
# This script doesn't handle MIME attachments
import sys, socket, email, uuid
# Read-in configuration from Database
settings = db(db.msg_email_settings.id == 1).select(limitby=(0, 1)).first()
host = settings.inbound_mail_server
server_type = settings.inbound_mail_type
ssl = settings.inbound_mail_ssl
port = settings.inbound_mail_port
username = settings.inbound_mail_username
password = settings.inbound_mail_password
delete = settings.inbound_mail_delete
if server_type == "pop3":
import poplib
# http://docs.python.org/library/poplib.html
try:
if ssl:
p = poplib.POP3_SSL(host, port)
else:
p = poplib.POP3(host, port)
except socket.error, e:
error = "Cannot connect: %s" % e
print error
# Store status in the DB
try:
id = db().select(db.msg_email_inbound_status.id, limitby=(0, 1)).first().id
db(db.msg_email_inbound_status.id == id).update(status=error)
except:
db.msg_email_inbound_status.insert(status=error)
# Explicitly commit DB operations when running from Cron
db.commit()
sys.exit(1)
try:
# Attempting APOP authentication...
p.apop(username, password)
except poplib.error_proto:
# Attempting standard authentication...
try:
p.user(username)
p.pass_(password)
except poplib.error_proto, e:
print "Login failed:", e
# Store status in the DB
try:
id = db().select(db.msg_email_inbound_status.id, limitby=(0, 1)).first().id
db(db.msg_email_inbound_status.id == id).update(status="Login failed: %s" % e)
except:
db.msg_email_inbound_status.insert(status="Login failed: %s" % e)
# Explicitly commit DB operations when running from Cron
db.commit()
sys.exit(1)
dellist = []
mblist = p.list()[1]
for item in mblist:
number, octets = item.split(" ")
# Retrieve the message (storing it in a list of lines)
lines = p.retr(number)[1]
# Create an e-mail object representing the message
msg = email.message_from_string("\n".join(lines))
# Parse out the 'From' Header
sender = msg["from"]
# Parse out the 'Subject' Header
if "subject" in msg:
subject = msg["subject"]
else:
subject = ""
# Parse out the 'Body'
textParts = msg.get_payload()
body = textParts[0].get_payload()
# Store in DB
uuidstamp = uuid.uuid4()
db.msg_email_inbox.insert(uuid=uuidstamp, sender=sender, subject=subject, body=body)
if delete:
# Add it to the list of messages to delete later
dellist.append(number)
# Explicitly commit DB operations when running from Cron
db.commit()
# Iterate over the list of messages to delete
for number in dellist:
p.dele(number)
p.quit()
elif server_type == "imap":
import imaplib
# http://docs.python.org/library/imaplib.html
try:
if ssl:
M = imaplib.IMAP4_SSL(host, port)
else:
M = imaplib.IMAP4(host, port)
except socket.error, e:
error = "Cannot connect: %s" % e
print error
# Store status in the DB
try:
id = db().select(db.msg_email_inbound_status.id, limitby=(0, 1)).first().id
db(db.msg_email_inbound_status.id == id).update(status=error)
except:
db.msg_email_inbound_status.insert(status=error)
# Explicitly commit DB operations when running from Cron
db.commit()
sys.exit(1)
try:
M.login(username, password)
except M.error, e:
error = "Login failed: %s" % e
print error
# Store status in the DB
try:
id = db().select(db.msg_email_inbound_status.id, limitby=(0, 1)).first().id
db(db.msg_email_inbound_status.id == id).update(status=error)
except:
db.msg_email_inbound_status.insert(status=error)
# Explicitly commit DB operations when running from Cron
db.commit()
sys.exit(1)
dellist = []
# Select inbox
M.select()
# Search for Messages to Download
typ, data = M.search(None, "ALL")
for num in data[0].split():
typ, msg_data = M.fetch(num, "(RFC822)")
for response_part in msg_data:
if isinstance(response_part, tuple):
msg = email.message_from_string(response_part[1])
# Parse out the 'From' Header
sender = msg["from"]
# Parse out the 'Subject' Header
if "subject" in msg:
subject = msg["subject"]
else:
subject = ""
# Parse out the 'Body'
textParts = msg.get_payload()
body = textParts[0].get_payload()
# Store in DB
uuidstamp = uuid.uuid4()
db.msg_email_inbox.insert(uuid=uuidstamp, sender=sender, subject=subject, body=body)
if delete:
# Add it to the list of messages to delete later
dellist.append(num)
# Explicitly commit DB operations when running from Cron
db.commit()
# Iterate over the list of messages to delete
for number in dellist:
typ, response = M.store(number, "+FLAGS", r"(\Deleted)")
M.close()
M.logout()
| 37.333333
| 145
| 0.589973
|
import sys, socket, email, uuid
# Read-in configuration from Database
settings = db(db.msg_email_settings.id == 1).select(limitby=(0, 1)).first()
host = settings.inbound_mail_server
server_type = settings.inbound_mail_type
ssl = settings.inbound_mail_ssl
port = settings.inbound_mail_port
username = settings.inbound_mail_username
password = settings.inbound_mail_password
delete = settings.inbound_mail_delete
if server_type == "pop3":
import poplib
# http://docs.python.org/library/poplib.html
try:
if ssl:
p = poplib.POP3_SSL(host, port)
else:
p = poplib.POP3(host, port)
except socket.error, e:
error = "Cannot connect: %s" % e
print error
# Store status in the DB
try:
id = db().select(db.msg_email_inbound_status.id, limitby=(0, 1)).first().id
db(db.msg_email_inbound_status.id == id).update(status=error)
except:
db.msg_email_inbound_status.insert(status=error)
# Explicitly commit DB operations when running from Cron
db.commit()
sys.exit(1)
try:
# Attempting APOP authentication...
p.apop(username, password)
except poplib.error_proto:
# Attempting standard authentication...
try:
p.user(username)
p.pass_(password)
except poplib.error_proto, e:
print "Login failed:", e
# Store status in the DB
try:
id = db().select(db.msg_email_inbound_status.id, limitby=(0, 1)).first().id
db(db.msg_email_inbound_status.id == id).update(status="Login failed: %s" % e)
except:
db.msg_email_inbound_status.insert(status="Login failed: %s" % e)
# Explicitly commit DB operations when running from Cron
db.commit()
sys.exit(1)
dellist = []
mblist = p.list()[1]
for item in mblist:
number, octets = item.split(" ")
# Retrieve the message (storing it in a list of lines)
lines = p.retr(number)[1]
# Create an e-mail object representing the message
msg = email.message_from_string("\n".join(lines))
# Parse out the 'From' Header
sender = msg["from"]
# Parse out the 'Subject' Header
if "subject" in msg:
subject = msg["subject"]
else:
subject = ""
# Parse out the 'Body'
textParts = msg.get_payload()
body = textParts[0].get_payload()
# Store in DB
uuidstamp = uuid.uuid4()
db.msg_email_inbox.insert(uuid=uuidstamp, sender=sender, subject=subject, body=body)
if delete:
# Add it to the list of messages to delete later
dellist.append(number)
# Explicitly commit DB operations when running from Cron
db.commit()
# Iterate over the list of messages to delete
for number in dellist:
p.dele(number)
p.quit()
elif server_type == "imap":
import imaplib
# http://docs.python.org/library/imaplib.html
try:
if ssl:
M = imaplib.IMAP4_SSL(host, port)
else:
M = imaplib.IMAP4(host, port)
except socket.error, e:
error = "Cannot connect: %s" % e
print error
# Store status in the DB
try:
id = db().select(db.msg_email_inbound_status.id, limitby=(0, 1)).first().id
db(db.msg_email_inbound_status.id == id).update(status=error)
except:
db.msg_email_inbound_status.insert(status=error)
# Explicitly commit DB operations when running from Cron
db.commit()
sys.exit(1)
try:
M.login(username, password)
except M.error, e:
error = "Login failed: %s" % e
print error
# Store status in the DB
try:
id = db().select(db.msg_email_inbound_status.id, limitby=(0, 1)).first().id
db(db.msg_email_inbound_status.id == id).update(status=error)
except:
db.msg_email_inbound_status.insert(status=error)
# Explicitly commit DB operations when running from Cron
db.commit()
sys.exit(1)
dellist = []
# Select inbox
M.select()
# Search for Messages to Download
typ, data = M.search(None, "ALL")
for num in data[0].split():
typ, msg_data = M.fetch(num, "(RFC822)")
for response_part in msg_data:
if isinstance(response_part, tuple):
msg = email.message_from_string(response_part[1])
# Parse out the 'From' Header
sender = msg["from"]
# Parse out the 'Subject' Header
if "subject" in msg:
subject = msg["subject"]
else:
subject = ""
# Parse out the 'Body'
textParts = msg.get_payload()
body = textParts[0].get_payload()
# Store in DB
uuidstamp = uuid.uuid4()
db.msg_email_inbox.insert(uuid=uuidstamp, sender=sender, subject=subject, body=body)
if delete:
# Add it to the list of messages to delete later
dellist.append(num)
# Explicitly commit DB operations when running from Cron
db.commit()
# Iterate over the list of messages to delete
for number in dellist:
typ, response = M.store(number, "+FLAGS", r"(\Deleted)")
M.close()
M.logout()
| false
| true
|
790b4bdad6cc5361a652e0c4a545d443b372a6d5
| 24,225
|
py
|
Python
|
sympy/physics/hep/gamma_matrices.py
|
ricopicone/sympy
|
de27c97214d540247a35c8215c7920e9a46b54ed
|
[
"BSD-3-Clause"
] | 445
|
2019-01-26T13:50:26.000Z
|
2022-03-18T05:17:38.000Z
|
Library/lib/python3.7/site-packages/sympy/physics/hep/gamma_matrices.py
|
gengyong/Carnets
|
8930a14f69360d4db115a85ff9e0f6efa80fa2e7
|
[
"BSD-3-Clause"
] | 242
|
2019-01-29T15:48:27.000Z
|
2022-03-31T22:09:21.000Z
|
Library/lib/python3.7/site-packages/sympy/physics/hep/gamma_matrices.py
|
gengyong/Carnets
|
8930a14f69360d4db115a85ff9e0f6efa80fa2e7
|
[
"BSD-3-Clause"
] | 31
|
2019-03-10T09:51:27.000Z
|
2022-02-14T23:11:12.000Z
|
"""
Module to handle gamma matrices expressed as tensor objects.
Examples
========
>>> from sympy.physics.hep.gamma_matrices import GammaMatrix as G, LorentzIndex
>>> from sympy.tensor.tensor import tensor_indices
>>> i = tensor_indices('i', LorentzIndex)
>>> G(i)
GammaMatrix(i)
Note that there is already an instance of GammaMatrixHead in four dimensions:
GammaMatrix, which is simply declared as
>>> from sympy.physics.hep.gamma_matrices import GammaMatrix
>>> from sympy.tensor.tensor import tensor_indices
>>> i = tensor_indices('i', LorentzIndex)
>>> GammaMatrix(i)
GammaMatrix(i)
To access the metric tensor
>>> LorentzIndex.metric
metric(LorentzIndex,LorentzIndex)
"""
from sympy import S, Mul, eye, trace
from sympy.tensor.tensor import TensorIndexType, TensorIndex,\
TensMul, TensAdd, tensor_mul, Tensor, TensorHead, TensorSymmetry
from sympy.core.compatibility import range
# DiracSpinorIndex = TensorIndexType('DiracSpinorIndex', dim=4, dummy_fmt="S")
LorentzIndex = TensorIndexType('LorentzIndex', dim=4, dummy_fmt="L")
GammaMatrix = TensorHead("GammaMatrix", [LorentzIndex],
TensorSymmetry.no_symmetry(1), comm=None)
def extract_type_tens(expression, component):
"""
Extract from a ``TensExpr`` all tensors with `component`.
Returns two tensor expressions:
    * the first contains all ``Tensor`` having `component`.
* the second contains all remaining.
"""
if isinstance(expression, Tensor):
sp = [expression]
elif isinstance(expression, TensMul):
sp = expression.args
else:
raise ValueError('wrong type')
# Collect all gamma matrices of the same dimension
new_expr = S.One
residual_expr = S.One
for i in sp:
if isinstance(i, Tensor) and i.component == component:
new_expr *= i
else:
residual_expr *= i
return new_expr, residual_expr
def simplify_gamma_expression(expression):
extracted_expr, residual_expr = extract_type_tens(expression, GammaMatrix)
res_expr = _simplify_single_line(extracted_expr)
return res_expr * residual_expr
def simplify_gpgp(ex, sort=True):
"""
simplify products ``G(i)*p(-i)*G(j)*p(-j) -> p(i)*p(-i)``
Examples
========
>>> from sympy.physics.hep.gamma_matrices import GammaMatrix as G, \
LorentzIndex, simplify_gpgp
>>> from sympy.tensor.tensor import tensor_indices, tensor_heads
>>> p, q = tensor_heads('p, q', [LorentzIndex])
>>> i0,i1,i2,i3,i4,i5 = tensor_indices('i0:6', LorentzIndex)
>>> ps = p(i0)*G(-i0)
>>> qs = q(i0)*G(-i0)
>>> simplify_gpgp(ps*qs*qs)
GammaMatrix(-L_0)*p(L_0)*q(L_1)*q(-L_1)
"""
def _simplify_gpgp(ex):
components = ex.components
a = []
comp_map = []
for i, comp in enumerate(components):
comp_map.extend([i]*comp.rank)
dum = [(i[0], i[1], comp_map[i[0]], comp_map[i[1]]) for i in ex.dum]
for i in range(len(components)):
if components[i] != GammaMatrix:
continue
for dx in dum:
if dx[2] == i:
p_pos1 = dx[3]
elif dx[3] == i:
p_pos1 = dx[2]
else:
continue
comp1 = components[p_pos1]
if comp1.comm == 0 and comp1.rank == 1:
a.append((i, p_pos1))
if not a:
return ex
elim = set()
tv = []
hit = True
coeff = S.One
ta = None
while hit:
hit = False
for i, ai in enumerate(a[:-1]):
if ai[0] in elim:
continue
if ai[0] != a[i + 1][0] - 1:
continue
if components[ai[1]] != components[a[i + 1][1]]:
continue
elim.add(ai[0])
elim.add(ai[1])
elim.add(a[i + 1][0])
elim.add(a[i + 1][1])
if not ta:
ta = ex.split()
mu = TensorIndex('mu', LorentzIndex)
hit = True
if i == 0:
coeff = ex.coeff
tx = components[ai[1]](mu)*components[ai[1]](-mu)
if len(a) == 2:
tx *= 4 # eye(4)
tv.append(tx)
break
if tv:
a = [x for j, x in enumerate(ta) if j not in elim]
a.extend(tv)
t = tensor_mul(*a)*coeff
return t
else:
return ex
if sort:
ex = ex.sorted_components()
# this would be better off with pattern matching
while 1:
t = _simplify_gpgp(ex)
if t != ex:
ex = t
else:
return t
def gamma_trace(t):
"""
trace of a single line of gamma matrices
Examples
========
>>> from sympy.physics.hep.gamma_matrices import GammaMatrix as G, \
gamma_trace, LorentzIndex
>>> from sympy.tensor.tensor import tensor_indices, tensor_heads
>>> p, q = tensor_heads('p, q', [LorentzIndex])
>>> i0,i1,i2,i3,i4,i5 = tensor_indices('i0:6', LorentzIndex)
>>> ps = p(i0)*G(-i0)
>>> qs = q(i0)*G(-i0)
>>> gamma_trace(G(i0)*G(i1))
4*metric(i0, i1)
>>> gamma_trace(ps*ps) - 4*p(i0)*p(-i0)
0
>>> gamma_trace(ps*qs + ps*ps) - 4*p(i0)*p(-i0) - 4*p(i0)*q(-i0)
0
"""
if isinstance(t, TensAdd):
res = TensAdd(*[_trace_single_line(x) for x in t.args])
return res
t = _simplify_single_line(t)
res = _trace_single_line(t)
return res
def _simplify_single_line(expression):
"""
Simplify single-line product of gamma matrices.
Examples
========
>>> from sympy.physics.hep.gamma_matrices import GammaMatrix as G, \
LorentzIndex, _simplify_single_line
>>> from sympy.tensor.tensor import tensor_indices, TensorHead
>>> p = TensorHead('p', [LorentzIndex])
>>> i0,i1 = tensor_indices('i0:2', LorentzIndex)
>>> _simplify_single_line(G(i0)*G(i1)*p(-i1)*G(-i0)) + 2*G(i0)*p(-i0)
0
"""
t1, t2 = extract_type_tens(expression, GammaMatrix)
if t1 != 1:
t1 = kahane_simplify(t1)
res = t1*t2
return res
def _trace_single_line(t):
"""
Evaluate the trace of a single gamma matrix line inside a ``TensExpr``.
Notes
=====
If there are ``DiracSpinorIndex.auto_left`` and ``DiracSpinorIndex.auto_right``
    indices, trace over them; otherwise traces are not implied.
Examples
========
>>> from sympy.physics.hep.gamma_matrices import GammaMatrix as G, \
LorentzIndex, _trace_single_line
>>> from sympy.tensor.tensor import tensor_indices, TensorHead
>>> p = TensorHead('p', [LorentzIndex])
>>> i0,i1,i2,i3,i4,i5 = tensor_indices('i0:6', LorentzIndex)
>>> _trace_single_line(G(i0)*G(i1))
4*metric(i0, i1)
>>> _trace_single_line(G(i0)*p(-i0)*G(i1)*p(-i1)) - 4*p(i0)*p(-i0)
0
"""
def _trace_single_line1(t):
t = t.sorted_components()
components = t.components
ncomps = len(components)
g = LorentzIndex.metric
        # gamma matrices are in a[i:j]
hit = 0
for i in range(ncomps):
if components[i] == GammaMatrix:
hit = 1
break
for j in range(i + hit, ncomps):
if components[j] != GammaMatrix:
break
else:
j = ncomps
numG = j - i
if numG == 0:
tcoeff = t.coeff
return t.nocoeff if tcoeff else t
if numG % 2 == 1:
return TensMul.from_data(S.Zero, [], [], [])
elif numG > 4:
# find the open matrix indices and connect them:
a = t.split()
ind1 = a[i].get_indices()[0]
ind2 = a[i + 1].get_indices()[0]
aa = a[:i] + a[i + 2:]
t1 = tensor_mul(*aa)*g(ind1, ind2)
t1 = t1.contract_metric(g)
args = [t1]
sign = 1
for k in range(i + 2, j):
sign = -sign
ind2 = a[k].get_indices()[0]
aa = a[:i] + a[i + 1:k] + a[k + 1:]
t2 = sign*tensor_mul(*aa)*g(ind1, ind2)
t2 = t2.contract_metric(g)
t2 = simplify_gpgp(t2, False)
args.append(t2)
t3 = TensAdd(*args)
t3 = _trace_single_line(t3)
return t3
else:
a = t.split()
t1 = _gamma_trace1(*a[i:j])
a2 = a[:i] + a[j:]
t2 = tensor_mul(*a2)
t3 = t1*t2
if not t3:
return t3
t3 = t3.contract_metric(g)
return t3
t = t.expand()
if isinstance(t, TensAdd):
a = [_trace_single_line1(x)*x.coeff for x in t.args]
return TensAdd(*a)
elif isinstance(t, (Tensor, TensMul)):
r = t.coeff*_trace_single_line1(t)
return r
else:
return trace(t)
def _gamma_trace1(*a):
gctr = 4 # FIXME specific for d=4
g = LorentzIndex.metric
if not a:
return gctr
n = len(a)
if n%2 == 1:
return S.Zero
if n == 2:
ind0 = a[0].get_indices()[0]
ind1 = a[1].get_indices()[0]
return gctr*g(ind0, ind1)
if n == 4:
ind0 = a[0].get_indices()[0]
ind1 = a[1].get_indices()[0]
ind2 = a[2].get_indices()[0]
ind3 = a[3].get_indices()[0]
return gctr*(g(ind0, ind1)*g(ind2, ind3) - \
g(ind0, ind2)*g(ind1, ind3) + g(ind0, ind3)*g(ind1, ind2))
def kahane_simplify(expression):
r"""
This function cancels contracted elements in a product of four
dimensional gamma matrices, resulting in an expression equal to the given
one, without the contracted gamma matrices.
Parameters
==========
`expression` the tensor expression containing the gamma matrices to simplify.
Notes
=====
If spinor indices are given, the matrices must be given in
the order given in the product.
Algorithm
=========
The idea behind the algorithm is to use some well-known identities,
i.e., for contractions enclosing an even number of `\gamma` matrices
`\gamma^\mu \gamma_{a_1} \cdots \gamma_{a_{2N}} \gamma_\mu = 2 (\gamma_{a_{2N}} \gamma_{a_1} \cdots \gamma_{a_{2N-1}} + \gamma_{a_{2N-1}} \cdots \gamma_{a_1} \gamma_{a_{2N}} )`
for an odd number of `\gamma` matrices
`\gamma^\mu \gamma_{a_1} \cdots \gamma_{a_{2N+1}} \gamma_\mu = -2 \gamma_{a_{2N+1}} \gamma_{a_{2N}} \cdots \gamma_{a_{1}}`
Instead of repeatedly applying these identities to cancel out all contracted indices,
it is possible to recognize the links that would result from such an operation,
the problem is thus reduced to a simple rearrangement of free gamma matrices.
Examples
========
When using, always remember that the original expression coefficient
has to be handled separately
>>> from sympy.physics.hep.gamma_matrices import GammaMatrix as G, LorentzIndex
>>> from sympy.physics.hep.gamma_matrices import kahane_simplify
>>> from sympy.tensor.tensor import tensor_indices
>>> i0, i1, i2 = tensor_indices('i0:3', LorentzIndex)
>>> ta = G(i0)*G(-i0)
>>> kahane_simplify(ta)
Matrix([
[4, 0, 0, 0],
[0, 4, 0, 0],
[0, 0, 4, 0],
[0, 0, 0, 4]])
>>> tb = G(i0)*G(i1)*G(-i0)
>>> kahane_simplify(tb)
-2*GammaMatrix(i1)
>>> t = G(i0)*G(-i0)
>>> kahane_simplify(t)
Matrix([
[4, 0, 0, 0],
[0, 4, 0, 0],
[0, 0, 4, 0],
[0, 0, 0, 4]])
>>> t = G(i0)*G(-i0)
>>> kahane_simplify(t)
Matrix([
[4, 0, 0, 0],
[0, 4, 0, 0],
[0, 0, 4, 0],
[0, 0, 0, 4]])
If there are no contractions, the same expression is returned
>>> tc = G(i0)*G(i1)
>>> kahane_simplify(tc)
GammaMatrix(i0)*GammaMatrix(i1)
References
==========
[1] Algorithm for Reducing Contracted Products of gamma Matrices,
Joseph Kahane, Journal of Mathematical Physics, Vol. 9, No. 10, October 1968.
"""
if isinstance(expression, Mul):
return expression
if isinstance(expression, TensAdd):
return TensAdd(*[kahane_simplify(arg) for arg in expression.args])
if isinstance(expression, Tensor):
return expression
assert isinstance(expression, TensMul)
gammas = expression.args
for gamma in gammas:
assert gamma.component == GammaMatrix
free = expression.free
# spinor_free = [_ for _ in expression.free_in_args if _[1] != 0]
# if len(spinor_free) == 2:
# spinor_free.sort(key=lambda x: x[2])
# assert spinor_free[0][1] == 1 and spinor_free[-1][1] == 2
# assert spinor_free[0][2] == 0
# elif spinor_free:
# raise ValueError('spinor indices do not match')
dum = []
for dum_pair in expression.dum:
if expression.index_types[dum_pair[0]] == LorentzIndex:
dum.append((dum_pair[0], dum_pair[1]))
dum = sorted(dum)
    if len(dum) == 0:
# no contractions in `expression`, just return it.
return expression
    # find the `first_dum_pos`, i.e. the position of the first contracted
    # gamma matrix. Kahane's algorithm, as described in his paper, requires the
    # gamma matrix expression to start with a contracted gamma matrix; this is
    # a workaround which ignores possible initial free indices and re-adds
    # them later.
first_dum_pos = min(map(min, dum))
# for p1, p2, a1, a2 in expression.dum_in_args:
# if p1 != 0 or p2 != 0:
# # only Lorentz indices, skip Dirac indices:
# continue
# first_dum_pos = min(p1, p2)
# break
total_number = len(free) + len(dum)*2
number_of_contractions = len(dum)
free_pos = [None]*total_number
for i in free:
free_pos[i[1]] = i[0]
# `index_is_free` is a list of booleans, to identify index position
# and whether that index is free or dummy.
index_is_free = [False]*total_number
for i, indx in enumerate(free):
index_is_free[indx[1]] = True
# `links` is a dictionary containing the graph described in Kahane's paper,
# to every key correspond one or two values, representing the linked indices.
# All values in `links` are integers, negative numbers are used in the case
# where it is necessary to insert gamma matrices between free indices, in
# order to make Kahane's algorithm work (see paper).
links = dict()
for i in range(first_dum_pos, total_number):
links[i] = []
# `cum_sign` is a step variable to mark the sign of every index, see paper.
cum_sign = -1
# `cum_sign_list` keeps storage for all `cum_sign` (every index).
cum_sign_list = [None]*total_number
block_free_count = 0
# multiply `resulting_coeff` by the coefficient parameter, the rest
# of the algorithm ignores a scalar coefficient.
resulting_coeff = S.One
# initialize a list of lists of indices. The outer list will contain all
# additive tensor expressions, while the inner list will contain the
# free indices (rearranged according to the algorithm).
resulting_indices = [[]]
# start to count the `connected_components`, which together with the number
# of contractions, determines a -1 or +1 factor to be multiplied.
connected_components = 1
# First loop: here we fill `cum_sign_list`, and draw the links
# among consecutive indices (they are stored in `links`). Links among
# non-consecutive indices will be drawn later.
for i, is_free in enumerate(index_is_free):
# if `expression` starts with free indices, they are ignored here;
# they are later added as they are to the beginning of all
# `resulting_indices` list of lists of indices.
if i < first_dum_pos:
continue
if is_free:
block_free_count += 1
# if previous index was free as well, draw an arch in `links`.
if block_free_count > 1:
links[i - 1].append(i)
links[i].append(i - 1)
else:
# Change the sign of the index (`cum_sign`) if the number of free
# indices preceding it is even.
cum_sign *= 1 if (block_free_count % 2) else -1
if block_free_count == 0 and i != first_dum_pos:
# check if there are two consecutive dummy indices:
# in this case create virtual indices with negative position,
# these "virtual" indices represent the insertion of two
# gamma^0 matrices to separate consecutive dummy indices, as
# Kahane's algorithm requires dummy indices to be separated by
# free indices. The product of two gamma^0 matrices is unity,
# so the new expression being examined is the same as the
# original one.
if cum_sign == -1:
links[-1-i] = [-1-i+1]
links[-1-i+1] = [-1-i]
if (i - cum_sign) in links:
if i != first_dum_pos:
links[i].append(i - cum_sign)
if block_free_count != 0:
if i - cum_sign < len(index_is_free):
if index_is_free[i - cum_sign]:
links[i - cum_sign].append(i)
block_free_count = 0
cum_sign_list[i] = cum_sign
# The previous loop has only created links between consecutive free indices,
# it is necessary to properly create links among dummy (contracted) indices,
# according to the rules described in Kahane's paper. There is only one exception
# to Kahane's rules: the negative indices, which handle the case of some
# consecutive free indices (Kahane's paper just describes dummy indices
# separated by free indices, hinting that free indices can be added without
# altering the expression result).
for i in dum:
# get the positions of the two contracted indices:
pos1 = i[0]
pos2 = i[1]
# create Kahane's upper links, i.e. the upper arcs between dummy
# (i.e. contracted) indices:
links[pos1].append(pos2)
links[pos2].append(pos1)
# create Kahane's lower links, this corresponds to the arcs below
# the line described in the paper:
# first we move `pos1` and `pos2` according to the sign of the indices:
linkpos1 = pos1 + cum_sign_list[pos1]
linkpos2 = pos2 + cum_sign_list[pos2]
# otherwise, perform some checks before creating the lower arcs:
# make sure we are not exceeding the total number of indices:
if linkpos1 >= total_number:
continue
if linkpos2 >= total_number:
continue
# make sure we are not below the first dummy index in `expression`:
if linkpos1 < first_dum_pos:
continue
if linkpos2 < first_dum_pos:
continue
# check if the previous loop created "virtual" indices between dummy
# indices, in such a case relink `linkpos1` and `linkpos2`:
if (-1-linkpos1) in links:
linkpos1 = -1-linkpos1
if (-1-linkpos2) in links:
linkpos2 = -1-linkpos2
# move only if not next to free index:
if linkpos1 >= 0 and not index_is_free[linkpos1]:
linkpos1 = pos1
if linkpos2 >=0 and not index_is_free[linkpos2]:
linkpos2 = pos2
# create the lower arcs:
if linkpos2 not in links[linkpos1]:
links[linkpos1].append(linkpos2)
if linkpos1 not in links[linkpos2]:
links[linkpos2].append(linkpos1)
# This loop starts from the `first_dum_pos` index (first dummy index)
# walks through the graph deleting the visited indices from `links`,
    # it adds a gamma matrix for every free index it encounters, while it
# completely ignores dummy indices and virtual indices.
pointer = first_dum_pos
previous_pointer = 0
while True:
if pointer in links:
next_ones = links.pop(pointer)
else:
break
if previous_pointer in next_ones:
next_ones.remove(previous_pointer)
previous_pointer = pointer
if next_ones:
pointer = next_ones[0]
else:
break
if pointer == previous_pointer:
break
if pointer >=0 and free_pos[pointer] is not None:
for ri in resulting_indices:
ri.append(free_pos[pointer])
# The following loop removes the remaining connected components in `links`.
# If there are free indices inside a connected component, it gives a
# contribution to the resulting expression given by the factor
    # `gamma_a gamma_b ... gamma_z + gamma_z ... gamma_b gamma_a`, in Kahane's
# paper represented as {gamma_a, gamma_b, ... , gamma_z},
# virtual indices are ignored. The variable `connected_components` is
# increased by one for every connected component this loop encounters.
# If the connected component has virtual and dummy indices only
# (no free indices), it contributes to `resulting_indices` by a factor of two.
# The multiplication by two is a result of the
# factor {gamma^0, gamma^0} = 2 I, as it appears in Kahane's paper.
# Note: curly brackets are meant as in the paper, as a generalized
# multi-element anticommutator!
while links:
connected_components += 1
pointer = min(links.keys())
previous_pointer = pointer
# the inner loop erases the visited indices from `links`, and it adds
# all free indices to `prepend_indices` list, virtual indices are
# ignored.
prepend_indices = []
while True:
if pointer in links:
next_ones = links.pop(pointer)
else:
break
if previous_pointer in next_ones:
if len(next_ones) > 1:
next_ones.remove(previous_pointer)
previous_pointer = pointer
if next_ones:
pointer = next_ones[0]
if pointer >= first_dum_pos and free_pos[pointer] is not None:
prepend_indices.insert(0, free_pos[pointer])
# if `prepend_indices` is void, it means there are no free indices
# in the loop (and it can be shown that there must be a virtual index),
# loops of virtual indices only contribute by a factor of two:
if len(prepend_indices) == 0:
resulting_coeff *= 2
# otherwise, add the free indices in `prepend_indices` to
# the `resulting_indices`:
else:
expr1 = prepend_indices
expr2 = list(reversed(prepend_indices))
resulting_indices = [expri + ri for ri in resulting_indices for expri in (expr1, expr2)]
# sign correction, as described in Kahane's paper:
resulting_coeff *= -1 if (number_of_contractions - connected_components + 1) % 2 else 1
# power of two factor, as described in Kahane's paper:
resulting_coeff *= 2**(number_of_contractions)
# If `first_dum_pos` is not zero, it means that there are trailing free gamma
# matrices in front of `expression`, so multiply by them:
for i in range(0, first_dum_pos):
[ri.insert(0, free_pos[i]) for ri in resulting_indices]
resulting_expr = S.Zero
for i in resulting_indices:
temp_expr = S.One
for j in i:
temp_expr *= GammaMatrix(j)
resulting_expr += temp_expr
t = resulting_coeff * resulting_expr
t1 = None
if isinstance(t, TensAdd):
t1 = t.args[0]
elif isinstance(t, TensMul):
t1 = t
if t1:
pass
else:
t = eye(4)*t
return t
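# --- A minimal sketch (illustrative index names) exercising the two
# contraction identities quoted in kahane_simplify()'s docstring:
def _kahane_identity_check():
    from sympy.tensor.tensor import tensor_indices
    i0, i1 = tensor_indices('i0:2', LorentzIndex)
    # odd number of enclosed matrices: gamma^mu gamma_a gamma_mu = -2 gamma_a
    odd = kahane_simplify(GammaMatrix(i0)*GammaMatrix(i1)*GammaMatrix(-i0))
    # no enclosed matrices: gamma^mu gamma_mu = 4, times a 4x4 identity matrix
    even = kahane_simplify(GammaMatrix(i0)*GammaMatrix(-i0))
    return odd, even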
| 33.739554
| 180
| 0.588483
|
from sympy import S, Mul, eye, trace
from sympy.tensor.tensor import TensorIndexType, TensorIndex,\
TensMul, TensAdd, tensor_mul, Tensor, TensorHead, TensorSymmetry
from sympy.core.compatibility import range
LorentzIndex = TensorIndexType('LorentzIndex', dim=4, dummy_fmt="L")
GammaMatrix = TensorHead("GammaMatrix", [LorentzIndex],
TensorSymmetry.no_symmetry(1), comm=None)
def extract_type_tens(expression, component):
if isinstance(expression, Tensor):
sp = [expression]
elif isinstance(expression, TensMul):
sp = expression.args
else:
raise ValueError('wrong type')
new_expr = S.One
residual_expr = S.One
for i in sp:
if isinstance(i, Tensor) and i.component == component:
new_expr *= i
else:
residual_expr *= i
return new_expr, residual_expr
def simplify_gamma_expression(expression):
extracted_expr, residual_expr = extract_type_tens(expression, GammaMatrix)
res_expr = _simplify_single_line(extracted_expr)
return res_expr * residual_expr
def simplify_gpgp(ex, sort=True):
def _simplify_gpgp(ex):
components = ex.components
a = []
comp_map = []
for i, comp in enumerate(components):
comp_map.extend([i]*comp.rank)
dum = [(i[0], i[1], comp_map[i[0]], comp_map[i[1]]) for i in ex.dum]
for i in range(len(components)):
if components[i] != GammaMatrix:
continue
for dx in dum:
if dx[2] == i:
p_pos1 = dx[3]
elif dx[3] == i:
p_pos1 = dx[2]
else:
continue
comp1 = components[p_pos1]
if comp1.comm == 0 and comp1.rank == 1:
a.append((i, p_pos1))
if not a:
return ex
elim = set()
tv = []
hit = True
coeff = S.One
ta = None
while hit:
hit = False
for i, ai in enumerate(a[:-1]):
if ai[0] in elim:
continue
if ai[0] != a[i + 1][0] - 1:
continue
if components[ai[1]] != components[a[i + 1][1]]:
continue
elim.add(ai[0])
elim.add(ai[1])
elim.add(a[i + 1][0])
elim.add(a[i + 1][1])
if not ta:
ta = ex.split()
mu = TensorIndex('mu', LorentzIndex)
hit = True
if i == 0:
coeff = ex.coeff
tx = components[ai[1]](mu)*components[ai[1]](-mu)
if len(a) == 2:
tx *= 4
tv.append(tx)
break
if tv:
a = [x for j, x in enumerate(ta) if j not in elim]
a.extend(tv)
t = tensor_mul(*a)*coeff
return t
else:
return ex
if sort:
ex = ex.sorted_components()
while 1:
t = _simplify_gpgp(ex)
if t != ex:
ex = t
else:
return t
def gamma_trace(t):
if isinstance(t, TensAdd):
res = TensAdd(*[_trace_single_line(x) for x in t.args])
return res
t = _simplify_single_line(t)
res = _trace_single_line(t)
return res
def _simplify_single_line(expression):
t1, t2 = extract_type_tens(expression, GammaMatrix)
if t1 != 1:
t1 = kahane_simplify(t1)
res = t1*t2
return res
def _trace_single_line(t):
def _trace_single_line1(t):
t = t.sorted_components()
components = t.components
ncomps = len(components)
g = LorentzIndex.metric
hit = 0
for i in range(ncomps):
if components[i] == GammaMatrix:
hit = 1
break
for j in range(i + hit, ncomps):
if components[j] != GammaMatrix:
break
else:
j = ncomps
numG = j - i
if numG == 0:
tcoeff = t.coeff
return t.nocoeff if tcoeff else t
if numG % 2 == 1:
return TensMul.from_data(S.Zero, [], [], [])
elif numG > 4:
a = t.split()
ind1 = a[i].get_indices()[0]
ind2 = a[i + 1].get_indices()[0]
aa = a[:i] + a[i + 2:]
t1 = tensor_mul(*aa)*g(ind1, ind2)
t1 = t1.contract_metric(g)
args = [t1]
sign = 1
for k in range(i + 2, j):
sign = -sign
ind2 = a[k].get_indices()[0]
aa = a[:i] + a[i + 1:k] + a[k + 1:]
t2 = sign*tensor_mul(*aa)*g(ind1, ind2)
t2 = t2.contract_metric(g)
t2 = simplify_gpgp(t2, False)
args.append(t2)
t3 = TensAdd(*args)
t3 = _trace_single_line(t3)
return t3
else:
a = t.split()
t1 = _gamma_trace1(*a[i:j])
a2 = a[:i] + a[j:]
t2 = tensor_mul(*a2)
t3 = t1*t2
if not t3:
return t3
t3 = t3.contract_metric(g)
return t3
t = t.expand()
if isinstance(t, TensAdd):
a = [_trace_single_line1(x)*x.coeff for x in t.args]
return TensAdd(*a)
elif isinstance(t, (Tensor, TensMul)):
r = t.coeff*_trace_single_line1(t)
return r
else:
return trace(t)
def _gamma_trace1(*a):
gctr = 4
g = LorentzIndex.metric
if not a:
return gctr
n = len(a)
if n%2 == 1:
return S.Zero
if n == 2:
ind0 = a[0].get_indices()[0]
ind1 = a[1].get_indices()[0]
return gctr*g(ind0, ind1)
if n == 4:
ind0 = a[0].get_indices()[0]
ind1 = a[1].get_indices()[0]
ind2 = a[2].get_indices()[0]
ind3 = a[3].get_indices()[0]
return gctr*(g(ind0, ind1)*g(ind2, ind3) - \
g(ind0, ind2)*g(ind1, ind3) + g(ind0, ind3)*g(ind1, ind2))
def kahane_simplify(expression):
if isinstance(expression, Mul):
return expression
if isinstance(expression, TensAdd):
return TensAdd(*[kahane_simplify(arg) for arg in expression.args])
if isinstance(expression, Tensor):
return expression
assert isinstance(expression, TensMul)
gammas = expression.args
for gamma in gammas:
assert gamma.component == GammaMatrix
free = expression.free
dum = []
for dum_pair in expression.dum:
if expression.index_types[dum_pair[0]] == LorentzIndex:
dum.append((dum_pair[0], dum_pair[1]))
dum = sorted(dum)
if len(dum) == 0:
return expression
    # gamma matrix expression to start with a contracted gamma matrix; this is
    # a workaround which ignores possible initial free indices and re-adds
    # them later.
first_dum_pos = min(map(min, dum))
# for p1, p2, a1, a2 in expression.dum_in_args:
# if p1 != 0 or p2 != 0:
# # only Lorentz indices, skip Dirac indices:
# continue
# first_dum_pos = min(p1, p2)
# break
total_number = len(free) + len(dum)*2
number_of_contractions = len(dum)
free_pos = [None]*total_number
for i in free:
free_pos[i[1]] = i[0]
# `index_is_free` is a list of booleans, to identify index position
# and whether that index is free or dummy.
index_is_free = [False]*total_number
for i, indx in enumerate(free):
index_is_free[indx[1]] = True
# `links` is a dictionary containing the graph described in Kahane's paper,
links = dict()
for i in range(first_dum_pos, total_number):
links[i] = []
# `cum_sign` is a step variable to mark the sign of every index, see paper.
cum_sign = -1
# `cum_sign_list` keeps storage for all `cum_sign` (every index).
cum_sign_list = [None]*total_number
block_free_count = 0
# multiply `resulting_coeff` by the coefficient parameter, the rest
# of the algorithm ignores a scalar coefficient.
resulting_coeff = S.One
# initialize a list of lists of indices. The outer list will contain all
# additive tensor expressions, while the inner list will contain the
# free indices (rearranged according to the algorithm).
resulting_indices = [[]]
# start to count the `connected_components`, which together with the number
# of contractions, determines a -1 or +1 factor to be multiplied.
connected_components = 1
# First loop: here we fill `cum_sign_list`, and draw the links
# among consecutive indices (they are stored in `links`). Links among
# non-consecutive indices will be drawn later.
for i, is_free in enumerate(index_is_free):
# if `expression` starts with free indices, they are ignored here;
# they are later added as they are to the beginning of all
# `resulting_indices` list of lists of indices.
if i < first_dum_pos:
continue
if is_free:
block_free_count += 1
# if previous index was free as well, draw an arch in `links`.
if block_free_count > 1:
links[i - 1].append(i)
links[i].append(i - 1)
else:
# Change the sign of the index (`cum_sign`) if the number of free
# indices preceding it is even.
cum_sign *= 1 if (block_free_count % 2) else -1
if block_free_count == 0 and i != first_dum_pos:
# check if there are two consecutive dummy indices:
# in this case create virtual indices with negative position,
# these "virtual" indices represent the insertion of two
# gamma^0 matrices to separate consecutive dummy indices, as
# Kahane's algorithm requires dummy indices to be separated by
if cum_sign == -1:
links[-1-i] = [-1-i+1]
links[-1-i+1] = [-1-i]
if (i - cum_sign) in links:
if i != first_dum_pos:
links[i].append(i - cum_sign)
if block_free_count != 0:
if i - cum_sign < len(index_is_free):
if index_is_free[i - cum_sign]:
links[i - cum_sign].append(i)
block_free_count = 0
cum_sign_list[i] = cum_sign
# to Kahane's rules: the negative indices, which handle the case of some
# separated by free indices, hinting that free indices can be added without
# altering the expression result).
for i in dum:
# get the positions of the two contracted indices:
pos1 = i[0]
pos2 = i[1]
# create Kahane's upper links, i.e. the upper arcs between dummy
links[pos1].append(pos2)
links[pos2].append(pos1)
# the line described in the paper:
# first we move `pos1` and `pos2` according to the sign of the indices:
linkpos1 = pos1 + cum_sign_list[pos1]
linkpos2 = pos2 + cum_sign_list[pos2]
# otherwise, perform some checks before creating the lower arcs:
# make sure we are not exceeding the total number of indices:
if linkpos1 >= total_number:
continue
if linkpos2 >= total_number:
continue
# make sure we are not below the first dummy index in `expression`:
if linkpos1 < first_dum_pos:
continue
if linkpos2 < first_dum_pos:
continue
# check if the previous loop created "virtual" indices between dummy
# indices, in such a case relink `linkpos1` and `linkpos2`:
if (-1-linkpos1) in links:
linkpos1 = -1-linkpos1
if (-1-linkpos2) in links:
linkpos2 = -1-linkpos2
# move only if not next to free index:
if linkpos1 >= 0 and not index_is_free[linkpos1]:
linkpos1 = pos1
if linkpos2 >=0 and not index_is_free[linkpos2]:
linkpos2 = pos2
# create the lower arcs:
if linkpos2 not in links[linkpos1]:
links[linkpos1].append(linkpos2)
if linkpos1 not in links[linkpos2]:
links[linkpos2].append(linkpos1)
    # This loop starts from the `first_dum_pos` index (the first dummy index)
    # and walks through the graph, deleting the visited indices from `links`;
    # it adds a gamma matrix for every free index it encounters, while it
    # completely ignores dummy indices and virtual indices.
pointer = first_dum_pos
previous_pointer = 0
while True:
if pointer in links:
next_ones = links.pop(pointer)
else:
break
if previous_pointer in next_ones:
next_ones.remove(previous_pointer)
previous_pointer = pointer
if next_ones:
pointer = next_ones[0]
else:
break
if pointer == previous_pointer:
break
        if pointer >= 0 and free_pos[pointer] is not None:
for ri in resulting_indices:
ri.append(free_pos[pointer])
# The following loop removes the remaining connected components in `links`.
# If there are free indices inside a connected component, it gives a
# contribution to the resulting expression given by the factor
    # `gamma_a gamma_b ... gamma_z + gamma_z ... gamma_b gamma_a`, in Kahane's
    # paper represented with curly brackets as {gamma_a gamma_b ... gamma_z}.
    # Note: curly brackets are meant as in the paper, as a generalized
    # multi-element anticommutator!
while links:
connected_components += 1
pointer = min(links.keys())
previous_pointer = pointer
# the inner loop erases the visited indices from `links`, and it adds
# all free indices to `prepend_indices` list, virtual indices are
# ignored.
prepend_indices = []
while True:
if pointer in links:
next_ones = links.pop(pointer)
else:
break
if previous_pointer in next_ones:
if len(next_ones) > 1:
next_ones.remove(previous_pointer)
previous_pointer = pointer
if next_ones:
pointer = next_ones[0]
if pointer >= first_dum_pos and free_pos[pointer] is not None:
prepend_indices.insert(0, free_pos[pointer])
        # if `prepend_indices` is empty, it means there are no free indices
# in the loop (and it can be shown that there must be a virtual index),
# loops of virtual indices only contribute by a factor of two:
if len(prepend_indices) == 0:
resulting_coeff *= 2
# otherwise, add the free indices in `prepend_indices` to
# the `resulting_indices`:
else:
expr1 = prepend_indices
expr2 = list(reversed(prepend_indices))
resulting_indices = [expri + ri for ri in resulting_indices for expri in (expr1, expr2)]
# sign correction, as described in Kahane's paper:
resulting_coeff *= -1 if (number_of_contractions - connected_components + 1) % 2 else 1
resulting_coeff *= 2**(number_of_contractions)
    # If `first_dum_pos` is not zero, it means that there are leading free
    # gamma matrices in front of `expression`, so multiply by them:
    for i in range(0, first_dum_pos):
        for ri in resulting_indices:
            ri.insert(0, free_pos[i])
resulting_expr = S.Zero
for i in resulting_indices:
temp_expr = S.One
for j in i:
temp_expr *= GammaMatrix(j)
resulting_expr += temp_expr
t = resulting_coeff * resulting_expr
t1 = None
if isinstance(t, TensAdd):
t1 = t.args[0]
elif isinstance(t, TensMul):
t1 = t
    if not t1:
        # the result is a pure scalar: restore the 4x4 identity matrix factor.
        t = eye(4)*t
return t
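
# Usage sketch for the function above (a hedged example: the import paths and
# session follow sympy's public gamma-matrix API and mirror the module's
# doctest style; kept as comments so nothing runs at import time):
#
#   >>> from sympy.physics.hep.gamma_matrices import GammaMatrix as G, LorentzIndex
#   >>> from sympy.physics.hep.gamma_matrices import kahane_simplify
#   >>> from sympy.tensor.tensor import tensor_indices
#   >>> i0 = tensor_indices('i0', LorentzIndex)
#   >>> kahane_simplify(G(i0)*G(-i0))   # gamma^mu gamma_mu = 4*I
#   Matrix([
#   [4, 0, 0, 0],
#   [0, 4, 0, 0],
#   [0, 0, 4, 0],
#   [0, 0, 0, 4]])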
| true | true | 790b4c8b76a5ed0fd11341ca90d83351298a921a | 4,542 | py | Python | examples/rnnlm/rnnlm.py | kashif/dynet | 95145a3808c5dd54b17eb9ed109c5815142a9b6c | ["Apache-2.0"] | null | null | null | examples/rnnlm/rnnlm.py | kashif/dynet | 95145a3808c5dd54b17eb9ed109c5815142a9b6c | ["Apache-2.0"] | 1 | 2020-07-01T18:31:27.000Z | 2020-07-01T18:31:27.000Z | examples/rnnlm/rnnlm.py | kashif/dynet | 95145a3808c5dd54b17eb9ed109c5815142a9b6c | ["Apache-2.0"] | 1 | 2018-12-26T19:04:47.000Z | 2018-12-26T19:04:47.000Z |
import dynet as dy
import time
import random
LAYERS = 2
INPUT_DIM = 256 #50 #256
HIDDEN_DIM = 256 # 50 #1024
VOCAB_SIZE = 0
from collections import defaultdict
from itertools import count
import argparse
import sys
import util
class RNNLanguageModel:
def __init__(self, model, LAYERS, INPUT_DIM, HIDDEN_DIM, VOCAB_SIZE, builder=dy.SimpleRNNBuilder):
self.builder = builder(LAYERS, INPUT_DIM, HIDDEN_DIM, model)
self.lookup = model.add_lookup_parameters((VOCAB_SIZE, INPUT_DIM))
self.R = model.add_parameters((VOCAB_SIZE, HIDDEN_DIM))
self.bias = model.add_parameters((VOCAB_SIZE))
def save_to_disk(self, filename):
dy.save(filename, [self.builder, self.lookup, self.R, self.bias])
    def load_from_disk(self, filename):
        # NOTE: `model` here is the module-level dy.Model created in __main__
        # below; this method only works while that global exists.
        (self.builder, self.lookup, self.R, self.bias) = dy.load(filename, model)
def build_lm_graph(self, sent):
dy.renew_cg()
init_state = self.builder.initial_state()
R = dy.parameter(self.R)
bias = dy.parameter(self.bias)
errs = [] # will hold expressions
es=[]
state = init_state
for (cw,nw) in zip(sent,sent[1:]):
# assume word is already a word-id
x_t = dy.lookup(self.lookup, int(cw))
state = state.add_input(x_t)
y_t = state.output()
r_t = bias + (R * y_t)
err = dy.pickneglogsoftmax(r_t, int(nw))
errs.append(err)
nerr = dy.esum(errs)
return nerr
def predict_next_word(self, sentence):
dy.renew_cg()
init_state = self.builder.initial_state()
R = dy.parameter(self.R)
bias = dy.parameter(self.bias)
state = init_state
for cw in sentence:
# assume word is already a word-id
x_t = dy.lookup(self.lookup, int(cw))
state = state.add_input(x_t)
y_t = state.output()
r_t = bias + (R * y_t)
prob = dy.softmax(r_t)
return prob
def sample(self, first=1, nchars=0, stop=-1):
res = [first]
dy.renew_cg()
state = self.builder.initial_state()
R = dy.parameter(self.R)
bias = dy.parameter(self.bias)
cw = first
while True:
x_t = dy.lookup(self.lookup, cw)
state = state.add_input(x_t)
y_t = state.output()
r_t = bias + (R * y_t)
ydist = dy.softmax(r_t)
dist = ydist.vec_value()
rnd = random.random()
for i,p in enumerate(dist):
rnd -= p
if rnd <= 0: break
res.append(i)
cw = i
if cw == stop: break
if nchars and len(res) > nchars: break
return res
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('corpus', help='Path to the corpus file.')
args = parser.parse_args()
train = util.CharsCorpusReader(args.corpus, begin="<s>")
vocab = util.Vocab.from_corpus(train)
VOCAB_SIZE = vocab.size()
model = dy.Model()
trainer = dy.SimpleSGDTrainer(model, learning_rate=1.0)
#lm = RNNLanguageModel(model, LAYERS, INPUT_DIM, HIDDEN_DIM, VOCAB_SIZE, builder=dy.SimpleRNNBuilder)
lm = RNNLanguageModel(model, LAYERS, INPUT_DIM, HIDDEN_DIM, VOCAB_SIZE, builder=dy.LSTMBuilder)
train = list(train)
chars = loss = 0.0
for ITER in range(100):
random.shuffle(train)
for i,sent in enumerate(train):
_start = time.time()
if i % 50 == 0:
trainer.status()
if chars > 0: print(loss / chars,)
for _ in range(1):
samp = lm.sample(first=vocab.w2i["<s>"],stop=vocab.w2i["\n"])
print("".join([vocab.i2w[c] for c in samp]).strip())
loss = 0.0
chars = 0.0
chars += len(sent)-1
isent = [vocab.w2i[w] for w in sent]
errs = lm.build_lm_graph(isent)
loss += errs.scalar_value()
errs.backward()
trainer.update()
#print "TM:",(time.time() - _start)/len(sent)
print("ITER {}, loss={}".format(ITER, loss))
trainer.status()
lm.save_to_disk("RNNLanguageModel.model")
print("loading the saved model...")
lm.load_from_disk("RNNLanguageModel.model")
samp = lm.sample(first=vocab.w2i["<s>"],stop=vocab.w2i["\n"])
print("".join([vocab.i2w[c] for c in samp]).strip())
| 32.676259 | 105 | 0.568692 |
| true | true | 790b4cdbb2b430cbe7f136c7a2d9884c8512dd46 | 503 | py | Python | 17. Chapter_/poem_pub.py | Mikma03/Python_Bill_Lubanovic_BookCodes | 8b5b228bb500a08af645a1db6f7c5f33ef5f0512 | ["MIT"] | null | null | null | 17. Chapter_/poem_pub.py | Mikma03/Python_Bill_Lubanovic_BookCodes | 8b5b228bb500a08af645a1db6f7c5f33ef5f0512 | ["MIT"] | null | null | null | 17. Chapter_/poem_pub.py | Mikma03/Python_Bill_Lubanovic_BookCodes | 8b5b228bb500a08af645a1db6f7c5f33ef5f0512 | ["MIT"] | null | null | null |
import string
import zmq
host = '127.0.0.1'
port = 6789
ctx = zmq.Context()
pub = ctx.socket(zmq.PUB)
pub.bind('tcp://%s:%s' % (host, port))
with open('lokomotywa.txt', 'rt') as poem:
words = poem.read()
for word in words.split():
word = word.strip(string.punctuation)
data = word.encode('utf-8')
if word.startswith(('a','e','i','o','u','y','A','E','I','O','U','Y')):
pub.send_multipart([b'samogloski', data])
if len(word) == 5:
pub.send_multipart([b'piec', data])
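
# Companion subscriber sketch (an assumed example, not part of the book code:
# run it as a separate process; topic prefixes match the publisher above,
# b'samogloski' for vowel-initial words and b'piec' for five-letter words).
# Kept commented out so this publisher script is unchanged:
#
# import zmq
# ctx = zmq.Context()
# sub = ctx.socket(zmq.SUB)
# sub.connect('tcp://127.0.0.1:6789')
# sub.setsockopt(zmq.SUBSCRIBE, b'piec')
# while True:
#     topic, word = sub.recv_multipart()
#     print(topic.decode(), word.decode('utf-8'))
#
# Note: start the subscriber first; a PUB socket drops messages sent before a
# subscriber connects (ZMQ's "slow joiner" behaviour).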
| 26.473684 | 74 | 0.586481 |
| true | true | 790b4e8080eb902f22d0acc2ff516cfd68038122 | 2,201 | py | Python | zerver/webhooks/semaphore/view.py | rtzll/zulip | b831df8f7fc2f5b89ec998266901ac491d52a7fc | ["Apache-2.0"] | null | null | null | zerver/webhooks/semaphore/view.py | rtzll/zulip | b831df8f7fc2f5b89ec998266901ac491d52a7fc | ["Apache-2.0"] | 11 | 2021-02-08T20:59:55.000Z | 2022-03-12T00:51:41.000Z | zerver/webhooks/semaphore/view.py | usmanmuhd/zulip | 0600646fbfdcfb20c0c0d47950690a6efac873aa | ["Apache-2.0"] | null | null | null |
# Webhooks for external integrations.
from django.http import HttpRequest, HttpResponse
from django.utils.translation import ugettext as _
from zerver.models import get_client
from zerver.lib.actions import check_send_stream_message
from zerver.lib.response import json_success, json_error
from zerver.decorator import REQ, has_request_variables, api_key_only_webhook_view
from zerver.models import UserProfile
import ujson
from typing import Any, Dict
@api_key_only_webhook_view('Semaphore')
@has_request_variables
def api_semaphore_webhook(request, user_profile,
payload=REQ(argument_type='body'),
stream=REQ(default='builds')):
# type: (HttpRequest, UserProfile, Dict[str, Any], str) -> HttpResponse
# semaphore only gives the last commit, even if there were multiple commits
# since the last build
branch_name = payload["branch_name"]
project_name = payload["project_name"]
result = payload["result"]
event = payload["event"]
commit_id = payload["commit"]["id"]
commit_url = payload["commit"]["url"]
author_email = payload["commit"]["author_email"]
message = payload["commit"]["message"]
if event == "build":
build_url = payload["build_url"]
build_number = payload["build_number"]
content = u"[build %s](%s): %s\n" % (build_number, build_url, result)
elif event == "deploy":
build_url = payload["build_html_url"]
build_number = payload["build_number"]
deploy_url = payload["html_url"]
deploy_number = payload["number"]
server_name = payload["server_name"]
content = u"[deploy %s](%s) of [build %s](%s) on server %s: %s\n" % \
(deploy_number, deploy_url, build_number, build_url, server_name, result)
else: # should never get here
content = u"%s: %s\n" % (event, result)
content += "!avatar(%s) [`%s`](%s): %s" % (author_email, commit_id[:7],
commit_url, message)
subject = u"%s/%s" % (project_name, branch_name)
check_send_stream_message(user_profile, request.client, stream, subject, content)
return json_success()
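
# Illustrative payload fragment this handler expects for a "build" event; the
# field names are exactly the keys read above, while the values are invented:
#
# {
#     "branch_name": "master",
#     "project_name": "zulip",
#     "result": "passed",
#     "event": "build",
#     "build_url": "https://semaphoreci.com/example/builds/42",
#     "build_number": 42,
#     "commit": {
#         "id": "abc1234def5678...",
#         "url": "https://github.com/example/commit/abc1234",
#         "author_email": "author@example.com",
#         "message": "Fix the widget."
#     }
# }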
| 38.614035 | 91 | 0.664244 |
| true | true | 790b4ec6d0ff056774100aa38894b6bfbee68bd6 | 5,040 | py | Python | chart/chart/python/spectralsequence_chart/page_property.py | JoeyBF/sseq | d553df5e2466aaad47f4a36bf5e051a3922b0dd0 | ["Apache-2.0", "MIT"] | 7 | 2021-04-22T04:06:09.000Z | 2022-01-25T04:05:49.000Z | chart/chart/python/spectralsequence_chart/page_property.py | JoeyBF/sseq | d553df5e2466aaad47f4a36bf5e051a3922b0dd0 | ["Apache-2.0", "MIT"] | 68 | 2020-03-21T22:37:24.000Z | 2022-03-31T02:51:35.000Z | chart/chart/python/spectralsequence_chart/page_property.py | JoeyBF/sseq | d553df5e2466aaad47f4a36bf5e051a3922b0dd0 | ["Apache-2.0", "MIT"] | 5 | 2021-02-17T06:37:43.000Z | 2022-02-01T03:53:22.000Z |
from .infinity import INFINITY
import json
from typing import List, Tuple, Any, Type, Union, TypeVar, Generic, Optional, Dict, cast, Callable
T = TypeVar('T')
class PageProperty(Generic[T]):
"""
A class to represent a property that varies depending on the pages of a spectral sequence.
This is the main helper class that encapsulates any property of a class, edge, or chart
that varies depending on the page.
Examples:
>>> p = PageProperty(1)
>>> p[4] = 7
>>> p[2]
1
>>> p[4]
7
"""
def __init__(self,
value : T,
parent : Optional[Any] = None,
callback : Optional[Callable[[], None]] = None,
):
""" Initialize the PageProperty to always have value v."""
self._values : List[Tuple[int, T]] = [(0, value)]
self.set_parent(parent)
self._callback = callback
def set_parent(self, parent : Optional[Any]):
self._parent = parent
def set_callback(self, callback : Callable[[], None]):
self._callback = callback
def _needs_update(self):
if self._parent:
self._parent._needs_update()
if self._callback:
self._callback()
def _find_index(self, target_page : int) -> Tuple[int, bool]:
result_idx = None
for (idx, (page, _)) in enumerate(self._values):
if page > target_page:
break
result_idx = idx
# We need to help out the type checker here
if result_idx is None:
raise ValueError(f"Page Property indexed with negative index: {target_page}")
return (result_idx, self._values[result_idx][0] == target_page)
def __getitem__(self, x : Union[int, slice]) -> T:
stop = None
if type(x) == slice:
stop = x.stop or INFINITY
x = x.start or 0
if type(x) != int:
raise TypeError(f"Expected integer, got {type(x).__name__}.")
        assert type(x) is int # keep the type checker happy
(idx, _) = self._find_index(x)
if stop:
(idx2, _) = self._find_index(stop - 1)
if idx != idx2:
raise ValueError("Indexed with slice but value is inconsistent across slice.")
return self._values[idx][1]
def __setitem__(self, p : Union[int, slice], v : T) -> None:
if hasattr(v, "set_parent"):
v.set_parent(self)
if type(p) is int:
self._setitem_single(p, v)
self._merge_redundant()
self._needs_update()
return
if type(p) is not slice:
raise TypeError("Excepted int or slice!")
start = p.start or 0
stop = p.stop or INFINITY
orig_value = self[stop]
(start_idx, _) = self._setitem_single(start, v)
(end_idx, hit_end) = self._find_index(stop)
if not hit_end and stop < INFINITY:
(end_idx, _) = self._setitem_single(stop, orig_value)
if stop == INFINITY:
end_idx += 1
del self._values[start_idx + 1 : end_idx]
self._merge_redundant()
self._needs_update()
def _setitem_single(self, p : int, v : T):
(idx, hit) = self._find_index(p)
if hit:
self._values[idx] = (p, v)
else:
idx += 1
self._values.insert(idx, (p, v))
return (idx, hit)
def _merge_redundant(self):
for i in range(len(self._values) - 1, 0, -1):
if self._values[i][1] == self._values[i-1][1]:
del self._values[i]
def __repr__(self) -> str:
values = ", ".join([f"{page}: {value}" for (page, value) in self._values])
return f"PageProperty{{{values}}}"
def __eq__(self, other):
if type(other) != PageProperty:
return False
return self._values == other._values
def map_values_in_place(self, f):
for i in range(len(self._values)):
(p, v) = self._values[i]
self._values[i] = (p, f(v))
def to_json(self) -> Dict[str, Any]:
if len(self._values) == 1:
return self._values[0][1]
else:
return {"type" : "PageProperty", "values" : self._values }
@staticmethod
def from_json(json_obj : Dict[str, Any]) -> "PageProperty[Any]":
result : PageProperty[Any] = PageProperty(None)
result._values = [cast(Tuple[int, Any], tuple(x)) for x in json_obj["values"]]
return result
S = TypeVar('S')
PagePropertyOrValue = Union[S, PageProperty[S]]
def ensure_page_property(v : PagePropertyOrValue[S], parent : Optional[Any] = None) -> PageProperty[S]:
if(type(v) is PageProperty):
result = v
else:
result = PageProperty(v)
if parent:
result.set_parent(parent)
return result
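
# A short sketch exercising the slice semantics implemented above (values and
# page numbers are arbitrary); guarded so it only runs when executed directly:
if __name__ == "__main__":
    p = PageProperty("solid")
    p[5:9] = "dashed"          # pages 5..8 become "dashed", page 9 reverts
    print(p[2], p[6], p[9])    # solid dashed solid
    print(p)                   # PageProperty{0: solid, 5: dashed, 9: solid}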
| 34.520548 | 104 | 0.547619 |
| true | true | 790b4ee44cef0eaca03d5473dd5685654cdebb5e | 11,990 | py | Python | grr/config/client.py | theGreenJedi/grr | d9e11e304dc299d49c76b7fdf6fdbfcd4b8eec39 | ["Apache-2.0"] | null | null | null | grr/config/client.py | theGreenJedi/grr | d9e11e304dc299d49c76b7fdf6fdbfcd4b8eec39 | ["Apache-2.0"] | null | null | null | grr/config/client.py | theGreenJedi/grr | d9e11e304dc299d49c76b7fdf6fdbfcd4b8eec39 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
"""Configuration parameters for the client."""
from grr.lib import config_lib
from grr.lib import rdfvalue
from grr.lib.rdfvalues import crypto
# General Client options.
config_lib.DEFINE_string("Client.name", "GRR",
"The name of the client. This will be used as a base "
"name to generate many other default parameters such "
"as binary names and service names. Note that on "
"Linux we lowercase the name to confirm with most "
"linux naming conventions.")
config_lib.DEFINE_string("Client.binary_name", "%(Client.name)",
"The name of the client binary.")
config_lib.DEFINE_list("Client.labels", [], "Labels for this client.")
config_lib.DEFINE_string("Client.company_name", "GRR Project",
"The name of the company which made the client.")
config_lib.DEFINE_string("Client.description", "%(name) %(platform) %(arch)",
"A description of this specific client build.")
config_lib.DEFINE_string("Client.platform", "windows",
"The platform we are running on.")
config_lib.DEFINE_string("Client.arch", "amd64",
"The architecture we are running on.")
config_lib.DEFINE_string("Client.build_time", "Unknown",
"The time the client was built.")
config_lib.DEFINE_string("Client.deploy_time", "Unknown",
"The time the client was deployed.")
config_lib.DEFINE_string("Client.build_environment", None,
"The output of Uname.FromCurrentSystem.signature() "
"on the system the client was built on.")
config_lib.DEFINE_integer("Client.rsa_key_length", 2048,
"The key length of the client keys in bits.")
config_lib.DEFINE_string(
name="Client.install_path",
default=r"%(SystemRoot|env)\\System32\\%(name)\\%(Source.version_string)",
help="Where the client binaries are installed.")
config_lib.DEFINE_string(
name="Client.component_path",
default=r"%(Client.install_path)/components",
help="Where the client components are installed on the client.")
config_lib.DEFINE_string(
name="Client.component_url_stem",
default="%(Frontend.static_url_path_prefix)components/",
help="A URL path where components will be served from.")
config_lib.DEFINE_semantic(
rdfvalue.RDFURN,
"Client.component_aff4_stem",
default="%(Frontend.static_aff4_prefix)/components/",
description="A common AFF4 stem where components will be served from.")
config_lib.DEFINE_string(
name="Client.rekall_profile_cache_path",
default=r"%(Client.install_path)\\rekall_profiles",
help="Where GRR stores cached Rekall profiles needed for memory analysis")
config_lib.DEFINE_list(name="Client.server_urls",
default=[],
help="Base URL for client control.")
# Deprecated. Remove when all installations switch to Client.server_urls.
config_lib.DEFINE_list("Client.control_urls", ["http://localhost:8080/control"],
"List of URLs of the controlling server.")
config_lib.DEFINE_integer("Client.http_timeout", 100,
"Timeout for HTTP requests.")
config_lib.DEFINE_string("Client.plist_path",
"/Library/LaunchDaemons/com.google.code.grrd.plist",
"Location of our launchctl plist.")
config_lib.DEFINE_string("Client.plist_filename", None,
"Filename of launchctl plist.")
config_lib.DEFINE_string("Client.plist_label", None,
"Identifier label for launchd")
config_lib.DEFINE_string("Client.plist_label_prefix", None,
"Domain for launchd label.")
config_lib.DEFINE_float("Client.poll_min", 0.2,
"Minimum time between polls in seconds.")
config_lib.DEFINE_float("Client.poll_max", 5,
"Maximum time between polls in seconds.")
config_lib.DEFINE_float("Client.error_poll_min", 15,
"Minimum time between polls in seconds if the server "
"reported an error.")
config_lib.DEFINE_float("Client.poll_slew", 1.15, "Slew of poll time.")
config_lib.DEFINE_integer("Client.connection_error_limit", 60 * 24,
"If the client encounters this many connection "
"errors, it exits and restarts. Retries are one "
"minute apart.")
config_lib.DEFINE_integer("Client.retry_error_limit", 10,
"If the client encounters this many connection "
"errors, it searches for a new proxy/server url "
"combination.")
config_lib.DEFINE_list(
name="Client.proxy_servers",
help="List of valid proxy servers the client should try.",
default=[])
config_lib.DEFINE_integer("Client.max_post_size", 40000000,
"Maximum size of the post.")
config_lib.DEFINE_integer("Client.max_out_queue", 51200000,
"Maximum size of the output queue.")
config_lib.DEFINE_integer("Client.foreman_check_frequency", 1800,
"The minimum number of seconds before checking with "
"the foreman for new work.")
config_lib.DEFINE_float("Client.rss_max", 1000,
"Maximum memory footprint in MB (soft limit). "
"Exceeding this will result in an orderly shutdown.")
config_lib.DEFINE_float("Client.rss_max_hard", 2000,
"Maximum memory footprint in MB (hard limit). "
"Exceeding this will result in aborting the current "
"client action and restarting.")
config_lib.DEFINE_string(
name="Client.tempfile_prefix",
help="Prefix to use for temp files created by the GRR client.",
default="tmp%(Client.name)")
config_lib.DEFINE_list(
name="Client.tempdir_roots",
help="List of temporary directories to use on the client.",
default=["/var/tmp/"])
config_lib.DEFINE_string(
name="Client.grr_tempdir",
help="Default subdirectory in the temp directory to use for GRR.",
default="%(Client.name)")
config_lib.DEFINE_list(
name="Client.vfs_virtualroots",
help=("If this is set for a VFS type, client VFS operations will always be"
" relative to the given root. Format is os:/mount/disk."),
default=[])
# Windows client specific options.
config_lib.DEFINE_string("Client.config_hive",
r"HKEY_LOCAL_MACHINE",
help="The registry hive where the client "
"configuration will be stored.")
config_lib.DEFINE_string("Client.config_key",
r"Software\\GRR",
help="The registry key where client configuration "
"will be stored.")
# Client Cryptographic options. Here we define defaults for key values.
config_lib.DEFINE_semantic(
crypto.RSAPrivateKey,
"Client.private_key",
description="Client private key in pem format. If not provided this "
"will be generated by the enrollment process.",)
config_lib.DEFINE_semantic(
crypto.RDFX509Cert,
"CA.certificate",
description="Trusted CA certificate in X509 pem format",)
config_lib.DEFINE_semantic(
crypto.RSAPublicKey,
"Client.executable_signing_public_key",
description="public key for verifying executable signing.")
config_lib.DEFINE_semantic(
crypto.RSAPrivateKey,
"PrivateKeys.executable_signing_private_key",
description="Private keys for signing executables. NOTE: This "
"key is usually kept offline and is thus not present in the "
"configuration file.")
config_lib.DEFINE_semantic(
crypto.RSAPublicKey,
"Client.driver_signing_public_key",
description="public key for verifying driver signing.")
config_lib.DEFINE_semantic(
crypto.RSAPrivateKey,
"PrivateKeys.driver_signing_private_key",
description="Private keys for signing drivers. NOTE: This "
"key is usually kept offline and is thus not present in the "
"configuration file.")
config_lib.DEFINE_integer("Client.server_serial_number", 0,
"Minimal serial number we accept for server cert.")
config_lib.DEFINE_integer(
"Client.gc_frequency", 10,
"Defines how often the client calls garbage collection (seconds).")
# The following configuration options are defined here but are used in
# the windows nanny code (grr/client/nanny/windows_nanny.h).
config_lib.DEFINE_string("Nanny.child_binary",
"GRR.exe",
help="The location to the client binary.")
config_lib.DEFINE_string("Nanny.child_command_line",
"%(Nanny.child_binary)",
help="The command line to launch the client binary.")
config_lib.DEFINE_string("Nanny.logfile", "%(Logging.path)/nanny.log",
"The file where we write the nanny transaction log.")
config_lib.DEFINE_string("Nanny.service_name",
"GRR Service",
help="The name of the nanny.")
config_lib.DEFINE_string("Nanny.service_description",
"GRR Service",
help="The description of the nanny service.")
config_lib.DEFINE_string("Nanny.service_key",
r"%(Client.config_key)",
help="The registry key of the nanny service.")
config_lib.DEFINE_string("Nanny.service_key_hive",
r"%(Client.config_hive)",
help="The registry key of the nanny service.")
config_lib.DEFINE_string("Nanny.statusfile", "%(Logging.path)/nanny.status",
"The file where we write the nanny status.")
config_lib.DEFINE_string("Nanny.status", "",
"The regkey where we write the nanny status.")
config_lib.DEFINE_string("Nanny.binary",
r"%(Client.install_path)\\%(service_binary_name)",
help="The full location to the nanny binary.")
config_lib.DEFINE_string("Nanny.service_binary_name",
"%(Client.name)service.exe",
help="The executable name of the nanny binary.")
config_lib.DEFINE_integer("Nanny.unresponsive_kill_period", 60,
"The time in seconds after which the nanny kills us.")
config_lib.DEFINE_integer("Network.api", 3,
"The version of the network protocol the client "
"uses.")
config_lib.DEFINE_string("Network.compression",
default="ZCOMPRESS",
help="Type of compression (ZCOMPRESS, UNCOMPRESSED)")
# Installer options.
config_lib.DEFINE_string(
name="Installer.logfile",
default="%(Logging.path)/%(Client.name)_installer.txt",
help=("A specific log file which is used for logging the "
"installation process."))
config_lib.DEFINE_list("Installer.old_key_map", [
"HKEY_LOCAL_MACHINE\\Software\\GRR\\certificate->Client.private_key",
"HKEY_LOCAL_MACHINE\\Software\\GRR\\server_serial_number"
"->Client.server_serial_number",
], """
A mapping of old registry values which will be copied to new values. The old
value location must start with a valid hive name, followed by a key name, and
end with the value name. The source location must be separated from the new
parameter name by a -> symbol.
This setting allows settings to be carried over from obsolete client
installations to newer versions of the client, which may store the same
information in other locations.
For example:
HKEY_LOCAL_MACHINE\\Software\\GRR\\certificate -> Client.private_key
""")
| 40.506757 | 80 | 0.643453 |
| true | true | 790b4ff35d6db34f32b724826572e3ca28c13a53 | 8,505 | py | Python | gamse/utils/onedarray.py | wangleon/gamse | ed2a3730469a1eeef3def1beca990e9d2641a53b | ["Apache-2.0"] | 10 | 2019-04-10T15:05:50.000Z | 2021-11-28T15:31:38.000Z | gamse/utils/onedarray.py | wangleon/gamse | ed2a3730469a1eeef3def1beca990e9d2641a53b | ["Apache-2.0"] | 15 | 2020-04-07T07:29:27.000Z | 2022-02-19T15:47:04.000Z | gamse/utils/onedarray.py | wangleon/gamse | ed2a3730469a1eeef3def1beca990e9d2641a53b | ["Apache-2.0"] | 2 | 2020-04-02T09:04:27.000Z | 2020-10-14T15:29:10.000Z |
from itertools import tee
import numpy as np
import scipy.interpolate as intp
from scipy.signal import savgol_filter
def get_edge_bin(array):
"""Detect the edge indcies of a binary 1-D array.
Args:
array (:class:`numpy.ndarray`): A list or Numpy 1d array, with binary
(0/1) or boolean (True/False) values.
Returns:
list: A list containing starting and ending indices of the non-zero
blocks.
Examples:
.. code-block:: python
>>> a = [0,1,1,0,0,0,1,0,1]
>>> get_edge_bin(a)
[(1, 3), (6, 7), (8, 9)]
>>> b = [True, False, True, True, False, False]
>>> get_edge_bin(b)
[(0, 1), (2, 4)]
"""
array1 = np.int64(array)
array1 = np.insert(array1, 0, 0)
array1 = np.append(array1, 0)
tmp = array1 - np.roll(array1, 1)
i1_lst = np.nonzero(tmp == 1)[0] - 1
i2_lst = np.nonzero(tmp ==-1)[0] - 1
return list(zip(i1_lst, i2_lst))
def get_local_minima(x, window=None):
"""Get the local minima of a 1d array in a window.
Args:
x (:class:`numpy.ndarray`): A list or Numpy 1d array.
        window (*int* or :class:`numpy.ndarray`): An odd integer or a list of
            odd integers as the lengths of the searching windows.
Returns:
tuple: A tuple containing:
* **index** (:class:`numpy.ndarray`): A numpy 1d array containing
indices of all local minima.
* **x[index]** (:class:`numpy.ndarray`): A numpy 1d array containing
values of all local minima.
"""
x = np.array(x)
dif = np.diff(x)
ind = dif > 0
tmp = np.logical_xor(ind, np.roll(ind,1))
idx = np.logical_and(tmp,ind)
index = np.where(idx)[0]
if window is None:
# window is not given
return index, x[index]
else:
# window is given
if isinstance(window, int):
# window is an integer
window = np.repeat(window, len(x))
elif isinstance(window, np.ndarray):
# window is a numpy array
#if np.issubdtype(window.dtype, int):
if window.dtype.type in [np.int16, np.int32, np.int64]:
pass
else:
                # window entries are not integers
                raise ValueError('window array must contain integers')
else:
raise ValueError
if 0 in window%2:
# not all of the windows are odd
            raise ValueError('all window lengths must be odd')
halfwin_lst = (window-1)//2
index_lst = []
for i in index:
halfwin = halfwin_lst[i]
i1 = max(0, i-halfwin)
i2 = min(i+halfwin+1, len(x))
if i == x[i1:i2].argmin() + i1:
index_lst.append(i)
if len(index_lst)>0:
index_lst = np.array(index_lst)
return index_lst, x[index_lst]
else:
return np.array([]), np.array([])
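
# quick check of the boolean-mask logic above (result verified by hand):
# get_local_minima([3, 1, 2, 0, 5]) -> (array([1, 3]), array([1, 0]))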
def implete_none(lst):
"""Replace the None elemnets at the beginning and the end of list by auto
increment integers.
Convert the first and last few `None` elements to auto increment integers.
These integers are determined by the first and last integers in the input
array.
While the `None` elements between two integers in the input list will
remain.
Args:
        lst (list): A list containing None values.
Returns:
newlst (list): A list containing auto increment integers.
Examples:
.. code-block:: python
>>> a = [None,None,3,4,None,5,6,None,None]
>>> implete_none(a)
[1, 2, 3, 4, None, 5, 6, 7, 8]
"""
# filter the None values
notnone_lst = [v for v in lst if v is not None]
for i, v in enumerate(lst):
if v == notnone_lst[0]:
# first not-None element and its index
notnone1 = i
value1 = v
if v == notnone_lst[-1]:
# last not-None element and its index
notnone2 = i
value2 = v
newlst = []
for i,v in enumerate(lst):
if i < notnone1:
newlst.append(value1-(notnone1-i))
elif i > notnone2:
newlst.append(value2+(i-notnone2))
else:
newlst.append(v)
return newlst
def derivative(*args, **kwargs):
"""Get the first derivative of data arrays (*x*, *y*).
If **y** is not given, the first argument will be taken as **y**, and the
differential of the input array will be returned.
Args:
x (list or :class:`numpy.ndarray`): X-values of the input array (optional).
y (list or :class:`numpy.ndarray`): Y-values of the input array.
points (int): Number of points used to calculate derivative
(optional, default is 3).
Returns:
:class:`numpy.ndarray`: Derivative of the input array.
"""
if len(args) == 1:
y = np.array(args[0], dtype=np.float64)
x = np.arange(y.size)
elif len(args) == 2:
x = np.array(args[0], dtype=np.float64)
y = np.array(args[1], dtype=np.float64)
else:
raise ValueError
npts = x.size
points = kwargs.pop('points', 3)
if points == 3:
der = (np.roll(y,-1) - np.roll(y,1))/(np.roll(x,-1) - np.roll(x,1))
a = np.array([-3., 4., -1.])
der[0] = (a*y[0:3]).sum() / (a*x[0:3]).sum()
der[-1] = (-a[::-1]*y[-3:]).sum() / (-a[::-1]*x[-3:]).sum()
return der
else:
raise ValueError
def pairwise(array):
"""Return pairwises of an iterable arrary.
Args:
array (list or :class:`numpy.ndarray`): The input iterable array.
Returns:
:class:`zip`: zip objects.
"""
a, b = tee(array)
next(b, None)
return zip(a, b)
def smooth(array, points, deg):
"""Smooth an array.
Args:
array (:class:`numpy.ndarray`): Input array.
points (int): Points of smoothing.
deg (int): Degree of smoothing.
Returns:
:class:`numpy.ndarray`: smoothed array
"""
n = array.size
if points == 5:
if deg == 2:
w_2 = np.array([31., 9., -3., -5., 3.])/35.
w_1 = np.array([ 9., 13., 12., 6., -5.])/35.
w_0 = np.array([-3., 12., 17., 12., -3.])/35.
elif deg == 3:
w_2 = np.array([69., 4., -6., 4., -1.])/70.
w_1 = np.array([ 2., 27., 12., -8., 2.])/35.
w_0 = np.array([-3., 12., 17., 12., -3.])/35.
a = np.zeros((n, n))
a[0, 0:5] = w_2
a[1, 0:5] = w_1
for i in np.arange(2, n-2):
a[i, i-2:i+3] = w_0
a[-2, -5:] = w_1[::-1]
a[-1, -5:] = w_2[::-1]
        # np.matrix is deprecated; plain matrix multiplication is equivalent.
        result = a @ array.reshape(-1, 1)
        return result[:, 0]
def iterative_savgol_filter(y, winlen=5, order=3, maxiter=10,
upper_clip=None, lower_clip=None):
"""Smooth the input array with Savitzky-Golay filter with lower and/or
upper clippings.
Args:
y (:class:`numpy.ndarray`): Input array.
winlen (int): Window length of Savitzky-Golay filter.
order (int): Order of Savitzky-Gaoly filter.
maxiter (int): Maximum number of iterations.
lower_clip (float): Lower sigma-clipping value.
upper_clip (float): Upper sigma-clipping value.
Returns:
tuple: A tuple containing:
* **ysmooth** (:class:`numpy.ndarray`) – Smoothed y values.
* **yres** (:class:`numpy.ndarray`) – Residuals of y values.
* **mask** (:class:`numpy.ndarray`) – Mask of y values.
* **std** (float) – Standard deviation.
"""
x = np.arange(y.size)
    mask = np.ones_like(y, dtype=bool)  # np.bool alias was removed in NumPy 1.24
for ite in range(maxiter):
# fill masked values in y using interpolation
f = intp.InterpolatedUnivariateSpline(x[mask], y[mask], k=3)
ysmooth = savgol_filter(f(x), window_length=winlen, polyorder=order)
yres = y - ysmooth
std = yres[mask].std()
        # generate a new mask, starting from a copy of the existing mask
        new_mask = mask.copy()
# give new mask with lower and upper clipping value
if lower_clip is not None:
new_mask *= (yres > -lower_clip * std)
if upper_clip is not None:
new_mask *= (yres < upper_clip * std)
if new_mask.sum() == mask.sum():
break
mask = new_mask
return ysmooth, yres, mask, std
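
# A small self-test sketch (synthetic data, arbitrary parameters) showing the
# clipping behaviour of iterative_savgol_filter; runs only when executed
# directly:
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    y = np.sin(np.linspace(0, 6, 200)) + rng.normal(0, 0.05, 200)
    y[50] += 2.0                      # inject a single outlier
    ysm, yres, mask, std = iterative_savgol_filter(y, winlen=11, order=3,
                                                   upper_clip=3.0)
    print(mask[50])                   # the outlier should be masked out: False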
| 31.735075 | 83 | 0.544033 |
| true | true | 790b50ddf6e81681112961b12f0414a0cce079a3 | 1,236 | py | Python | assignment_day_13.py | anjali0503/Letsupgrade_Advance_Python_Django | 40e9468607b94d4aab70ca760f3b2e62cd693dc3 | ["Apache-2.0"] | null | null | null | assignment_day_13.py | anjali0503/Letsupgrade_Advance_Python_Django | 40e9468607b94d4aab70ca760f3b2e62cd693dc3 | ["Apache-2.0"] | null | null | null | assignment_day_13.py | anjali0503/Letsupgrade_Advance_Python_Django | 40e9468607b94d4aab70ca760f3b2e62cd693dc3 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
"""Assignment Day :13
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1hCwbVUHmWUKYdN7xNNeZcze9aGEmlGKz
"""
# Q1.
#Remove the hardcoded part from the code with the help of configparser
import os
from configparser import ConfigParser
config=ConfigParser()
config.read("D://Py Ex/Advance-py//ex_config.ini")
path=config.get("Exten", "path")
old_text=config.get("Exten","OE")
New_text=config.get("Exten","NE")
os.chdir(path)
os.getcwd()
for file in os.listdir():
if file.endswith(old_text):
first_name=file.rsplit(".",1)[0]
new_name=first_name+"."+New_text
print(new_name)
os.rename(file,new_name)
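
#Illustrative ex_config.ini that the config.get() calls above assume
#(section and keys come from the code; the values here are just examples):
#
#   [Exten]
#   path = D:/Py Ex/Advance-py/files
#   OE = txt
#   NE = csv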
#Q2
#This question has been asked in an interview.
#Please write the code in such a way that it gives all paths of a file
#(same name) which is present in multiple locations.
import os
resp = os.walk("C:\\company\\names")
d1 = {}
for r,d,f in resp:
for file in f:
d1.setdefault(file,[]).append(r)
print(d1)
file_name = input("Enter the file name ")
for k,v in d1.items():
    if file_name.lower() in k.lower():
print (k,":", v)
for find_file in v:
print(find_file)
| 25.22449 | 132 | 0.68123 |
| true | true | 790b51685325f4443457e426c465aef7492da339 | 16,030 | py | Python | Real Topology Graph/GNN Model 1/Fully Connected Graph/Main_MLP_line.py | HusseinLezzaik/Consensus-Algorithm-for-2-Mobile-Robots | 0109c78106dff7640a8fc5601e0333b5397f5b4e | ["MIT"] | 9 | 2021-04-20T08:12:47.000Z | 2022-02-18T02:25:29.000Z | Real Topology Graph/GNN Model 1/Fully Connected Graph/Main_MLP_line.py | HusseinLezzaik/Deep-Learning-for-Multi-Robotics | ecdb28793cc1f5fa6cded752908105ec37e9bfc7 | ["MIT"] | null | null | null | Real Topology Graph/GNN Model 1/Fully Connected Graph/Main_MLP_line.py | HusseinLezzaik/Deep-Learning-for-Multi-Robotics | ecdb28793cc1f5fa6cded752908105ec37e9bfc7 | ["MIT"] | 1 | 2021-11-16T08:16:36.000Z | 2021-11-16T08:16:36.000Z |
"""
Consensus Algorithm for 6 Mobile robots using MLP Model for Line Graph Implementation
Inputs: Mx, My
Outputs: Ux, Uy
"""
import torch
import MLP_Model
import math
import numpy as np
import rclpy
from rclpy.node import Node
from tf2_msgs.msg import TFMessage
from std_msgs.msg import Float32
import time
L = 1
d = 0.5
#distance = 2
A = np.ones((6, 6)) - np.identity(6) # Adjacency matrix, fully connected 6x6 case
ux = np.zeros((6,1)) # 6x1 controller vector
uy = np.zeros((6,1)) # 6x1 controller vector
# load model using dict
FILE = "model.pth"
loaded_model = MLP_Model.MLP()
loaded_model.load_state_dict(torch.load(FILE))
loaded_model.eval()
def euler_from_quaternion(x, y, z, w):
t3 = +2.0 * (w * z + x * y)
t4 = +1.0 - 2.0 * (y * y + z * z)
yaw_z = math.atan2(t3, t4)
return yaw_z # in radians
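# Hedged illustrative check (not part of the original node): a pure 90-degree
# yaw corresponds to the quaternion (x, y, z, w) = (0, 0, 0.7071, 0.7071),
# so the helper should return approximately pi/2.
assert abs(euler_from_quaternion(0.0, 0.0, 0.7071, 0.7071) - math.pi / 2) < 1e-3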
class MinimalPublisher(Node):
def __init__(self):
super().__init__('minimal_publisher1')
self.publisher_l1 = self.create_publisher(Float32, '/leftMotorSpeedrobot1', 0) #Change according to topic in child script,String to Float32
self.publisher_r1 = self.create_publisher(Float32, '/rightMotorSpeedrobot1', 0) #Change according to topic in child script,String to Float32
self.publisher_l2 = self.create_publisher(Float32, '/leftMotorSpeedrobot2', 0) #Change according to topic in child script,String to Float32
self.publisher_r2 = self.create_publisher(Float32, '/rightMotorSpeedrobot2', 0) #Change according to topic in child script,String to Float32
self.publisher_l3 = self.create_publisher(Float32, '/leftMotorSpeedrobot3', 0) #Change according to topic in child script,String to Float32
self.publisher_r3 = self.create_publisher(Float32, '/rightMotorSpeedrobot3', 0) #Change according to topic in child script,String to Float32
self.publisher_l4 = self.create_publisher(Float32, '/leftMotorSpeedrobot4', 0) #Change according to topic in child script,String to Float32
self.publisher_r4 = self.create_publisher(Float32, '/rightMotorSpeedrobot4', 0) #Change according to topic in child script,String to Float32
self.publisher_l5 = self.create_publisher(Float32, '/leftMotorSpeedrobot5', 0) #Change according to topic in child script,String to Float32
self.publisher_r5 = self.create_publisher(Float32, '/rightMotorSpeedrobot5', 0) #Change according to topic in child script,String to Float32
self.publisher_l6 = self.create_publisher(Float32, '/leftMotorSpeedrobot6', 0) #Change according to topic in child script,String to Float32
self.publisher_r6 = self.create_publisher(Float32, '/rightMotorSpeedrobot6', 0) #Change according to topic in child script,String to Float32
self.subscription = self.create_subscription(
TFMessage,
'/tf',
self.listener_callback,
0)
" Timer Callback "
#self.publisher_ = self.create_publisher(Float32(), 'topic', 10)
timer_period = 0.01 # seconds
self.timer = self.create_timer(timer_period, self.timer_callback)
self.i = 0
"Parameters "
self.k = 1 # Control Gain
self.scene = 0 # Nb of scene iteration
" Mobile Robot 1 Parameters "
self.x1 = 0
self.y1 = 0
self.Theta1 = 0
self.v1 = 0
self.w1 = 0
self.vL1 = 0
self.vR1 = 0
" Mobile Robot 1 Parameters "
self.x2 = 0
self.y2 = 0
self.Theta2 = 0
self.v2 = 0
self.w2 = 0
self.vL2 = 0
self.vR2 = 0
" Mobile Robot 3 Parameters "
self.x3 = 0
self.y3 = 0
self.Theta3 = 0
self.v3 = 0
self.w3 = 0
self.vL3 = 0
self.vR3 = 0
" Mobile Robot 4 Parameters "
self.x4 = 0
self.y4 = 0
self.Theta4 = 0
self.v4 = 0
self.w4 = 0
self.vL4 = 0
self.vR4 = 0
" Mobile Robot 5 Parameters "
self.x5 = 0
self.y5 = 0
self.Theta5 = 0
self.v5 = 0
self.w5 = 0
self.vL5 = 0
self.vR5 = 0
" Mobile Robot 6 Parameters "
self.x6 = 0
self.y6 = 0
self.Theta6 = 0
self.v6 = 0
self.w6 = 0
self.vL6 = 0
self.vR6 = 0
def timer_callback(self):
" Calculate Mx1, My1, ...... Mx6, My6 "
Mx1 = self.x2 - self.x1 # 1x1
My1 = self.y2 - self.y1 # 1x1
Mx2 = ( ( self.x1 - self.x2 ) + ( self.x3 - self.x2 ) ) / 2 # 1x1
My2 = ( ( self.y1 - self.y2 ) + ( self.y3 - self.y2 ) ) / 2 # 1x1
Mx3 = ( ( self.x2 - self.x3 ) + ( self.x4 - self.x3 ) ) / 2 # 1x1
My3 = ( ( self.y2 - self.y3 ) + ( self.y4 - self.y3 ) ) / 2 # 1x1
Mx4 = ( ( self.x3 - self.x4 ) + ( self.x5 - self.x4 ) ) / 2 # 1x1
        My4 = ( ( self.y3 - self.y4 ) + ( self.y5 - self.y4 ) ) / 2 # 1x1
Mx5 = ( ( self.x4 - self.x5 ) + ( self.x6 - self.x5 ) ) / 2 # 1x1
My5 = ( ( self.y4 - self.y5 ) + ( self.y6 - self.y5 ) ) / 2 # 1x1
Mx6 = self.x5 - self.x6 # 1x1
My6 = self.y5 - self.y6 # 1x1
" Use MLP to Predict control inputs "
relative_pose_1 = [ Mx1, My1 ] # tensor data for MLP model
relative_pose_2 = [ Mx2, My2 ] # tensor data for MLP model
relative_pose_3 = [ Mx3, My3 ] # tensor data for MLP model
relative_pose_4 = [ Mx4, My4 ] # tensor data for MLP model
relative_pose_5 = [ Mx5, My5 ] # tensor data for MLP model
relative_pose_6 = [ Mx6, My6 ] # tensor data for MLP model
u1_predicted = MLP_Model.predict(relative_pose_1, loaded_model) # predict control input u1, tensor
u2_predicted = MLP_Model.predict(relative_pose_2, loaded_model) # predict control input u2, tensor
u3_predicted = MLP_Model.predict(relative_pose_3, loaded_model) # predict control input u3, tensor
u4_predicted = MLP_Model.predict(relative_pose_4, loaded_model) # predict control input u4, tensor
u5_predicted = MLP_Model.predict(relative_pose_5, loaded_model) # predict control input u5, tensor
u6_predicted = MLP_Model.predict(relative_pose_6, loaded_model) # predict control input u6, tensor
u1_predicted_np = np.array([[ u1_predicted[0][0] ], [ u1_predicted[0][1] ]]) # from tensor to numpy array for calculation
u2_predicted_np = np.array([[ u2_predicted[0][0] ], [ u2_predicted[0][1] ]]) # from tensor to numpy array for calculation
u3_predicted_np = np.array([[ u3_predicted[0][0] ], [ u3_predicted[0][1] ]]) # from tensor to numpy array for calculation
u4_predicted_np = np.array([[ u4_predicted[0][0] ], [ u4_predicted[0][1] ]]) # from tensor to numpy array for calculation
u5_predicted_np = np.array([[ u5_predicted[0][0] ], [ u5_predicted[0][1] ]]) # from tensor to numpy array for calculation
u6_predicted_np = np.array([[ u6_predicted[0][0] ], [ u6_predicted[0][1] ]]) # from tensor to numpy array for calculation
" Calculate V1/W1, V2/W2, V3/W3, V4/W4, V5/W5, V6/W6 "
S1 = np.array([[self.v1], [self.w1]]) #2x1
G1 = np.array([[1,0], [0,1/L]]) #2x2
R1 = np.array([[math.cos(self.Theta1),math.sin(self.Theta1)],[-math.sin(self.Theta1),math.cos(self.Theta1)]]) #2x2
S1 = np.dot(np.dot(G1, R1), u1_predicted_np) #2x1
S2 = np.array([[self.v2], [self.w2]]) #2x1
G2 = np.array([[1,0], [0,1/L]]) #2x2
R2 = np.array([[math.cos(self.Theta2),math.sin(self.Theta2)],[-math.sin(self.Theta2),math.cos(self.Theta2)]]) #2x2
S2 = np.dot(np.dot(G2, R2), u2_predicted_np) # 2x1
S3 = np.array([[self.v3], [self.w3]]) #2x1
G3 = np.array([[1,0], [0,1/L]]) #2x2
R3 = np.array([[math.cos(self.Theta3),math.sin(self.Theta3)],[-math.sin(self.Theta3),math.cos(self.Theta3)]]) #2x2
S3 = np.dot(np.dot(G3, R3), u3_predicted_np) #2x1
S4 = np.array([[self.v4], [self.w4]]) #2x1
G4 = np.array([[1,0], [0,1/L]]) #2x2
R4 = np.array([[math.cos(self.Theta4),math.sin(self.Theta4)],[-math.sin(self.Theta4),math.cos(self.Theta4)]]) #2x2
S4 = np.dot(np.dot(G4, R4), u4_predicted_np) #2x1
S5 = np.array([[self.v5], [self.w5]]) #2x1
G5 = np.array([[1,0], [0,1/L]]) #2x2
R5 = np.array([[math.cos(self.Theta5),math.sin(self.Theta5)],[-math.sin(self.Theta5),math.cos(self.Theta5)]]) #2x2
S5 = np.dot(np.dot(G5, R5), u5_predicted_np) #2x1
S6 = np.array([[self.v6], [self.w6]]) #2x1
G6 = np.array([[1,0], [0,1/L]]) #2x2
R6 = np.array([[math.cos(self.Theta6),math.sin(self.Theta6)],[-math.sin(self.Theta6),math.cos(self.Theta6)]]) #2x2
S6 = np.dot(np.dot(G6, R6), u6_predicted_np) #2x1
" Calculate VL1/VR1, VL2/VR2, VL3/VR3, VL4/VR4, VL5/VR5, VL6/VR6 "
D = np.array([[1/2,1/2],[-1/(2*d),1/(2*d)]]) #2x2
Di = np.linalg.inv(D) #2x2
Speed_L1 = np.array([[self.vL1], [self.vR1]]) # Vector 2x1 for Speed of Robot 1
Speed_L2 = np.array([[self.vL2], [self.vR2]]) # Vector 2x1 for Speed of Robot 2
Speed_L3 = np.array([[self.vL3], [self.vR3]]) # Vector 2x1 for Speed of Robot 3
Speed_L4 = np.array([[self.vL4], [self.vR4]]) # Vector 2x1 for Speed of Robot 4
Speed_L5 = np.array([[self.vL5], [self.vR5]]) # Vector 2x1 for Speed of Robot 5
Speed_L6 = np.array([[self.vL6], [self.vR6]]) # Vector 2x1 for Speed of Robot 6
M1 = np.array([[S1[0]],[S1[1]]]).reshape(2,1) #2x1
M2 = np.array([[S2[0]],[S2[1]]]).reshape(2,1) #2x1
M3 = np.array([[S3[0]],[S3[1]]]).reshape(2,1) #2x1
M4 = np.array([[S4[0]],[S4[1]]]).reshape(2,1) #2x1
M5 = np.array([[S5[0]],[S5[1]]]).reshape(2,1) #2x1
M6 = np.array([[S6[0]],[S6[1]]]).reshape(2,1) #2x1
Speed_L1 = np.dot(Di, M1) # 2x1 (VL1, VR1)
Speed_L2 = np.dot(Di, M2) # 2x1 (VL2, VR2)
Speed_L3 = np.dot(Di, M3) # 2x1 (VL3, VR3)
Speed_L4 = np.dot(Di, M4) # 2x1 (VL4, VR4)
Speed_L5 = np.dot(Di, M5) # 2x1 (VL5, VR5)
Speed_L6 = np.dot(Di, M6) # 2x1 (VL6, VR6)
VL1 = float(Speed_L1[0])
VR1 = float(Speed_L1[1])
VL2 = float(Speed_L2[0])
VR2 = float(Speed_L2[1])
VL3 = float(Speed_L3[0])
VR3 = float(Speed_L3[1])
VL4 = float(Speed_L4[0])
VR4 = float(Speed_L4[1])
VL5 = float(Speed_L5[0])
VR5 = float(Speed_L5[1])
VL6 = float(Speed_L6[0])
VR6 = float(Speed_L6[1])
" Publish Speed Commands to Robot 1 "
msgl1 = Float32()
msgr1 = Float32()
msgl1.data = VL1
msgr1.data = VR1
self.publisher_l1.publish(msgl1)
self.publisher_r1.publish(msgr1)
#self.get_logger().info('Publishing R1: "%s"' % msgr1.data)
" Publish Speed Commands to Robot 2 "
msgl2 = Float32()
msgr2 = Float32()
msgl2.data = VL2
msgr2.data = VR2
self.publisher_l2.publish(msgl2)
self.publisher_r2.publish(msgr2)
" Publish Speed Commands to Robot 3 "
msgl3 = Float32()
msgr3 = Float32()
msgl3.data = VL3
msgr3.data = VR3
self.publisher_l3.publish(msgl3)
self.publisher_r3.publish(msgr3)
" Publish Speed Commands to Robot 4 "
msgl4 = Float32()
msgr4 = Float32()
msgl4.data = VL4
msgr4.data = VR4
self.publisher_l4.publish(msgl4)
self.publisher_r4.publish(msgr4)
" Publish Speed Commands to Robot 5 "
msgl5 = Float32()
msgr5 = Float32()
msgl5.data = VL5
msgr5.data = VR5
self.publisher_l5.publish(msgl5)
self.publisher_r5.publish(msgr5)
" Publish Speed Commands to Robot 6 "
msgl6 = Float32()
msgr6 = Float32()
msgl6.data = VL6
msgr6.data = VR6
self.publisher_l6.publish(msgl6)
self.publisher_r6.publish(msgr6)
self.i += 1
def listener_callback(self, msg):
if msg.transforms[0].child_frame_id == 'robot1' :
self.x1 = msg.transforms[0].transform.translation.x
self.y1 = msg.transforms[0].transform.translation.y
self.xr1 = msg.transforms[0].transform.rotation.x
self.yr1 = msg.transforms[0].transform.rotation.y
self.zr1 = msg.transforms[0].transform.rotation.z
self.wr1 = msg.transforms[0].transform.rotation.w
self.Theta1 = euler_from_quaternion(self.xr1,self.yr1,self.zr1,self.wr1)
if msg.transforms[0].child_frame_id == 'robot2' :
self.x2 = msg.transforms[0].transform.translation.x
self.y2 = msg.transforms[0].transform.translation.y
self.xr2 = msg.transforms[0].transform.rotation.x
self.yr2 = msg.transforms[0].transform.rotation.y
self.zr2 = msg.transforms[0].transform.rotation.z
self.wr2 = msg.transforms[0].transform.rotation.w
self.Theta2 = euler_from_quaternion(self.xr2,self.yr2,self.zr2,self.wr2)
if msg.transforms[0].child_frame_id == 'robot3' :
self.x3 = msg.transforms[0].transform.translation.x
self.y3 = msg.transforms[0].transform.translation.y
self.xr3 = msg.transforms[0].transform.rotation.x
self.yr3 = msg.transforms[0].transform.rotation.y
self.zr3 = msg.transforms[0].transform.rotation.z
self.wr3 = msg.transforms[0].transform.rotation.w
self.Theta3 = euler_from_quaternion(self.xr3,self.yr3,self.zr3,self.wr3)
if msg.transforms[0].child_frame_id == 'robot4' :
self.x4 = msg.transforms[0].transform.translation.x
self.y4 = msg.transforms[0].transform.translation.y
self.xr4 = msg.transforms[0].transform.rotation.x
self.yr4 = msg.transforms[0].transform.rotation.y
self.zr4 = msg.transforms[0].transform.rotation.z
self.wr4 = msg.transforms[0].transform.rotation.w
self.Theta4 = euler_from_quaternion(self.xr4,self.yr4,self.zr4,self.wr4)
if msg.transforms[0].child_frame_id == 'robot5' :
self.x5 = msg.transforms[0].transform.translation.x
self.y5 = msg.transforms[0].transform.translation.y
self.xr5 = msg.transforms[0].transform.rotation.x
self.yr5 = msg.transforms[0].transform.rotation.y
self.zr5 = msg.transforms[0].transform.rotation.z
self.wr5 = msg.transforms[0].transform.rotation.w
self.Theta5 = euler_from_quaternion(self.xr5,self.yr5,self.zr5,self.wr5)
if msg.transforms[0].child_frame_id == 'robot6' :
self.x6 = msg.transforms[0].transform.translation.x
self.y6 = msg.transforms[0].transform.translation.y
self.xr6 = msg.transforms[0].transform.rotation.x
self.yr6 = msg.transforms[0].transform.rotation.y
self.zr6 = msg.transforms[0].transform.rotation.z
self.wr6 = msg.transforms[0].transform.rotation.w
self.Theta6 = euler_from_quaternion(self.xr6,self.yr6,self.zr6,self.wr6)
def main(args=None):
rclpy.init(args=args)
minimal_publisher = MinimalPublisher()
time.sleep(5)
rclpy.spin(minimal_publisher)
minimal_publisher.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main()
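For reference, a short hedged sketch of the wheel-speed conversion performed in timer_callback: with wheel separation 2*d, the matrix D maps (vL, vR) to body velocities (v, w), so its inverse recovers wheel speeds from the MLP's predicted commands. The numbers below are illustrative only:
import numpy as np
d = 0.5
D = np.array([[1/2, 1/2], [-1/(2*d), 1/(2*d)]])  # (vL, vR) -> (v, w)
Di = np.linalg.inv(D)                            # (v, w) -> (vL, vR)
v_w = np.array([[0.4], [0.2]])                   # hypothetical (v, w) command
print(np.dot(Di, v_w).ravel())                   # -> [0.3 0.5], i.e. (vL, vR)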
| 42.073491
| 162
| 0.581347
|
import torch
import MLP_Model
import math
import numpy as np
import rclpy
from rclpy.node import Node
from tf2_msgs.msg import TFMessage
from std_msgs.msg import Float32
import time
L = 1
d = 0.5
A = np.ones((6, 6)) - np.identity(6)
ux = np.zeros((6,1))
uy = np.zeros((6,1))
FILE = "model.pth"
loaded_model = MLP_Model.MLP()
loaded_model.load_state_dict(torch.load(FILE))
loaded_model.eval()
def euler_from_quaternion(x, y, z, w):
t3 = +2.0 * (w * z + x * y)
t4 = +1.0 - 2.0 * (y * y + z * z)
yaw_z = math.atan2(t3, t4)
return yaw_z
class MinimalPublisher(Node):
def __init__(self):
super().__init__('minimal_publisher1')
self.publisher_l1 = self.create_publisher(Float32, '/leftMotorSpeedrobot1', 0)
self.publisher_r1 = self.create_publisher(Float32, '/rightMotorSpeedrobot1', 0)
self.publisher_l2 = self.create_publisher(Float32, '/leftMotorSpeedrobot2', 0)
self.publisher_r2 = self.create_publisher(Float32, '/rightMotorSpeedrobot2', 0)
self.publisher_l3 = self.create_publisher(Float32, '/leftMotorSpeedrobot3', 0)
self.publisher_r3 = self.create_publisher(Float32, '/rightMotorSpeedrobot3', 0)
self.publisher_l4 = self.create_publisher(Float32, '/leftMotorSpeedrobot4', 0)
self.publisher_r4 = self.create_publisher(Float32, '/rightMotorSpeedrobot4', 0)
self.publisher_l5 = self.create_publisher(Float32, '/leftMotorSpeedrobot5', 0)
self.publisher_r5 = self.create_publisher(Float32, '/rightMotorSpeedrobot5', 0)
self.publisher_l6 = self.create_publisher(Float32, '/leftMotorSpeedrobot6', 0)
self.publisher_r6 = self.create_publisher(Float32, '/rightMotorSpeedrobot6', 0)
self.subscription = self.create_subscription(
TFMessage,
'/tf',
self.listener_callback,
0)
timer_period = 0.01
self.timer = self.create_timer(timer_period, self.timer_callback)
self.i = 0
self.k = 1
self.scene = 0
self.x1 = 0
self.y1 = 0
self.Theta1 = 0
self.v1 = 0
self.w1 = 0
self.vL1 = 0
self.vR1 = 0
self.x2 = 0
self.y2 = 0
self.Theta2 = 0
self.v2 = 0
self.w2 = 0
self.vL2 = 0
self.vR2 = 0
self.x3 = 0
self.y3 = 0
self.Theta3 = 0
self.v3 = 0
self.w3 = 0
self.vL3 = 0
self.vR3 = 0
self.x4 = 0
self.y4 = 0
self.Theta4 = 0
self.v4 = 0
self.w4 = 0
self.vL4 = 0
self.vR4 = 0
self.x5 = 0
self.y5 = 0
self.Theta5 = 0
self.v5 = 0
self.w5 = 0
self.vL5 = 0
self.vR5 = 0
self.x6 = 0
self.y6 = 0
self.Theta6 = 0
self.v6 = 0
self.w6 = 0
self.vL6 = 0
self.vR6 = 0
def timer_callback(self):
Mx1 = self.x2 - self.x1
My1 = self.y2 - self.y1
Mx2 = ( ( self.x1 - self.x2 ) + ( self.x3 - self.x2 ) ) / 2
My2 = ( ( self.y1 - self.y2 ) + ( self.y3 - self.y2 ) ) / 2
Mx3 = ( ( self.x2 - self.x3 ) + ( self.x4 - self.x3 ) ) / 2
My3 = ( ( self.y2 - self.y3 ) + ( self.y4 - self.y3 ) ) / 2
Mx4 = ( ( self.x3 - self.x4 ) + ( self.x5 - self.x4 ) ) / 2
        My4 = ( ( self.y3 - self.y4 ) + ( self.y5 - self.y4 ) ) / 2
Mx5 = ( ( self.x4 - self.x5 ) + ( self.x6 - self.x5 ) ) / 2
My5 = ( ( self.y4 - self.y5 ) + ( self.y6 - self.y5 ) ) / 2
Mx6 = self.x5 - self.x6
My6 = self.y5 - self.y6
relative_pose_1 = [ Mx1, My1 ]
relative_pose_2 = [ Mx2, My2 ]
relative_pose_3 = [ Mx3, My3 ]
relative_pose_4 = [ Mx4, My4 ]
relative_pose_5 = [ Mx5, My5 ]
relative_pose_6 = [ Mx6, My6 ]
u1_predicted = MLP_Model.predict(relative_pose_1, loaded_model)
u2_predicted = MLP_Model.predict(relative_pose_2, loaded_model)
u3_predicted = MLP_Model.predict(relative_pose_3, loaded_model)
u4_predicted = MLP_Model.predict(relative_pose_4, loaded_model)
u5_predicted = MLP_Model.predict(relative_pose_5, loaded_model)
u6_predicted = MLP_Model.predict(relative_pose_6, loaded_model)
u1_predicted_np = np.array([[ u1_predicted[0][0] ], [ u1_predicted[0][1] ]])
u2_predicted_np = np.array([[ u2_predicted[0][0] ], [ u2_predicted[0][1] ]])
u3_predicted_np = np.array([[ u3_predicted[0][0] ], [ u3_predicted[0][1] ]])
u4_predicted_np = np.array([[ u4_predicted[0][0] ], [ u4_predicted[0][1] ]])
u5_predicted_np = np.array([[ u5_predicted[0][0] ], [ u5_predicted[0][1] ]])
u6_predicted_np = np.array([[ u6_predicted[0][0] ], [ u6_predicted[0][1] ]])
S1 = np.array([[self.v1], [self.w1]])
G1 = np.array([[1,0], [0,1/L]])
R1 = np.array([[math.cos(self.Theta1),math.sin(self.Theta1)],[-math.sin(self.Theta1),math.cos(self.Theta1)]])
S1 = np.dot(np.dot(G1, R1), u1_predicted_np)
S2 = np.array([[self.v2], [self.w2]])
G2 = np.array([[1,0], [0,1/L]])
R2 = np.array([[math.cos(self.Theta2),math.sin(self.Theta2)],[-math.sin(self.Theta2),math.cos(self.Theta2)]])
S2 = np.dot(np.dot(G2, R2), u2_predicted_np)
S3 = np.array([[self.v3], [self.w3]])
G3 = np.array([[1,0], [0,1/L]])
R3 = np.array([[math.cos(self.Theta3),math.sin(self.Theta3)],[-math.sin(self.Theta3),math.cos(self.Theta3)]])
S3 = np.dot(np.dot(G3, R3), u3_predicted_np)
S4 = np.array([[self.v4], [self.w4]])
G4 = np.array([[1,0], [0,1/L]])
R4 = np.array([[math.cos(self.Theta4),math.sin(self.Theta4)],[-math.sin(self.Theta4),math.cos(self.Theta4)]])
S4 = np.dot(np.dot(G4, R4), u4_predicted_np)
S5 = np.array([[self.v5], [self.w5]])
G5 = np.array([[1,0], [0,1/L]])
R5 = np.array([[math.cos(self.Theta5),math.sin(self.Theta5)],[-math.sin(self.Theta5),math.cos(self.Theta5)]])
S5 = np.dot(np.dot(G5, R5), u5_predicted_np)
S6 = np.array([[self.v6], [self.w6]])
G6 = np.array([[1,0], [0,1/L]])
R6 = np.array([[math.cos(self.Theta6),math.sin(self.Theta6)],[-math.sin(self.Theta6),math.cos(self.Theta6)]])
S6 = np.dot(np.dot(G6, R6), u6_predicted_np)
D = np.array([[1/2,1/2],[-1/(2*d),1/(2*d)]])
Di = np.linalg.inv(D)
Speed_L1 = np.array([[self.vL1], [self.vR1]])
Speed_L2 = np.array([[self.vL2], [self.vR2]])
Speed_L3 = np.array([[self.vL3], [self.vR3]])
Speed_L4 = np.array([[self.vL4], [self.vR4]])
Speed_L5 = np.array([[self.vL5], [self.vR5]])
Speed_L6 = np.array([[self.vL6], [self.vR6]])
M1 = np.array([[S1[0]],[S1[1]]]).reshape(2,1)
M2 = np.array([[S2[0]],[S2[1]]]).reshape(2,1)
M3 = np.array([[S3[0]],[S3[1]]]).reshape(2,1)
M4 = np.array([[S4[0]],[S4[1]]]).reshape(2,1)
M5 = np.array([[S5[0]],[S5[1]]]).reshape(2,1)
M6 = np.array([[S6[0]],[S6[1]]]).reshape(2,1)
Speed_L1 = np.dot(Di, M1)
Speed_L2 = np.dot(Di, M2)
Speed_L3 = np.dot(Di, M3)
Speed_L4 = np.dot(Di, M4)
Speed_L5 = np.dot(Di, M5)
Speed_L6 = np.dot(Di, M6)
VL1 = float(Speed_L1[0])
VR1 = float(Speed_L1[1])
VL2 = float(Speed_L2[0])
VR2 = float(Speed_L2[1])
VL3 = float(Speed_L3[0])
VR3 = float(Speed_L3[1])
VL4 = float(Speed_L4[0])
VR4 = float(Speed_L4[1])
VL5 = float(Speed_L5[0])
VR5 = float(Speed_L5[1])
VL6 = float(Speed_L6[0])
VR6 = float(Speed_L6[1])
msgl1 = Float32()
msgr1 = Float32()
msgl1.data = VL1
msgr1.data = VR1
self.publisher_l1.publish(msgl1)
self.publisher_r1.publish(msgr1)
msgl2 = Float32()
msgr2 = Float32()
msgl2.data = VL2
msgr2.data = VR2
self.publisher_l2.publish(msgl2)
self.publisher_r2.publish(msgr2)
msgl3 = Float32()
msgr3 = Float32()
msgl3.data = VL3
msgr3.data = VR3
self.publisher_l3.publish(msgl3)
self.publisher_r3.publish(msgr3)
msgl4 = Float32()
msgr4 = Float32()
msgl4.data = VL4
msgr4.data = VR4
self.publisher_l4.publish(msgl4)
self.publisher_r4.publish(msgr4)
msgl5 = Float32()
msgr5 = Float32()
msgl5.data = VL5
msgr5.data = VR5
self.publisher_l5.publish(msgl5)
self.publisher_r5.publish(msgr5)
msgl6 = Float32()
msgr6 = Float32()
msgl6.data = VL6
msgr6.data = VR6
self.publisher_l6.publish(msgl6)
self.publisher_r6.publish(msgr6)
self.i += 1
def listener_callback(self, msg):
if msg.transforms[0].child_frame_id == 'robot1' :
self.x1 = msg.transforms[0].transform.translation.x
self.y1 = msg.transforms[0].transform.translation.y
self.xr1 = msg.transforms[0].transform.rotation.x
self.yr1 = msg.transforms[0].transform.rotation.y
self.zr1 = msg.transforms[0].transform.rotation.z
self.wr1 = msg.transforms[0].transform.rotation.w
self.Theta1 = euler_from_quaternion(self.xr1,self.yr1,self.zr1,self.wr1)
if msg.transforms[0].child_frame_id == 'robot2' :
self.x2 = msg.transforms[0].transform.translation.x
self.y2 = msg.transforms[0].transform.translation.y
self.xr2 = msg.transforms[0].transform.rotation.x
self.yr2 = msg.transforms[0].transform.rotation.y
self.zr2 = msg.transforms[0].transform.rotation.z
self.wr2 = msg.transforms[0].transform.rotation.w
self.Theta2 = euler_from_quaternion(self.xr2,self.yr2,self.zr2,self.wr2)
if msg.transforms[0].child_frame_id == 'robot3' :
self.x3 = msg.transforms[0].transform.translation.x
self.y3 = msg.transforms[0].transform.translation.y
self.xr3 = msg.transforms[0].transform.rotation.x
self.yr3 = msg.transforms[0].transform.rotation.y
self.zr3 = msg.transforms[0].transform.rotation.z
self.wr3 = msg.transforms[0].transform.rotation.w
self.Theta3 = euler_from_quaternion(self.xr3,self.yr3,self.zr3,self.wr3)
if msg.transforms[0].child_frame_id == 'robot4' :
self.x4 = msg.transforms[0].transform.translation.x
self.y4 = msg.transforms[0].transform.translation.y
self.xr4 = msg.transforms[0].transform.rotation.x
self.yr4 = msg.transforms[0].transform.rotation.y
self.zr4 = msg.transforms[0].transform.rotation.z
self.wr4 = msg.transforms[0].transform.rotation.w
self.Theta4 = euler_from_quaternion(self.xr4,self.yr4,self.zr4,self.wr4)
if msg.transforms[0].child_frame_id == 'robot5' :
self.x5 = msg.transforms[0].transform.translation.x
self.y5 = msg.transforms[0].transform.translation.y
self.xr5 = msg.transforms[0].transform.rotation.x
self.yr5 = msg.transforms[0].transform.rotation.y
self.zr5 = msg.transforms[0].transform.rotation.z
self.wr5 = msg.transforms[0].transform.rotation.w
self.Theta5 = euler_from_quaternion(self.xr5,self.yr5,self.zr5,self.wr5)
if msg.transforms[0].child_frame_id == 'robot6' :
self.x6 = msg.transforms[0].transform.translation.x
self.y6 = msg.transforms[0].transform.translation.y
self.xr6 = msg.transforms[0].transform.rotation.x
self.yr6 = msg.transforms[0].transform.rotation.y
self.zr6 = msg.transforms[0].transform.rotation.z
self.wr6 = msg.transforms[0].transform.rotation.w
self.Theta6 = euler_from_quaternion(self.xr6,self.yr6,self.zr6,self.wr6)
def main(args=None):
rclpy.init(args=args)
minimal_publisher = MinimalPublisher()
time.sleep(5)
rclpy.spin(minimal_publisher)
minimal_publisher.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main()
| true
| true
|
790b522cc3dd60825737300e5cdb957d52ed0a5f
| 820
|
py
|
Python
|
demos/localized/MainWindow.py
|
hsoft/xibless
|
a7393d28b4a31698869b2203d4d8b3398de1de7f
|
[
"BSD-3-Clause"
] | 2
|
2016-09-13T12:34:34.000Z
|
2018-05-15T01:06:53.000Z
|
demos/localized/MainWindow.py
|
hsoft/xibless
|
a7393d28b4a31698869b2203d4d8b3398de1de7f
|
[
"BSD-3-Clause"
] | null | null | null |
demos/localized/MainWindow.py
|
hsoft/xibless
|
a7393d28b4a31698869b2203d4d8b3398de1de7f
|
[
"BSD-3-Clause"
] | null | null | null |
ownerclass = 'AppDelegate'
ownerimport = 'AppDelegate.h'
# Init
result = Window(330, 110, "Tell me your name!")
nameLabel = Label(result, text="Name:")
nameLabel.width = 45
nameField = TextField(result, text="")
helloLabel = Label(result, text="")
button = Button(result, title="Say Hello", action=Action(owner, 'sayHello'))
button.width = 100
# Owner Assignments
owner.nameField = nameField
owner.helloLabel = helloLabel
# Layout
nameLabel.moveTo(Pack.UpperLeft)
nameField.moveNextTo(nameLabel, Pack.Right, Pack.Middle)
nameField.fill(Pack.Right)
helloLabel.moveNextTo(nameLabel, Pack.Below, Pack.Left)
helloLabel.fill(Pack.Right)
button.moveNextTo(helloLabel, Pack.Below, Pack.Right)
nameField.setAnchor(Pack.UpperLeft, growX=True)
helloLabel.setAnchor(Pack.UpperLeft, growX=True)
button.setAnchor(Pack.UpperRight)
| 30.37037
| 76
| 0.776829
|
ownerclass = 'AppDelegate'
ownerimport = 'AppDelegate.h'
result = Window(330, 110, "Tell me your name!")
nameLabel = Label(result, text="Name:")
nameLabel.width = 45
nameField = TextField(result, text="")
helloLabel = Label(result, text="")
button = Button(result, title="Say Hello", action=Action(owner, 'sayHello'))
button.width = 100
owner.nameField = nameField
owner.helloLabel = helloLabel
nameLabel.moveTo(Pack.UpperLeft)
nameField.moveNextTo(nameLabel, Pack.Right, Pack.Middle)
nameField.fill(Pack.Right)
helloLabel.moveNextTo(nameLabel, Pack.Below, Pack.Left)
helloLabel.fill(Pack.Right)
button.moveNextTo(helloLabel, Pack.Below, Pack.Right)
nameField.setAnchor(Pack.UpperLeft, growX=True)
helloLabel.setAnchor(Pack.UpperLeft, growX=True)
button.setAnchor(Pack.UpperRight)
| true
| true
|
790b5279a1b26a859351ee80c80c069b39ca7510
| 5,790
|
py
|
Python
|
cave/analyzer/parameter_importance/fanova.py
|
deslay1/CAVE
|
e4b9abc3812034f49dddd27ffc17dbab39782a1c
|
[
"BSD-3-Clause"
] | 45
|
2018-01-11T11:26:11.000Z
|
2021-06-22T06:14:39.000Z
|
cave/analyzer/parameter_importance/fanova.py
|
deslay1/CAVE
|
e4b9abc3812034f49dddd27ffc17dbab39782a1c
|
[
"BSD-3-Clause"
] | 150
|
2017-12-20T16:14:45.000Z
|
2021-09-28T11:26:33.000Z
|
cave/analyzer/parameter_importance/fanova.py
|
automl/SpySMAC
|
afcbecd0b9cb97276625c16a89cb6df141e6f6f2
|
[
"BSD-3-Clause"
] | 17
|
2018-03-17T04:46:09.000Z
|
2021-02-18T18:31:38.000Z
|
import operator
import os
from collections import OrderedDict
from pandas import DataFrame
from cave.analyzer.parameter_importance.base_parameter_importance import BaseParameterImportance
class Fanova(BaseParameterImportance):
"""
fANOVA (functional analysis of variance) computes the fraction of the variance in the cost space explained by
changing a parameter by marginalizing over all other parameters, for each parameter (or for pairs of
parameters). Parameters with high importance scores will have a large impact on the performance. To this end, a
random forest is trained as an empirical performance model on the available empirical data from the available
runhistories.
"""
def __init__(self,
runscontainer,
marginal_threshold=0.05):
"""Wrapper for parameter_importance to save the importance-object/ extract the results. We want to show the
top X most important parameter-fanova-plots.
Parameters
----------
runscontainer: RunsContainer
contains all important information about the configurator runs
marginal_threshold: float
parameter/s must be at least this important to be mentioned
"""
super().__init__(runscontainer)
self.marginal_threshold = marginal_threshold
self.parameter_importance("fanova")
def get_name(self):
return 'fANOVA'
def postprocess(self, pimp, output_dir):
result = OrderedDict()
def parse_pairwise(p):
"""parse pimp's way of having pairwise parameters as key as str and return list of individuals"""
res = [tmp.strip('\' ') for tmp in p.strip('[]').split(',')]
return res
parameter_imp = {k: v * 100 for k, v in pimp.evaluator.evaluated_parameter_importance.items()}
param_imp_std = {}
if hasattr(pimp.evaluator, 'evaluated_parameter_importance_uncertainty'):
param_imp_std = {k: v * 100 for k, v in pimp.evaluator.evaluated_parameter_importance_uncertainty.items()}
for k in parameter_imp.keys():
self.logger.debug("fanova-importance for %s: mean (over trees): %f, std: %s", k, parameter_imp[k],
str(param_imp_std[k]) if param_imp_std else 'N/A')
# Split single and pairwise (pairwise are string: "['p1','p2']")
single_imp = {k: v for k, v in parameter_imp.items() if not k.startswith('[') and v > self.marginal_threshold}
pairwise_imp = {k: v for k, v in parameter_imp.items() if k.startswith('[') and v > self.marginal_threshold}
# Set internal parameter importance for further analysis (such as parallel coordinates)
self.fanova_single_importance = single_imp
        self.fanova_pairwise_importance = pairwise_imp
        # Dicts to lists of tuples, sorted in descending order of importance
single_imp = OrderedDict(sorted(single_imp.items(), key=operator.itemgetter(1), reverse=True))
pairwise_imp = OrderedDict(sorted(pairwise_imp.items(), key=operator.itemgetter(1), reverse=True))
# Create table
table = []
if len(single_imp) > 0:
table.extend([(20*"-"+" Single importance: "+20*"-", 20*"-")])
for k, v in single_imp.items():
value = str(round(v, 4))
if param_imp_std:
value += " +/- " + str(round(param_imp_std[k], 4))
table.append((k, value))
if len(pairwise_imp) > 0:
table.extend([(20*"-"+" Pairwise importance: "+20*"-", 20*"-")])
for k, v in pairwise_imp.items():
name = ' & '.join(parse_pairwise(k))
value = str(round(v, 4))
if param_imp_std:
value += " +/- " + str(round(param_imp_std[k], 4))
table.append((name, value))
keys, fanova_table = [k[0] for k in table], [k[1:] for k in table]
df = DataFrame(data=fanova_table, index=keys)
result['Importance'] = {'table': df.to_html(escape=False, header=False, index=True, justify='left')}
# Get plot-paths
result['Marginals'] = {p: {'figure': os.path.join(output_dir, "fanova", p + '.png')} for p in single_imp.keys()}
# Right now no way to access paths of the plots -> file issue
pairwise_plots = {" & ".join(parse_pairwise(p)):
os.path.join(output_dir, 'fanova', '_'.join(parse_pairwise(p)) + '.png')
for p in pairwise_imp.keys()}
result['Pairwise Marginals'] = {p: {'figure': path}
for p, path in pairwise_plots.items() if os.path.exists(path)}
return result
def get_jupyter(self):
from IPython.core.display import HTML, Image, display
for b, result in self.result.items():
error = self.result[b]['else'] if 'else' in self.result[b] else None
if error:
display(HTML(error))
else:
# Show table
display(HTML(self.result[b]["Importance"]["table"]))
# Show plots
display(*list([Image(filename=d["figure"]) for d in self.result[b]['Marginals'].values()]))
display(*list([Image(filename=d["figure"]) for d in self.result[b]['Pairwise Marginals'].values()]))
# While working for a prettier solution, this might be an option:
# display(HTML(figure_to_html([d["figure"] for d in self.result[b]['Marginals'].values()] +
# [d["figure"] for d in self.result[b]['Pairwise Marginals'].values()],
# max_in_a_row=3, true_break_between_rows=True)))
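As a quick illustration of the key format handled by parse_pairwise (pairwise keys arrive as the string form of a Python list, per the comment above; the parameter names here are invented):
def parse_pairwise(p):
    """Split pimp's stringified pairwise key into its parameter names."""
    return [tmp.strip('\' ') for tmp in p.strip('[]').split(',')]
print(parse_pairwise("['learning_rate', 'batch_size']"))
# -> ['learning_rate', 'batch_size']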
| 49.067797
| 120
| 0.604836
|
import operator
import os
from collections import OrderedDict
from pandas import DataFrame
from cave.analyzer.parameter_importance.base_parameter_importance import BaseParameterImportance
class Fanova(BaseParameterImportance):
def __init__(self,
runscontainer,
marginal_threshold=0.05):
super().__init__(runscontainer)
self.marginal_threshold = marginal_threshold
self.parameter_importance("fanova")
def get_name(self):
return 'fANOVA'
def postprocess(self, pimp, output_dir):
result = OrderedDict()
def parse_pairwise(p):
res = [tmp.strip('\' ') for tmp in p.strip('[]').split(',')]
return res
parameter_imp = {k: v * 100 for k, v in pimp.evaluator.evaluated_parameter_importance.items()}
param_imp_std = {}
if hasattr(pimp.evaluator, 'evaluated_parameter_importance_uncertainty'):
param_imp_std = {k: v * 100 for k, v in pimp.evaluator.evaluated_parameter_importance_uncertainty.items()}
for k in parameter_imp.keys():
self.logger.debug("fanova-importance for %s: mean (over trees): %f, std: %s", k, parameter_imp[k],
str(param_imp_std[k]) if param_imp_std else 'N/A')
# Split single and pairwise (pairwise are string: "['p1','p2']")
single_imp = {k: v for k, v in parameter_imp.items() if not k.startswith('[') and v > self.marginal_threshold}
pairwise_imp = {k: v for k, v in parameter_imp.items() if k.startswith('[') and v > self.marginal_threshold}
# Set internal parameter importance for further analysis (such as parallel coordinates)
self.fanova_single_importance = single_imp
        self.fanova_pairwise_importance = pairwise_imp
        # Dicts to lists of tuples, sorted in descending order of importance
single_imp = OrderedDict(sorted(single_imp.items(), key=operator.itemgetter(1), reverse=True))
pairwise_imp = OrderedDict(sorted(pairwise_imp.items(), key=operator.itemgetter(1), reverse=True))
# Create table
table = []
if len(single_imp) > 0:
table.extend([(20*"-"+" Single importance: "+20*"-", 20*"-")])
for k, v in single_imp.items():
value = str(round(v, 4))
if param_imp_std:
value += " +/- " + str(round(param_imp_std[k], 4))
table.append((k, value))
if len(pairwise_imp) > 0:
table.extend([(20*"-"+" Pairwise importance: "+20*"-", 20*"-")])
for k, v in pairwise_imp.items():
name = ' & '.join(parse_pairwise(k))
value = str(round(v, 4))
if param_imp_std:
value += " +/- " + str(round(param_imp_std[k], 4))
table.append((name, value))
keys, fanova_table = [k[0] for k in table], [k[1:] for k in table]
df = DataFrame(data=fanova_table, index=keys)
result['Importance'] = {'table': df.to_html(escape=False, header=False, index=True, justify='left')}
# Get plot-paths
result['Marginals'] = {p: {'figure': os.path.join(output_dir, "fanova", p + '.png')} for p in single_imp.keys()}
# Right now no way to access paths of the plots -> file issue
pairwise_plots = {" & ".join(parse_pairwise(p)):
os.path.join(output_dir, 'fanova', '_'.join(parse_pairwise(p)) + '.png')
for p in pairwise_imp.keys()}
result['Pairwise Marginals'] = {p: {'figure': path}
for p, path in pairwise_plots.items() if os.path.exists(path)}
return result
def get_jupyter(self):
from IPython.core.display import HTML, Image, display
for b, result in self.result.items():
error = self.result[b]['else'] if 'else' in self.result[b] else None
if error:
display(HTML(error))
else:
# Show table
display(HTML(self.result[b]["Importance"]["table"]))
# Show plots
display(*list([Image(filename=d["figure"]) for d in self.result[b]['Marginals'].values()]))
display(*list([Image(filename=d["figure"]) for d in self.result[b]['Pairwise Marginals'].values()]))
# While working for a prettier solution, this might be an option:
# display(HTML(figure_to_html([d["figure"] for d in self.result[b]['Marginals'].values()] +
# [d["figure"] for d in self.result[b]['Pairwise Marginals'].values()],
# max_in_a_row=3, true_break_between_rows=True)))
| true
| true
|
790b52ca08302587366902f6c3c2a553c5bee8de
| 1,548
|
py
|
Python
|
config/wsgi.py
|
Murithi/lacuna
|
752c86f9f28bc431fba1e19cec9669da89beb1aa
|
[
"BSD-3-Clause"
] | null | null | null |
config/wsgi.py
|
Murithi/lacuna
|
752c86f9f28bc431fba1e19cec9669da89beb1aa
|
[
"BSD-3-Clause"
] | null | null | null |
config/wsgi.py
|
Murithi/lacuna
|
752c86f9f28bc431fba1e19cec9669da89beb1aa
|
[
"BSD-3-Clause"
] | null | null | null |
"""
WSGI config for lacuna project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
from django.core.wsgi import get_wsgi_application
from raven.contrib.django.raven_compat.middleware.wsgi import Sentry
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
application = Sentry(application)
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| 39.692308
| 79
| 0.805556
|
import os
from django.core.wsgi import get_wsgi_application
from raven.contrib.django.raven_compat.middleware.wsgi import Sentry
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
application = get_wsgi_application()
application = Sentry(application)
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| true
| true
|
790b53142173212bb718f366b723b78c3ebc9deb
| 7,028
|
py
|
Python
|
qtmultimedia/tests/auto/runautotests.py
|
wgnet/wds_qt
|
8db722fd367d2d0744decf99ac7bafaba8b8a3d3
|
[
"Apache-2.0"
] | 1
|
2020-04-30T15:47:35.000Z
|
2020-04-30T15:47:35.000Z
|
qtmultimedia/tests/auto/runautotests.py
|
wgnet/wds_qt
|
8db722fd367d2d0744decf99ac7bafaba8b8a3d3
|
[
"Apache-2.0"
] | null | null | null |
qtmultimedia/tests/auto/runautotests.py
|
wgnet/wds_qt
|
8db722fd367d2d0744decf99ac7bafaba8b8a3d3
|
[
"Apache-2.0"
] | null | null | null |
#! /usr/bin/env python
#############################################################################
##
## Copyright (C) 2015 The Qt Company Ltd.
## Contact: http://www.qt.io/licensing/
##
## This file is part of the build configuration tools of the Qt Toolkit.
##
## $QT_BEGIN_LICENSE:LGPL21$
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and The Qt Company. For licensing terms
## and conditions see http://www.qt.io/terms-conditions. For further
## information use the contact form at http://www.qt.io/contact-us.
##
## GNU Lesser General Public License Usage
## Alternatively, this file may be used under the terms of the GNU Lesser
## General Public License version 2.1 or version 3 as published by the Free
## Software Foundation and appearing in the file LICENSE.LGPLv21 and
## LICENSE.LGPLv3 included in the packaging of this file. Please review the
## following information to ensure the GNU Lesser General Public License
## requirements will be met: https://www.gnu.org/licenses/lgpl.html and
## http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
##
## As a special exception, The Qt Company gives you certain additional
## rights. These rights are described in The Qt Company LGPL Exception
## version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
##
## $QT_END_LICENSE$
##
#############################################################################
import sys
import re
import os
import subprocess
import errno
instructions = "This script can be used as follows:\n\
a) if run from tests/auto without any arguments it runs unit tests and then integration tests\n\
b) if run from tests/auto/unit, it runs unit tests\n\
c) if run from tests/auto/integration, it runs integration tests\n\
d) if run from tests/auto with \"unit\" it runs unit tests, and correspondingly for \"integration\""
# Colors
red="\033[41;37m";
redfg="\033[31m";
norm="\033[0m";
green="\033[32m";
grey="\033[37m";
yellow="\033[33m";
# Variables
curtest = "";
numpasses = [0];
numfails = [0];
numcrashes = 0;
numx = [0];
runTests = []
notRunTests = []
# Do not run the tests in these directories.
exclusionList = ["qdeclarativevideo", "qmultimedia_common"]
# Helper function for replacing stuffs
def print_color_string(string, color, match, index):
if index > 0:
print string[:match.start(index)] + color + string[match.start(index):match.end(index)] + norm + string[match.end(index):],
else:
print color + string[:-1] + norm
# AWK translation
awkfoo = [
(re.compile("\*\*\*\*\*\*\*\*\* Start testing of (\S+)"), yellow, 1, curtest),
(re.compile("^(PASS) "), green, 1, numpasses),
(re.compile("^(FAIL!) "), red, 0, numfails),
(re.compile("^(XFAIL) "), redfg, 1, numx),
(re.compile("^(XPASS) "), redfg, 1, numx),
(re.compile("^(QFATAL) "), red, 0, numx),
(re.compile("^(QDEBUG) "), grey, 0, None),
(re.compile("^(QWARN) "), yellow, 1, None),
(re.compile("\*\*\*\*\*\*\*\*\* Finished testing of (\S+)"), yellow, 1, curtest),
]
#
# This method runs the test cases, color codes the output from the test cases and adds up the passes,
# fails etc.
#
def resultSummary(arg):
    global numcrashes  # this counter is assigned below, so it must be declared global
    try:
pp = subprocess.Popen(arg, shell=False,stderr=subprocess.STDOUT,stdout=subprocess.PIPE);
p = pp.stdout;
try:
while True:
line = p.readline()
if len(line) == 0:
break
                for (regex, color, index, var) in awkfoo:
                    m = regex.match(line)
if m:
break
if m:
print_color_string(line, color, m, index)
if isinstance(var, list):
var[0] = var[0] + 1;
else:
                        var = m.group(index)
else:
print line,
finally:
rc = p.close();
pp.wait();
if pp.returncode < 0:
print red + "Error: '%s' exited with signal %d" % (arg, -pp.returncode) + norm
numcrashes = numcrashes + 1
except OSError, e:
if e.errno == errno.ENOENT:
print red + "Test '%s' not found." % arg + norm;
else:
print red + "Got an exception running '%s': %s " % (arg, e.strerror) + norm
numcrashes = numcrashes + 1
#
# This method finds the test cases that should be run and runs them.
#
def runAllTests(test):
for filename in os.listdir(test):
if(re.search("^q", filename)):
#Skip the dir if it is in the exclusion list.
exclude = False
for dir in exclusionList:
if(re.search(dir, filename)):
exclude = True
if(not(exclude)):
                #Set path to this if on Windows
                if(os.name=="nt"):
                    exePath = test+"\\"+filename+"\\debug\\tst_"+filename+".exe"
                #Set path on OS X
                elif(sys.platform=="darwin"):
                    exePath = test +"/"+filename+"/tst_"+filename
                    if not (os.path.exists(exePath)):
                        exePath = test + "/"+filename+"/tst_"+filename+".app/Contents/MacOS/tst_"+filename
                #Set path to this if on Unix
                else:
                    exePath = test +"/"+filename+"/tst_"+filename
if(os.path.exists(exePath)):
runTests.append(filename)
resultSummary(exePath);
else:
notRunTests.append(filename)
arguments = sys.argv[1:]
count = len(arguments)
# Find the current working directory.
cwd = os.getcwd()
if(count == 0):
if re.search("auto$", cwd):
x = 0
runAllTests("unit")
runAllTests("integration")
elif re.search("unit$", cwd):
runAllTests(cwd)
elif re.search("integration$", cwd):
runAllTests(cwd)
else:
print "You are running this script from the wrong directory! " + instructions
exit()
elif(count == 1):
if os.path.exists(sys.argv[1]):
runAllTests(sys.argv[1])
else:
print sys.argv[1] + " test cases do not exist! " + instructions
exit()
else:
print "You have passed too many arguments! " + instructions
exit()
print "Total of all tests: %d passes, %d failures, %d unexpected, %d badnesses." % (numpasses[0], numfails[0], numx[0], numcrashes);
if runTests:
print "The following test cases were run: "
for testCase in runTests:
print testCase
else:
print "No test cases were run!"
if notRunTests:
print "The following test cases could not be run: "
for testCase in notRunTests:
print testCase
else:
print "All test cases were run."
| 34.45098
| 132
| 0.581816
| false
| true
|
|
790b534925e3065065ceefebd43933f99abb3e34
| 392
|
py
|
Python
|
selenium_test/action.py
|
fuandenghuo/100-days-of-python
|
50f3263b0984bb6690e565d58604c1882aaf465e
|
[
"MIT"
] | null | null | null |
selenium_test/action.py
|
fuandenghuo/100-days-of-python
|
50f3263b0984bb6690e565d58604c1882aaf465e
|
[
"MIT"
] | null | null | null |
selenium_test/action.py
|
fuandenghuo/100-days-of-python
|
50f3263b0984bb6690e565d58604c1882aaf465e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
__author__ = 'abbot'
from selenium import webdriver
from selenium.webdriver import ActionChains
driver = webdriver.PhantomJS(executable_path='/Users/wangbo/Downloads/phantomjs-2.1.1-macosx/bin/phantomjs')
ac = driver.find_element_by_xpath('element')
ActionChains(driver).move_to_element(ac).perform()
ActionChains(driver).move_to_element(ac).click(ac).perform()
| 26.133333
| 108
| 0.783163
|
__author__ = 'abbot'
from selenium import webdriver
from selenium.webdriver import ActionChains
driver = webdriver.PhantomJS(executable_path='/Users/wangbo/Downloads/phantomjs-2.1.1-macosx/bin/phantomjs')
ac = driver.find_element_by_xpath('element')
ActionChains(driver).move_to_element(ac).perform()
ActionChains(driver).move_to_element(ac).click(ac).perform()
| true
| true
|
790b55568cd5002de26cd12a8eb550d80d758f5d
| 2,764
|
py
|
Python
|
rr/views/contact.py
|
UniversityofHelsinki/sp-registry
|
b1336b89788c076bf93f61b97b5469a99acd902c
|
[
"MIT"
] | null | null | null |
rr/views/contact.py
|
UniversityofHelsinki/sp-registry
|
b1336b89788c076bf93f61b97b5469a99acd902c
|
[
"MIT"
] | 1
|
2020-08-10T13:16:58.000Z
|
2020-08-18T06:30:20.000Z
|
rr/views/contact.py
|
UniversityofHelsinki/sp-registry
|
b1336b89788c076bf93f61b97b5469a99acd902c
|
[
"MIT"
] | null | null | null |
import logging
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from django.utils import timezone
from django.utils.translation import ugettext as _
from rr.forms.contact import ContactForm
from rr.models.contact import Contact
from rr.utils.serviceprovider import get_service_provider
logger = logging.getLogger(__name__)
@login_required
def contact_list(request, pk):
"""
Displays a list of :model:`rr.Contact` linked to
:model:`rr.ServiceProvider`.
Includes a ModelForm for adding :model:`rr.Contact` to
:model:`rr.ServiceProvider`.
**Context**
``object_list``
List of :model:`rr.Contact`.
``form``
ModelForm for creating a :model:`rr.Contact`
``object``
An instance of :model:`rr.ServiceProvider`.
**Template:**
:template:`rr/contact.html`
"""
sp = get_service_provider(pk, request.user)
form = ContactForm(sp=sp)
if request.method == "POST":
if "add_contact" in request.POST:
form = _add_contact(request, sp)
elif "remove_contact" in request.POST:
_remove_contacts(request, sp)
contacts = Contact.objects.filter(sp=sp, end_at=None)
return render(request, "rr/contact.html", {'object_list': contacts,
'form': form,
'object': sp})
def _add_contact(request, sp):
form = ContactForm(request.POST, sp=sp)
if form.is_valid():
contact_type = form.cleaned_data['type']
firstname = form.cleaned_data['firstname']
lastname = form.cleaned_data['lastname']
email = form.cleaned_data['email']
Contact.objects.create(sp=sp,
type=contact_type,
firstname=firstname,
lastname=lastname,
email=email)
sp.save_modified()
logger.info("Contact added for {sp} by {user}"
.format(sp=sp, user=request.user))
messages.add_message(request, messages.INFO, _('Contact added.'))
form = ContactForm(sp=sp)
return form
def _remove_contacts(request, sp):
for key, value in request.POST.dict().items():
if value == "on":
contact = Contact.objects.get(pk=key)
if contact.sp == sp:
contact.end_at = timezone.now()
contact.save()
sp.save_modified()
logger.info("Contact removed from {sp} by {user}"
.format(sp=sp, user=request.user))
messages.add_message(request, messages.INFO, _('Contact removed.'))
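Note that _remove_contacts soft-deletes by stamping end_at rather than deleting rows, which is why the active set is queried with end_at=None; a plain-Python sketch of that convention (the data and field names here are illustrative, not from this repo):
from datetime import datetime, timezone
contacts = [{"email": "a@example.org", "end_at": None},
            {"email": "b@example.org", "end_at": None}]
contacts[0]["end_at"] = datetime.now(timezone.utc)  # soft-delete, keep history
active = [c for c in contacts if c["end_at"] is None]
print([c["email"] for c in active])  # ['b@example.org']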
| 33.301205
| 83
| 0.596599
|
import logging
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from django.utils import timezone
from django.utils.translation import ugettext as _
from rr.forms.contact import ContactForm
from rr.models.contact import Contact
from rr.utils.serviceprovider import get_service_provider
logger = logging.getLogger(__name__)
@login_required
def contact_list(request, pk):
sp = get_service_provider(pk, request.user)
form = ContactForm(sp=sp)
if request.method == "POST":
if "add_contact" in request.POST:
form = _add_contact(request, sp)
elif "remove_contact" in request.POST:
_remove_contacts(request, sp)
contacts = Contact.objects.filter(sp=sp, end_at=None)
return render(request, "rr/contact.html", {'object_list': contacts,
'form': form,
'object': sp})
def _add_contact(request, sp):
form = ContactForm(request.POST, sp=sp)
if form.is_valid():
contact_type = form.cleaned_data['type']
firstname = form.cleaned_data['firstname']
lastname = form.cleaned_data['lastname']
email = form.cleaned_data['email']
Contact.objects.create(sp=sp,
type=contact_type,
firstname=firstname,
lastname=lastname,
email=email)
sp.save_modified()
logger.info("Contact added for {sp} by {user}"
.format(sp=sp, user=request.user))
messages.add_message(request, messages.INFO, _('Contact added.'))
form = ContactForm(sp=sp)
return form
def _remove_contacts(request, sp):
for key, value in request.POST.dict().items():
if value == "on":
contact = Contact.objects.get(pk=key)
if contact.sp == sp:
contact.end_at = timezone.now()
contact.save()
sp.save_modified()
logger.info("Contact removed from {sp} by {user}"
.format(sp=sp, user=request.user))
messages.add_message(request, messages.INFO, _('Contact removed.'))
| true
| true
|
790b55cdd7a462b46bdc4689951b961f789788c7
| 1,311
|
py
|
Python
|
setup.py
|
ChihHsuanLin/bevel
|
c8ac7f203aa3e2b9b44a9d66d1b283ac32680d8b
|
[
"MIT"
] | null | null | null |
setup.py
|
ChihHsuanLin/bevel
|
c8ac7f203aa3e2b9b44a9d66d1b283ac32680d8b
|
[
"MIT"
] | null | null | null |
setup.py
|
ChihHsuanLin/bevel
|
c8ac7f203aa3e2b9b44a9d66d1b283ac32680d8b
|
[
"MIT"
] | null | null | null |
import os
from setuptools import setup
def filepath(fname):
return os.path.join(os.path.dirname(__file__), fname)
exec(compile(open('bevel/version.py').read(),
'bevel/version.py', 'exec'))
readme_md = filepath('README.md')
try:
import pypandoc
readme_rst = pypandoc.convert_file(readme_md, 'rst')
except ImportError:
readme_rst = open(readme_md).read()
setup(
name="bevel",
version="0.1.1",
author="Ross Diener, Steven Wu, Cameron Davidson-Pilon",
author_email="ross.diener@shopify.com",
description="Ordinal regression in Python",
license="MIT",
keywords="oridinal regression statistics data analysis",
url="https://github.com/ShopifyPeopleAnalytics/bevel",
packages=[
'bevel',
],
long_description=readme_rst,
classifiers=[
"Development Status :: 4 - Beta",
"Programming Language :: Python",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Topic :: Scientific/Engineering",
],
install_requires=[
"numpy>=1.13.3",
"scipy>=1.0.0",
"pandas>=0.21.0",
"numdifftools>=0.9.20"
],
package_data={
"bevel": [
"../README.md",
"../LICENSE",
]
},
)
| 24.277778
| 60
| 0.591915
|
import os
from setuptools import setup
def filepath(fname):
return os.path.join(os.path.dirname(__file__), fname)
exec(compile(open('bevel/version.py').read(),
'bevel/version.py', 'exec'))
readme_md = filepath('README.md')
try:
import pypandoc
readme_rst = pypandoc.convert_file(readme_md, 'rst')
except ImportError:
readme_rst = open(readme_md).read()
setup(
name="bevel",
version="0.1.1",
author="Ross Diener, Steven Wu, Cameron Davidson-Pilon",
author_email="ross.diener@shopify.com",
description="Ordinal regression in Python",
license="MIT",
keywords="oridinal regression statistics data analysis",
url="https://github.com/ShopifyPeopleAnalytics/bevel",
packages=[
'bevel',
],
long_description=readme_rst,
classifiers=[
"Development Status :: 4 - Beta",
"Programming Language :: Python",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Topic :: Scientific/Engineering",
],
install_requires=[
"numpy>=1.13.3",
"scipy>=1.0.0",
"pandas>=0.21.0",
"numdifftools>=0.9.20"
],
package_data={
"bevel": [
"../README.md",
"../LICENSE",
]
},
)
| true
| true
|
790b55e49938b8165c6db7fcf0fb013c3c3c54cf
| 7,923
|
py
|
Python
|
support/standardize.py
|
AMOOOMA/stocktwits-svm-nlp
|
7abf533dcbe1f7e81b822e7b0b897f4319f89290
|
[
"MIT"
] | 2
|
2021-04-02T08:21:00.000Z
|
2021-08-31T15:28:47.000Z
|
support/standardize.py
|
AMOOOMA/stocktwits-svm-nlp
|
7abf533dcbe1f7e81b822e7b0b897f4319f89290
|
[
"MIT"
] | 4
|
2020-10-30T03:35:36.000Z
|
2020-11-19T03:48:47.000Z
|
support/standardize.py
|
AMOOOMA/stocktwits-svm-nlp
|
7abf533dcbe1f7e81b822e7b0b897f4319f89290
|
[
"MIT"
] | 3
|
2021-03-15T04:33:56.000Z
|
2022-01-13T08:22:21.000Z
|
import random
# averaging the embeddings between 2 words
# return the averaged embeddings
def average_two_embeddings_vectors(a, b):
avg_embeddings = []
i = 0
for embed in a:
z = (embed + b[i]) / 2.0
avg_embeddings.append(z)
i += 1
return avg_embeddings
# helper func; updates tokens and embeddings with the new combined tokens and averaged embeddings
# return the updated tokens string and embeddings vector
def update_tok_and_embed(tokens, embeddings, index, embed2_index, averaged_embeddings):
# update tokens
if embed2_index > index:
tokens[index] = tokens[index] + " " + tokens[embed2_index]
else:
tokens[index] = tokens[embed2_index] + " " + tokens[index]
# update embeddings
embeddings[index] = averaged_embeddings
# delete old tokens and embeddings
del tokens[embed2_index]
del embeddings[embed2_index]
return tokens, embeddings
# helper func
def preprocessing_helper(tokens, embeddings, e, combine_with):
    index = tokens.index(e)
first, last = False, False
if (index - 1) == -1:
first = True
if (index + 1) == len(tokens):
last = True
embed1 = embeddings[index]
embed2 = []
embed2_index = 0
# the words following these type of words usually have some relation syntactically and semantically
if combine_with == "after":
if last: # check if element is the last element
return tokens, embeddings
embed2_index = index + 1
embed2 = embeddings[embed2_index]
else: # the words before
if first: # check if first element
return tokens, embeddings
embed2_index = index - 1
embed2 = embeddings[embed2_index]
averaged_embeddings = average_two_embeddings_vectors(embed1, embed2)
return update_tok_and_embed(tokens, embeddings, index, embed2_index, averaged_embeddings)
# common tokens that might fit well with other tokens based on syntactic rules of english
# therefore, standardize with these before running the default algorithm
# return updated tokens and embeddings
def syntactic_rules_for_preprocessing(tokens, embeddings, std_length):
# not comprehensive but a start.
combined_after_set = {"a", "an", "the", "some", "each", "all", "to", "for", "in", "on", "of", "about", "with",
"from", "at", "have", "has", "is", "are", "was", "were", "be", "been", "being", "should",
"would", "will", "do", "don't", "did", "no", "not", "my", "his", "her", "your", "their",
"our", "its", "whose", "go", "going", "went", "come", "came", "coming"}
combined_before_set = {"him", "her", "them", "us", ",", ".", "!", "?", "...", ";", "-", "~"}
if len(tokens) > std_length:
for e in tokens:
# average embeddings with the token that follows the current token
if e in combined_after_set:
tokens, embeddings = preprocessing_helper(tokens, embeddings, e, "after")
if len(tokens) == std_length:
break
continue
# avg embedding with the token that precedes the current token
elif e in combined_before_set:
tokens, embeddings = preprocessing_helper(tokens, embeddings, e, "before")
if len(tokens) == std_length:
break
continue
return tokens, embeddings
# takes in tokens list and corresponding embeddings
# shortens the list until the specified length(default 10)
# shortens by averaging the embedding vectors and combining the corresponding tokens
# combined tokens separated by a space even if it's punctuation. e.g. 'end' + '.' -> "end ."
# returns the standardized tokens and embeddings lists
# implementation: averaging some words that might go together first (e.g. "the cat", "to her")
# then, just randomly select tokens and their adjacent token and average those embedding vectors
def standardize_by_averaging(tokens, embeddings, std_length=10):
flag = True
# so as to not change the original lists
tokens = tokens.copy()
embeddings = embeddings.copy()
while len(tokens) > std_length:
# attempt to standardize with some regards to syntactical knowledge first
if flag:
flag = False
tokens, embeddings = syntactic_rules_for_preprocessing(tokens, embeddings, std_length)
continue
length = len(tokens)
        index = random.randint(1, length - 1) # randomized index so the averaging position varies
embed1 = embeddings[index]
embed2 = embeddings[index - 1]
averaged_embeddings = average_two_embeddings_vectors(embed1, embed2)
        tokens, embeddings = update_tok_and_embed(tokens, embeddings, index, index - 1, averaged_embeddings)
return tokens, embeddings
def standardize_by_duplicating(tokens, embeddings, std_length=10):
token_copy, embeddings_copy = tokens[:], embeddings[:]
while len(tokens) < std_length:
# duplicate the whole message once
tokens += token_copy
embeddings += embeddings_copy
return standardize_by_averaging(tokens, embeddings, std_length)
def main():
# fill
long_tokens = ["this", "is", "a", "sentence", "that", "is", "over", "ten", "embeddings",
"long", "and", "that", "there", "are", "punctuations", ".",
"this", "is", "a", "sentence", "that", "is", "over", "ten", "embeddings",
"long", "and", "that", "there", "are", "punctuations", "."]
long_tokens2 = [".", ".", "gonna", "be", "a", "long", "in", "order", "for", "the",
"testing", "of", "the", "code", ".", "there", "will", "be", "some", "weird",
"tokens", "hello", "this", "spellings", "to", "see", "how", "that's", "this", "will", "be", "the"]
long_embeddings = [[1.2, 3.34], [2.3, 3.5], [5.6, 6.6], [5.1, 2.3], [2.3, 4.4], [3.3, 5.8], [8.8, 7.7], [1.1, 2.3],
[9.9, 1.2], [2.1, 2.1], [1.0, 1.0], [1.1, 3.4], [1.2, 3.2], [3.4, 4.0], [1.1, 2.3], [1.1, 1.1],
[1.2, 3.34], [2.3, 3.5], [5.6, 6.6], [5.1, 2.3], [2.3, 4.4], [3.3, 5.8], [8.8, 7.7], [1.1, 2.3],
[9.9, 1.2], [2.1, 2.1], [1.0, 1.0], [1.1, 3.4], [1.2, 3.2], [3.4, 4.0], [1.1, 2.3], [1.1, 1.1]]
# for testing purposes
print("test standardize_by_averaging")
print("before; tokens:\n", long_tokens) # before standardizing
print("before; embeddings:\n", long_embeddings, "\n\n")
tokens, embeddings = standardize_by_averaging(long_tokens, long_embeddings)
print("after; tokens:\n", tokens) # after standardizing
print("after; embeddings:\n", embeddings, "\n\n")
# test standardize_by_averaging #2, uses the same embeddings as test #1
print("test standardize_by_averaging#2")
print("before; tokens:\n", long_tokens2) # before standardizing
print("before; embeddings:\n", long_embeddings, "\n\n")
tokens, embeddings = standardize_by_averaging(long_tokens2, long_embeddings)
print("after; tokens:\n", tokens) # after standardizing
print("after; embeddings:\n", embeddings, "\n\n")
# standardize by duplicating
short_tokens = ["This", "is", "looking", "Bullish"]
short_embeddings = [[1.2, 3.34], [2.3, 3.5], [5.6, 6.6], [5.1, 2.3]]
# for testing purposes
print("test standardize_by_duplicating")
print("before; tokens:\n", short_tokens) # before standardizing
print("before embeddings:\n", short_embeddings, "\n\n")
tokens, embeddings = standardize_by_duplicating(short_tokens, short_embeddings)
print("after; tokens:\n", tokens) # after standardizing
print("after; embeddings:\n", embeddings, "\n\n")
return
if __name__ == "__main__":
# execute only if run as a script
main()
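# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file): a minimal, hedged usage
# example for the two standardizers above. The token strings, embeddings, and
# seed below are illustrative assumptions. Two invariants should hold: the
# outputs have exactly std_length entries, and splitting the merged tokens on
# spaces recovers the original word sequence in order.
def _sketch_standardizers():
    random.seed(0)  # make the random merge positions reproducible
    toks = ["the", "cat", "sat", "on", "the", "mat", ".",
            "it", "was", "quite", "soft", "today"]
    embs = [[float(i), float(i) + 0.5] for i in range(len(toks))]
    out_toks, out_embs = standardize_by_averaging(toks, embs, std_length=5)
    assert len(out_toks) == len(out_embs) == 5
    assert " ".join(out_toks).split() == toks  # words and order preserved
    short_toks, short_embs = standardize_by_duplicating(
        ["This", "is", "Bullish"], [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], std_length=5)
    assert len(short_toks) == len(short_embs) == 5
# call _sketch_standardizers() to run the checks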
| 40.423469
| 119
| 0.616686
|
import random
def average_two_embeddings_vectors(a, b):
avg_embeddings = []
i = 0
for embed in a:
z = (embed + b[i]) / 2.0
avg_embeddings.append(z)
i += 1
return avg_embeddings
def update_tok_and_embed(tokens, embeddings, index, embed2_index, averaged_embeddings):
if embed2_index > index:
tokens[index] = tokens[index] + " " + tokens[embed2_index]
else:
tokens[index] = tokens[embed2_index] + " " + tokens[index]
embeddings[index] = averaged_embeddings
del tokens[embed2_index]
del embeddings[embed2_index]
return tokens, embeddings
def preprocessing_helper(tokens, embeddings, e, combine_with):
    index = tokens.index(e)
first, last = False, False
if (index - 1) == -1:
first = True
if (index + 1) == len(tokens):
last = True
embed1 = embeddings[index]
embed2 = []
embed2_index = 0
if combine_with == "after":
if last:
return tokens, embeddings
embed2_index = index + 1
embed2 = embeddings[embed2_index]
else:
if first:
return tokens, embeddings
embed2_index = index - 1
embed2 = embeddings[embed2_index]
averaged_embeddings = average_two_embeddings_vectors(embed1, embed2)
return update_tok_and_embed(tokens, embeddings, index, embed2_index, averaged_embeddings)
def syntactic_rules_for_preprocessing(tokens, embeddings, std_length):
combined_after_set = {"a", "an", "the", "some", "each", "all", "to", "for", "in", "on", "of", "about", "with",
"from", "at", "have", "has", "is", "are", "was", "were", "be", "been", "being", "should",
"would", "will", "do", "don't", "did", "no", "not", "my", "his", "her", "your", "their",
"our", "its", "whose", "go", "going", "went", "come", "came", "coming"}
combined_before_set = {"him", "her", "them", "us", ",", ".", "!", "?", "...", ";", "-", "~"}
if len(tokens) > std_length:
        for e in list(tokens):
            if e not in tokens:
                continue
# average embeddings with the token that follows the current token
if e in combined_after_set:
tokens, embeddings = preprocessing_helper(tokens, embeddings, e, "after")
if len(tokens) == std_length:
break
continue
# avg embedding with the token that precedes the current token
elif e in combined_before_set:
tokens, embeddings = preprocessing_helper(tokens, embeddings, e, "before")
if len(tokens) == std_length:
break
continue
return tokens, embeddings
# takes in tokens list and corresponding embeddings
# shortens the list until the specified length(default 10)
# shortens by averaging the embedding vectors and combining the corresponding tokens
# combined tokens separated by a space even if it's punctuation. e.g. 'end' + '.' -> "end ."
def standardize_by_averaging(tokens, embeddings, std_length=10):
flag = True
tokens = tokens.copy()
embeddings = embeddings.copy()
while len(tokens) > std_length:
if flag:
flag = False
tokens, embeddings = syntactic_rules_for_preprocessing(tokens, embeddings, std_length)
continue
length = len(tokens)
index = random.randint(1, length - 1)
embed1 = embeddings[index]
embed2 = embeddings[index - 1]
averaged_embeddings = average_two_embeddings_vectors(embed1, embed2)
        tokens, embeddings = update_tok_and_embed(tokens, embeddings, index, index - 1, averaged_embeddings)
return tokens, embeddings
def standardize_by_duplicating(tokens, embeddings, std_length=10):
token_copy, embeddings_copy = tokens[:], embeddings[:]
while len(tokens) < std_length:
tokens += token_copy
embeddings += embeddings_copy
return standardize_by_averaging(tokens, embeddings, std_length)
def main():
long_tokens = ["this", "is", "a", "sentence", "that", "is", "over", "ten", "embeddings",
"long", "and", "that", "there", "are", "punctuations", ".",
"this", "is", "a", "sentence", "that", "is", "over", "ten", "embeddings",
"long", "and", "that", "there", "are", "punctuations", "."]
long_tokens2 = [".", ".", "gonna", "be", "a", "long", "in", "order", "for", "the",
"testing", "of", "the", "code", ".", "there", "will", "be", "some", "weird",
"tokens", "hello", "this", "spellings", "to", "see", "how", "that's", "this", "will", "be", "the"]
long_embeddings = [[1.2, 3.34], [2.3, 3.5], [5.6, 6.6], [5.1, 2.3], [2.3, 4.4], [3.3, 5.8], [8.8, 7.7], [1.1, 2.3],
[9.9, 1.2], [2.1, 2.1], [1.0, 1.0], [1.1, 3.4], [1.2, 3.2], [3.4, 4.0], [1.1, 2.3], [1.1, 1.1],
[1.2, 3.34], [2.3, 3.5], [5.6, 6.6], [5.1, 2.3], [2.3, 4.4], [3.3, 5.8], [8.8, 7.7], [1.1, 2.3],
[9.9, 1.2], [2.1, 2.1], [1.0, 1.0], [1.1, 3.4], [1.2, 3.2], [3.4, 4.0], [1.1, 2.3], [1.1, 1.1]]
# for testing purposes
print("test standardize_by_averaging")
print("before; tokens:\n", long_tokens) # before standardizing
print("before; embeddings:\n", long_embeddings, "\n\n")
tokens, embeddings = standardize_by_averaging(long_tokens, long_embeddings)
print("after; tokens:\n", tokens) # after standardizing
print("after; embeddings:\n", embeddings, "\n\n")
# test standardize_by_averaging #2, uses the same embeddings as test #1
print("test standardize_by_averaging#2")
print("before; tokens:\n", long_tokens2) # before standardizing
print("before; embeddings:\n", long_embeddings, "\n\n")
tokens, embeddings = standardize_by_averaging(long_tokens2, long_embeddings)
print("after; tokens:\n", tokens) # after standardizing
print("after; embeddings:\n", embeddings, "\n\n")
# standardize by duplicating
short_tokens = ["This", "is", "looking", "Bullish"]
short_embeddings = [[1.2, 3.34], [2.3, 3.5], [5.6, 6.6], [5.1, 2.3]]
# for testing purposes
print("test standardize_by_duplicating")
print("before; tokens:\n", short_tokens) # before standardizing
print("before embeddings:\n", short_embeddings, "\n\n")
tokens, embeddings = standardize_by_duplicating(short_tokens, short_embeddings)
print("after; tokens:\n", tokens) # after standardizing
print("after; embeddings:\n", embeddings, "\n\n")
return
if __name__ == "__main__":
# execute only if run as a script
main()
| true
| true
|
790b55eeaf7fbc18fc81ddc57a5a3363dc5632f7
| 3,674
|
py
|
Python
|
myproject/myproject/settings.py
|
panipp/cs459_2019
|
7ec13e69904180b88405de02fe2d9b7001e55557
|
[
"BSD-2-Clause"
] | null | null | null |
myproject/myproject/settings.py
|
panipp/cs459_2019
|
7ec13e69904180b88405de02fe2d9b7001e55557
|
[
"BSD-2-Clause"
] | null | null | null |
myproject/myproject/settings.py
|
panipp/cs459_2019
|
7ec13e69904180b88405de02fe2d9b7001e55557
|
[
"BSD-2-Clause"
] | null | null | null |
"""
Django settings for myproject project.
Generated by 'django-admin startproject' using Django 2.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@=ozqw=1&(*&)$sdvl_o1#r=+kf=5s#0g^#mo72^ctn1mmzse$'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'myapp',
'django_extensions',
'import_export',
]
IMPORT_EXPORT_USE_TRANSACTIONS = True
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'myproject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'myproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
STATIC_ROOT = './var/www/myProject/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
MEDIA_URL = '/media/'
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
'qr-code': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'qr-code-cache',
'TIMEOUT': 3600
}
}
QR_CODE_CACHE_ALIAS = 'qr-code'
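# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original settings): how application code
# would typically reach the 'qr-code' cache configured above. The key and
# payload are illustrative assumptions; entries expire after TIMEOUT (3600 s).
#
#     from django.core.cache import caches
#     qr_cache = caches['qr-code']              # the LocMemCache at 'qr-code-cache'
#     qr_cache.set('qr:example-token', b'<png bytes>')
#     png = qr_cache.get('qr:example-token')    # None once the entry has expired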
| 24.993197
| 91
| 0.685084
|
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = '@=ozqw=1&(*&)$sdvl_o1#r=+kf=5s#0g^#mo72^ctn1mmzse$'
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'myapp',
'django_extensions',
'import_export',
]
IMPORT_EXPORT_USE_TRANSACTIONS = True
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'myproject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'myproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
STATIC_ROOT = './var/www/myProject/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
MEDIA_URL = '/media/'
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
'qr-code': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'qr-code-cache',
'TIMEOUT': 3600
}
}
QR_CODE_CACHE_ALIAS = 'qr-code'
| true
| true
|
790b56c8fb09bb60030fb0e92ccaee1eab80ddda
| 3,395
|
py
|
Python
|
test/functional/wallet_zapwallettxes.py
|
gingfinger/divi99
|
3b0602b41bf35fb1e30c12b1bf06ef1da58935eb
|
[
"MIT"
] | null | null | null |
test/functional/wallet_zapwallettxes.py
|
gingfinger/divi99
|
3b0602b41bf35fb1e30c12b1bf06ef1da58935eb
|
[
"MIT"
] | null | null | null |
test/functional/wallet_zapwallettxes.py
|
gingfinger/divi99
|
3b0602b41bf35fb1e30c12b1bf06ef1da58935eb
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Divi Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the zapwallettxes functionality.
- start two divid nodes
- create two transactions on node 0 - one is confirmed and one is unconfirmed.
- restart node 0 and verify that both the confirmed and the unconfirmed
transactions are still available.
- restart node 0 with zapwallettxes and persistmempool, and verify that both
the confirmed and the unconfirmed transactions are still available.
- restart node 0 with just zapwallettxes and verify that the confirmed
transactions are still available, but that the unconfirmed transaction has
been zapped.
"""
from test_framework.test_framework import DiviTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
wait_until,
)
class ZapWalletTXesTest(DiviTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.log.info("Mining blocks...")
self.nodes[0].generate(1)
self.sync_all()
self.nodes[1].generate(100)
self.sync_all()
# This transaction will be confirmed
txid1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 10)
self.nodes[0].generate(1)
self.sync_all()
# This transaction will not be confirmed
txid2 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 20)
# Confirmed and unconfirmed transactions are now in the wallet.
assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1)
assert_equal(self.nodes[0].gettransaction(txid2)['txid'], txid2)
# Stop-start node0. Both confirmed and unconfirmed transactions remain in the wallet.
self.stop_node(0)
self.start_node(0)
assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1)
assert_equal(self.nodes[0].gettransaction(txid2)['txid'], txid2)
# Stop node0 and restart with zapwallettxes and persistmempool. The unconfirmed
# transaction is zapped from the wallet, but is re-added when the mempool is reloaded.
self.stop_node(0)
self.start_node(0, ["-persistmempool=1", "-zapwallettxes=2"])
wait_until(lambda: self.nodes[0].getmempoolinfo()['size'] == 1, timeout=3)
self.nodes[0].syncwithvalidationinterfacequeue() # Flush mempool to wallet
assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1)
assert_equal(self.nodes[0].gettransaction(txid2)['txid'], txid2)
# Stop node0 and restart with zapwallettxes, but not persistmempool.
# The unconfirmed transaction is zapped and is no longer in the wallet.
self.stop_node(0)
self.start_node(0, ["-zapwallettxes=2"])
        # tx1 is still available because it was confirmed
assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1)
# This will raise an exception because the unconfirmed transaction has been zapped
assert_raises_rpc_error(-5, 'Invalid or non-wallet transaction id', self.nodes[0].gettransaction, txid2)
if __name__ == '__main__':
ZapWalletTXesTest().main()
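# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original test): the restart steps above
# lean on test_framework.util.wait_until to poll the mempool; this is a
# generic, hedged version of that polling pattern, not the framework's code.
import time

def poll_until(predicate, timeout=3, interval=0.05):
    """Re-evaluate predicate() until it is truthy or the timeout expires."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return
        time.sleep(interval)
    raise AssertionError("condition not met within {}s".format(timeout))

# usage mirrors the call above:
#     poll_until(lambda: node.getmempoolinfo()['size'] == 1, timeout=3)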
| 40.903614
| 112
| 0.706922
|
from test_framework.test_framework import DiviTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
wait_until,
)
class ZapWalletTXesTest(DiviTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.log.info("Mining blocks...")
self.nodes[0].generate(1)
self.sync_all()
self.nodes[1].generate(100)
self.sync_all()
txid1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 10)
self.nodes[0].generate(1)
self.sync_all()
txid2 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 20)
assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1)
assert_equal(self.nodes[0].gettransaction(txid2)['txid'], txid2)
self.stop_node(0)
self.start_node(0)
assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1)
assert_equal(self.nodes[0].gettransaction(txid2)['txid'], txid2)
self.stop_node(0)
self.start_node(0, ["-persistmempool=1", "-zapwallettxes=2"])
wait_until(lambda: self.nodes[0].getmempoolinfo()['size'] == 1, timeout=3)
self.nodes[0].syncwithvalidationinterfacequeue()
assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1)
assert_equal(self.nodes[0].gettransaction(txid2)['txid'], txid2)
self.stop_node(0)
self.start_node(0, ["-zapwallettxes=2"])
assert_equal(self.nodes[0].gettransaction(txid1)['txid'], txid1)
assert_raises_rpc_error(-5, 'Invalid or non-wallet transaction id', self.nodes[0].gettransaction, txid2)
if __name__ == '__main__':
ZapWalletTXesTest().main()
| true
| true
|
790b5724c8920e5ce2e89dddd49d96d69307028a
| 2,336
|
py
|
Python
|
test/integration/test_focal.py
|
tdchaitanya/kornia
|
6dd16563f66f979c7a95846ef86678894b7d54fd
|
[
"Apache-2.0"
] | 1
|
2021-08-04T14:42:26.000Z
|
2021-08-04T14:42:26.000Z
|
test/integration/test_focal.py
|
tdchaitanya/kornia
|
6dd16563f66f979c7a95846ef86678894b7d54fd
|
[
"Apache-2.0"
] | null | null | null |
test/integration/test_focal.py
|
tdchaitanya/kornia
|
6dd16563f66f979c7a95846ef86678894b7d54fd
|
[
"Apache-2.0"
] | null | null | null |
import logging
import pytest
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import kornia
logger = logging.getLogger(__name__)
class TestIntegrationFocalLoss:
# optimization
thresh = 1e-1
lr = 1e-3
num_iterations = 1000
num_classes = 2
# focal loss
alpha = 2.0
gamma = 2.0
def generate_sample(self, base_target, std_val=0.1):
target = base_target.float() / base_target.max()
noise = std_val * torch.rand(1, 1, 6, 5)
return target + noise
@staticmethod
def init_weights(m):
if isinstance(m, nn.Conv2d):
torch.nn.init.xavier_uniform_(m.weight)
def test_conv2d_relu(self):
# we generate base sample
target = torch.LongTensor(1, 6, 5).fill_(0)
for i in range(1, self.num_classes):
target[..., i:-i, i:-i] = i
m = nn.Sequential(
nn.Conv2d(1, self.num_classes, kernel_size=3, padding=1),
nn.ReLU(True),
)
m.apply(self.init_weights)
optimizer = optim.Adam(m.parameters(), lr=self.lr)
criterion = kornia.losses.FocalLoss(
alpha=self.alpha, gamma=self.gamma, reduction='mean')
# NOTE: uncomment to compare against vanilla cross entropy
# criterion = nn.CrossEntropyLoss()
for iter_id in range(self.num_iterations):
sample = self.generate_sample(target)
output = m(sample)
loss = criterion(output, target)
logger.debug("Loss: {}".format(loss.item()))
optimizer.zero_grad()
loss.backward()
optimizer.step()
sample = self.generate_sample(target)
output_argmax = torch.argmax(m(sample), dim=1)
logger.debug("Output argmax: \n{}".format(output_argmax))
# TODO(edgar): replace by IoU or find a more stable solution
# for this test. The issue is that depending on
# the seed to initialize the weights affects the
# final results and slows down the convergence of
# the algorithm.
val = F.mse_loss(output_argmax.float(), target.float())
if not val.item() < self.thresh:
pytest.xfail("Wrong seed or initial weight values.")
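# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original test): the loss exercised above
# follows Lin et al.'s focal loss, FL(p_t) = -alpha * (1 - p_t)**gamma * log(p_t).
# This is a minimal mean-reduction version for intuition only; kornia's actual
# implementation may handle alpha and reductions differently.
def _sketch_focal_loss(logits, target, alpha=2.0, gamma=2.0):
    # logits: (N, C, H, W); target: (N, H, W) of class indices
    log_p = F.log_softmax(logits, dim=1)
    log_pt = log_p.gather(1, target.unsqueeze(1)).squeeze(1)  # log-prob of the true class
    pt = log_pt.exp()
    return (-alpha * (1.0 - pt) ** gamma * log_pt).mean()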
| 30.736842
| 70
| 0.602312
|
import logging
import pytest
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import kornia
logger = logging.getLogger(__name__)
class TestIntegrationFocalLoss:
thresh = 1e-1
lr = 1e-3
num_iterations = 1000
num_classes = 2
alpha = 2.0
gamma = 2.0
def generate_sample(self, base_target, std_val=0.1):
target = base_target.float() / base_target.max()
noise = std_val * torch.rand(1, 1, 6, 5)
return target + noise
@staticmethod
def init_weights(m):
if isinstance(m, nn.Conv2d):
torch.nn.init.xavier_uniform_(m.weight)
def test_conv2d_relu(self):
target = torch.LongTensor(1, 6, 5).fill_(0)
for i in range(1, self.num_classes):
target[..., i:-i, i:-i] = i
m = nn.Sequential(
nn.Conv2d(1, self.num_classes, kernel_size=3, padding=1),
nn.ReLU(True),
)
m.apply(self.init_weights)
optimizer = optim.Adam(m.parameters(), lr=self.lr)
criterion = kornia.losses.FocalLoss(
alpha=self.alpha, gamma=self.gamma, reduction='mean')
for iter_id in range(self.num_iterations):
sample = self.generate_sample(target)
output = m(sample)
loss = criterion(output, target)
logger.debug("Loss: {}".format(loss.item()))
optimizer.zero_grad()
loss.backward()
optimizer.step()
sample = self.generate_sample(target)
output_argmax = torch.argmax(m(sample), dim=1)
logger.debug("Output argmax: \n{}".format(output_argmax))
val = F.mse_loss(output_argmax.float(), target.float())
if not val.item() < self.thresh:
pytest.xfail("Wrong seed or initial weight values.")
| true
| true
|
790b58d295bd25056c4766a0bfb060d252c6ca7b
| 6,316
|
py
|
Python
|
DP_FL_recreate/opacus/tests/layers_grad_test.py
|
RosaYen/DP_FL_recreation
|
30607645d9633483a4afa50c0e00bea65c0fb355
|
[
"Apache-2.0"
] | null | null | null |
DP_FL_recreate/opacus/tests/layers_grad_test.py
|
RosaYen/DP_FL_recreation
|
30607645d9633483a4afa50c0e00bea65c0fb355
|
[
"Apache-2.0"
] | null | null | null |
DP_FL_recreate/opacus/tests/layers_grad_test.py
|
RosaYen/DP_FL_recreation
|
30607645d9633483a4afa50c0e00bea65c0fb355
|
[
"Apache-2.0"
] | 1
|
2020-12-09T05:56:32.000Z
|
2020-12-09T05:56:32.000Z
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
import torch
import torch.nn as nn
from opacus import PerSampleGradientClipper
from opacus.dp_model_inspector import DPModelInspector
from opacus.layers import DPLSTM, DPMultiheadAttention, SequenceBias
from opacus.utils.clipping import ConstantFlatClipper
class LayersGradTest(unittest.TestCase):
def setUp(self):
self.validator = DPModelInspector()
def _reset_seeds(self):
torch.manual_seed(1337)
torch.cuda.manual_seed(1337)
def _run_once(self, layer, criterion, *args):
self._reset_seeds()
layer.zero_grad()
output = layer(*args)
if isinstance(output, tuple):
output = output[0]
output = output.squeeze()
y = torch.zeros_like(output)
loss = criterion(output, y)
loss.backward()
def _check_one_layer(self, layer, *args, **kwargs):
self._check_one_layer_with_criterion(
layer, nn.L1Loss(reduction="mean"), *args, **kwargs
)
self._check_one_layer_with_criterion(
layer, nn.L1Loss(reduction="sum"), *args, **kwargs
)
def _check_one_layer_with_criterion(self, layer, criterion, *args, **kwargs):
self.validator.validate(layer)
for name, param in layer.named_parameters():
if ("weight" in name) or ("bias" in name):
nn.init.uniform_(param, -1.0, 1.0)
# run without DP
self._run_once(layer, criterion, *args)
vanilla_run_grads = [
(name, p.grad.detach())
for (name, p) in layer.named_parameters()
if p.requires_grad
]
# run with DP
clipper = PerSampleGradientClipper(
layer,
ConstantFlatClipper(1e9),
batch_first=kwargs.get("batch_first", True),
loss_reduction=criterion.reduction,
)
self._run_once(layer, criterion, *args)
for param_name, param in layer.named_parameters():
if param.requires_grad:
self.assertTrue(
hasattr(param, "grad_sample"),
f"Per-sample gradients haven't been computed for {param_name}",
)
clipper.clip_and_accumulate()
clipper.pre_step()
private_run_grads = [
(name, p.grad.detach())
for (name, p) in layer.named_parameters()
if p.requires_grad
]
# compare
for (vanilla_name, vanilla_grad), (private_name, private_grad) in zip(
vanilla_run_grads, private_run_grads
):
assert vanilla_name == private_name
self.assertTrue(
torch.allclose(vanilla_grad, private_grad, atol=10e-5, rtol=10e-3),
f"Gradient mismatch. Parameter: {layer}.{vanilla_name}, loss: {criterion.reduction}",
)
clipper.close()
def test_conv1d(self):
x = torch.randn(64, 16, 24)
layer = nn.Conv1d(16, 32, 3, 1)
self._check_one_layer(layer, x)
def test_conv2d(self):
x = torch.randn(64, 16, 24, 24)
layer = nn.Conv2d(16, 32, 3, 1)
self._check_one_layer(layer, x)
def test_linear(self):
self._check_one_layer(nn.Linear(8, 4), torch.randn(16, 8))
self._check_one_layer(nn.Linear(8, 4), torch.randn(16, 8, 8))
def test_layernorm(self):
x = torch.randn(64, 16, 24, 24)
self._check_one_layer(nn.LayerNorm(24), x)
self._check_one_layer(nn.LayerNorm((24, 24)), x)
self._check_one_layer(nn.LayerNorm((16, 24, 24)), x)
def test_groupnorm(self):
self._check_one_layer(nn.GroupNorm(4, 16), torch.randn(64, 16, 10))
self._check_one_layer(nn.GroupNorm(4, 16), torch.randn(64, 16, 10, 9))
self._check_one_layer(nn.GroupNorm(4, 16), torch.randn(64, 16, 10, 9, 8))
def test_instancenorm(self):
self._check_one_layer(
nn.InstanceNorm1d(16, affine=True), torch.randn(64, 16, 10)
)
self._check_one_layer(
nn.InstanceNorm2d(16, affine=True), torch.randn(64, 16, 10, 9)
)
self._check_one_layer(
nn.InstanceNorm3d(16, affine=True), torch.randn(64, 16, 10, 9, 8)
)
def test_sequence_bias(self):
x = torch.randn(4, 3, 2)
layer = SequenceBias(2)
self._check_one_layer(layer, x, batch_first=False)
def test_multihead_attention(self):
x = torch.randn(16, 24, 32)
layer = DPMultiheadAttention(32, 1)
self._check_one_layer(layer, x, x, x, batch_first=False)
layer = DPMultiheadAttention(32, 1, bias=True, add_bias_kv=True, dropout=0.05)
self._check_one_layer(layer, x, x, x, batch_first=False)
layer = DPMultiheadAttention(32, 1, bias=True, add_bias_kv=True)
self._check_one_layer(layer, x, x, x, batch_first=False)
layer = DPMultiheadAttention(
32, 1, bias=True, add_bias_kv=True, add_zero_attn=True
)
self._check_one_layer(layer, x, x, x, batch_first=False)
q = torch.randn(16, 24, 32)
k = torch.randn(20, 24, 28)
v = torch.randn(20, 24, 28)
layer = DPMultiheadAttention(
32, 1, bias=True, add_bias_kv=True, add_zero_attn=True, kdim=28, vdim=28
)
self._check_one_layer(layer, q, k, v, batch_first=False)
def test_embedding(self):
layer = nn.Embedding(256, 100)
x1 = torch.randint(0, 255, (128, 42)).long()
x2 = torch.randint(0, 255, (64,)).long()
self._check_one_layer(layer, x1)
self._check_one_layer(layer, x2)
def test_lstm_batch_first(self):
# input size : 25 output size : 12 minibatch : 30 sequence length : 20
# Test batch_first=True case
layer = DPLSTM(25, 12, 1, batch_first=True)
x = torch.randn(30, 20, 25)
self._check_one_layer(layer, x, batch_first=True)
def test_lstm_batch_second(self):
# input size : 25 output size : 12 minibatch : 30 sequence length : 20
# Test batch_first=False case
layer = DPLSTM(25, 12, 1, batch_first=False)
x = torch.randn(20, 30, 25)
self._check_one_layer(layer, x, batch_first=False)
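# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original test): what "per-sample gradient"
# means here, computed naively with one backward pass per example. Opacus
# fills param.grad_sample via vectorized hooks; this loop only illustrates
# the quantity the test compares. The layer shape and data are assumptions.
def _sketch_per_sample_grads():
    layer = nn.Linear(8, 4)
    x = torch.randn(16, 8)
    criterion = nn.L1Loss(reduction="sum")
    per_sample = []
    for i in range(x.shape[0]):
        layer.zero_grad()
        out = layer(x[i:i + 1])
        criterion(out, torch.zeros_like(out)).backward()
        per_sample.append(layer.weight.grad.clone())
    # with a sum-reduced loss, the batch gradient is the sum of per-sample ones
    layer.zero_grad()
    out = layer(x)
    criterion(out, torch.zeros_like(out)).backward()
    assert torch.allclose(torch.stack(per_sample).sum(0), layer.weight.grad, atol=1e-5)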
| 34.326087
| 101
| 0.611146
|
import unittest
import torch
import torch.nn as nn
from opacus import PerSampleGradientClipper
from opacus.dp_model_inspector import DPModelInspector
from opacus.layers import DPLSTM, DPMultiheadAttention, SequenceBias
from opacus.utils.clipping import ConstantFlatClipper
class LayersGradTest(unittest.TestCase):
def setUp(self):
self.validator = DPModelInspector()
def _reset_seeds(self):
torch.manual_seed(1337)
torch.cuda.manual_seed(1337)
def _run_once(self, layer, criterion, *args):
self._reset_seeds()
layer.zero_grad()
output = layer(*args)
if isinstance(output, tuple):
output = output[0]
output = output.squeeze()
y = torch.zeros_like(output)
loss = criterion(output, y)
loss.backward()
def _check_one_layer(self, layer, *args, **kwargs):
self._check_one_layer_with_criterion(
layer, nn.L1Loss(reduction="mean"), *args, **kwargs
)
self._check_one_layer_with_criterion(
layer, nn.L1Loss(reduction="sum"), *args, **kwargs
)
def _check_one_layer_with_criterion(self, layer, criterion, *args, **kwargs):
self.validator.validate(layer)
for name, param in layer.named_parameters():
if ("weight" in name) or ("bias" in name):
nn.init.uniform_(param, -1.0, 1.0)
self._run_once(layer, criterion, *args)
vanilla_run_grads = [
(name, p.grad.detach())
for (name, p) in layer.named_parameters()
if p.requires_grad
]
clipper = PerSampleGradientClipper(
layer,
ConstantFlatClipper(1e9),
batch_first=kwargs.get("batch_first", True),
loss_reduction=criterion.reduction,
)
self._run_once(layer, criterion, *args)
for param_name, param in layer.named_parameters():
if param.requires_grad:
self.assertTrue(
hasattr(param, "grad_sample"),
f"Per-sample gradients haven't been computed for {param_name}",
)
clipper.clip_and_accumulate()
clipper.pre_step()
private_run_grads = [
(name, p.grad.detach())
for (name, p) in layer.named_parameters()
if p.requires_grad
]
# compare
for (vanilla_name, vanilla_grad), (private_name, private_grad) in zip(
vanilla_run_grads, private_run_grads
):
assert vanilla_name == private_name
self.assertTrue(
torch.allclose(vanilla_grad, private_grad, atol=10e-5, rtol=10e-3),
f"Gradient mismatch. Parameter: {layer}.{vanilla_name}, loss: {criterion.reduction}",
)
clipper.close()
def test_conv1d(self):
x = torch.randn(64, 16, 24)
layer = nn.Conv1d(16, 32, 3, 1)
self._check_one_layer(layer, x)
def test_conv2d(self):
x = torch.randn(64, 16, 24, 24)
layer = nn.Conv2d(16, 32, 3, 1)
self._check_one_layer(layer, x)
def test_linear(self):
self._check_one_layer(nn.Linear(8, 4), torch.randn(16, 8))
self._check_one_layer(nn.Linear(8, 4), torch.randn(16, 8, 8))
def test_layernorm(self):
x = torch.randn(64, 16, 24, 24)
self._check_one_layer(nn.LayerNorm(24), x)
self._check_one_layer(nn.LayerNorm((24, 24)), x)
self._check_one_layer(nn.LayerNorm((16, 24, 24)), x)
def test_groupnorm(self):
self._check_one_layer(nn.GroupNorm(4, 16), torch.randn(64, 16, 10))
self._check_one_layer(nn.GroupNorm(4, 16), torch.randn(64, 16, 10, 9))
self._check_one_layer(nn.GroupNorm(4, 16), torch.randn(64, 16, 10, 9, 8))
def test_instancenorm(self):
self._check_one_layer(
nn.InstanceNorm1d(16, affine=True), torch.randn(64, 16, 10)
)
self._check_one_layer(
nn.InstanceNorm2d(16, affine=True), torch.randn(64, 16, 10, 9)
)
self._check_one_layer(
nn.InstanceNorm3d(16, affine=True), torch.randn(64, 16, 10, 9, 8)
)
def test_sequence_bias(self):
x = torch.randn(4, 3, 2)
layer = SequenceBias(2)
self._check_one_layer(layer, x, batch_first=False)
def test_multihead_attention(self):
x = torch.randn(16, 24, 32)
layer = DPMultiheadAttention(32, 1)
self._check_one_layer(layer, x, x, x, batch_first=False)
layer = DPMultiheadAttention(32, 1, bias=True, add_bias_kv=True, dropout=0.05)
self._check_one_layer(layer, x, x, x, batch_first=False)
layer = DPMultiheadAttention(32, 1, bias=True, add_bias_kv=True)
self._check_one_layer(layer, x, x, x, batch_first=False)
layer = DPMultiheadAttention(
32, 1, bias=True, add_bias_kv=True, add_zero_attn=True
)
self._check_one_layer(layer, x, x, x, batch_first=False)
q = torch.randn(16, 24, 32)
k = torch.randn(20, 24, 28)
v = torch.randn(20, 24, 28)
layer = DPMultiheadAttention(
32, 1, bias=True, add_bias_kv=True, add_zero_attn=True, kdim=28, vdim=28
)
self._check_one_layer(layer, q, k, v, batch_first=False)
def test_embedding(self):
layer = nn.Embedding(256, 100)
x1 = torch.randint(0, 255, (128, 42)).long()
x2 = torch.randint(0, 255, (64,)).long()
self._check_one_layer(layer, x1)
self._check_one_layer(layer, x2)
def test_lstm_batch_first(self):
# input size : 25 output size : 12 minibatch : 30 sequence length : 20
# Test batch_first=True case
layer = DPLSTM(25, 12, 1, batch_first=True)
x = torch.randn(30, 20, 25)
self._check_one_layer(layer, x, batch_first=True)
def test_lstm_batch_second(self):
# input size : 25 output size : 12 minibatch : 30 sequence length : 20
# Test batch_first=False case
layer = DPLSTM(25, 12, 1, batch_first=False)
x = torch.randn(20, 30, 25)
self._check_one_layer(layer, x, batch_first=False)
| true
| true
|
790b597128276634ad90a628db9ba321c71a29d3
| 3,503
|
py
|
Python
|
python/threadpool_example.py
|
russcollier/SamplesAndNuggets
|
c69f82801bf8d1fddc06cb3f25c87d4b3f1554f9
|
[
"MIT"
] | null | null | null |
python/threadpool_example.py
|
russcollier/SamplesAndNuggets
|
c69f82801bf8d1fddc06cb3f25c87d4b3f1554f9
|
[
"MIT"
] | null | null | null |
python/threadpool_example.py
|
russcollier/SamplesAndNuggets
|
c69f82801bf8d1fddc06cb3f25c87d4b3f1554f9
|
[
"MIT"
] | null | null | null |
import logging
import urllib.request
from datetime import datetime
from multiprocessing import Manager, Value
from multiprocessing.pool import ThreadPool
class EntryPoint:
Log = logging.getLogger(__name__)
def __init__(self):
self.__total_size = Value('i', 0)
self.__sizes_by_file = Manager().dict()
def main(self):
urls = ['https://code.jquery.com/jquery-git.js',
'https://code.jquery.com/jquery-3.1.0.js',
'https://code.jquery.com/jquery-3.0.0.js',
'https://code.jquery.com/jquery-2.2.0.js',
'https://code.jquery.com/jquery-2.1.0.js',
'https://code.jquery.com/jquery-2.0.0.js',
'https://code.jquery.com/jquery-1.12.0.js',
'https://code.jquery.com/jquery-1.11.0.js',
'https://code.jquery.com/jquery-1.10.0.js',
'https://code.jquery.com/jquery-1.9.0.js',
'https://code.jquery.com/jquery-1.7.0.js',
'https://code.jquery.com/jquery-1.6.js',
'https://code.jquery.com/jquery-1.5.js',
'https://code.jquery.com/jquery-1.4.js',
'https://code.jquery.com/jquery-1.3.js',
'https://code.jquery.com/jquery-1.2.js',
'https://code.jquery.com/jquery-1.1.js',
'https://code.jquery.com/jquery-1.0.js']
self.__compute_serially(urls)
self.__compute_with_threadpool(urls)
def __compute_serially(self, urls):
start_time = datetime.utcnow()
sizes_by_file = dict()
for url in urls:
sizes_by_file[url] = self.__get_size_of_file(url)
self.Log.info('Total size of all {0} URLs: {1}'.format(len(urls), sum(sizes_by_file.values())))
time_diff = datetime.utcnow() - start_time
self.Log.info("Serial version took: {0}".format(self.get_timespan(time_diff.seconds)))
def __compute_with_threadpool(self, urls):
start_time = datetime.utcnow()
        with ThreadPool(processes=8) as pool:  # context manager tears the pool down when done
            pool.map(self.__get_size_of_file_in_parallel, urls)
self.Log.info('Total size of all {0} URLs: {1}'.format(len(urls), sum(self.__sizes_by_file.values())))
time_diff = datetime.utcnow() - start_time
self.Log.info("Threadpool version took: {0}".format(self.get_timespan(time_diff.seconds)))
def __get_size_of_file_in_parallel(self, url):
self.__sizes_by_file[url] = self.__get_size_of_file(url)
# with self.__total_size.get_lock():
# self.__total_size.value += self.__get_size_of_file(url)
@staticmethod
def __get_size_of_file(url):
with urllib.request.urlopen(url) as f:
contents = f.read()
return len(contents)
@staticmethod
def get_timespan(seconds):
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
return "%d:%02d:%02d" % (h, m, s)
def setup_logging():
root_logger = logging.getLogger()
root_logger.setLevel(logging.INFO)
logger = logging.StreamHandler()
logger.setFormatter(logging.Formatter('%(asctime)s %(levelname)s - [%(thread)d] %(name)s - %(message)s'))
root_logger.addHandler(logger)
def main():
setup_logging()
log = logging.getLogger()
try:
EntryPoint().main()
except Exception as e:
log.exception(e)
if __name__ == '__main__':
main()
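# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file): the same fan-out with the
# stdlib's concurrent.futures API, collecting sizes straight from map() so no
# Manager-backed shared dict is needed. Equivalent intent, not a drop-in patch.
from concurrent.futures import ThreadPoolExecutor

def sketch_total_size(urls):
    def size_of(url):
        with urllib.request.urlopen(url) as f:
            return len(f.read())
    with ThreadPoolExecutor(max_workers=8) as pool:
        return sum(pool.map(size_of, urls))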
| 35.383838
| 111
| 0.591493
|
import logging
import urllib.request
from datetime import datetime
from multiprocessing import Manager, Value
from multiprocessing.pool import ThreadPool
class EntryPoint:
Log = logging.getLogger(__name__)
def __init__(self):
self.__total_size = Value('i', 0)
self.__sizes_by_file = Manager().dict()
def main(self):
urls = ['https://code.jquery.com/jquery-git.js',
'https://code.jquery.com/jquery-3.1.0.js',
'https://code.jquery.com/jquery-3.0.0.js',
'https://code.jquery.com/jquery-2.2.0.js',
'https://code.jquery.com/jquery-2.1.0.js',
'https://code.jquery.com/jquery-2.0.0.js',
'https://code.jquery.com/jquery-1.12.0.js',
'https://code.jquery.com/jquery-1.11.0.js',
'https://code.jquery.com/jquery-1.10.0.js',
'https://code.jquery.com/jquery-1.9.0.js',
'https://code.jquery.com/jquery-1.7.0.js',
'https://code.jquery.com/jquery-1.6.js',
'https://code.jquery.com/jquery-1.5.js',
'https://code.jquery.com/jquery-1.4.js',
'https://code.jquery.com/jquery-1.3.js',
'https://code.jquery.com/jquery-1.2.js',
'https://code.jquery.com/jquery-1.1.js',
'https://code.jquery.com/jquery-1.0.js']
self.__compute_serially(urls)
self.__compute_with_threadpool(urls)
def __compute_serially(self, urls):
start_time = datetime.utcnow()
sizes_by_file = dict()
for url in urls:
sizes_by_file[url] = self.__get_size_of_file(url)
self.Log.info('Total size of all {0} URLs: {1}'.format(len(urls), sum(sizes_by_file.values())))
time_diff = datetime.utcnow() - start_time
self.Log.info("Serial version took: {0}".format(self.get_timespan(time_diff.seconds)))
def __compute_with_threadpool(self, urls):
start_time = datetime.utcnow()
        with ThreadPool(processes=8) as pool:
            pool.map(self.__get_size_of_file_in_parallel, urls)
self.Log.info('Total size of all {0} URLs: {1}'.format(len(urls), sum(self.__sizes_by_file.values())))
time_diff = datetime.utcnow() - start_time
self.Log.info("Threadpool version took: {0}".format(self.get_timespan(time_diff.seconds)))
def __get_size_of_file_in_parallel(self, url):
self.__sizes_by_file[url] = self.__get_size_of_file(url)
@staticmethod
def __get_size_of_file(url):
with urllib.request.urlopen(url) as f:
contents = f.read()
return len(contents)
@staticmethod
def get_timespan(seconds):
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
return "%d:%02d:%02d" % (h, m, s)
def setup_logging():
root_logger = logging.getLogger()
root_logger.setLevel(logging.INFO)
logger = logging.StreamHandler()
logger.setFormatter(logging.Formatter('%(asctime)s %(levelname)s - [%(thread)d] %(name)s - %(message)s'))
root_logger.addHandler(logger)
def main():
setup_logging()
log = logging.getLogger()
try:
EntryPoint().main()
except Exception as e:
log.exception(e)
if __name__ == '__main__':
main()
| true
| true
|
790b59a37e99d90e02621b138312db60daee8b61
| 10,928
|
py
|
Python
|
4_Coverage_Evaluation/MNIST/utils.py
|
j-chan-hkust/deep_testing_of_advanced_learning_systems
|
ec535e2b4dc489d407b664a138d3f5262b71d21e
|
[
"MIT"
] | null | null | null |
4_Coverage_Evaluation/MNIST/utils.py
|
j-chan-hkust/deep_testing_of_advanced_learning_systems
|
ec535e2b4dc489d407b664a138d3f5262b71d21e
|
[
"MIT"
] | null | null | null |
4_Coverage_Evaluation/MNIST/utils.py
|
j-chan-hkust/deep_testing_of_advanced_learning_systems
|
ec535e2b4dc489d407b664a138d3f5262b71d21e
|
[
"MIT"
] | null | null | null |
# some utils taken from the DeepXplore Implementation
import random
import sys  # needed for the sys.exit() calls in create_shortened_model
from collections import defaultdict
import numpy as np
from keras import backend as K
from keras.models import Model
from keras.preprocessing import image
from keras import models, layers, activations
from scipy.spatial.distance import mahalanobis
from numpy.linalg import inv
from itertools import combinations
# loads an MNIST image
def preprocess_image(img_path):
img = image.load_img(img_path, target_size=(28, 28), grayscale=True)
input_img_data = image.img_to_array(img)
input_img_data = input_img_data.reshape(1, 28, 28, 1)
input_img_data = input_img_data.astype('float32')
input_img_data /= 255
# input_img_data = preprocess_input(input_img_data) # final input shape = (1,224,224,3)
return input_img_data
def init_neuron_cov_dict(model, model_layer_dict):
for layer in model.layers:
if 'flatten' in layer.name or 'input' in layer.name:
continue
for index in range(layer.output_shape[-1]):
model_layer_dict[(layer.name, index)] = False
def neuron_to_cover(model_layer_dict):
not_covered = [(layer_name, index) for (layer_name, index), v in model_layer_dict.items() if not v]
if not_covered:
layer_name, index = random.choice(not_covered)
else:
        layer_name, index = random.choice(list(model_layer_dict.keys()))  # dict views aren't indexable
return layer_name, index
def get_neuron_coverage(model_layer_dict):
covered_neurons = len([v for v in model_layer_dict.values() if v])
total_neurons = len(model_layer_dict)
return covered_neurons, total_neurons, covered_neurons / float(total_neurons)
def update_neuron_coverage(input_data, model, model_layer_dict, threshold=0):
layer_names = [layer.name for layer in model.layers if
'flatten' not in layer.name and 'input' not in layer.name]
intermediate_layer_model = Model(inputs=model.input,
outputs=[model.get_layer(layer_name).output for layer_name in layer_names])
intermediate_layer_outputs = intermediate_layer_model.predict(input_data)
for i, intermediate_layer_output in enumerate(intermediate_layer_outputs):
scaled = scale(intermediate_layer_output[0])
for num_neuron in range(scaled.shape[-1]):
if np.mean(scaled[..., num_neuron]) > threshold and not model_layer_dict[(layer_names[i], num_neuron)]:
model_layer_dict[(layer_names[i], num_neuron)] = True
print("new coverage found")
# TODO: test this
# gets the distance of the point from the mean in standard deviations
# note: assumes the points are normally distributed
def distance(point, mean, covarianceMatrix):
return mahalanobis(point, mean, inv(covarianceMatrix))
# an adaptation of some code from deepXplore
# initializes a dictionary that will store which qudrants have been covered
# model - the model we are looking to covered
# layer_index - the layer we are exploring
# group_size - size of the group of neurons we are analyzing
# model_layer_dict - the object we want to initialize
def init_orthant_cov_dict(model, layer_index, group_size, model_layer_dict):
layer = model.layers[layer_index]
# some error handling
if 'flatten' in layer.name or 'input' in layer.name:
print("error in init_dict: layer_index points to the wrong layer")
# we initialize each combination
for neuron_group in combinations(range(layer.output_shape[-1]), group_size): # layer.output_shape[-1] returns the number of total_neurons
        for orthant in range(2 ** group_size):  # ^ is XOR in Python; a group of n neurons has 2**n orthants
model_layer_dict[(neuron_group, orthant)] = False
def get_orthant_coverage(model_layer_dict):
covered_orthants = len([v for v in model_layer_dict.values() if v])
total_orthants = len(model_layer_dict)
return covered_orthants, total_orthants, covered_orthants / float(total_orthants)
# this is meant to pick an orthant that is not covered
# we actually don't need to use this just yet, maybe if I decide to implement for DeepXplore
def next_orthant_to_cover(model_layer_dict):
not_covered = [(neuron_group, orthant) for (neuron_group, orthant), v in model_layer_dict.items() if not v]
if not_covered:
neuron_group, orthant = random.choice(not_covered)
else:
        neuron_group, orthant = random.choice(list(model_layer_dict.keys()))  # dict views aren't indexable
return neuron_group, orthant
# creates a shortened model that ends at the nth layer, and has no activation function
# same code as from collect_data
def create_shortened_model(model, layer_depth):
# we get the neuron output for the penultimate layer for each neuron
# implemented with help from the suggestion at: https://stackoverflow.com/questions/45492318/keras-retrieve-value-of-node-before-activation-function
# we recreate the model, delete layers up to and including the layer we want to analyze, add a blank layer with no activation, and then import the old weights to this layer.
#make a new model
# some simple input checks
    if layer_depth < 0:
        print('layer depth must be positive!')
        sys.exit()
    if layer_depth > len(model.layers):
        print('layer depth too large!')
        sys.exit()
# save the original weights
wgts = model.layers[layer_depth].get_weights()
nthLayerNeurons = model.layers[layer_depth].output_shape[1]
#remove layers up to the nth layer
for i in range(len(model.layers)-layer_depth):
model.pop()
    model.summary()
# add new layer with no activation
model.add(layers.Dense(nthLayerNeurons,activation = None))
# with the new layer, load the previous weights
model.layers[layer_depth].set_weights(wgts)
# get the output of this new model.
return Model(inputs=model.input, outputs=model.layers[layer_depth].output )
#this code updates the coverage given a certain input
def update_orthant_coverage(input_data, shortened_model, model_layer_dict, mean_vector, covariance_matrix, group_size=1, sd_threshold=1):
layer_outputs = shortened_model.predict(input_data) #get the output
    # we use layer_outputs[0] to turn the (1, n) prediction array into a single row
for neuron_group in combinations(range(layer_outputs.shape[-1]),group_size):
group_output = np.asarray([layer_outputs[0][i] for i in neuron_group]) #get a list of the outputs
# we do binary addition to get the correct orthant index.
# for example, if we only have a 2 variables, we have 4 quadrants. we need to classify into 0,1,2,3 index
#init the tools to find which orthant is being explored
orthant = 0
add = int(1)
for neuron_index in neuron_group:
if layer_outputs[0][neuron_index] > mean_vector[neuron_index]:
orthant += add
add *= 2
if model_layer_dict[(neuron_group,orthant)] == True:
continue #don't do the expensive action of loading the group cov, group mean, and calculating the distance
        group_mean = np.asarray([mean_vector[i] for i in neuron_group])  # means of just this group's neurons
        # slice the full covariance matrix down to the rows/columns of this group
        group_cov_matrix = np.asarray([[covariance_matrix[j][i] for i in neuron_group] for j in neuron_group])
if(distance(group_output, group_mean, group_cov_matrix)>sd_threshold):
model_layer_dict[(neuron_group,orthant)] = True
# just a simple check if we have full coverage works for any coverage
def full_coverage(model_layer_dict):
if False in model_layer_dict.values():
return False
return True
# from here on is code from deepxplore
# util function to convert a tensor into a valid image
def deprocess_image(x):
x *= 255
x = np.clip(x, 0, 255).astype('uint8')
return x.reshape(x.shape[1], x.shape[2]) # original shape (1,img_rows, img_cols,1)
def normalize(x):
# utility function to normalize a tensor by its L2 norm
return x / (K.sqrt(K.mean(K.square(x))) + 1e-5)
def constraint_occl(gradients, start_point, rect_shape):
new_grads = np.zeros_like(gradients)
new_grads[:, start_point[0]:start_point[0] + rect_shape[0],
start_point[1]:start_point[1] + rect_shape[1]] = gradients[:, start_point[0]:start_point[0] + rect_shape[0],
start_point[1]:start_point[1] + rect_shape[1]]
return new_grads
def constraint_light(gradients):
new_grads = np.ones_like(gradients)
grad_mean = np.mean(gradients)
return grad_mean * new_grads
def constraint_black(gradients, rect_shape=(6, 6)):
start_point = (
random.randint(0, gradients.shape[1] - rect_shape[0]), random.randint(0, gradients.shape[2] - rect_shape[1]))
new_grads = np.zeros_like(gradients)
patch = gradients[:, start_point[0]:start_point[0] + rect_shape[0], start_point[1]:start_point[1] + rect_shape[1]]
if np.mean(patch) < 0:
new_grads[:, start_point[0]:start_point[0] + rect_shape[0],
start_point[1]:start_point[1] + rect_shape[1]] = -np.ones_like(patch)
return new_grads
def init_coverage_tables(model1, model1_layer_index, model2, model2_layer_index, model3, model3_layer_index, group_size = 1):
model_layer_dict1 = defaultdict(bool)
model_layer_dict2 = defaultdict(bool)
model_layer_dict3 = defaultdict(bool)
    init_orthant_cov_dict(model1, model1_layer_index, group_size, model_layer_dict1)
    init_orthant_cov_dict(model2, model2_layer_index, group_size, model_layer_dict2)
    init_orthant_cov_dict(model3, model3_layer_index, group_size, model_layer_dict3)
return model_layer_dict1, model_layer_dict2, model_layer_dict3
def init_neuron_coverage_table(model1):
model_layer_dict1 = defaultdict(bool)
init_neuron_cov_dict(model1, model_layer_dict1)
return model_layer_dict1
def init_orthant_coverage_table(model1, layer_index, group_size):
model_layer_dict1 = defaultdict(bool)
init_orthant_cov_dict(model1, layer_index, group_size, model_layer_dict1)
return model_layer_dict1
def scale(intermediate_layer_output, rmax=1, rmin=0):
X_std = (intermediate_layer_output - intermediate_layer_output.min()) / (
intermediate_layer_output.max() - intermediate_layer_output.min())
X_scaled = X_std * (rmax - rmin) + rmin
return X_scaled
def fired(model, layer_name, index, input_data, threshold=0):
intermediate_layer_model = Model(inputs=model.input, outputs=model.get_layer(layer_name).output)
intermediate_layer_output = intermediate_layer_model.predict(input_data)[0]
scaled = scale(intermediate_layer_output)
if np.mean(scaled[..., index]) > threshold:
return True
return False
def diverged(predictions1, predictions2, predictions3, target):
# if predictions2 == predictions3 == target and predictions1 != target:
if not predictions1 == predictions2 == predictions3:
return True
return False
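# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file): the binary-addition trick
# in update_orthant_coverage maps a group of neuron outputs to one of
# 2**group_size orthants around the mean, and Mahalanobis distance gates the
# update. All numbers below are made-up illustrations for a 3-neuron group.
def _sketch_orthant_encoding():
    group_output = np.array([0.9, -0.2, 1.5])
    group_mean = np.array([0.5, 0.1, 1.0])
    group_cov = np.diag([0.04, 0.09, 0.25])
    # bit i is set iff neuron i fires above its mean -> index in [0, 2**3)
    orthant = sum(2 ** i for i, (o, m) in enumerate(zip(group_output, group_mean)) if o > m)
    assert orthant == 0b101  # neurons 0 and 2 are above their means
    # only mark the orthant covered when the point lies beyond sd_threshold "sigmas"
    assert mahalanobis(group_output, group_mean, inv(group_cov)) > 1.0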
| 42.521401
| 177
| 0.726116
|
import random
import sys
from collections import defaultdict
import numpy as np
from keras import backend as K
from keras.models import Model
from keras.preprocessing import image
from keras import models, layers, activations
from scipy.spatial.distance import mahalanobis
from numpy.linalg import inv
from itertools import combinations
def preprocess_image(img_path):
img = image.load_img(img_path, target_size=(28, 28), grayscale=True)
input_img_data = image.img_to_array(img)
input_img_data = input_img_data.reshape(1, 28, 28, 1)
input_img_data = input_img_data.astype('float32')
input_img_data /= 255
    return input_img_data
def init_neuron_cov_dict(model, model_layer_dict):
for layer in model.layers:
if 'flatten' in layer.name or 'input' in layer.name:
continue
for index in range(layer.output_shape[-1]):
model_layer_dict[(layer.name, index)] = False
def neuron_to_cover(model_layer_dict):
not_covered = [(layer_name, index) for (layer_name, index), v in model_layer_dict.items() if not v]
if not_covered:
layer_name, index = random.choice(not_covered)
else:
        layer_name, index = random.choice(list(model_layer_dict.keys()))
return layer_name, index
def get_neuron_coverage(model_layer_dict):
covered_neurons = len([v for v in model_layer_dict.values() if v])
total_neurons = len(model_layer_dict)
return covered_neurons, total_neurons, covered_neurons / float(total_neurons)
def update_neuron_coverage(input_data, model, model_layer_dict, threshold=0):
layer_names = [layer.name for layer in model.layers if
'flatten' not in layer.name and 'input' not in layer.name]
intermediate_layer_model = Model(inputs=model.input,
outputs=[model.get_layer(layer_name).output for layer_name in layer_names])
intermediate_layer_outputs = intermediate_layer_model.predict(input_data)
for i, intermediate_layer_output in enumerate(intermediate_layer_outputs):
scaled = scale(intermediate_layer_output[0])
for num_neuron in range(scaled.shape[-1]):
if np.mean(scaled[..., num_neuron]) > threshold and not model_layer_dict[(layer_names[i], num_neuron)]:
model_layer_dict[(layer_names[i], num_neuron)] = True
print("new coverage found")
def distance(point, mean, covarianceMatrix):
return mahalanobis(point, mean, inv(covarianceMatrix))
def init_orthant_cov_dict(model, layer_index, group_size, model_layer_dict):
layer = model.layers[layer_index]
if 'flatten' in layer.name or 'input' in layer.name:
print("error in init_dict: layer_index points to the wrong layer")
for neuron_group in combinations(range(layer.output_shape[-1]), group_size):
        for orthant in range(2 ** group_size):
model_layer_dict[(neuron_group, orthant)] = False
def get_orthant_coverage(model_layer_dict):
covered_orthants = len([v for v in model_layer_dict.values() if v])
total_orthants = len(model_layer_dict)
return covered_orthants, total_orthants, covered_orthants / float(total_orthants)
def next_orthant_to_cover(model_layer_dict):
not_covered = [(neuron_group, orthant) for (neuron_group, orthant), v in model_layer_dict.items() if not v]
if not_covered:
neuron_group, orthant = random.choice(not_covered)
else:
        neuron_group, orthant = random.choice(list(model_layer_dict.keys()))
return neuron_group, orthant
# creates a shortened model that ends at the nth layer, and has no activation function
# same code as from collect_data
def create_shortened_model(model, layer_depth):
# we get the neuron output for the penultimate layer for each neuron
# implemented with help from the suggestion at: https://stackoverflow.com/questions/45492318/keras-retrieve-value-of-node-before-activation-function
# we recreate the model, delete layers up to and including the layer we want to analyze, add a blank layer with no activation, and then import the old weights to this layer.
#make a new model
# some simple input checks
    if layer_depth < 0:
        print('layer depth must be positive!')
        sys.exit()
    if layer_depth > len(model.layers):
        print('layer depth too large!')
        sys.exit()
# save the original weights
wgts = model.layers[layer_depth].get_weights()
nthLayerNeurons = model.layers[layer_depth].output_shape[1]
#remove layers up to the nth layer
for i in range(len(model.layers)-layer_depth):
model.pop()
    model.summary()
# add new layer with no activation
model.add(layers.Dense(nthLayerNeurons,activation = None))
# with the new layer, load the previous weights
model.layers[layer_depth].set_weights(wgts)
# get the output of this new model.
return Model(inputs=model.input, outputs=model.layers[layer_depth].output )
#this code updates the coverage given a certain input
def update_orthant_coverage(input_data, shortened_model, model_layer_dict, mean_vector, covariance_matrix, group_size=1, sd_threshold=1):
layer_outputs = shortened_model.predict(input_data) #get the output
    # we use layer_outputs[0] to turn the (1, n) prediction array into a single row
for neuron_group in combinations(range(layer_outputs.shape[-1]),group_size):
group_output = np.asarray([layer_outputs[0][i] for i in neuron_group]) #get a list of the outputs
# we do binary addition to get the correct orthant index.
# for example, if we only have a 2 variables, we have 4 quadrants. we need to classify into 0,1,2,3 index
#init the tools to find which orthant is being explored
orthant = 0
add = int(1)
for neuron_index in neuron_group:
if layer_outputs[0][neuron_index] > mean_vector[neuron_index]:
orthant += add
add *= 2
if model_layer_dict[(neuron_group,orthant)] == True:
continue #don't do the expensive action of loading the group cov, group mean, and calculating the distance
group_mean = np.asarray([mean_vector[i] for i in neuron_group])
group_cov_matrix = np.asarray([[covariance_matrix[j][i] for i in neuron_group] for j in neuron_group])
        if distance(group_output, group_mean, group_cov_matrix) > sd_threshold:
model_layer_dict[(neuron_group,orthant)] = True
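# --- editor's hedged example ---------------------------------------------
# The orthant index computed above is a binary encoding of which side of
# the mean each neuron's output falls on. The same encoding, standalone
# (helper name hypothetical):
def _demo_orthant_index(outputs, means):
    orthant, add = 0, 1
    for out, mean in zip(outputs, means):
        if out > mean:
            orthant += add
        add *= 2
    return orthant  # _demo_orthant_index([1.0, -1.0], [0.0, 0.0]) == 1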
def full_coverage(model_layer_dict):
if False in model_layer_dict.values():
return False
return True
def deprocess_image(x):
x *= 255
x = np.clip(x, 0, 255).astype('uint8')
return x.reshape(x.shape[1], x.shape[2])
def normalize(x):
return x / (K.sqrt(K.mean(K.square(x))) + 1e-5)
def constraint_occl(gradients, start_point, rect_shape):
new_grads = np.zeros_like(gradients)
new_grads[:, start_point[0]:start_point[0] + rect_shape[0],
start_point[1]:start_point[1] + rect_shape[1]] = gradients[:, start_point[0]:start_point[0] + rect_shape[0],
start_point[1]:start_point[1] + rect_shape[1]]
return new_grads
def constraint_light(gradients):
new_grads = np.ones_like(gradients)
grad_mean = np.mean(gradients)
return grad_mean * new_grads
def constraint_black(gradients, rect_shape=(6, 6)):
start_point = (
random.randint(0, gradients.shape[1] - rect_shape[0]), random.randint(0, gradients.shape[2] - rect_shape[1]))
new_grads = np.zeros_like(gradients)
patch = gradients[:, start_point[0]:start_point[0] + rect_shape[0], start_point[1]:start_point[1] + rect_shape[1]]
if np.mean(patch) < 0:
new_grads[:, start_point[0]:start_point[0] + rect_shape[0],
start_point[1]:start_point[1] + rect_shape[1]] = -np.ones_like(patch)
return new_grads
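# --- editor's hedged example ---------------------------------------------
# The constraint_* helpers shape the gradient so the perturbation stays in
# a region: constraint_occl keeps only a rect_shape window. A quick check
# on a dummy gradient (helper name hypothetical):
def _demo_constraint_occl():
    grads = np.ones((1, 28, 28))
    masked = constraint_occl(grads, start_point=(5, 5), rect_shape=(6, 6))
    return masked.sum()  # == 36.0; everything outside the 6x6 window is zero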
def init_coverage_tables(model1, model1_layer_index, model2, model2_layer_index, model3, model3_layer_index, group_size = 1):
model_layer_dict1 = defaultdict(bool)
model_layer_dict2 = defaultdict(bool)
model_layer_dict3 = defaultdict(bool)
init_dict(model1, model1_layer_index, group_size, model_layer_dict1)
init_dict(model2, model2_layer_index, group_size, model_layer_dict2)
init_dict(model3, model3_layer_index, group_size, model_layer_dict3)
return model_layer_dict1, model_layer_dict2, model_layer_dict3
def init_neuron_coverage_table(model1):
model_layer_dict1 = defaultdict(bool)
init_neuron_cov_dict(model1, model_layer_dict1)
return model_layer_dict1
def init_orthant_coverage_table(model1, layer_index, group_size):
model_layer_dict1 = defaultdict(bool)
init_orthant_cov_dict(model1, layer_index, group_size, model_layer_dict1)
return model_layer_dict1
def scale(intermediate_layer_output, rmax=1, rmin=0):
X_std = (intermediate_layer_output - intermediate_layer_output.min()) / (
intermediate_layer_output.max() - intermediate_layer_output.min())
X_scaled = X_std * (rmax - rmin) + rmin
return X_scaled
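# --- editor's hedged example ---------------------------------------------
# scale() is plain min-max normalisation; one concrete check
# (helper name hypothetical):
def _demo_scale():
    return scale(np.array([2.0, 4.0, 6.0]))  # -> array([0. , 0.5, 1. ])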
def fired(model, layer_name, index, input_data, threshold=0):
intermediate_layer_model = Model(inputs=model.input, outputs=model.get_layer(layer_name).output)
intermediate_layer_output = intermediate_layer_model.predict(input_data)[0]
scaled = scale(intermediate_layer_output)
if np.mean(scaled[..., index]) > threshold:
return True
return False
def diverged(predictions1, predictions2, predictions3, target):
    # target is kept for API compatibility; it is unused here
    return not (predictions1 == predictions2 == predictions3)
| true
| true
|
790b5a2f3bee4f74cdcbd9e18c7d8e42cc5430da
| 1,024
|
py
|
Python
|
ElevatorBot/commands/miscellaneous/poll/remove.py
|
LukasSchmid97/elevatorbot
|
77c45d8945c735c8dce9bc75563086bce265dc18
|
[
"MIT"
] | null | null | null |
ElevatorBot/commands/miscellaneous/poll/remove.py
|
LukasSchmid97/elevatorbot
|
77c45d8945c735c8dce9bc75563086bce265dc18
|
[
"MIT"
] | 89
|
2021-08-12T15:23:05.000Z
|
2022-01-11T12:33:21.000Z
|
ElevatorBot/commands/miscellaneous/poll/remove.py
|
LukasSchmid97/elevatorbot
|
77c45d8945c735c8dce9bc75563086bce265dc18
|
[
"MIT"
] | 1
|
2021-10-20T20:07:22.000Z
|
2021-10-20T20:07:22.000Z
|
from dis_snek.models import InteractionContext, OptionTypes, slash_command, slash_option
from ElevatorBot.commandHelpers.subCommandTemplates import poll_sub_command
from ElevatorBot.commands.base import BaseScale
from ElevatorBot.core.misc.poll import Poll
class PollRemove(BaseScale):
@slash_command(
**poll_sub_command,
sub_cmd_name="remove",
sub_cmd_description="Remove an existing option from an existing poll",
)
@slash_option(
name="poll_id", description="The ID of the poll", opt_type=OptionTypes.INTEGER, required=True, min_value=0
)
@slash_option(
name="option",
description="The name the option should have",
opt_type=OptionTypes.STRING,
required=True,
)
async def remove(self, ctx: InteractionContext, poll_id: int, option: str):
poll = await Poll.from_poll_id(poll_id=poll_id, ctx=ctx)
if poll:
await poll.remove_option(ctx=ctx, option=option)
def setup(client):
PollRemove(client)
| 32
| 114
| 0.710938
|
from dis_snek.models import InteractionContext, OptionTypes, slash_command, slash_option
from ElevatorBot.commandHelpers.subCommandTemplates import poll_sub_command
from ElevatorBot.commands.base import BaseScale
from ElevatorBot.core.misc.poll import Poll
class PollRemove(BaseScale):
@slash_command(
**poll_sub_command,
sub_cmd_name="remove",
sub_cmd_description="Remove an existing option from an existing poll",
)
@slash_option(
name="poll_id", description="The ID of the poll", opt_type=OptionTypes.INTEGER, required=True, min_value=0
)
@slash_option(
name="option",
description="The name the option should have",
opt_type=OptionTypes.STRING,
required=True,
)
async def remove(self, ctx: InteractionContext, poll_id: int, option: str):
poll = await Poll.from_poll_id(poll_id=poll_id, ctx=ctx)
if poll:
await poll.remove_option(ctx=ctx, option=option)
def setup(client):
PollRemove(client)
| true
| true
|
790b5c68fae56c3a8a03c1c98b95774e0ef411e2
| 13,562
|
py
|
Python
|
pyvisa-py/protocols/usbtmc.py
|
circuitfox/pyvisa-py
|
66e41dd6ad5683f087ca8caca62df754b00648da
|
[
"MIT"
] | null | null | null |
pyvisa-py/protocols/usbtmc.py
|
circuitfox/pyvisa-py
|
66e41dd6ad5683f087ca8caca62df754b00648da
|
[
"MIT"
] | null | null | null |
pyvisa-py/protocols/usbtmc.py
|
circuitfox/pyvisa-py
|
66e41dd6ad5683f087ca8caca62df754b00648da
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
pyvisa-py.protocols.usbtmc
~~~~~~~~~~~~~~~~~~~~~~~~~~
Implements Session to control USBTMC instruments
Loosely based on PyUSBTMC:python module to handle USB-TMC(Test and
Measurement class) devices.
    by Noboru Yamamoto, Accl. Lab, KEK, JAPAN
This file is an offspring of the Lantz Project.
:copyright: 2014-2018 by PyVISA-py Authors, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
from __future__ import (division, unicode_literals, print_function,
absolute_import)
import enum
from pyvisa.compat import struct
import time
from collections import namedtuple
import warnings
import usb
from .usbutil import (find_devices, find_interfaces, find_endpoint,
usb_find_desc)
import sys
if sys.version_info < (3, 2):
def array_to_bytes(arr):
return arr.tostring()
else:
def array_to_bytes(arr):
return arr.tobytes()
class MsgID(enum.IntEnum):
"""From USB-TMC table2
"""
dev_dep_msg_out = 1
request_dev_dep_msg_in = 2
dev_dep_msg_in = 2
vendor_specific_out = 126
request_vendor_specific_in = 127
vendor_specific_in = 127
trigger = 128 # for USB488
class Request(enum.IntEnum):
initiate_abort_bulk_out = 1
check_abort_bulk_out_status = 2
initiate_abort_bulk_in = 3
check_abort_bulk_in_status = 4
initiate_clear = 5
check_clear_status = 6
get_capabilities = 7
indicator_pulse = 64
class UsbTmcStatus(enum.IntEnum):
success = 1
pending = 2
failed = 0x80
transfer_not_in_progress = 0x81
split_not_in_progress = 0x82
split_in_progress = 0x83
def find_tmc_devices(vendor=None, product=None, serial_number=None,
custom_match=None, **kwargs):
"""Find connected USBTMC devices. See usbutil.find_devices for more info.
"""
def is_usbtmc(dev):
if custom_match and not custom_match(dev):
return False
return bool(find_interfaces(dev, bInterfaceClass=0xfe,
bInterfaceSubClass=3))
return find_devices(vendor, product, serial_number, is_usbtmc, **kwargs)
class BulkOutMessage(object):
"""The Host uses the Bulk-OUT endpoint to send USBTMC command messages to
the device.
"""
@staticmethod
def build_array(btag, eom, chunk):
size = len(chunk)
return (struct.pack('BBBx', MsgID.dev_dep_msg_out, btag,
~btag & 0xFF) +
struct.pack("<LBxxx", size, eom) +
chunk +
b'\0' * ((4 - size) % 4))
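# --- editor's hedged example ---------------------------------------------
# What build_array above produces for a 5-byte payload with EOM set: a
# 12-byte DEV_DEP_MSG_OUT header followed by the payload padded to a 4-byte
# boundary. Uses only `struct` and the class above; helper name hypothetical.
def _demo_bulk_out_bytes():
    msg = BulkOutMessage.build_array(btag=1, eom=1, chunk=b'*IDN?')
    # 01 01 fe 00 | 05 00 00 00 01 00 00 00 | 2a 49 44 4e 3f 00 00 00
    assert len(msg) == 12 + 8
    return msg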
class BulkInMessage(namedtuple('BulkInMessage', 'msgid btag btaginverse '
'transfer_size transfer_attributes data')):
"""The Host uses the Bulk-IN endpoint to read USBTMC response messages from
the device.
The Host must first send a USBTMC command message that expects a response
before attempting to read a USBTMC response message.
"""
@classmethod
def from_bytes(cls, data):
msgid, btag, btaginverse = struct.unpack_from('BBBx', data)
if msgid != MsgID.dev_dep_msg_in:
warnings.warn('Unexpected MsgID format. Consider updating the device\'s firmware. See https://github.com/pyvisa/pyvisa-py/issues/20')
return BulkInMessage.from_quirky(data)
transfer_size, transfer_attributes = struct.unpack_from('<LBxxx', data, 4)
# Truncate data to the specified length (discard padding).
data = data[12:12+transfer_size]
return cls(msgid, btag, btaginverse, transfer_size,
transfer_attributes, data)
@classmethod
def from_quirky(cls, data):
"""Constructs a correct response for quirky devices"""
msgid, btag, btaginverse = struct.unpack_from('BBBx', data)
data = data.rstrip(b'\x00')
        # check whether it contains a ';' and if so, throw away the first 12 bytes
if ';' in str(data):
transfer_size, transfer_attributes = struct.unpack_from('<LBxxx', data, 4)
data = data[12:]
else:
transfer_size = 0
transfer_attributes = 1
return cls(msgid, btag, btaginverse, transfer_size, transfer_attributes, data)
@staticmethod
def build_array(btag, transfer_size, term_char=None):
"""
:param transfer_size:
:param btag:
:param term_char:
:return:
"""
if term_char is None:
transfer_attributes = 0
term_char = 0
else:
transfer_attributes = 2
return (struct.pack('BBBx', MsgID.request_dev_dep_msg_in, btag,
~btag & 0xFF) +
struct.pack("<LBBxx", transfer_size, transfer_attributes,
term_char))
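# --- editor's hedged example ---------------------------------------------
# A round trip through the class above: build a REQUEST_DEV_DEP_MSG_IN
# header, then parse a hand-crafted DEV_DEP_MSG_IN response carrying b'ok!'.
# The response bytes are hypothetical, not from a real device.
def _demo_bulk_in_roundtrip():
    req = BulkInMessage.build_array(btag=2, transfer_size=64)
    assert len(req) == 12
    resp = (struct.pack('BBBx', MsgID.dev_dep_msg_in, 2, ~2 & 0xFF) +
            struct.pack('<LBxxx', 3, 1) + b'ok!' + b'\x00')
    parsed = BulkInMessage.from_bytes(resp)
    return parsed.data  # == b'ok!'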
class USBRaw(object):
"""Base class for drivers that communicate with instruments
via usb port using pyUSB
"""
#: Configuration number to be used. If None, the default will be used.
CONFIGURATION = None
    #: Interface index to be used
INTERFACE = (0, 0)
#: Receive and Send endpoints to be used. If None the first IN (or OUT)
#: BULK endpoint will be used.
ENDPOINTS = (None, None)
find_devices = staticmethod(find_devices)
def __init__(self, vendor=None, product=None, serial_number=None,
device_filters=None, timeout=None, **kwargs):
super(USBRaw, self).__init__()
# Timeout expressed in ms as an integer and limited to 2**32-1
# If left to None pyusb will use its default value
self.timeout = timeout
device_filters = device_filters or {}
devices = list(self.find_devices(vendor, product, serial_number, None,
**device_filters))
if not devices:
raise ValueError('No device found.')
elif len(devices) > 1:
desc = '\n'.join(str(dev) for dev in devices)
raise ValueError('{} devices found:\n{}\nPlease narrow the search'
' criteria'.format(len(devices), desc))
self.usb_dev = devices[0]
try:
if self.usb_dev.is_kernel_driver_active(0):
self.usb_dev.detach_kernel_driver(0)
except (usb.core.USBError, NotImplementedError) as e:
pass
try:
self.usb_dev.set_configuration()
except usb.core.USBError as e:
raise Exception('failed to set configuration\n %s' % e)
try:
self.usb_dev.set_interface_altsetting()
except usb.core.USBError as e:
pass
self.usb_intf = self._find_interface(self.usb_dev, self.INTERFACE)
self.usb_recv_ep, self.usb_send_ep =\
self._find_endpoints(self.usb_intf, self.ENDPOINTS)
def _find_interface(self, dev, setting):
return self.usb_dev.get_active_configuration()[self.INTERFACE]
def _find_endpoints(self, interface, setting):
recv, send = setting
if recv is None:
recv = find_endpoint(interface, usb.ENDPOINT_IN,
usb.ENDPOINT_TYPE_BULK)
else:
recv = usb_find_desc(interface, bEndpointAddress=recv)
if send is None:
send = find_endpoint(interface, usb.ENDPOINT_OUT,
usb.ENDPOINT_TYPE_BULK)
else:
send = usb_find_desc(interface, bEndpointAddress=send)
return recv, send
def write(self, data):
"""Send raw bytes to the instrument.
:param data: bytes to be sent to the instrument
:type data: bytes
"""
try:
return self.usb_send_ep.write(data)
except usb.core.USBError as e:
raise ValueError(str(e))
def read(self, size):
"""Receive raw bytes to the instrument.
:param size: number of bytes to receive
:return: received bytes
:return type: bytes
"""
if size <= 0:
size = 1
data = array_to_bytes(self.usb_recv_ep.read(size, self.timeout))
return data
def close(self):
return usb.util.dispose_resources(self.usb_dev)
class USBTMC(USBRaw):
# Maximum number of bytes per transfer (for sending and receiving).
RECV_CHUNK = 1024 ** 2
find_devices = staticmethod(find_tmc_devices)
def __init__(self, vendor=None, product=None, serial_number=None,
**kwargs):
super(USBTMC, self).__init__(vendor, product, serial_number, **kwargs)
self.usb_intr_in = find_endpoint(self.usb_intf, usb.ENDPOINT_IN,
usb.ENDPOINT_TYPE_INTERRUPT)
self.usb_dev.reset()
self.usb_dev.set_configuration()
time.sleep(0.01)
self._get_capabilities()
self._btag = 0
if not (self.usb_recv_ep and self.usb_send_ep):
msg = "TMC device must have both Bulk-In and Bulk-out endpoints."
raise ValueError(msg)
def _get_capabilities(self):
self.usb_dev.ctrl_transfer(
usb.util.build_request_type(usb.util.CTRL_IN,
usb.util.CTRL_TYPE_CLASS,
usb.util.CTRL_RECIPIENT_INTERFACE),
Request.get_capabilities,
0x0000,
self.usb_intf.index,
0x0018,
timeout=self.timeout)
def _find_interface(self, dev, setting):
interfaces = find_interfaces(dev, bInterfaceClass=0xFE,
bInterfaceSubClass=3)
if not interfaces:
raise ValueError('USB TMC interface not found.')
elif len(interfaces) > 1:
pass
return interfaces[0]
def _abort_bulk_in(self, btag):
"""Request that the device abort a pending Bulk-IN operation."""
abort_timeout_ms = 5000
# Send INITIATE_ABORT_BULK_IN.
# According to USBTMC 1.00 4.2.1.4:
# wValue = bTag value of transfer to be aborted
# wIndex = Bulk-IN endpoint
# wLength = 0x0002 (length of device response)
data = self.usb_dev.ctrl_transfer(
usb.util.build_request_type(usb.util.CTRL_IN,
usb.util.CTRL_TYPE_CLASS,
usb.util.CTRL_RECIPIENT_ENDPOINT),
Request.initiate_abort_bulk_in,
btag,
self.usb_recv_ep.bEndpointAddress,
0x0002,
timeout=abort_timeout_ms)
if data[0] != UsbTmcStatus.success:
# Abort Bulk-IN failed. Ignore it.
return
# Read remaining data from Bulk-IN endpoint.
self.usb_recv_ep.read(self.RECV_CHUNK, abort_timeout_ms)
# Send CHECK_ABORT_BULK_IN_STATUS until it completes.
# According to USBTMC 1.00 4.2.1.5:
# wValue = 0x0000
# wIndex = Bulk-IN endpoint
# wLength = 0x0008 (length of device response)
for retry in range(100):
data = self.usb_dev.ctrl_transfer(
usb.util.build_request_type(usb.util.CTRL_IN,
usb.util.CTRL_TYPE_CLASS,
usb.util.CTRL_RECIPIENT_ENDPOINT),
Request.check_abort_bulk_in_status,
0x0000,
self.usb_recv_ep.bEndpointAddress,
0x0008,
timeout=abort_timeout_ms)
if data[0] != UsbTmcStatus.pending:
break
time.sleep(0.05)
def write(self, data):
"""Send raw bytes to the instrument.
:param data: bytes to be sent to the instrument
:type data: bytes
"""
begin, end, size = 0, 0, len(data)
bytes_sent = 0
raw_write = super(USBTMC, self).write
# Send all data via one or more Bulk-OUT transfers.
# Set the EOM flag on the last transfer only.
# Send at least one transfer (possibly empty).
        while (end == 0) or (end < size):
            begin, end = end, end + self.RECV_CHUNK
            self._btag = (self._btag % 255) + 1
            eom = (end >= size)
            # Build the message into a separate variable: reusing `data` here
            # would clobber the remaining payload on multi-chunk transfers,
            # and advancing `end` from the old `begin` stalled the chunk window.
            message = BulkOutMessage.build_array(self._btag, eom, data[begin:end])
            bytes_sent += raw_write(message)
return size
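    # --- editor's hedged note --------------------------------------------
    # bTag must stay in 1..255 and be echoed by the device; the
    # (btag % 255) + 1 update above walks 1, 2, ..., 255, 1, ... A tiny
    # standalone check of that arithmetic (hypothetical helper):
    @staticmethod
    def _demo_btag_cycle():
        btag, seen = 0, []
        for _ in range(256):
            btag = (btag % 255) + 1
            seen.append(btag)
        return seen[0], seen[254], seen[255]  # -> (1, 255, 1)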
def read(self, size):
recv_chunk = self.RECV_CHUNK
if size > 0 and size < recv_chunk:
recv_chunk = size
header_size = 12
max_padding = 511
eom = False
raw_read = super(USBTMC, self).read
raw_write = super(USBTMC, self).write
received = bytearray()
while not eom:
self._btag = (self._btag % 255) + 1
req = BulkInMessage.build_array(self._btag, recv_chunk, None)
raw_write(req)
try:
resp = raw_read(recv_chunk + header_size + max_padding)
response = BulkInMessage.from_bytes(resp)
except (usb.core.USBError, ValueError):
# Abort failed Bulk-IN operation.
self._abort_bulk_in(self._btag)
raise
received.extend(response.data)
# Detect EOM only when device sends all expected bytes.
if len(response.data) >= response.transfer_size:
eom = response.transfer_attributes & 1
return bytes(received)
| 31.393519
| 145
| 0.595045
|
from __future__ import (division, unicode_literals, print_function,
absolute_import)
import enum
from pyvisa.compat import struct
import time
from collections import namedtuple
import warnings
import usb
from .usbutil import (find_devices, find_interfaces, find_endpoint,
usb_find_desc)
import sys
if sys.version_info < (3, 2):
def array_to_bytes(arr):
return arr.tostring()
else:
def array_to_bytes(arr):
return arr.tobytes()
class MsgID(enum.IntEnum):
dev_dep_msg_out = 1
request_dev_dep_msg_in = 2
dev_dep_msg_in = 2
vendor_specific_out = 126
request_vendor_specific_in = 127
vendor_specific_in = 127
trigger = 128
class Request(enum.IntEnum):
initiate_abort_bulk_out = 1
check_abort_bulk_out_status = 2
initiate_abort_bulk_in = 3
check_abort_bulk_in_status = 4
initiate_clear = 5
check_clear_status = 6
get_capabilities = 7
indicator_pulse = 64
class UsbTmcStatus(enum.IntEnum):
success = 1
pending = 2
failed = 0x80
transfer_not_in_progress = 0x81
split_not_in_progress = 0x82
split_in_progress = 0x83
def find_tmc_devices(vendor=None, product=None, serial_number=None,
custom_match=None, **kwargs):
def is_usbtmc(dev):
if custom_match and not custom_match(dev):
return False
return bool(find_interfaces(dev, bInterfaceClass=0xfe,
bInterfaceSubClass=3))
return find_devices(vendor, product, serial_number, is_usbtmc, **kwargs)
class BulkOutMessage(object):
@staticmethod
def build_array(btag, eom, chunk):
size = len(chunk)
return (struct.pack('BBBx', MsgID.dev_dep_msg_out, btag,
~btag & 0xFF) +
struct.pack("<LBxxx", size, eom) +
chunk +
b'\0' * ((4 - size) % 4))
class BulkInMessage(namedtuple('BulkInMessage', 'msgid btag btaginverse '
'transfer_size transfer_attributes data')):
@classmethod
def from_bytes(cls, data):
msgid, btag, btaginverse = struct.unpack_from('BBBx', data)
if msgid != MsgID.dev_dep_msg_in:
warnings.warn('Unexpected MsgID format. Consider updating the device\'s firmware. See https://github.com/pyvisa/pyvisa-py/issues/20')
return BulkInMessage.from_quirky(data)
transfer_size, transfer_attributes = struct.unpack_from('<LBxxx', data, 4)
# Truncate data to the specified length (discard padding).
data = data[12:12+transfer_size]
return cls(msgid, btag, btaginverse, transfer_size,
transfer_attributes, data)
@classmethod
def from_quirky(cls, data):
msgid, btag, btaginverse = struct.unpack_from('BBBx', data)
data = data.rstrip(b'\x00')
        # check whether it contains a ';' and if so, throw away the first 12 bytes
if ';' in str(data):
transfer_size, transfer_attributes = struct.unpack_from('<LBxxx', data, 4)
data = data[12:]
else:
transfer_size = 0
transfer_attributes = 1
return cls(msgid, btag, btaginverse, transfer_size, transfer_attributes, data)
@staticmethod
def build_array(btag, transfer_size, term_char=None):
if term_char is None:
transfer_attributes = 0
term_char = 0
else:
transfer_attributes = 2
return (struct.pack('BBBx', MsgID.request_dev_dep_msg_in, btag,
~btag & 0xFF) +
struct.pack("<LBBxx", transfer_size, transfer_attributes,
term_char))
class USBRaw(object):
#: Configuration number to be used. If None, the default will be used.
CONFIGURATION = None
    #: Interface index to be used
INTERFACE = (0, 0)
#: Receive and Send endpoints to be used. If None the first IN (or OUT)
#: BULK endpoint will be used.
ENDPOINTS = (None, None)
find_devices = staticmethod(find_devices)
def __init__(self, vendor=None, product=None, serial_number=None,
device_filters=None, timeout=None, **kwargs):
super(USBRaw, self).__init__()
# Timeout expressed in ms as an integer and limited to 2**32-1
# If left to None pyusb will use its default value
self.timeout = timeout
device_filters = device_filters or {}
devices = list(self.find_devices(vendor, product, serial_number, None,
**device_filters))
if not devices:
raise ValueError('No device found.')
elif len(devices) > 1:
desc = '\n'.join(str(dev) for dev in devices)
raise ValueError('{} devices found:\n{}\nPlease narrow the search'
' criteria'.format(len(devices), desc))
self.usb_dev = devices[0]
try:
if self.usb_dev.is_kernel_driver_active(0):
self.usb_dev.detach_kernel_driver(0)
except (usb.core.USBError, NotImplementedError) as e:
pass
try:
self.usb_dev.set_configuration()
except usb.core.USBError as e:
raise Exception('failed to set configuration\n %s' % e)
try:
self.usb_dev.set_interface_altsetting()
except usb.core.USBError as e:
pass
self.usb_intf = self._find_interface(self.usb_dev, self.INTERFACE)
self.usb_recv_ep, self.usb_send_ep =\
self._find_endpoints(self.usb_intf, self.ENDPOINTS)
def _find_interface(self, dev, setting):
return self.usb_dev.get_active_configuration()[self.INTERFACE]
def _find_endpoints(self, interface, setting):
recv, send = setting
if recv is None:
recv = find_endpoint(interface, usb.ENDPOINT_IN,
usb.ENDPOINT_TYPE_BULK)
else:
recv = usb_find_desc(interface, bEndpointAddress=recv)
if send is None:
send = find_endpoint(interface, usb.ENDPOINT_OUT,
usb.ENDPOINT_TYPE_BULK)
else:
send = usb_find_desc(interface, bEndpointAddress=send)
return recv, send
def write(self, data):
try:
return self.usb_send_ep.write(data)
except usb.core.USBError as e:
raise ValueError(str(e))
def read(self, size):
if size <= 0:
size = 1
data = array_to_bytes(self.usb_recv_ep.read(size, self.timeout))
return data
def close(self):
return usb.util.dispose_resources(self.usb_dev)
class USBTMC(USBRaw):
# Maximum number of bytes per transfer (for sending and receiving).
RECV_CHUNK = 1024 ** 2
find_devices = staticmethod(find_tmc_devices)
def __init__(self, vendor=None, product=None, serial_number=None,
**kwargs):
super(USBTMC, self).__init__(vendor, product, serial_number, **kwargs)
self.usb_intr_in = find_endpoint(self.usb_intf, usb.ENDPOINT_IN,
usb.ENDPOINT_TYPE_INTERRUPT)
self.usb_dev.reset()
self.usb_dev.set_configuration()
time.sleep(0.01)
self._get_capabilities()
self._btag = 0
if not (self.usb_recv_ep and self.usb_send_ep):
msg = "TMC device must have both Bulk-In and Bulk-out endpoints."
raise ValueError(msg)
def _get_capabilities(self):
self.usb_dev.ctrl_transfer(
usb.util.build_request_type(usb.util.CTRL_IN,
usb.util.CTRL_TYPE_CLASS,
usb.util.CTRL_RECIPIENT_INTERFACE),
Request.get_capabilities,
0x0000,
self.usb_intf.index,
0x0018,
timeout=self.timeout)
def _find_interface(self, dev, setting):
interfaces = find_interfaces(dev, bInterfaceClass=0xFE,
bInterfaceSubClass=3)
if not interfaces:
raise ValueError('USB TMC interface not found.')
elif len(interfaces) > 1:
pass
return interfaces[0]
def _abort_bulk_in(self, btag):
abort_timeout_ms = 5000
# Send INITIATE_ABORT_BULK_IN.
# According to USBTMC 1.00 4.2.1.4:
# wValue = bTag value of transfer to be aborted
# wIndex = Bulk-IN endpoint
# wLength = 0x0002 (length of device response)
data = self.usb_dev.ctrl_transfer(
usb.util.build_request_type(usb.util.CTRL_IN,
usb.util.CTRL_TYPE_CLASS,
usb.util.CTRL_RECIPIENT_ENDPOINT),
Request.initiate_abort_bulk_in,
btag,
self.usb_recv_ep.bEndpointAddress,
0x0002,
timeout=abort_timeout_ms)
if data[0] != UsbTmcStatus.success:
# Abort Bulk-IN failed. Ignore it.
return
# Read remaining data from Bulk-IN endpoint.
self.usb_recv_ep.read(self.RECV_CHUNK, abort_timeout_ms)
# Send CHECK_ABORT_BULK_IN_STATUS until it completes.
# According to USBTMC 1.00 4.2.1.5:
# wValue = 0x0000
# wIndex = Bulk-IN endpoint
# wLength = 0x0008 (length of device response)
for retry in range(100):
data = self.usb_dev.ctrl_transfer(
usb.util.build_request_type(usb.util.CTRL_IN,
usb.util.CTRL_TYPE_CLASS,
usb.util.CTRL_RECIPIENT_ENDPOINT),
Request.check_abort_bulk_in_status,
0x0000,
self.usb_recv_ep.bEndpointAddress,
0x0008,
timeout=abort_timeout_ms)
if data[0] != UsbTmcStatus.pending:
break
time.sleep(0.05)
def write(self, data):
begin, end, size = 0, 0, len(data)
bytes_sent = 0
raw_write = super(USBTMC, self).write
# Send all data via one or more Bulk-OUT transfers.
# Set the EOM flag on the last transfer only.
# Send at least one transfer (possibly empty).
        while (end == 0) or (end < size):
            begin, end = end, end + self.RECV_CHUNK
            self._btag = (self._btag % 255) + 1
            eom = (end >= size)
            # Build the message into a separate variable: reusing `data` here
            # would clobber the remaining payload on multi-chunk transfers.
            message = BulkOutMessage.build_array(self._btag, eom, data[begin:end])
            bytes_sent += raw_write(message)
return size
def read(self, size):
recv_chunk = self.RECV_CHUNK
if size > 0 and size < recv_chunk:
recv_chunk = size
header_size = 12
max_padding = 511
eom = False
raw_read = super(USBTMC, self).read
raw_write = super(USBTMC, self).write
received = bytearray()
while not eom:
self._btag = (self._btag % 255) + 1
req = BulkInMessage.build_array(self._btag, recv_chunk, None)
raw_write(req)
try:
resp = raw_read(recv_chunk + header_size + max_padding)
response = BulkInMessage.from_bytes(resp)
except (usb.core.USBError, ValueError):
# Abort failed Bulk-IN operation.
self._abort_bulk_in(self._btag)
raise
received.extend(response.data)
# Detect EOM only when device sends all expected bytes.
if len(response.data) >= response.transfer_size:
eom = response.transfer_attributes & 1
return bytes(received)
| true
| true
|
790b5d452a362eacbee42beb11f6b3961f3c3d91
| 2,020
|
py
|
Python
|
tests/core/test_constants.py
|
SmallCream/django-cool
|
63b136da7ce39135c9f900e8161288f8fc8893a4
|
[
"BSD-3-Clause"
] | 11
|
2020-05-19T09:52:35.000Z
|
2022-02-25T10:39:56.000Z
|
tests/core/test_constants.py
|
SmallCream/django-cool
|
63b136da7ce39135c9f900e8161288f8fc8893a4
|
[
"BSD-3-Clause"
] | null | null | null |
tests/core/test_constants.py
|
SmallCream/django-cool
|
63b136da7ce39135c9f900e8161288f8fc8893a4
|
[
"BSD-3-Clause"
] | 1
|
2020-12-24T08:14:58.000Z
|
2020-12-24T08:14:58.000Z
|
# encoding: utf-8
import unittest
from cool.core import constants
class IntConstants(constants.Constants):
TEST0 = (0, 'test0')
TEST1 = (1, 'test1')
class IntStringCodeConstants(constants.Constants):
TEST = ('test', 'test0')
TEST1 = (1, 'test1')
class ConstantsTests(unittest.TestCase):
def test_unique(self):
with self.assertRaises(ValueError):
class TestUniqueConstants(constants.Constants):
TEST0 = (0, 'test0')
TEST1 = (0, 'test1')
def test_code(self):
self.assertEqual(IntConstants.TEST0, 0)
self.assertEqual(IntConstants.TEST1, 1)
def test_desc(self):
class TestDescConstants(constants.Constants):
TEST0 = (0, 'test')
TEST1 = (1, 'test')
TEST2 = (2, 'test2')
self.assertEqual(TestDescConstants.TEST0.desc, 'test')
self.assertEqual(TestDescConstants.TEST1.desc, 'test')
self.assertEqual(TestDescConstants.TEST2.desc, 'test2')
def test_equal(self):
class TestEqualConstants(constants.Constants):
TEST = (0, 'test')
class TestEqualConstants2(constants.Constants):
TEST = (0, 'test')
self.assertEqual(TestEqualConstants.TEST, TestEqualConstants.TEST)
self.assertNotEqual(TestEqualConstants.TEST, TestEqualConstants2.TEST)
self.assertEqual(TestEqualConstants.TEST, 0)
self.assertEqual(TestEqualConstants2.TEST, 0)
def test_string_code(self):
self.assertEqual(IntStringCodeConstants.TEST, 'test')
self.assertEqual(IntStringCodeConstants.TEST.code, 'test')
def test_choices_list(self):
self.assertListEqual(IntStringCodeConstants.get_choices_list(), [('test', 'test0'), (1, 'test1')])
def test_desc_dict(self):
self.assertListEqual(IntStringCodeConstants.get_desc_dict(name_key='name'), [
{'name': 'TEST', 'code': 'test', 'desc': 'test0'},
{'name': 'TEST1', 'code': 1, 'desc': 'test1'},
])
| 33.114754
| 106
| 0.641584
|
import unittest
from cool.core import constants
class IntConstants(constants.Constants):
TEST0 = (0, 'test0')
TEST1 = (1, 'test1')
class IntStringCodeConstants(constants.Constants):
TEST = ('test', 'test0')
TEST1 = (1, 'test1')
class ConstantsTests(unittest.TestCase):
def test_unique(self):
with self.assertRaises(ValueError):
class TestUniqueConstants(constants.Constants):
TEST0 = (0, 'test0')
TEST1 = (0, 'test1')
def test_code(self):
self.assertEqual(IntConstants.TEST0, 0)
self.assertEqual(IntConstants.TEST1, 1)
def test_desc(self):
class TestDescConstants(constants.Constants):
TEST0 = (0, 'test')
TEST1 = (1, 'test')
TEST2 = (2, 'test2')
self.assertEqual(TestDescConstants.TEST0.desc, 'test')
self.assertEqual(TestDescConstants.TEST1.desc, 'test')
self.assertEqual(TestDescConstants.TEST2.desc, 'test2')
def test_equal(self):
class TestEqualConstants(constants.Constants):
TEST = (0, 'test')
class TestEqualConstants2(constants.Constants):
TEST = (0, 'test')
self.assertEqual(TestEqualConstants.TEST, TestEqualConstants.TEST)
self.assertNotEqual(TestEqualConstants.TEST, TestEqualConstants2.TEST)
self.assertEqual(TestEqualConstants.TEST, 0)
self.assertEqual(TestEqualConstants2.TEST, 0)
def test_string_code(self):
self.assertEqual(IntStringCodeConstants.TEST, 'test')
self.assertEqual(IntStringCodeConstants.TEST.code, 'test')
def test_choices_list(self):
self.assertListEqual(IntStringCodeConstants.get_choices_list(), [('test', 'test0'), (1, 'test1')])
def test_desc_dict(self):
self.assertListEqual(IntStringCodeConstants.get_desc_dict(name_key='name'), [
{'name': 'TEST', 'code': 'test', 'desc': 'test0'},
{'name': 'TEST1', 'code': 1, 'desc': 'test1'},
])
| true
| true
|
790b5d9bcbb9d5c5c2c40ab62afdba9b47d2525f
| 626
|
py
|
Python
|
common.py
|
ajyl/KEMP
|
d71d34e3fb1d636db7f2cf40f6a3aa0040681389
|
[
"MIT"
] | null | null | null |
common.py
|
ajyl/KEMP
|
d71d34e3fb1d636db7f2cf40f6a3aa0040681389
|
[
"MIT"
] | null | null | null |
common.py
|
ajyl/KEMP
|
d71d34e3fb1d636db7f2cf40f6a3aa0040681389
|
[
"MIT"
] | null | null | null |
"""
Module Doc String
"""
EMOTIONS = [
"sentimental",
"afraid",
"proud",
"faithful",
"terrified",
"joyful",
"angry",
"sad",
"jealous",
"grateful",
"prepared",
"embarrassed",
"excited",
"annoyed",
"lonely",
"ashamed",
"guilty",
"surprised",
"nostalgic",
"confident",
"furious",
"disappointed",
"caring",
"trusting",
"disgusted",
"anticipating",
"anxious",
"hopeful",
"content",
"impressed",
"apprehensive",
"devastated",
]
def main():
""" Driver """
if __name__ == "__main__":
main()
| 12.77551
| 26
| 0.496805
|
EMOTIONS = [
"sentimental",
"afraid",
"proud",
"faithful",
"terrified",
"joyful",
"angry",
"sad",
"jealous",
"grateful",
"prepared",
"embarrassed",
"excited",
"annoyed",
"lonely",
"ashamed",
"guilty",
"surprised",
"nostalgic",
"confident",
"furious",
"disappointed",
"caring",
"trusting",
"disgusted",
"anticipating",
"anxious",
"hopeful",
"content",
"impressed",
"apprehensive",
"devastated",
]
def main():
    pass
if __name__ == "__main__":
main()
| true
| true
|
790b5eaa8022d14560bfc4707727abb81d0adce1
| 21,056
|
py
|
Python
|
src/starkware/starknet/apps/starkgate/cairo/token_test.py
|
starkware-libs/starkgate-contracts
|
28f4032b101003b2c6682d753ea61c86b732012c
|
[
"Apache-2.0"
] | 9
|
2022-01-27T20:20:06.000Z
|
2022-03-29T12:05:57.000Z
|
src/starkware/starknet/apps/starkgate/cairo/token_test.py
|
starkware-libs/starkgate-contracts
|
28f4032b101003b2c6682d753ea61c86b732012c
|
[
"Apache-2.0"
] | 2
|
2022-02-16T17:05:56.000Z
|
2022-02-16T17:06:54.000Z
|
src/starkware/starknet/apps/starkgate/cairo/token_test.py
|
starkware-libs/starkgate-contracts
|
28f4032b101003b2c6682d753ea61c86b732012c
|
[
"Apache-2.0"
] | 1
|
2022-02-03T13:39:44.000Z
|
2022-02-03T13:39:44.000Z
|
import asyncio
import copy
import random
from typing import Callable
import pytest
from starkware.starknet.apps.starkgate.cairo.contracts import erc20_contract_def
from starkware.starknet.apps.starkgate.conftest import str_to_felt
from starkware.starknet.testing.contract import StarknetContract
from starkware.starknet.testing.starknet import Starknet
from starkware.starkware_utils.error_handling import StarkException
AMOUNT_BOUND = 2 ** 256
GOVERNOR_ADDRESS = str_to_felt("GOVERNOR")
MINTER_ADDRESS = str_to_felt("MINTER")
L1_ACCOUNT = 1
initial_balances = {1: 13, 2: 10}
uninitialized_account = 3
initial_total_supply = sum(initial_balances.values())
initialized_account = random.choice(list(initial_balances.keys()))
another_account = 4 # Not initialized_account and not uninitialized_account.
# 0 < TRANSFER_AMOUNT < APPROVE_AMOUNT < initial_balance < HIGH_APPROVE_AMOUNT.
TRANSFER_AMOUNT = int((initial_balances[initialized_account] + 1) / 2)
APPROVE_AMOUNT = 8
HIGH_APPROVE_AMOUNT = 100
MINT_AMOUNT = 10
BURN_AMOUNT = int((initial_balances[initialized_account] + 1) / 2)
@pytest.fixture(scope="session")
def event_loop():
loop = asyncio.get_event_loop()
yield loop
loop.close()
@pytest.fixture(scope="session")
async def session_starknet() -> Starknet:
return await Starknet.empty()
@pytest.fixture(scope="session")
async def session_empty_token_contract(
session_starknet: Starknet,
token_name: int,
token_symbol: int,
token_decimals: int,
) -> StarknetContract:
return await session_starknet.deploy(
constructor_calldata=[
token_name,
token_symbol,
token_decimals,
MINTER_ADDRESS,
],
contract_def=erc20_contract_def,
)
@pytest.fixture(scope="session")
async def uint256(session_empty_token_contract: StarknetContract) -> Callable:
def convert_int_to_uint256(num: int):
if num < 0:
num += 2 ** 256
return session_empty_token_contract.Uint256(low=num % 2 ** 128, high=num // 2 ** 128)
return convert_int_to_uint256
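# --- editor's hedged example ---------------------------------------------
# The low/high split used by the uint256 fixture above, shown on plain ints
# without the contract-generated Uint256 type (helper name hypothetical):
def _demo_uint256_split(num: int):
    if num < 0:
        num += 2 ** 256
    return num % 2 ** 128, num // 2 ** 128  # (low, high)
# e.g. _demo_uint256_split(2 ** 128 + 7) == (7, 1)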
@pytest.fixture(scope="session")
async def session_token_contract(
session_empty_token_contract: StarknetContract,
uint256: Callable,
) -> StarknetContract:
for account in initial_balances:
await session_empty_token_contract.permissionedMint(
recipient=account, amount=uint256(initial_balances[account])
).invoke(caller_address=MINTER_ADDRESS)
return session_empty_token_contract
@pytest.fixture
async def starknet(session_starknet: Starknet) -> Starknet:
return copy.deepcopy(session_starknet)
@pytest.fixture
async def token_contract(
starknet: Starknet, session_token_contract: StarknetContract
) -> StarknetContract:
return StarknetContract(
state=starknet.state,
abi=erc20_contract_def.abi,
contract_address=session_token_contract.contract_address,
deploy_execution_info=session_token_contract.deploy_execution_info,
)
@pytest.mark.asyncio
async def test_permitted_minter(token_contract: StarknetContract):
execution_info = await token_contract.permittedMinter().call()
assert execution_info.result == (MINTER_ADDRESS,)
@pytest.mark.asyncio
async def test_name(token_contract: StarknetContract, token_name: int):
execution_info = await token_contract.name().call()
assert execution_info.result == (token_name,)
@pytest.mark.asyncio
async def test_symbol(token_contract: StarknetContract, token_symbol: int):
execution_info = await token_contract.symbol().call()
assert execution_info.result == (token_symbol,)
@pytest.mark.asyncio
async def test_decimal(token_contract: StarknetContract, token_decimals: int):
execution_info = await token_contract.decimals().call()
assert execution_info.result == (token_decimals,)
@pytest.mark.asyncio
async def test_total_supply(token_contract: StarknetContract, uint256: Callable):
execution_info = await token_contract.totalSupply().call()
assert execution_info.result == (uint256(initial_total_supply),)
@pytest.mark.asyncio
async def test_balance_of(token_contract: StarknetContract, uint256: Callable):
execution_info = await token_contract.balanceOf(account=initialized_account).call()
assert execution_info.result == (uint256(initial_balances[initialized_account]),)
execution_info = await token_contract.balanceOf(account=uninitialized_account).call()
assert execution_info.result == (uint256(0),)
@pytest.mark.asyncio
async def test_transfer_zero_sender(token_contract: StarknetContract, uint256: Callable):
amount = uint256(TRANSFER_AMOUNT)
with pytest.raises(StarkException, match="assert_not_zero\(sender\)"):
await token_contract.transfer(recipient=uninitialized_account, amount=amount).invoke(
caller_address=0
)
@pytest.mark.asyncio
async def test_transfer_zero_recipient(token_contract: StarknetContract, uint256: Callable):
with pytest.raises(StarkException, match="assert_not_zero\(recipient\)"):
await token_contract.transfer(recipient=0, amount=uint256(TRANSFER_AMOUNT)).invoke(
caller_address=initialized_account
)
@pytest.mark.asyncio
async def test_transfer_amount_bigger_than_balance(
token_contract: StarknetContract, uint256: Callable
):
amount = uint256(initial_balances[initialized_account] + 1)
with pytest.raises(StarkException, match="assert_not_zero\(enough_balance\)"):
await token_contract.transfer(recipient=uninitialized_account, amount=amount).invoke(
caller_address=initialized_account
)
@pytest.mark.asyncio
async def test_transfer_invalid_uint256_amount(token_contract: StarknetContract, uint256: Callable):
amount = uint256(AMOUNT_BOUND)
with pytest.raises(StarkException, match="uint256_check\(amount\)"):
await token_contract.transfer(recipient=uninitialized_account, amount=amount).invoke(
caller_address=initialized_account
)
@pytest.mark.asyncio
async def test_transfer_happy_flow(token_contract: StarknetContract, uint256: Callable):
transfer_amount = uint256(TRANSFER_AMOUNT)
await token_contract.transfer(recipient=uninitialized_account, amount=transfer_amount).invoke(
caller_address=initialized_account
)
expected_balance = uint256(initial_balances[initialized_account] - TRANSFER_AMOUNT)
execution_info = await token_contract.balanceOf(account=initialized_account).call()
assert execution_info.result == (expected_balance,)
execution_info = await token_contract.balanceOf(account=uninitialized_account).call()
assert execution_info.result == (transfer_amount,)
execution_info = await token_contract.totalSupply().call()
assert execution_info.result == (uint256(initial_total_supply),)
await token_contract.transfer(recipient=initialized_account, amount=transfer_amount).invoke(
caller_address=uninitialized_account
)
execution_info = await token_contract.balanceOf(account=initialized_account).call()
assert execution_info.result == (uint256(initial_balances[initialized_account]),)
execution_info = await token_contract.balanceOf(account=uninitialized_account).call()
assert execution_info.result == (uint256(0),)
# Tests the case of sender = recipient.
await token_contract.transfer(recipient=initialized_account, amount=transfer_amount).invoke(
caller_address=initialized_account
)
execution_info = await token_contract.balanceOf(account=initialized_account).call()
assert execution_info.result == (uint256(initial_balances[initialized_account]),)
@pytest.mark.asyncio
async def test_approve_zero_owner(token_contract: StarknetContract, uint256: Callable):
amount = uint256(APPROVE_AMOUNT)
with pytest.raises(StarkException, match="assert_not_zero\(caller\)"):
await token_contract.approve(spender=uninitialized_account, amount=amount).invoke(
caller_address=0
)
@pytest.mark.asyncio
async def test_approve_zero_spender(token_contract: StarknetContract, uint256: Callable):
amount = uint256(APPROVE_AMOUNT)
with pytest.raises(StarkException, match="assert_not_zero\(spender\)"):
await token_contract.approve(spender=0, amount=amount).invoke(
caller_address=initialized_account
)
@pytest.mark.asyncio
async def test_approve_invalid_uint256_amount(token_contract: StarknetContract, uint256: Callable):
amount = uint256(AMOUNT_BOUND)
with pytest.raises(StarkException, match="uint256_check\(amount\)"):
await token_contract.approve(spender=uninitialized_account, amount=amount).invoke(
caller_address=initialized_account
)
@pytest.mark.asyncio
async def test_approve_happy_flow(token_contract: StarknetContract, uint256: Callable):
execution_info = await token_contract.allowance(
owner=initialized_account, spender=uninitialized_account
).call()
assert execution_info.result == (uint256(0),)
await token_contract.approve(
spender=uninitialized_account, amount=uint256(APPROVE_AMOUNT)
).invoke(caller_address=initialized_account)
execution_info = await token_contract.allowance(
owner=initialized_account, spender=uninitialized_account
).call()
assert execution_info.result == (uint256(APPROVE_AMOUNT),)
@pytest.mark.asyncio
async def test_transfer_from_zero_sender(token_contract: StarknetContract, uint256: Callable):
# The contract fails when checking for sufficient allowance of account 0.
# Only because we cannot put a balance for address(0) or approve on its behalf.
    # If we could do that, we would have failed with the more sensible error assert_not_zero(sender).
with pytest.raises(StarkException, match="assert_not_zero\(enough_allowance\)"):
await token_contract.transferFrom(
sender=0, recipient=uninitialized_account, amount=uint256(TRANSFER_AMOUNT)
).invoke(caller_address=another_account)
@pytest.mark.asyncio
async def test_transfer_from_zero_recipient(token_contract: StarknetContract, uint256: Callable):
amount = uint256(TRANSFER_AMOUNT)
await token_contract.approve(spender=another_account, amount=uint256(TRANSFER_AMOUNT)).invoke(
caller_address=initialized_account
)
with pytest.raises(StarkException, match="assert_not_zero\(recipient\)"):
await token_contract.transferFrom(
sender=initialized_account, recipient=0, amount=amount
).invoke(caller_address=another_account)
@pytest.mark.asyncio
async def test_transfer_from_amount_bigger_than_balance(
token_contract: StarknetContract, uint256: Callable
):
await token_contract.approve(
spender=another_account, amount=uint256(HIGH_APPROVE_AMOUNT)
).invoke(caller_address=initialized_account)
amount = uint256(initial_balances[initialized_account] + 1)
with pytest.raises(StarkException, match="assert_not_zero\(enough_balance\)"):
await token_contract.transferFrom(
sender=initialized_account, recipient=uninitialized_account, amount=amount
).invoke(caller_address=another_account)
@pytest.mark.asyncio
async def test_transfer_from_amount_bigger_than_allowance(
token_contract: StarknetContract, uint256: Callable
):
await token_contract.approve(spender=another_account, amount=uint256(APPROVE_AMOUNT)).invoke(
caller_address=initialized_account
)
amount = uint256(APPROVE_AMOUNT + 1)
with pytest.raises(StarkException, match="assert_not_zero\(enough_allowance\)"):
await token_contract.transferFrom(
sender=initialized_account, recipient=uninitialized_account, amount=amount
).invoke(caller_address=another_account)
@pytest.mark.asyncio
async def test_transfer_from_invalid_uint256_amount(
token_contract: StarknetContract, uint256: Callable
):
amount = uint256(AMOUNT_BOUND)
with pytest.raises(StarkException, match="assert_not_zero\(enough_allowance\)"):
await token_contract.transferFrom(
sender=initialized_account, recipient=uninitialized_account, amount=amount
).invoke(caller_address=another_account)
@pytest.mark.asyncio
@pytest.mark.parametrize("approve_num", [APPROVE_AMOUNT, HIGH_APPROVE_AMOUNT])
async def test_transfer_from_happy_flow(
token_contract: StarknetContract, uint256: Callable, approve_num: int
):
await token_contract.approve(spender=another_account, amount=uint256(approve_num)).invoke(
caller_address=initialized_account
)
await token_contract.transferFrom(
sender=initialized_account, recipient=uninitialized_account, amount=uint256(TRANSFER_AMOUNT)
).invoke(caller_address=another_account)
@pytest.mark.asyncio
async def test_increase_allowance_zero_spender(token_contract: StarknetContract, uint256: Callable):
with pytest.raises(StarkException, match="assert_not_zero\(spender\)"):
await token_contract.increaseAllowance(
spender=0, added_value=uint256(APPROVE_AMOUNT)
).invoke(caller_address=initialized_account)
@pytest.mark.asyncio
async def test_increase_allowance_invalid_amount(
token_contract: StarknetContract, uint256: Callable
):
with pytest.raises(StarkException, match="uint256_check\(added_value\)"):
await token_contract.increaseAllowance(
spender=uninitialized_account, added_value=uint256(AMOUNT_BOUND)
).invoke(caller_address=initialized_account)
@pytest.mark.asyncio
async def test_increase_allowance_overflow(token_contract: StarknetContract, uint256: Callable):
await token_contract.increaseAllowance(
spender=uninitialized_account, added_value=uint256(APPROVE_AMOUNT)
).invoke(caller_address=initialized_account)
with pytest.raises(StarkException, match="assert \(is_overflow\) = 0"):
await token_contract.increaseAllowance(
spender=uninitialized_account, added_value=uint256(AMOUNT_BOUND - APPROVE_AMOUNT)
).invoke(caller_address=initialized_account)
@pytest.mark.asyncio
async def test_decrease_allowance_zero_spender(token_contract: StarknetContract, uint256: Callable):
approve_amount = uint256(APPROVE_AMOUNT)
with pytest.raises(StarkException, match="assert_not_zero\(enough_allowance\)"):
await token_contract.decreaseAllowance(spender=0, subtracted_value=approve_amount).invoke(
caller_address=initialized_account
)
@pytest.mark.asyncio
async def test_decrease_allowance_bigger_than_allowance(
token_contract: StarknetContract, uint256: Callable
):
await token_contract.increaseAllowance(
spender=uninitialized_account, added_value=uint256(APPROVE_AMOUNT)
).invoke(caller_address=initialized_account)
with pytest.raises(StarkException, match="assert_not_zero\(enough_allowance\)"):
await token_contract.decreaseAllowance(
spender=uninitialized_account, subtracted_value=uint256(APPROVE_AMOUNT + 1)
).invoke(caller_address=initialized_account)
@pytest.mark.asyncio
async def test_decrease_allowance_invalid_amount(
token_contract: StarknetContract, uint256: Callable
):
with pytest.raises(StarkException, match="uint256_check\(subtracted_value\)"):
await token_contract.decreaseAllowance(
spender=uninitialized_account, subtracted_value=uint256(AMOUNT_BOUND)
).invoke(caller_address=initialized_account)
@pytest.mark.asyncio
async def test_increase_and_decrease_allowance_happy_flow(
token_contract: StarknetContract, uint256: Callable
):
execution_info = await token_contract.allowance(
owner=initialized_account, spender=uninitialized_account
).call()
assert execution_info.result == (uint256(0),)
await token_contract.increaseAllowance(
spender=uninitialized_account, added_value=uint256(APPROVE_AMOUNT)
).invoke(caller_address=initialized_account)
execution_info = await token_contract.allowance(
owner=initialized_account, spender=uninitialized_account
).call()
assert execution_info.result == (uint256(APPROVE_AMOUNT),)
await token_contract.decreaseAllowance(
spender=uninitialized_account, subtracted_value=uint256(int(APPROVE_AMOUNT / 2))
).invoke(caller_address=initialized_account)
execution_info = await token_contract.allowance(
owner=initialized_account, spender=uninitialized_account
).call()
assert execution_info.result == (uint256(APPROVE_AMOUNT - int(APPROVE_AMOUNT / 2)),)
@pytest.mark.asyncio
async def test_permissioned_mint_wrong_minter(token_contract: StarknetContract, uint256: Callable):
with pytest.raises(StarkException, match="assert caller_address = permitted_address"):
await token_contract.permissionedMint(
recipient=uninitialized_account, amount=uint256(MINT_AMOUNT)
).invoke(caller_address=MINTER_ADDRESS + 1)
@pytest.mark.asyncio
async def test_permissioned_mint_zero_recipient(
token_contract: StarknetContract, uint256: Callable
):
with pytest.raises(StarkException, match="assert_not_zero\(recipient\)"):
await token_contract.permissionedMint(recipient=0, amount=uint256(MINT_AMOUNT)).invoke(
caller_address=MINTER_ADDRESS
)
@pytest.mark.asyncio
async def test_permissioned_mint_invalid_uint256_amount(
token_contract: StarknetContract, uint256: Callable
):
with pytest.raises(StarkException, match=f"uint256_check\(amount\)"):
await token_contract.permissionedMint(
recipient=uninitialized_account, amount=uint256(AMOUNT_BOUND)
).invoke(caller_address=MINTER_ADDRESS)
@pytest.mark.asyncio
async def test_permissioned_mint_total_supply_out_of_range(
token_contract: StarknetContract, uint256: Callable
):
amount = uint256(AMOUNT_BOUND - initial_total_supply)
with pytest.raises(StarkException, match=f"assert \(is_overflow\) = 0"):
await token_contract.permissionedMint(
recipient=uninitialized_account, amount=amount
).invoke(caller_address=MINTER_ADDRESS)
@pytest.mark.asyncio
async def test_permissioned_mint_happy_flow(token_contract: StarknetContract, uint256: Callable):
await token_contract.permissionedMint(
recipient=uninitialized_account, amount=uint256(MINT_AMOUNT)
).invoke(caller_address=MINTER_ADDRESS)
execution_info = await token_contract.balanceOf(account=uninitialized_account).call()
assert execution_info.result == (uint256(MINT_AMOUNT),)
execution_info = await token_contract.totalSupply().call()
assert execution_info.result == (uint256(initial_total_supply + MINT_AMOUNT),)
@pytest.mark.asyncio
async def test_permissioned_burn_wrong_minter(token_contract: StarknetContract, uint256: Callable):
with pytest.raises(StarkException, match="assert caller_address = permitted_address"):
await token_contract.permissionedBurn(
account=initialized_account, amount=uint256(BURN_AMOUNT)
).invoke(caller_address=MINTER_ADDRESS + 1)
@pytest.mark.asyncio
async def test_permissioned_burn_zero_account(token_contract: StarknetContract, uint256: Callable):
with pytest.raises(StarkException, match="assert_not_zero\(account\)"):
await token_contract.permissionedBurn(account=0, amount=uint256(BURN_AMOUNT)).invoke(
caller_address=MINTER_ADDRESS
)
@pytest.mark.asyncio
async def test_permissioned_burn_invalid_uint256_amount(
token_contract: StarknetContract, uint256: Callable
):
with pytest.raises(StarkException, match=f"uint256_check\(amount\)"):
await token_contract.permissionedBurn(
account=initialized_account, amount=uint256(AMOUNT_BOUND)
).invoke(caller_address=MINTER_ADDRESS)
@pytest.mark.asyncio
async def test_permissioned_burn_amount_bigger_than_balance(
token_contract: StarknetContract, uint256: Callable
):
amount = uint256(initial_balances[initialized_account] + 1)
with pytest.raises(StarkException, match=f"assert_not_zero\(enough_balance\)"):
await token_contract.permissionedBurn(account=initialized_account, amount=amount).invoke(
caller_address=MINTER_ADDRESS
)
@pytest.mark.asyncio
async def test_permissioned_burn_happy_flow(token_contract: StarknetContract, uint256: Callable):
await token_contract.permissionedMint(
recipient=initialized_account, amount=uint256(MINT_AMOUNT)
).invoke(caller_address=MINTER_ADDRESS)
await token_contract.permissionedBurn(
account=initialized_account, amount=uint256(BURN_AMOUNT)
).invoke(caller_address=MINTER_ADDRESS)
expected_balance = uint256(initial_balances[initialized_account] + MINT_AMOUNT - BURN_AMOUNT)
execution_info = await token_contract.balanceOf(account=initialized_account).call()
assert execution_info.result == (expected_balance,)
execution_info = await token_contract.totalSupply().call()
assert execution_info.result == (uint256(initial_total_supply + MINT_AMOUNT - BURN_AMOUNT),)
| 40.885437
| 100
| 0.772226
|
import asyncio
import copy
import random
from typing import Callable
import pytest
from starkware.starknet.apps.starkgate.cairo.contracts import erc20_contract_def
from starkware.starknet.apps.starkgate.conftest import str_to_felt
from starkware.starknet.testing.contract import StarknetContract
from starkware.starknet.testing.starknet import Starknet
from starkware.starkware_utils.error_handling import StarkException
AMOUNT_BOUND = 2 ** 256
GOVERNOR_ADDRESS = str_to_felt("GOVERNOR")
MINTER_ADDRESS = str_to_felt("MINTER")
L1_ACCOUNT = 1
initial_balances = {1: 13, 2: 10}
uninitialized_account = 3
initial_total_supply = sum(initial_balances.values())
initialized_account = random.choice(list(initial_balances.keys()))
another_account = 4
TRANSFER_AMOUNT = int((initial_balances[initialized_account] + 1) / 2)
APPROVE_AMOUNT = 8
HIGH_APPROVE_AMOUNT = 100
MINT_AMOUNT = 10
BURN_AMOUNT = int((initial_balances[initialized_account] + 1) / 2)
@pytest.fixture(scope="session")
def event_loop():
loop = asyncio.get_event_loop()
yield loop
loop.close()
@pytest.fixture(scope="session")
async def session_starknet() -> Starknet:
return await Starknet.empty()
@pytest.fixture(scope="session")
async def session_empty_token_contract(
session_starknet: Starknet,
token_name: int,
token_symbol: int,
token_decimals: int,
) -> StarknetContract:
return await session_starknet.deploy(
constructor_calldata=[
token_name,
token_symbol,
token_decimals,
MINTER_ADDRESS,
],
contract_def=erc20_contract_def,
)
@pytest.fixture(scope="session")
async def uint256(session_empty_token_contract: StarknetContract) -> Callable:
def convert_int_to_uint256(num: int):
if num < 0:
num += 2 ** 256
return session_empty_token_contract.Uint256(low=num % 2 ** 128, high=num // 2 ** 128)
return convert_int_to_uint256
@pytest.fixture(scope="session")
async def session_token_contract(
session_empty_token_contract: StarknetContract,
uint256: Callable,
) -> StarknetContract:
for account in initial_balances:
await session_empty_token_contract.permissionedMint(
recipient=account, amount=uint256(initial_balances[account])
).invoke(caller_address=MINTER_ADDRESS)
return session_empty_token_contract
@pytest.fixture
async def starknet(session_starknet: Starknet) -> Starknet:
return copy.deepcopy(session_starknet)
@pytest.fixture
async def token_contract(
starknet: Starknet, session_token_contract: StarknetContract
) -> StarknetContract:
return StarknetContract(
state=starknet.state,
abi=erc20_contract_def.abi,
contract_address=session_token_contract.contract_address,
deploy_execution_info=session_token_contract.deploy_execution_info,
)
@pytest.mark.asyncio
async def test_permitted_minter(token_contract: StarknetContract):
execution_info = await token_contract.permittedMinter().call()
assert execution_info.result == (MINTER_ADDRESS,)
@pytest.mark.asyncio
async def test_name(token_contract: StarknetContract, token_name: int):
execution_info = await token_contract.name().call()
assert execution_info.result == (token_name,)
@pytest.mark.asyncio
async def test_symbol(token_contract: StarknetContract, token_symbol: int):
execution_info = await token_contract.symbol().call()
assert execution_info.result == (token_symbol,)
@pytest.mark.asyncio
async def test_decimal(token_contract: StarknetContract, token_decimals: int):
execution_info = await token_contract.decimals().call()
assert execution_info.result == (token_decimals,)
@pytest.mark.asyncio
async def test_total_supply(token_contract: StarknetContract, uint256: Callable):
execution_info = await token_contract.totalSupply().call()
assert execution_info.result == (uint256(initial_total_supply),)
@pytest.mark.asyncio
async def test_balance_of(token_contract: StarknetContract, uint256: Callable):
execution_info = await token_contract.balanceOf(account=initialized_account).call()
assert execution_info.result == (uint256(initial_balances[initialized_account]),)
execution_info = await token_contract.balanceOf(account=uninitialized_account).call()
assert execution_info.result == (uint256(0),)
@pytest.mark.asyncio
async def test_transfer_zero_sender(token_contract: StarknetContract, uint256: Callable):
amount = uint256(TRANSFER_AMOUNT)
with pytest.raises(StarkException, match="assert_not_zero\(sender\)"):
await token_contract.transfer(recipient=uninitialized_account, amount=amount).invoke(
caller_address=0
)
@pytest.mark.asyncio
async def test_transfer_zero_recipient(token_contract: StarknetContract, uint256: Callable):
with pytest.raises(StarkException, match="assert_not_zero\(recipient\)"):
await token_contract.transfer(recipient=0, amount=uint256(TRANSFER_AMOUNT)).invoke(
caller_address=initialized_account
)
@pytest.mark.asyncio
async def test_transfer_amount_bigger_than_balance(
token_contract: StarknetContract, uint256: Callable
):
amount = uint256(initial_balances[initialized_account] + 1)
with pytest.raises(StarkException, match="assert_not_zero\(enough_balance\)"):
await token_contract.transfer(recipient=uninitialized_account, amount=amount).invoke(
caller_address=initialized_account
)
@pytest.mark.asyncio
async def test_transfer_invalid_uint256_amount(token_contract: StarknetContract, uint256: Callable):
amount = uint256(AMOUNT_BOUND)
with pytest.raises(StarkException, match="uint256_check\(amount\)"):
await token_contract.transfer(recipient=uninitialized_account, amount=amount).invoke(
caller_address=initialized_account
)
@pytest.mark.asyncio
async def test_transfer_happy_flow(token_contract: StarknetContract, uint256: Callable):
transfer_amount = uint256(TRANSFER_AMOUNT)
await token_contract.transfer(recipient=uninitialized_account, amount=transfer_amount).invoke(
caller_address=initialized_account
)
expected_balance = uint256(initial_balances[initialized_account] - TRANSFER_AMOUNT)
execution_info = await token_contract.balanceOf(account=initialized_account).call()
assert execution_info.result == (expected_balance,)
execution_info = await token_contract.balanceOf(account=uninitialized_account).call()
assert execution_info.result == (transfer_amount,)
execution_info = await token_contract.totalSupply().call()
assert execution_info.result == (uint256(initial_total_supply),)
await token_contract.transfer(recipient=initialized_account, amount=transfer_amount).invoke(
caller_address=uninitialized_account
)
execution_info = await token_contract.balanceOf(account=initialized_account).call()
assert execution_info.result == (uint256(initial_balances[initialized_account]),)
execution_info = await token_contract.balanceOf(account=uninitialized_account).call()
assert execution_info.result == (uint256(0),)
await token_contract.transfer(recipient=initialized_account, amount=transfer_amount).invoke(
caller_address=initialized_account
)
execution_info = await token_contract.balanceOf(account=initialized_account).call()
assert execution_info.result == (uint256(initial_balances[initialized_account]),)
@pytest.mark.asyncio
async def test_approve_zero_owner(token_contract: StarknetContract, uint256: Callable):
amount = uint256(APPROVE_AMOUNT)
    with pytest.raises(StarkException, match=r"assert_not_zero\(caller\)"):
await token_contract.approve(spender=uninitialized_account, amount=amount).invoke(
caller_address=0
)
@pytest.mark.asyncio
async def test_approve_zero_spender(token_contract: StarknetContract, uint256: Callable):
amount = uint256(APPROVE_AMOUNT)
    with pytest.raises(StarkException, match=r"assert_not_zero\(spender\)"):
await token_contract.approve(spender=0, amount=amount).invoke(
caller_address=initialized_account
)
@pytest.mark.asyncio
async def test_approve_invalid_uint256_amount(token_contract: StarknetContract, uint256: Callable):
amount = uint256(AMOUNT_BOUND)
    with pytest.raises(StarkException, match=r"uint256_check\(amount\)"):
await token_contract.approve(spender=uninitialized_account, amount=amount).invoke(
caller_address=initialized_account
)
@pytest.mark.asyncio
async def test_approve_happy_flow(token_contract: StarknetContract, uint256: Callable):
execution_info = await token_contract.allowance(
owner=initialized_account, spender=uninitialized_account
).call()
assert execution_info.result == (uint256(0),)
await token_contract.approve(
spender=uninitialized_account, amount=uint256(APPROVE_AMOUNT)
).invoke(caller_address=initialized_account)
execution_info = await token_contract.allowance(
owner=initialized_account, spender=uninitialized_account
).call()
assert execution_info.result == (uint256(APPROVE_AMOUNT),)
@pytest.mark.asyncio
async def test_transfer_from_zero_sender(token_contract: StarknetContract, uint256: Callable):
    with pytest.raises(StarkException, match=r"assert_not_zero\(enough_allowance\)"):
await token_contract.transferFrom(
sender=0, recipient=uninitialized_account, amount=uint256(TRANSFER_AMOUNT)
).invoke(caller_address=another_account)
@pytest.mark.asyncio
async def test_transfer_from_zero_recipient(token_contract: StarknetContract, uint256: Callable):
amount = uint256(TRANSFER_AMOUNT)
await token_contract.approve(spender=another_account, amount=uint256(TRANSFER_AMOUNT)).invoke(
caller_address=initialized_account
)
    with pytest.raises(StarkException, match=r"assert_not_zero\(recipient\)"):
await token_contract.transferFrom(
sender=initialized_account, recipient=0, amount=amount
).invoke(caller_address=another_account)
@pytest.mark.asyncio
async def test_transfer_from_amount_bigger_than_balance(
token_contract: StarknetContract, uint256: Callable
):
await token_contract.approve(
spender=another_account, amount=uint256(HIGH_APPROVE_AMOUNT)
).invoke(caller_address=initialized_account)
amount = uint256(initial_balances[initialized_account] + 1)
    with pytest.raises(StarkException, match=r"assert_not_zero\(enough_balance\)"):
await token_contract.transferFrom(
sender=initialized_account, recipient=uninitialized_account, amount=amount
).invoke(caller_address=another_account)
@pytest.mark.asyncio
async def test_transfer_from_amount_bigger_than_allowance(
token_contract: StarknetContract, uint256: Callable
):
await token_contract.approve(spender=another_account, amount=uint256(APPROVE_AMOUNT)).invoke(
caller_address=initialized_account
)
amount = uint256(APPROVE_AMOUNT + 1)
    with pytest.raises(StarkException, match=r"assert_not_zero\(enough_allowance\)"):
await token_contract.transferFrom(
sender=initialized_account, recipient=uninitialized_account, amount=amount
).invoke(caller_address=another_account)
@pytest.mark.asyncio
async def test_transfer_from_invalid_uint256_amount(
token_contract: StarknetContract, uint256: Callable
):
amount = uint256(AMOUNT_BOUND)
    with pytest.raises(StarkException, match=r"assert_not_zero\(enough_allowance\)"):
await token_contract.transferFrom(
sender=initialized_account, recipient=uninitialized_account, amount=amount
).invoke(caller_address=another_account)
@pytest.mark.asyncio
@pytest.mark.parametrize("approve_num", [APPROVE_AMOUNT, HIGH_APPROVE_AMOUNT])
async def test_transfer_from_happy_flow(
token_contract: StarknetContract, uint256: Callable, approve_num: int
):
await token_contract.approve(spender=another_account, amount=uint256(approve_num)).invoke(
caller_address=initialized_account
)
await token_contract.transferFrom(
sender=initialized_account, recipient=uninitialized_account, amount=uint256(TRANSFER_AMOUNT)
).invoke(caller_address=another_account)
@pytest.mark.asyncio
async def test_increase_allowance_zero_spender(token_contract: StarknetContract, uint256: Callable):
    with pytest.raises(StarkException, match=r"assert_not_zero\(spender\)"):
await token_contract.increaseAllowance(
spender=0, added_value=uint256(APPROVE_AMOUNT)
).invoke(caller_address=initialized_account)
@pytest.mark.asyncio
async def test_increase_allowance_invalid_amount(
token_contract: StarknetContract, uint256: Callable
):
    with pytest.raises(StarkException, match=r"uint256_check\(added_value\)"):
await token_contract.increaseAllowance(
spender=uninitialized_account, added_value=uint256(AMOUNT_BOUND)
).invoke(caller_address=initialized_account)
@pytest.mark.asyncio
async def test_increase_allowance_overflow(token_contract: StarknetContract, uint256: Callable):
await token_contract.increaseAllowance(
spender=uninitialized_account, added_value=uint256(APPROVE_AMOUNT)
).invoke(caller_address=initialized_account)
    with pytest.raises(StarkException, match=r"assert \(is_overflow\) = 0"):
await token_contract.increaseAllowance(
spender=uninitialized_account, added_value=uint256(AMOUNT_BOUND - APPROVE_AMOUNT)
).invoke(caller_address=initialized_account)
@pytest.mark.asyncio
async def test_decrease_allowance_zero_spender(token_contract: StarknetContract, uint256: Callable):
approve_amount = uint256(APPROVE_AMOUNT)
    with pytest.raises(StarkException, match=r"assert_not_zero\(enough_allowance\)"):
await token_contract.decreaseAllowance(spender=0, subtracted_value=approve_amount).invoke(
caller_address=initialized_account
)
@pytest.mark.asyncio
async def test_decrease_allowance_bigger_than_allowance(
token_contract: StarknetContract, uint256: Callable
):
await token_contract.increaseAllowance(
spender=uninitialized_account, added_value=uint256(APPROVE_AMOUNT)
).invoke(caller_address=initialized_account)
    with pytest.raises(StarkException, match=r"assert_not_zero\(enough_allowance\)"):
await token_contract.decreaseAllowance(
spender=uninitialized_account, subtracted_value=uint256(APPROVE_AMOUNT + 1)
).invoke(caller_address=initialized_account)
@pytest.mark.asyncio
async def test_decrease_allowance_invalid_amount(
token_contract: StarknetContract, uint256: Callable
):
    with pytest.raises(StarkException, match=r"uint256_check\(subtracted_value\)"):
await token_contract.decreaseAllowance(
spender=uninitialized_account, subtracted_value=uint256(AMOUNT_BOUND)
).invoke(caller_address=initialized_account)
@pytest.mark.asyncio
async def test_increase_and_decrease_allowance_happy_flow(
token_contract: StarknetContract, uint256: Callable
):
execution_info = await token_contract.allowance(
owner=initialized_account, spender=uninitialized_account
).call()
assert execution_info.result == (uint256(0),)
await token_contract.increaseAllowance(
spender=uninitialized_account, added_value=uint256(APPROVE_AMOUNT)
).invoke(caller_address=initialized_account)
execution_info = await token_contract.allowance(
owner=initialized_account, spender=uninitialized_account
).call()
assert execution_info.result == (uint256(APPROVE_AMOUNT),)
await token_contract.decreaseAllowance(
        spender=uninitialized_account, subtracted_value=uint256(APPROVE_AMOUNT // 2)
).invoke(caller_address=initialized_account)
execution_info = await token_contract.allowance(
owner=initialized_account, spender=uninitialized_account
).call()
    assert execution_info.result == (uint256(APPROVE_AMOUNT - APPROVE_AMOUNT // 2),)
@pytest.mark.asyncio
async def test_permissioned_mint_wrong_minter(token_contract: StarknetContract, uint256: Callable):
with pytest.raises(StarkException, match="assert caller_address = permitted_address"):
await token_contract.permissionedMint(
recipient=uninitialized_account, amount=uint256(MINT_AMOUNT)
).invoke(caller_address=MINTER_ADDRESS + 1)
@pytest.mark.asyncio
async def test_permissioned_mint_zero_recipient(
token_contract: StarknetContract, uint256: Callable
):
    with pytest.raises(StarkException, match=r"assert_not_zero\(recipient\)"):
await token_contract.permissionedMint(recipient=0, amount=uint256(MINT_AMOUNT)).invoke(
caller_address=MINTER_ADDRESS
)
@pytest.mark.asyncio
async def test_permissioned_mint_invalid_uint256_amount(
token_contract: StarknetContract, uint256: Callable
):
    with pytest.raises(StarkException, match=r"uint256_check\(amount\)"):
await token_contract.permissionedMint(
recipient=uninitialized_account, amount=uint256(AMOUNT_BOUND)
).invoke(caller_address=MINTER_ADDRESS)
@pytest.mark.asyncio
async def test_permissioned_mint_total_supply_out_of_range(
token_contract: StarknetContract, uint256: Callable
):
amount = uint256(AMOUNT_BOUND - initial_total_supply)
    with pytest.raises(StarkException, match=r"assert \(is_overflow\) = 0"):
await token_contract.permissionedMint(
recipient=uninitialized_account, amount=amount
).invoke(caller_address=MINTER_ADDRESS)
@pytest.mark.asyncio
async def test_permissioned_mint_happy_flow(token_contract: StarknetContract, uint256: Callable):
await token_contract.permissionedMint(
recipient=uninitialized_account, amount=uint256(MINT_AMOUNT)
).invoke(caller_address=MINTER_ADDRESS)
execution_info = await token_contract.balanceOf(account=uninitialized_account).call()
assert execution_info.result == (uint256(MINT_AMOUNT),)
execution_info = await token_contract.totalSupply().call()
assert execution_info.result == (uint256(initial_total_supply + MINT_AMOUNT),)
@pytest.mark.asyncio
async def test_permissioned_burn_wrong_minter(token_contract: StarknetContract, uint256: Callable):
with pytest.raises(StarkException, match="assert caller_address = permitted_address"):
await token_contract.permissionedBurn(
account=initialized_account, amount=uint256(BURN_AMOUNT)
).invoke(caller_address=MINTER_ADDRESS + 1)
@pytest.mark.asyncio
async def test_permissioned_burn_zero_account(token_contract: StarknetContract, uint256: Callable):
    with pytest.raises(StarkException, match=r"assert_not_zero\(account\)"):
await token_contract.permissionedBurn(account=0, amount=uint256(BURN_AMOUNT)).invoke(
caller_address=MINTER_ADDRESS
)
@pytest.mark.asyncio
async def test_permissioned_burn_invalid_uint256_amount(
token_contract: StarknetContract, uint256: Callable
):
    with pytest.raises(StarkException, match=r"uint256_check\(amount\)"):
await token_contract.permissionedBurn(
account=initialized_account, amount=uint256(AMOUNT_BOUND)
).invoke(caller_address=MINTER_ADDRESS)
@pytest.mark.asyncio
async def test_permissioned_burn_amount_bigger_than_balance(
token_contract: StarknetContract, uint256: Callable
):
amount = uint256(initial_balances[initialized_account] + 1)
    with pytest.raises(StarkException, match=r"assert_not_zero\(enough_balance\)"):
await token_contract.permissionedBurn(account=initialized_account, amount=amount).invoke(
caller_address=MINTER_ADDRESS
)
@pytest.mark.asyncio
async def test_permissioned_burn_happy_flow(token_contract: StarknetContract, uint256: Callable):
await token_contract.permissionedMint(
recipient=initialized_account, amount=uint256(MINT_AMOUNT)
).invoke(caller_address=MINTER_ADDRESS)
await token_contract.permissionedBurn(
account=initialized_account, amount=uint256(BURN_AMOUNT)
).invoke(caller_address=MINTER_ADDRESS)
expected_balance = uint256(initial_balances[initialized_account] + MINT_AMOUNT - BURN_AMOUNT)
execution_info = await token_contract.balanceOf(account=initialized_account).call()
assert execution_info.result == (expected_balance,)
execution_info = await token_contract.totalSupply().call()
assert execution_info.result == (uint256(initial_total_supply + MINT_AMOUNT - BURN_AMOUNT),)
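# ---------------------------------------------------------------------------
# Editorial sketch: the `uint256` fixture used throughout these tests is
# defined elsewhere (e.g. in a conftest.py that is not part of this file).
# A minimal implementation, assuming the Cairo Uint256 convention of
# (low 128 bits, high 128 bits) limbs, could look like the helper below.
# The helper name `_uint256_limbs` is hypothetical.
# ---------------------------------------------------------------------------
def _uint256_limbs(value: int) -> tuple:
    # Split a non-negative integer below 2**256 into Cairo Uint256 limbs.
    assert 0 <= value < 2**256
    return (value & (2**128 - 1), value >> 128)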
| true
| true
|
790b6074975f08f6910415dfeafedc37eec6ef0a
| 14,960
|
py
|
Python
|
sdk/python/pulumi_azure_native/operationalinsights/storage_insight_config.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/operationalinsights/storage_insight_config.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/operationalinsights/storage_insight_config.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['StorageInsightConfigArgs', 'StorageInsightConfig']
@pulumi.input_type
class StorageInsightConfigArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
storage_account: pulumi.Input['StorageAccountArgs'],
workspace_name: pulumi.Input[str],
containers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
e_tag: Optional[pulumi.Input[str]] = None,
storage_insight_name: Optional[pulumi.Input[str]] = None,
tables: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a StorageInsightConfig resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input['StorageAccountArgs'] storage_account: The storage account connection details
:param pulumi.Input[str] workspace_name: The name of the workspace.
:param pulumi.Input[Sequence[pulumi.Input[str]]] containers: The names of the blob containers that the workspace should read
:param pulumi.Input[str] e_tag: The ETag of the storage insight.
:param pulumi.Input[str] storage_insight_name: Name of the storageInsightsConfigs resource
:param pulumi.Input[Sequence[pulumi.Input[str]]] tables: The names of the Azure tables that the workspace should read
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "storage_account", storage_account)
pulumi.set(__self__, "workspace_name", workspace_name)
if containers is not None:
pulumi.set(__self__, "containers", containers)
if e_tag is not None:
pulumi.set(__self__, "e_tag", e_tag)
if storage_insight_name is not None:
pulumi.set(__self__, "storage_insight_name", storage_insight_name)
if tables is not None:
pulumi.set(__self__, "tables", tables)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group. The name is case insensitive.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="storageAccount")
def storage_account(self) -> pulumi.Input['StorageAccountArgs']:
"""
The storage account connection details
"""
return pulumi.get(self, "storage_account")
@storage_account.setter
def storage_account(self, value: pulumi.Input['StorageAccountArgs']):
pulumi.set(self, "storage_account", value)
@property
@pulumi.getter(name="workspaceName")
def workspace_name(self) -> pulumi.Input[str]:
"""
The name of the workspace.
"""
return pulumi.get(self, "workspace_name")
@workspace_name.setter
def workspace_name(self, value: pulumi.Input[str]):
pulumi.set(self, "workspace_name", value)
@property
@pulumi.getter
def containers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The names of the blob containers that the workspace should read
"""
return pulumi.get(self, "containers")
@containers.setter
def containers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "containers", value)
@property
@pulumi.getter(name="eTag")
def e_tag(self) -> Optional[pulumi.Input[str]]:
"""
The ETag of the storage insight.
"""
return pulumi.get(self, "e_tag")
@e_tag.setter
def e_tag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "e_tag", value)
@property
@pulumi.getter(name="storageInsightName")
def storage_insight_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the storageInsightsConfigs resource
"""
return pulumi.get(self, "storage_insight_name")
@storage_insight_name.setter
def storage_insight_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "storage_insight_name", value)
@property
@pulumi.getter
def tables(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The names of the Azure tables that the workspace should read
"""
return pulumi.get(self, "tables")
@tables.setter
def tables(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "tables", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class StorageInsightConfig(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
containers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
e_tag: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
storage_account: Optional[pulumi.Input[pulumi.InputType['StorageAccountArgs']]] = None,
storage_insight_name: Optional[pulumi.Input[str]] = None,
tables: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
The top level storage insight resource container.
API Version: 2020-08-01.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] containers: The names of the blob containers that the workspace should read
:param pulumi.Input[str] e_tag: The ETag of the storage insight.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[pulumi.InputType['StorageAccountArgs']] storage_account: The storage account connection details
:param pulumi.Input[str] storage_insight_name: Name of the storageInsightsConfigs resource
:param pulumi.Input[Sequence[pulumi.Input[str]]] tables: The names of the Azure tables that the workspace should read
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[str] workspace_name: The name of the workspace.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: StorageInsightConfigArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
The top level storage insight resource container.
API Version: 2020-08-01.
:param str resource_name: The name of the resource.
:param StorageInsightConfigArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(StorageInsightConfigArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
containers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
e_tag: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
storage_account: Optional[pulumi.Input[pulumi.InputType['StorageAccountArgs']]] = None,
storage_insight_name: Optional[pulumi.Input[str]] = None,
tables: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = StorageInsightConfigArgs.__new__(StorageInsightConfigArgs)
__props__.__dict__["containers"] = containers
__props__.__dict__["e_tag"] = e_tag
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if storage_account is None and not opts.urn:
raise TypeError("Missing required property 'storage_account'")
__props__.__dict__["storage_account"] = storage_account
__props__.__dict__["storage_insight_name"] = storage_insight_name
__props__.__dict__["tables"] = tables
__props__.__dict__["tags"] = tags
if workspace_name is None and not opts.urn:
raise TypeError("Missing required property 'workspace_name'")
__props__.__dict__["workspace_name"] = workspace_name
__props__.__dict__["name"] = None
__props__.__dict__["status"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:operationalinsights:StorageInsightConfig"), pulumi.Alias(type_="azure-native:operationalinsights/v20150320:StorageInsightConfig"), pulumi.Alias(type_="azure-nextgen:operationalinsights/v20150320:StorageInsightConfig"), pulumi.Alias(type_="azure-native:operationalinsights/v20200301preview:StorageInsightConfig"), pulumi.Alias(type_="azure-nextgen:operationalinsights/v20200301preview:StorageInsightConfig"), pulumi.Alias(type_="azure-native:operationalinsights/v20200801:StorageInsightConfig"), pulumi.Alias(type_="azure-nextgen:operationalinsights/v20200801:StorageInsightConfig")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(StorageInsightConfig, __self__).__init__(
'azure-native:operationalinsights:StorageInsightConfig',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'StorageInsightConfig':
"""
Get an existing StorageInsightConfig resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = StorageInsightConfigArgs.__new__(StorageInsightConfigArgs)
__props__.__dict__["containers"] = None
__props__.__dict__["e_tag"] = None
__props__.__dict__["name"] = None
__props__.__dict__["status"] = None
__props__.__dict__["storage_account"] = None
__props__.__dict__["tables"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
return StorageInsightConfig(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def containers(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
The names of the blob containers that the workspace should read
"""
return pulumi.get(self, "containers")
@property
@pulumi.getter(name="eTag")
def e_tag(self) -> pulumi.Output[Optional[str]]:
"""
The ETag of the storage insight.
"""
return pulumi.get(self, "e_tag")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def status(self) -> pulumi.Output['outputs.StorageInsightStatusResponse']:
"""
The status of the storage insight
"""
return pulumi.get(self, "status")
@property
@pulumi.getter(name="storageAccount")
def storage_account(self) -> pulumi.Output['outputs.StorageAccountResponse']:
"""
The storage account connection details
"""
return pulumi.get(self, "storage_account")
@property
@pulumi.getter
def tables(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
The names of the Azure tables that the workspace should read
"""
return pulumi.get(self, "tables")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
| 44.391691
| 670
| 0.654412
|
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['StorageInsightConfigArgs', 'StorageInsightConfig']
@pulumi.input_type
class StorageInsightConfigArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
storage_account: pulumi.Input['StorageAccountArgs'],
workspace_name: pulumi.Input[str],
containers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
e_tag: Optional[pulumi.Input[str]] = None,
storage_insight_name: Optional[pulumi.Input[str]] = None,
tables: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "storage_account", storage_account)
pulumi.set(__self__, "workspace_name", workspace_name)
if containers is not None:
pulumi.set(__self__, "containers", containers)
if e_tag is not None:
pulumi.set(__self__, "e_tag", e_tag)
if storage_insight_name is not None:
pulumi.set(__self__, "storage_insight_name", storage_insight_name)
if tables is not None:
pulumi.set(__self__, "tables", tables)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="storageAccount")
def storage_account(self) -> pulumi.Input['StorageAccountArgs']:
return pulumi.get(self, "storage_account")
@storage_account.setter
def storage_account(self, value: pulumi.Input['StorageAccountArgs']):
pulumi.set(self, "storage_account", value)
@property
@pulumi.getter(name="workspaceName")
def workspace_name(self) -> pulumi.Input[str]:
return pulumi.get(self, "workspace_name")
@workspace_name.setter
def workspace_name(self, value: pulumi.Input[str]):
pulumi.set(self, "workspace_name", value)
@property
@pulumi.getter
def containers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "containers")
@containers.setter
def containers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "containers", value)
@property
@pulumi.getter(name="eTag")
def e_tag(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "e_tag")
@e_tag.setter
def e_tag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "e_tag", value)
@property
@pulumi.getter(name="storageInsightName")
def storage_insight_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "storage_insight_name")
@storage_insight_name.setter
def storage_insight_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "storage_insight_name", value)
@property
@pulumi.getter
def tables(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "tables")
@tables.setter
def tables(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "tables", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class StorageInsightConfig(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
containers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
e_tag: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
storage_account: Optional[pulumi.Input[pulumi.InputType['StorageAccountArgs']]] = None,
storage_insight_name: Optional[pulumi.Input[str]] = None,
tables: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
__props__=None):
...
@overload
def __init__(__self__,
resource_name: str,
args: StorageInsightConfigArgs,
opts: Optional[pulumi.ResourceOptions] = None):
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(StorageInsightConfigArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
containers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
e_tag: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
storage_account: Optional[pulumi.Input[pulumi.InputType['StorageAccountArgs']]] = None,
storage_insight_name: Optional[pulumi.Input[str]] = None,
tables: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = StorageInsightConfigArgs.__new__(StorageInsightConfigArgs)
__props__.__dict__["containers"] = containers
__props__.__dict__["e_tag"] = e_tag
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if storage_account is None and not opts.urn:
raise TypeError("Missing required property 'storage_account'")
__props__.__dict__["storage_account"] = storage_account
__props__.__dict__["storage_insight_name"] = storage_insight_name
__props__.__dict__["tables"] = tables
__props__.__dict__["tags"] = tags
if workspace_name is None and not opts.urn:
raise TypeError("Missing required property 'workspace_name'")
__props__.__dict__["workspace_name"] = workspace_name
__props__.__dict__["name"] = None
__props__.__dict__["status"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:operationalinsights:StorageInsightConfig"), pulumi.Alias(type_="azure-native:operationalinsights/v20150320:StorageInsightConfig"), pulumi.Alias(type_="azure-nextgen:operationalinsights/v20150320:StorageInsightConfig"), pulumi.Alias(type_="azure-native:operationalinsights/v20200301preview:StorageInsightConfig"), pulumi.Alias(type_="azure-nextgen:operationalinsights/v20200301preview:StorageInsightConfig"), pulumi.Alias(type_="azure-native:operationalinsights/v20200801:StorageInsightConfig"), pulumi.Alias(type_="azure-nextgen:operationalinsights/v20200801:StorageInsightConfig")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(StorageInsightConfig, __self__).__init__(
'azure-native:operationalinsights:StorageInsightConfig',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'StorageInsightConfig':
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = StorageInsightConfigArgs.__new__(StorageInsightConfigArgs)
__props__.__dict__["containers"] = None
__props__.__dict__["e_tag"] = None
__props__.__dict__["name"] = None
__props__.__dict__["status"] = None
__props__.__dict__["storage_account"] = None
__props__.__dict__["tables"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
return StorageInsightConfig(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def containers(self) -> pulumi.Output[Optional[Sequence[str]]]:
return pulumi.get(self, "containers")
@property
@pulumi.getter(name="eTag")
def e_tag(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "e_tag")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
return pulumi.get(self, "name")
@property
@pulumi.getter
def status(self) -> pulumi.Output['outputs.StorageInsightStatusResponse']:
return pulumi.get(self, "status")
@property
@pulumi.getter(name="storageAccount")
def storage_account(self) -> pulumi.Output['outputs.StorageAccountResponse']:
return pulumi.get(self, "storage_account")
@property
@pulumi.getter
def tables(self) -> pulumi.Output[Optional[Sequence[str]]]:
return pulumi.get(self, "tables")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
return pulumi.get(self, "type")
| true
| true
|
790b60aed69857200e1eeae7fc3858ded428e58c
| 10,715
|
py
|
Python
|
homeassistant/components/ozw/entity.py
|
andersop91/core
|
0e0ef0aa17073609eae7c974cf4c73306b7c414b
|
[
"Apache-2.0"
] | 22,481
|
2020-03-02T13:09:59.000Z
|
2022-03-31T23:34:28.000Z
|
homeassistant/components/ozw/entity.py
|
andersop91/core
|
0e0ef0aa17073609eae7c974cf4c73306b7c414b
|
[
"Apache-2.0"
] | 31,101
|
2020-03-02T13:00:16.000Z
|
2022-03-31T23:57:36.000Z
|
homeassistant/components/ozw/entity.py
|
andersop91/core
|
0e0ef0aa17073609eae7c974cf4c73306b7c414b
|
[
"Apache-2.0"
] | 11,411
|
2020-03-02T14:19:20.000Z
|
2022-03-31T22:46:07.000Z
|
"""Generic Z-Wave Entity Classes."""
import copy
import logging
from openzwavemqtt.const import (
EVENT_INSTANCE_STATUS_CHANGED,
EVENT_VALUE_CHANGED,
OZW_READY_STATES,
CommandClass,
ValueIndex,
)
from openzwavemqtt.models.node import OZWNode
from openzwavemqtt.models.value import OZWValue
from homeassistant.const import ATTR_NAME, ATTR_SW_VERSION, ATTR_VIA_DEVICE
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import DeviceInfo, Entity
from . import const
from .const import DOMAIN, PLATFORMS
from .discovery import check_node_schema, check_value_schema
_LOGGER = logging.getLogger(__name__)
OZW_READY_STATES_VALUES = {st.value for st in OZW_READY_STATES}
class ZWaveDeviceEntityValues:
"""Manages entity access to the underlying Z-Wave value objects."""
def __init__(self, hass, options, schema, primary_value):
"""Initialize the values object with the passed entity schema."""
self._hass = hass
self._entity_created = False
self._schema = copy.deepcopy(schema)
self._values = {}
self.options = options
# Go through values listed in the discovery schema, initialize them,
# and add a check to the schema to make sure the Instance matches.
for name, disc_settings in self._schema[const.DISC_VALUES].items():
self._values[name] = None
disc_settings[const.DISC_INSTANCE] = (primary_value.instance,)
self._values[const.DISC_PRIMARY] = primary_value
self._node = primary_value.node
self._schema[const.DISC_NODE_ID] = [self._node.node_id]
def async_setup(self):
"""Set up values instance."""
        # Check values that have already been discovered for the node
        # and see if they match the schema and need to be added to the entity.
for value in self._node.values():
self.async_check_value(value)
# Check if all the _required_ values in the schema are present and
# create the entity.
self._async_check_entity_ready()
def __getattr__(self, name):
"""Get the specified value for this entity."""
return self._values.get(name, None)
def __iter__(self):
"""Allow iteration over all values."""
return iter(self._values.values())
def __contains__(self, name):
"""Check if the specified name/key exists in the values."""
return name in self._values
@callback
def async_check_value(self, value):
"""Check if the new value matches a missing value for this entity.
If a match is found, it is added to the values mapping.
"""
# Make sure the node matches the schema for this entity.
if not check_node_schema(value.node, self._schema):
return
# Go through the possible values for this entity defined by the schema.
for name, name_value in self._values.items():
# Skip if it's already been added.
if name_value is not None:
continue
# Skip if the value doesn't match the schema.
if not check_value_schema(value, self._schema[const.DISC_VALUES][name]):
continue
# Add value to mapping.
self._values[name] = value
# If the entity has already been created, notify it of the new value.
if self._entity_created:
async_dispatcher_send(
self._hass, f"{DOMAIN}_{self.values_id}_value_added"
)
# Check if entity has all required values and create the entity if needed.
self._async_check_entity_ready()
@callback
def _async_check_entity_ready(self):
"""Check if all required values are discovered and create entity."""
# Abort if the entity has already been created
if self._entity_created:
return
# Go through values defined in the schema and abort if a required value is missing.
for name, disc_settings in self._schema[const.DISC_VALUES].items():
if self._values[name] is None and not disc_settings.get(
const.DISC_OPTIONAL
):
return
# We have all the required values, so create the entity.
component = self._schema[const.DISC_COMPONENT]
_LOGGER.debug(
"Adding Node_id=%s Generic_command_class=%s, "
"Specific_command_class=%s, "
"Command_class=%s, Index=%s, Value type=%s, "
"Genre=%s as %s",
self._node.node_id,
self._node.node_generic,
self._node.node_specific,
self.primary.command_class,
self.primary.index,
self.primary.type,
self.primary.genre,
component,
)
self._entity_created = True
if component in PLATFORMS:
async_dispatcher_send(self._hass, f"{DOMAIN}_new_{component}", self)
@property
def values_id(self):
"""Identification for this values collection."""
return create_value_id(self.primary)
class ZWaveDeviceEntity(Entity):
"""Generic Entity Class for a Z-Wave Device."""
def __init__(self, values):
"""Initialize a generic Z-Wave device entity."""
self.values = values
self.options = values.options
@callback
def on_value_update(self):
"""Call when a value is added/updated in the entity EntityValues Collection.
To be overridden by platforms needing this event.
"""
async def async_added_to_hass(self):
"""Call when entity is added."""
# Add dispatcher and OZW listeners callbacks.
# Add to on_remove so they will be cleaned up on entity removal.
self.async_on_remove(
self.options.listen(EVENT_VALUE_CHANGED, self._value_changed)
)
self.async_on_remove(
self.options.listen(EVENT_INSTANCE_STATUS_CHANGED, self._instance_updated)
)
self.async_on_remove(
async_dispatcher_connect(
self.hass, const.SIGNAL_DELETE_ENTITY, self._delete_callback
)
)
self.async_on_remove(
async_dispatcher_connect(
self.hass,
f"{DOMAIN}_{self.values.values_id}_value_added",
self._value_added,
)
)
@property
def device_info(self) -> DeviceInfo:
"""Return device information for the device registry."""
node = self.values.primary.node
node_instance = self.values.primary.instance
dev_id = create_device_id(node, self.values.primary.instance)
node_firmware = node.get_value(
CommandClass.VERSION, ValueIndex.VERSION_APPLICATION
)
device_info = DeviceInfo(
identifiers={(DOMAIN, dev_id)},
name=create_device_name(node),
manufacturer=node.node_manufacturer_name,
model=node.node_product_name,
)
if node_firmware is not None:
device_info[ATTR_SW_VERSION] = node_firmware.value
# device with multiple instances is split up into virtual devices for each instance
if node_instance > 1:
parent_dev_id = create_device_id(node)
device_info[ATTR_NAME] += f" - Instance {node_instance}"
device_info[ATTR_VIA_DEVICE] = (DOMAIN, parent_dev_id)
return device_info
@property
def extra_state_attributes(self):
"""Return the device specific state attributes."""
return {const.ATTR_NODE_ID: self.values.primary.node.node_id}
@property
def name(self):
"""Return the name of the entity."""
node = self.values.primary.node
return f"{create_device_name(node)}: {self.values.primary.label}"
@property
def unique_id(self):
"""Return the unique_id of the entity."""
return self.values.values_id
@property
def available(self) -> bool:
"""Return entity availability."""
# Use OZW Daemon status for availability.
instance_status = self.values.primary.ozw_instance.get_status()
return instance_status and instance_status.status in OZW_READY_STATES_VALUES
@callback
def _value_changed(self, value):
"""Call when a value from ZWaveDeviceEntityValues is changed.
Should not be overridden by subclasses.
"""
if value.value_id_key in (v.value_id_key for v in self.values if v):
self.on_value_update()
self.async_write_ha_state()
@callback
def _value_added(self):
"""Call when a value from ZWaveDeviceEntityValues is added.
Should not be overridden by subclasses.
"""
self.on_value_update()
@callback
def _instance_updated(self, new_status):
"""Call when the instance status changes.
Should not be overridden by subclasses.
"""
self.on_value_update()
self.async_write_ha_state()
@property
def should_poll(self):
"""No polling needed."""
return False
async def _delete_callback(self, values_id):
"""Remove this entity."""
if not self.values:
return # race condition: delete already requested
if values_id == self.values.values_id:
await self.async_remove(force_remove=True)
def create_device_name(node: OZWNode):
"""Generate sensible (short) default device name from a OZWNode."""
# Prefer custom name set by OZWAdmin if present
if node.node_name:
return node.node_name
    # Prefer a short device name from metadata if present
if node.meta_data and node.meta_data.get("Name"):
return node.meta_data["Name"]
    # Fall back to product name or device type strings
if node.node_product_name:
return node.node_product_name
if node.node_device_type_string:
return node.node_device_type_string
if node.node_specific_string:
return node.node_specific_string
# Last resort: use Node id (should never happen, but just in case)
return f"Node {node.id}"
def create_device_id(node: OZWNode, node_instance: int = 1):
"""Generate unique device_id from a OZWNode."""
ozw_instance = node.parent.id
dev_id = f"{ozw_instance}.{node.node_id}.{node_instance}"
return dev_id
def create_value_id(value: OZWValue):
"""Generate unique value_id from an OZWValue."""
# [OZW_INSTANCE_ID]-[NODE_ID]-[VALUE_ID_KEY]
return f"{value.node.parent.id}-{value.node.id}-{value.value_id_key}"
| 35.131148
| 91
| 0.650863
|
import copy
import logging
from openzwavemqtt.const import (
EVENT_INSTANCE_STATUS_CHANGED,
EVENT_VALUE_CHANGED,
OZW_READY_STATES,
CommandClass,
ValueIndex,
)
from openzwavemqtt.models.node import OZWNode
from openzwavemqtt.models.value import OZWValue
from homeassistant.const import ATTR_NAME, ATTR_SW_VERSION, ATTR_VIA_DEVICE
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import DeviceInfo, Entity
from . import const
from .const import DOMAIN, PLATFORMS
from .discovery import check_node_schema, check_value_schema
_LOGGER = logging.getLogger(__name__)
OZW_READY_STATES_VALUES = {st.value for st in OZW_READY_STATES}
class ZWaveDeviceEntityValues:
def __init__(self, hass, options, schema, primary_value):
self._hass = hass
self._entity_created = False
self._schema = copy.deepcopy(schema)
self._values = {}
self.options = options
for name, disc_settings in self._schema[const.DISC_VALUES].items():
self._values[name] = None
disc_settings[const.DISC_INSTANCE] = (primary_value.instance,)
self._values[const.DISC_PRIMARY] = primary_value
self._node = primary_value.node
self._schema[const.DISC_NODE_ID] = [self._node.node_id]
def async_setup(self):
for value in self._node.values():
self.async_check_value(value)
self._async_check_entity_ready()
def __getattr__(self, name):
return self._values.get(name, None)
def __iter__(self):
return iter(self._values.values())
def __contains__(self, name):
return name in self._values
@callback
def async_check_value(self, value):
if not check_node_schema(value.node, self._schema):
return
for name, name_value in self._values.items():
if name_value is not None:
continue
# Skip if the value doesn't match the schema.
if not check_value_schema(value, self._schema[const.DISC_VALUES][name]):
continue
self._values[name] = value
if self._entity_created:
async_dispatcher_send(
self._hass, f"{DOMAIN}_{self.values_id}_value_added"
)
self._async_check_entity_ready()
@callback
def _async_check_entity_ready(self):
if self._entity_created:
return
for name, disc_settings in self._schema[const.DISC_VALUES].items():
if self._values[name] is None and not disc_settings.get(
const.DISC_OPTIONAL
):
return
component = self._schema[const.DISC_COMPONENT]
_LOGGER.debug(
"Adding Node_id=%s Generic_command_class=%s, "
"Specific_command_class=%s, "
"Command_class=%s, Index=%s, Value type=%s, "
"Genre=%s as %s",
self._node.node_id,
self._node.node_generic,
self._node.node_specific,
self.primary.command_class,
self.primary.index,
self.primary.type,
self.primary.genre,
component,
)
self._entity_created = True
if component in PLATFORMS:
async_dispatcher_send(self._hass, f"{DOMAIN}_new_{component}", self)
@property
def values_id(self):
return create_value_id(self.primary)
class ZWaveDeviceEntity(Entity):
def __init__(self, values):
self.values = values
self.options = values.options
@callback
    def on_value_update(self):
        pass
async def async_added_to_hass(self):
self.async_on_remove(
self.options.listen(EVENT_VALUE_CHANGED, self._value_changed)
)
self.async_on_remove(
self.options.listen(EVENT_INSTANCE_STATUS_CHANGED, self._instance_updated)
)
self.async_on_remove(
async_dispatcher_connect(
self.hass, const.SIGNAL_DELETE_ENTITY, self._delete_callback
)
)
self.async_on_remove(
async_dispatcher_connect(
self.hass,
f"{DOMAIN}_{self.values.values_id}_value_added",
self._value_added,
)
)
@property
def device_info(self) -> DeviceInfo:
node = self.values.primary.node
node_instance = self.values.primary.instance
dev_id = create_device_id(node, self.values.primary.instance)
node_firmware = node.get_value(
CommandClass.VERSION, ValueIndex.VERSION_APPLICATION
)
device_info = DeviceInfo(
identifiers={(DOMAIN, dev_id)},
name=create_device_name(node),
manufacturer=node.node_manufacturer_name,
model=node.node_product_name,
)
if node_firmware is not None:
device_info[ATTR_SW_VERSION] = node_firmware.value
if node_instance > 1:
parent_dev_id = create_device_id(node)
device_info[ATTR_NAME] += f" - Instance {node_instance}"
device_info[ATTR_VIA_DEVICE] = (DOMAIN, parent_dev_id)
return device_info
@property
def extra_state_attributes(self):
return {const.ATTR_NODE_ID: self.values.primary.node.node_id}
@property
def name(self):
node = self.values.primary.node
return f"{create_device_name(node)}: {self.values.primary.label}"
@property
def unique_id(self):
return self.values.values_id
@property
def available(self) -> bool:
instance_status = self.values.primary.ozw_instance.get_status()
return instance_status and instance_status.status in OZW_READY_STATES_VALUES
@callback
def _value_changed(self, value):
if value.value_id_key in (v.value_id_key for v in self.values if v):
self.on_value_update()
self.async_write_ha_state()
@callback
def _value_added(self):
self.on_value_update()
@callback
def _instance_updated(self, new_status):
self.on_value_update()
self.async_write_ha_state()
@property
def should_poll(self):
return False
async def _delete_callback(self, values_id):
if not self.values:
return
if values_id == self.values.values_id:
await self.async_remove(force_remove=True)
def create_device_name(node: OZWNode):
if node.node_name:
return node.node_name
if node.meta_data and node.meta_data.get("Name"):
return node.meta_data["Name"]
if node.node_product_name:
return node.node_product_name
if node.node_device_type_string:
return node.node_device_type_string
if node.node_specific_string:
return node.node_specific_string
return f"Node {node.id}"
def create_device_id(node: OZWNode, node_instance: int = 1):
ozw_instance = node.parent.id
dev_id = f"{ozw_instance}.{node.node_id}.{node_instance}"
return dev_id
def create_value_id(value: OZWValue):
return f"{value.node.parent.id}-{value.node.id}-{value.value_id_key}"
| true
| true
|
790b60f8512838b13340af7d96cdcfece2d4199b
| 214
|
py
|
Python
|
cflearn_deploy/constants.py
|
carefree0910/carefree-learn-deploy
|
53fa62d2bac52e396ed16e82ab0210459b6f13f0
|
[
"MIT"
] | 1
|
2021-09-02T09:35:53.000Z
|
2021-09-02T09:35:53.000Z
|
cflearn_deploy/constants.py
|
carefree0910/carefree-learn-deploy
|
53fa62d2bac52e396ed16e82ab0210459b6f13f0
|
[
"MIT"
] | null | null | null |
cflearn_deploy/constants.py
|
carefree0910/carefree-learn-deploy
|
53fa62d2bac52e396ed16e82ab0210459b6f13f0
|
[
"MIT"
] | null | null | null |
INFO_PREFIX = "> [ info ] "
ERROR_PREFIX = "> [warning] "
WARNING_PREFIX = "> [ error ] "
INPUT_KEY = "input"
LATENT_KEY = "latent"
PREDICTIONS_KEY = "predictions"
LABEL_KEY = "labels"
SQLITE_FILE = "sqlite.db"
| 19.454545
| 31
| 0.668224
|
INFO_PREFIX = "> [ info ] "
ERROR_PREFIX = "> [warning] "
WARNING_PREFIX = "> [ error ] "
INPUT_KEY = "input"
LATENT_KEY = "latent"
PREDICTIONS_KEY = "predictions"
LABEL_KEY = "labels"
SQLITE_FILE = "sqlite.db"
| true
| true
|
790b638ddaf648a03ee15af7ba55ba7a69da742a
| 3,311
|
py
|
Python
|
feedputter.py
|
amake/puttools-py
|
7c03356ba0bac59697c921ff9447f12be07390aa
|
[
"MIT"
] | null | null | null |
feedputter.py
|
amake/puttools-py
|
7c03356ba0bac59697c921ff9447f12be07390aa
|
[
"MIT"
] | null | null | null |
feedputter.py
|
amake/puttools-py
|
7c03356ba0bac59697c921ff9447f12be07390aa
|
[
"MIT"
] | null | null | null |
'''
Created on 2012/09/03
@author: amake
'''
from __future__ import print_function
import os
import sys
import urllib
import codecs
from datetime import datetime
from xml.etree import ElementTree
import putio
CACHE_FILE = "cache.txt"
FEEDS_FILE = "feeds.txt"
DEBUG = True
PUTIOAPI = None
# Stupid CloudFlare decided to block "non-standard" browsers.
# Spoofing the user-agent gets around it.
class CustomURLopener(urllib.FancyURLopener):
    version = ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_5) '
               'AppleWebKit/536.26.17 (KHTML like Gecko) Version/6.0.2 Safari/536.26.17')
urllib._urlopener = CustomURLopener()
def log(message):
if DEBUG:
print(message.encode('utf-8'))
class feedputter():
'''
Grab torrent files from an RSS feed.
'''
def __init__(self, feed):
'''
Constructor
'''
self.feed = feed
self.cache = []
if os.path.isfile(CACHE_FILE):
self.cache = [line.strip() for line in codecs.open(
CACHE_FILE, 'r', 'utf-8').readlines()]
def __get_items(self):
log("Fetching feed from: %s" % self.feed)
data = urllib.urlopen(self.feed).read()
tree = ElementTree.fromstring(data)
return tree.findall(".//item")
def save_torrent(self, link, target, title):
torrent = urllib.urlopen(link)
        if torrent.getcode() != 200:
            log("Error " + str(torrent.getcode()))
            return False
        with open(os.path.join(target, title + ".torrent"), "wb") as out:
out.write(torrent.read())
return True
def putio(self, link, target, title):
api = putio.get_api(target_folder=target)
try:
api.add(link, putio.CALLBACK_URL + '?amk_type=tv')
except Exception as e:
print(e)
print('Skipping.')
return False
return True
def get_to(self, target, method):
'''
Fetch linked torrents and save to the specified output folder.
'''
for item in self.__get_items():
title = item.find('title').text.strip()
link = item.find('link').text
log("Found " + title)
if title in self.cache:
log("Already gotten. Skipping.")
continue
log("Getting ... ")
if not method(link, target, title):
continue
with codecs.open(CACHE_FILE, "a", "utf-8") as tmp:
tmp.write(title + "\n")
log("Done")
def usage():
print('Usage: {0} TARGET_DIR'.format(os.path.basename(__file__)))
def main():
if len(sys.argv) < 2:
usage()
sys.exit(1)
if not os.path.isdir(sys.argv[1]):
print('Directory not found or not a directory:', sys.argv[1])
print()
usage()
sys.exit(1)
os.chdir(os.path.dirname(__file__))
feeds = [line.strip() for line in open(FEEDS_FILE).readlines()]
log(datetime.now().isoformat(" ") +
" Starting feedputter with {0} feeds".format(len(feeds)))
for feed in feeds:
getter = feedputter(feed)
getter.get_to(sys.argv[1], getter.putio)
log(datetime.now().isoformat(" ") + " Finished feedputter")
if __name__ == "__main__":
main()
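# ---------------------------------------------------------------------------
# Editorial sketch: FEEDS_FILE is read as one RSS feed URL per line, and the
# script takes the torrent output directory as its only argument. A
# hypothetical invocation (URLs and paths are placeholders):
#
#   $ cat feeds.txt
#   https://example.com/rss/show-a.xml
#   https://example.com/rss/show-b.xml
#
#   $ python feedputter.py /path/to/watch-dir
# ---------------------------------------------------------------------------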
| 22.52381
| 77
| 0.578677
|
from __future__ import print_function
import os
import sys
import urllib
import codecs
from datetime import datetime
from xml.etree import ElementTree
import putio
CACHE_FILE = "cache.txt"
FEEDS_FILE = "feeds.txt"
DEBUG = True
PUTIOAPI = None
class CustomURLopener(urllib.FancyURLopener):
    version = ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_5) '
               'AppleWebKit/536.26.17 (KHTML like Gecko) Version/6.0.2 Safari/536.26.17')
urllib._urlopener = CustomURLopener()
def log(message):
if DEBUG:
print(message.encode('utf-8'))
class feedputter():
def __init__(self, feed):
self.feed = feed
self.cache = []
if os.path.isfile(CACHE_FILE):
self.cache = [line.strip() for line in codecs.open(
CACHE_FILE, 'r', 'utf-8').readlines()]
def __get_items(self):
log("Fetching feed from: %s" % self.feed)
data = urllib.urlopen(self.feed).read()
tree = ElementTree.fromstring(data)
return tree.findall(".//item")
def save_torrent(self, link, target, title):
torrent = urllib.urlopen(link)
        if torrent.getcode() != 200:
            log("Error " + str(torrent.getcode()))
            return False
        with open(os.path.join(target, title + ".torrent"), "wb") as out:
out.write(torrent.read())
return True
def putio(self, link, target, title):
api = putio.get_api(target_folder=target)
try:
api.add(link, putio.CALLBACK_URL + '?amk_type=tv')
except Exception as e:
print(e)
print('Skipping.')
return False
return True
def get_to(self, target, method):
for item in self.__get_items():
title = item.find('title').text.strip()
link = item.find('link').text
log("Found " + title)
if title in self.cache:
log("Already gotten. Skipping.")
continue
log("Getting ... ")
if not method(link, target, title):
continue
with codecs.open(CACHE_FILE, "a", "utf-8") as tmp:
tmp.write(title + "\n")
log("Done")
def usage():
print('Usage: {0} TARGET_DIR'.format(os.path.basename(__file__)))
def main():
if len(sys.argv) < 2:
usage()
sys.exit(1)
if not os.path.isdir(sys.argv[1]):
print('Directory not found or not a directory:', sys.argv[1])
print()
usage()
sys.exit(1)
os.chdir(os.path.dirname(__file__))
feeds = [line.strip() for line in open(FEEDS_FILE).readlines()]
log(datetime.now().isoformat(" ") +
" Starting feedputter with {0} feeds".format(len(feeds)))
for feed in feeds:
getter = feedputter(feed)
getter.get_to(sys.argv[1], getter.putio)
log(datetime.now().isoformat(" ") + " Finished feedputter")
if __name__ == "__main__":
main()
| true
| true
|
790b64737ed269486552427906fc8ec51b4f21dd
| 19,796
|
py
|
Python
|
pecos/qeccs/surface_medial_4444/instructions.py
|
quantum-pecos/PECOS
|
44bc614a9152f3b316bacef6ca034f6a8a611293
|
[
"Apache-2.0"
] | 15
|
2019-04-11T16:02:38.000Z
|
2022-03-15T16:56:36.000Z
|
pecos/qeccs/surface_medial_4444/instructions.py
|
quantum-pecos/PECOS
|
44bc614a9152f3b316bacef6ca034f6a8a611293
|
[
"Apache-2.0"
] | 4
|
2018-10-04T19:30:09.000Z
|
2019-03-12T19:00:34.000Z
|
pecos/qeccs/surface_medial_4444/instructions.py
|
quantum-pecos/PECOS
|
44bc614a9152f3b316bacef6ca034f6a8a611293
|
[
"Apache-2.0"
] | 3
|
2020-10-07T16:47:16.000Z
|
2022-02-01T05:34:54.000Z
|
# ========================================================================= #
# Copyright 2018 National Technology & Engineering Solutions of Sandia,
# LLC (NTESS). Under the terms of Contract DE-NA0003525 with NTESS,
# the U.S. Government retains certain rights in this software.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================= #
from ..instruction_parent_class import LogicalInstruction
from ...circuits.quantum_circuit import QuantumCircuit
from ..helper_functions import pos2qudit
class InstrSynExtraction(LogicalInstruction):
"""
Instruction for a round of syndrome extraction.
Parent class sets self.qecc.
"""
def __init__(self, qecc, symbol, **gate_params):
super().__init__(qecc, symbol, **gate_params)
qecc_init_ticks = qecc.qecc_params.get('init_ticks', 0)
qecc_meas_ticks = qecc.qecc_params.get('meas_ticks', 7)
qecc_x_ticks = qecc.qecc_params.get('x_ticks', [2, 4, 3, 5])
qecc_z_ticks = qecc.qecc_params.get('z_ticks', [2, 4, 3, 5])
self.init_ticks = gate_params.get('init_ticks', qecc_init_ticks)
self.meas_ticks = gate_params.get('meas_ticks', qecc_meas_ticks)
self.x_ticks = gate_params.get('x_ticks', qecc_x_ticks)
self.z_ticks = gate_params.get('z_ticks', qecc_z_ticks)
self.abstract_circuit = QuantumCircuit(**gate_params)
self.data_qudit_set = self.qecc.data_qudit_set
self.ancilla_qudit_set = self.qecc.ancilla_qudit_set
self.ancilla_x_check = set([])
self.ancilla_z_check = set([])
        # Go through the ancillas and grab the data qubits on either side of each.
layout = qecc.layout # qudit_id => (x, y)
self.pos2qudit = pos2qudit(layout)
for q, (x, y) in layout.items():
if x % 2 == 0 and y % 2 == 0:
# Ancilla
if x % 4 == y % 4:
# X check
self._create_x_check(q, x, y)
else:
# Z check
self._create_z_check(q, x, y)
# Determine the logical operations
# --------------------------------
z_qudits = set(qecc.sides['top'])
x_qudits = set(qecc.sides['left'])
logical_ops = [ # Each element in the list corresponds to a logical qubit
# The keys label the type of logical operator
{'X': QuantumCircuit([{'X': x_qudits}]), 'Z': QuantumCircuit([{'Z': z_qudits}])},
]
self.initial_logical_ops = logical_ops
logical_ops = [ # Each element in the list corresponds to a logical qubit
# The keys label the type of logical operator
{'X': QuantumCircuit([{'X': x_qudits}]), 'Z': QuantumCircuit([{'Z': z_qudits}])},
]
self.final_logical_ops = logical_ops
self.logical_signs = None
self.logical_stabilizers = None
        # Must be called at the end of initialization.
self._compile_circuit(self.abstract_circuit)
self._stabs_destabs = {}
def _create_x_check(self, ancilla, x, y):
"""
Creates X-checks for circuit_extended.
"""
# register the x syndrome ancillas
self.ancilla_x_check.add(ancilla)
        # get the positions where the data qubits should be relative to the ancilla
data_pos = self._data_pos_x_check(x, y)
# Get the actual, available data-qubits and their ticks that correspond to the possible data qubit positions
datas, my_data_ticks = self._find_data(position_to_qudit=self.pos2qudit, positions=data_pos,
ticks=self.x_ticks)
# Now add the check to the extended circuit
locations = set(datas)
locations.add(ancilla)
self.abstract_circuit.append('X check', locations=locations, datas=datas, ancillas=ancilla,
ancilla_ticks=self.init_ticks, data_ticks=my_data_ticks,
meas_ticks=self.meas_ticks)
def _create_z_check(self, ancilla, x, y):
"""
Creates Z-checks for circuit_extended.
"""
# register the z syndrome ancillas
self.ancilla_z_check.add(ancilla)
        # get the positions where the data qubits should be relative to the ancilla
data_pos = self._data_pos_z_check(x, y)
# Get the actual, available data-qubits and their ticks that correspond to the possible data qubit positions
datas, my_data_ticks = self._find_data(position_to_qudit=self.pos2qudit, positions=data_pos,
ticks=self.z_ticks)
# Now add the check to the extended circuit
locations = set(datas)
locations.add(ancilla)
self.abstract_circuit.append('Z check', locations=locations, datas=datas, ancillas=ancilla,
ancilla_ticks=self.init_ticks, data_ticks=my_data_ticks,
meas_ticks=self.meas_ticks)
@staticmethod
def _find_data(position_to_qudit, positions, ticks):
"""
        From the candidate data-qudit positions, collect the qudits that
        actually exist, together with their corresponding ticks.
:param position_to_qudit:
:param positions:
:param ticks:
:return:
"""
data_list = []
tick_list = []
for i, p in enumerate(positions):
data = position_to_qudit.get(p, None)
if data is not None:
data_list.append(data)
tick_list.append(ticks[i])
return data_list, tick_list
@staticmethod
def _data_pos_z_check(x, y):
"""
Determines the position of data qudits in a Z check in order of ticks.
        Check direction:  1 | 2
                            |
                         ---+---
                            |
                          3 | 4
"""
data_pos = [
(x - 1, y + 1),
(x + 1, y + 1),
(x - 1, y - 1),
(x + 1, y - 1)
]
return data_pos
@staticmethod
def _data_pos_x_check(x, y):
"""
        Determines the position of data qudits in an X check in order of ticks.
        Check direction:  1 | 3
                            |
                         ---+---
                            |
                          2 | 4
"""
data_pos = [
(x - 1, y + 1),
(x - 1, y - 1),
(x + 1, y + 1),
(x + 1, y - 1)
]
return data_pos
@property
def stabs_destabs(self):
if self._stabs_destabs:
return self._stabs_destabs
if self.qecc.height != self.qecc.width:
raise Exception('This currently only works for square code blocks.')
# instr = self.instruction('instr_syn_extract')
instr = self
stabs_row_x = []
stabs_row_z = []
destabs_row_x = []
destabs_row_z = []
for a in self.ancilla_qudit_set:
stabs_row_z.append({a})
stabs_row_x.append(set([]))
destabs_row_x.append({a})
destabs_row_z.append(set([]))
xdestabs = self.generate_xdestabs()
zdestabs = self.generate_zdestabs()
# Creating stabilizers
for check_type, _, params in instr.abstract_circuit.items():
if check_type == 'X check':
# Ancillas initialized in |0>
# Pauli X-type stabilizers
stabs_row_x.append(set(params['datas']))
stabs_row_z.append(set([]))
destabs_row_x.append(set([]))
destabs_row_z.append(zdestabs[params['ancillas']])
else:
# Ancillas initialized in |0>
# Pauli Z-type stabilizers
stabs_row_z.append(set(params['datas']))
stabs_row_x.append(set([]))
destabs_row_z.append(set([]))
destabs_row_x.append(xdestabs[params['ancillas']])
output_dict = {
'stabs_x': stabs_row_x,
'stabs_z': stabs_row_z,
'destabs_x': destabs_row_x,
'destabs_z': destabs_row_z,
}
self._stabs_destabs = output_dict
return output_dict
def generate_xdestabs(self):
distance = self.qecc.distance
# x-type destabilizers
xdestabs_temp = []
        # going along the bottom
if distance % 2 == 0:
b = 1
else:
b = 2
for x in range(b, distance, 2):
temp = []
y = distance - 1
for j in range(0, distance):
new_point = (x + j, y - j)
if new_point[1] <= 0:
break
if new_point[0] > distance - 1:
break
temp.append(new_point)
xdestabs_temp.append(temp)
# ----------------
xdestabs = []
for ds in xdestabs_temp:
for i in range(len(ds)):
temp = []
for j in range(i + 1):
# print('-', i, j)
temp.append(ds[j])
xdestabs.append(temp)
# -----------------
# ladder climb
ladder = []
x = 0
for y in range(distance - 1, 0, -1):
ladder.append((x, y))
for i in range(len(ladder)):
xdestabs.append(ladder[:i + 1])
ladder_points = []
for i in range((distance + 1) % 2, distance - 1, 2):
ladder_points.append(i)
ladder_temp = []
for i in ladder_points:
temp = list(ladder[:i + 1])
x, y = ladder[i]
for j in range(1, distance):
if j != 1:
temp = list(ladder_temp[-1])
new_point = (x + j, y - j)
if new_point[1] <= 0:
break
if new_point[0] >= distance - 1:
break
temp.append(new_point)
ladder_temp.append(temp)
xdestabs.extend(ladder_temp)
set_destabs = {}
relayout = {v: k for k, v in self.qecc.layout.items()}
for d in xdestabs:
row = set([])
# Find the associated ancilla location
x, y = d[-1]
a = relayout[(2 * x + 1 + 1, 2 * y + 1 - 1)]
if a in self.ancilla_x_check:
a = relayout[(2 * x - 1 + 1, 2 * y + 1 - 1)]
for x, y in d:
row.add(relayout[(2 * x + 1, 2 * y + 1)])
set_destabs[a] = set(row)
return set_destabs
def generate_zdestabs(self):
distance = self.qecc.distance
        # z-type destabilizers
        zdestabs_temp = []
        # going along the bottom
if distance % 2 == 0:
b = 2
else:
b = 1
for y in range(b, distance, 2):
temp = []
x = distance - 1
for j in range(0, distance):
new_point = (x - j, y + j)
if new_point[0] <= 0:
break
if new_point[1] > distance - 1:
break
temp.append(new_point)
# print(x, y)
zdestabs_temp.append(temp)
# ----------------
zdestabs = []
for ds in zdestabs_temp:
for i in range(len(ds)):
temp = []
for j in range(i + 1):
# print('-', i, j)
temp.append(ds[j])
zdestabs.append(temp)
# -----------------
# ladder climb
ladder = []
y = 0
for x in range(distance - 1, 0, -1):
ladder.append((x, y))
for i in range(len(ladder)):
zdestabs.append(ladder[:i + 1])
ladder_points = []
for i in range(distance % 2, distance - 1, 2):
ladder_points.append(i)
ladder_temp = []
for i in ladder_points:
temp = list(ladder[:i + 1])
x, y = ladder[i]
for j in range(1, distance):
if j != 1:
temp = list(ladder_temp[-1])
new_point = (x - j, y + j)
if new_point[0] <= 0:
break
if new_point[1] >= distance - 1:
break
temp.append(new_point)
ladder_temp.append(temp)
zdestabs.extend(ladder_temp)
set_destabs = {}
relayout = {v: k for k, v in self.qecc.layout.items()}
for d in zdestabs:
row = set([])
# Find the associated ancilla location
x, y = d[-1]
a = relayout[(2 * x + 1 - 1, 2 * y + 1 + 1)]
if a in self.ancilla_z_check:
a = relayout[(2 * x + 1 - 1, 2 * y + 1 - 1)]
for x, y in d:
row.add(relayout[(2 * x + 1, 2 * y + 1)])
set_destabs[a] = row
return set_destabs
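# Editor's illustration (hypothetical addition, not part of the original
# module): the static position helpers above list candidate data-qudit
# coordinates around an ancilla in tick order; positions that fall outside
# the layout are filtered out later by `_find_data`. For an ancilla at
# (x, y) = (2, 2):
assert InstrSynExtraction._data_pos_z_check(2, 2) == [(1, 3), (3, 3), (1, 1), (3, 1)]
assert InstrSynExtraction._data_pos_x_check(2, 2) == [(1, 3), (1, 1), (3, 3), (3, 1)]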
class InstrInitZero(LogicalInstruction):
"""
Instruction for initializing a logical zero.
It is just like syndrome extraction except the data qubits are initialized in the zero state at tick = 0.
    `ideal_meas` == True will cause the measurements to be replaced with ideal measurements.
Parent class sets self.qecc.
"""
def __init__(self, qecc, symbol, **gate_params):
super().__init__(qecc, symbol, **gate_params)
self.symbol = 'instr_init_zero'
self.data_qudit_set = self.qecc.data_qudit_set
self.ancilla_qudit_set = self.qecc.ancilla_qudit_set
        # This is basically a syndrome extraction round where all the data qubits are initialized to zero.
syn_ext = qecc.instruction('instr_syn_extract', **gate_params)
# Make a shallow copy of the abstract circuits.
self.abstract_circuit = syn_ext.abstract_circuit.copy()
self.abstract_circuit.params.update(gate_params)
self.ancilla_x_check = syn_ext.ancilla_x_check
self.ancilla_z_check = syn_ext.ancilla_z_check
data_qudits = syn_ext.data_qudit_set
self.abstract_circuit.append('init |0>', locations=data_qudits, tick=0)
self.initial_logical_ops = [ # Each element in the list corresponds to a logical qubit
# The keys label the type of logical operator
{'X': None, 'Z': None}, # None => can be anything
]
# Special for state initialization:
# ---------------------------------
# list of tuples of logical check and delogical stabilizer for each logical qudit.
self.final_logical_ops = [
{'Z': QuantumCircuit([{'Z': set(qecc.sides['top'])}]), 'X': QuantumCircuit([{'X': set(qecc.sides['left'])}])}
]
        # List of corresponding logical signs. (The logical sign if the instruction is performed ideally.)
self.logical_signs = [0]
self.logical_stabilizers = ['Z']
# ---------------------------------
        # Must be called at the end of initialization.
self._compile_circuit(self.abstract_circuit)
self._stabs_destabs = {}
@property
def stabs_destabs(self):
if self._stabs_destabs:
return self._stabs_destabs
gate_params = self.gate_params
syn_ext = self.qecc.instruction('instr_syn_extract', **gate_params)
for name, rows in syn_ext.stabs_destabs.items():
self._stabs_destabs[name] = []
for row in rows:
self._stabs_destabs[name].append(set(row))
# |0> -> logical Z is a stabilizer
self._stabs_destabs['stabs_z'].append(set(self.qecc.sides['top']))
self._stabs_destabs['stabs_x'].append(set([]))
self._stabs_destabs['destabs_x'].append(set(self.qecc.sides['left']))
self._stabs_destabs['destabs_z'].append(set([]))
return self._stabs_destabs
class InstrInitPlus(LogicalInstruction):
"""
Instruction for initializing a logical plus.
It is just like syndrome extraction except the data qubits are initialized in the plus state at tick = 0.
    `ideal_meas` == True will cause the measurements to be replaced with ideal measurements.
Parent class sets self.qecc.
"""
def __init__(self, qecc, symbol, **gate_params):
super().__init__(qecc, symbol, **gate_params)
self.symbol = 'instr_init_plus'
self.data_qudit_set = self.qecc.data_qudit_set
self.ancilla_qudit_set = self.qecc.ancilla_qudit_set
        # This is basically a syndrome extraction round where all the data qubits are initialized to plus.
syn_ext = qecc.instruction('instr_syn_extract', **gate_params)
# Make a shallow copy of the abstract circuits.
self.abstract_circuit = syn_ext.abstract_circuit.copy()
self.abstract_circuit.params.update(gate_params)
self.ancilla_x_check = syn_ext.ancilla_x_check
self.ancilla_z_check = syn_ext.ancilla_z_check
data_qudits = syn_ext.data_qudit_set
# self.abstract_circuit.append('init |+>', qudits=data_qudits, tick=0)
self.abstract_circuit.append('init |0>', locations=data_qudits, tick=0)
self.abstract_circuit.append('H', locations=data_qudits, tick=1)
self.initial_logical_ops = [ # Each element in the list corresponds to a logical qubit
# The keys label the type of logical operator
{'X': None, 'Z': None}, # None => can be anything
]
# Special for state initialization:
# ---------------------------------
# list of tuples of logical check and delogical stabilizer for each logical qudit.
self.final_logical_ops = [
{'X': QuantumCircuit([{'X': set(qecc.sides['left'])}]), 'Z': QuantumCircuit([{'Z': set(qecc.sides['top'])}])}
]
        # List of corresponding logical signs. (The logical sign if the instruction is performed ideally.)
self.logical_signs = [0]
self.logical_stabilizers = ['X']
# ---------------------------------
        # Must be called at the end of initialization.
self._compile_circuit(self.abstract_circuit)
self._stabs_destabs = {}
@property
def stabs_destabs(self):
if self._stabs_destabs:
return self._stabs_destabs
gate_params = self.gate_params
syn_ext = self.qecc.instruction('instr_syn_extract', **gate_params)
for name, rows in syn_ext.stabs_destabs.items():
self._stabs_destabs[name] = []
for row in rows:
self._stabs_destabs[name].append(set(row))
        # |+> -> logical X is a stabilizer
        self._stabs_destabs['stabs_x'].append(set(self.qecc.sides['left']))
        self._stabs_destabs['stabs_z'].append(set([]))
        self._stabs_destabs['destabs_z'].append(set(self.qecc.sides['top']))
        # append to 'destabs_x' (not 'stabs_x' a second time) so that all
        # four row lists stay the same length
        self._stabs_destabs['destabs_x'].append(set([]))
return self._stabs_destabs
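For context, the usual entry point into these instruction classes goes through the QECC object rather than direct construction. A heavily hedged sketch (class and argument names follow PECOS's published examples and may differ by version; this is not code from this file):

# Hypothetical usage sketch:
#   import pecos
#   qecc = pecos.qeccs.SurfaceMedial4444(distance=3)
#   syn_ext = qecc.instruction('instr_syn_extract')  # InstrSynExtraction
#   rows = syn_ext.stabs_destabs                     # stabilizer/destabilizer rows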
| 32.293638
| 121
| 0.547181
|
from ..instruction_parent_class import LogicalInstruction
from ...circuits.quantum_circuit import QuantumCircuit
from ..helper_functions import pos2qudit
class InstrSynExtraction(LogicalInstruction):
def __init__(self, qecc, symbol, **gate_params):
super().__init__(qecc, symbol, **gate_params)
qecc_init_ticks = qecc.qecc_params.get('init_ticks', 0)
qecc_meas_ticks = qecc.qecc_params.get('meas_ticks', 7)
qecc_x_ticks = qecc.qecc_params.get('x_ticks', [2, 4, 3, 5])
qecc_z_ticks = qecc.qecc_params.get('z_ticks', [2, 4, 3, 5])
self.init_ticks = gate_params.get('init_ticks', qecc_init_ticks)
self.meas_ticks = gate_params.get('meas_ticks', qecc_meas_ticks)
self.x_ticks = gate_params.get('x_ticks', qecc_x_ticks)
self.z_ticks = gate_params.get('z_ticks', qecc_z_ticks)
self.abstract_circuit = QuantumCircuit(**gate_params)
self.data_qudit_set = self.qecc.data_qudit_set
self.ancilla_qudit_set = self.qecc.ancilla_qudit_set
self.ancilla_x_check = set([])
self.ancilla_z_check = set([])
layout = qecc.layout
self.pos2qudit = pos2qudit(layout)
for q, (x, y) in layout.items():
if x % 2 == 0 and y % 2 == 0:
if x % 4 == y % 4:
self._create_x_check(q, x, y)
else:
self._create_z_check(q, x, y)
z_qudits = set(qecc.sides['top'])
x_qudits = set(qecc.sides['left'])
logical_ops = [
{'X': QuantumCircuit([{'X': x_qudits}]), 'Z': QuantumCircuit([{'Z': z_qudits}])},
]
self.initial_logical_ops = logical_ops
logical_ops = [
{'X': QuantumCircuit([{'X': x_qudits}]), 'Z': QuantumCircuit([{'Z': z_qudits}])},
]
self.final_logical_ops = logical_ops
self.logical_signs = None
self.logical_stabilizers = None
self._compile_circuit(self.abstract_circuit)
self._stabs_destabs = {}
def _create_x_check(self, ancilla, x, y):
self.ancilla_x_check.add(ancilla)
data_pos = self._data_pos_x_check(x, y)
datas, my_data_ticks = self._find_data(position_to_qudit=self.pos2qudit, positions=data_pos,
ticks=self.x_ticks)
locations = set(datas)
locations.add(ancilla)
self.abstract_circuit.append('X check', locations=locations, datas=datas, ancillas=ancilla,
ancilla_ticks=self.init_ticks, data_ticks=my_data_ticks,
meas_ticks=self.meas_ticks)
def _create_z_check(self, ancilla, x, y):
self.ancilla_z_check.add(ancilla)
data_pos = self._data_pos_z_check(x, y)
datas, my_data_ticks = self._find_data(position_to_qudit=self.pos2qudit, positions=data_pos,
ticks=self.z_ticks)
locations = set(datas)
locations.add(ancilla)
self.abstract_circuit.append('Z check', locations=locations, datas=datas, ancillas=ancilla,
ancilla_ticks=self.init_ticks, data_ticks=my_data_ticks,
meas_ticks=self.meas_ticks)
@staticmethod
def _find_data(position_to_qudit, positions, ticks):
data_list = []
tick_list = []
for i, p in enumerate(positions):
data = position_to_qudit.get(p, None)
if data is not None:
data_list.append(data)
tick_list.append(ticks[i])
return data_list, tick_list
@staticmethod
def _data_pos_z_check(x, y):
data_pos = [
(x - 1, y + 1),
(x + 1, y + 1),
(x - 1, y - 1),
(x + 1, y - 1)
]
return data_pos
@staticmethod
def _data_pos_x_check(x, y):
data_pos = [
(x - 1, y + 1),
(x - 1, y - 1),
(x + 1, y + 1),
(x + 1, y - 1)
]
return data_pos
@property
def stabs_destabs(self):
if self._stabs_destabs:
return self._stabs_destabs
if self.qecc.height != self.qecc.width:
raise Exception('This currently only works for square code blocks.')
instr = self
stabs_row_x = []
stabs_row_z = []
destabs_row_x = []
destabs_row_z = []
for a in self.ancilla_qudit_set:
stabs_row_z.append({a})
stabs_row_x.append(set([]))
destabs_row_x.append({a})
destabs_row_z.append(set([]))
xdestabs = self.generate_xdestabs()
zdestabs = self.generate_zdestabs()
for check_type, _, params in instr.abstract_circuit.items():
if check_type == 'X check':
stabs_row_x.append(set(params['datas']))
stabs_row_z.append(set([]))
destabs_row_x.append(set([]))
destabs_row_z.append(zdestabs[params['ancillas']])
else:
stabs_row_z.append(set(params['datas']))
stabs_row_x.append(set([]))
destabs_row_z.append(set([]))
destabs_row_x.append(xdestabs[params['ancillas']])
output_dict = {
'stabs_x': stabs_row_x,
'stabs_z': stabs_row_z,
'destabs_x': destabs_row_x,
'destabs_z': destabs_row_z,
}
self._stabs_destabs = output_dict
return output_dict
def generate_xdestabs(self):
distance = self.qecc.distance
xdestabs_temp = []
if distance % 2 == 0:
b = 1
else:
b = 2
for x in range(b, distance, 2):
temp = []
y = distance - 1
for j in range(0, distance):
new_point = (x + j, y - j)
if new_point[1] <= 0:
break
if new_point[0] > distance - 1:
break
temp.append(new_point)
xdestabs_temp.append(temp)
xdestabs = []
for ds in xdestabs_temp:
for i in range(len(ds)):
temp = []
for j in range(i + 1):
temp.append(ds[j])
xdestabs.append(temp)
ladder = []
x = 0
for y in range(distance - 1, 0, -1):
ladder.append((x, y))
for i in range(len(ladder)):
xdestabs.append(ladder[:i + 1])
ladder_points = []
for i in range((distance + 1) % 2, distance - 1, 2):
ladder_points.append(i)
ladder_temp = []
for i in ladder_points:
temp = list(ladder[:i + 1])
x, y = ladder[i]
for j in range(1, distance):
if j != 1:
temp = list(ladder_temp[-1])
new_point = (x + j, y - j)
if new_point[1] <= 0:
break
if new_point[0] >= distance - 1:
break
temp.append(new_point)
ladder_temp.append(temp)
xdestabs.extend(ladder_temp)
set_destabs = {}
relayout = {v: k for k, v in self.qecc.layout.items()}
for d in xdestabs:
row = set([])
x, y = d[-1]
a = relayout[(2 * x + 1 + 1, 2 * y + 1 - 1)]
if a in self.ancilla_x_check:
a = relayout[(2 * x - 1 + 1, 2 * y + 1 - 1)]
for x, y in d:
row.add(relayout[(2 * x + 1, 2 * y + 1)])
set_destabs[a] = set(row)
return set_destabs
def generate_zdestabs(self):
distance = self.qecc.distance
zdestabs_temp = []
if distance % 2 == 0:
b = 2
else:
b = 1
for y in range(b, distance, 2):
temp = []
x = distance - 1
for j in range(0, distance):
new_point = (x - j, y + j)
if new_point[0] <= 0:
break
if new_point[1] > distance - 1:
break
temp.append(new_point)
zdestabs_temp.append(temp)
zdestabs = []
for ds in zdestabs_temp:
for i in range(len(ds)):
temp = []
for j in range(i + 1):
temp.append(ds[j])
zdestabs.append(temp)
ladder = []
y = 0
for x in range(distance - 1, 0, -1):
ladder.append((x, y))
for i in range(len(ladder)):
zdestabs.append(ladder[:i + 1])
ladder_points = []
for i in range(distance % 2, distance - 1, 2):
ladder_points.append(i)
ladder_temp = []
for i in ladder_points:
temp = list(ladder[:i + 1])
x, y = ladder[i]
for j in range(1, distance):
if j != 1:
temp = list(ladder_temp[-1])
new_point = (x - j, y + j)
if new_point[0] <= 0:
break
if new_point[1] >= distance - 1:
break
temp.append(new_point)
ladder_temp.append(temp)
zdestabs.extend(ladder_temp)
set_destabs = {}
relayout = {v: k for k, v in self.qecc.layout.items()}
for d in zdestabs:
row = set([])
x, y = d[-1]
a = relayout[(2 * x + 1 - 1, 2 * y + 1 + 1)]
if a in self.ancilla_z_check:
a = relayout[(2 * x + 1 - 1, 2 * y + 1 - 1)]
for x, y in d:
row.add(relayout[(2 * x + 1, 2 * y + 1)])
set_destabs[a] = row
return set_destabs
class InstrInitZero(LogicalInstruction):
def __init__(self, qecc, symbol, **gate_params):
super().__init__(qecc, symbol, **gate_params)
self.symbol = 'instr_init_zero'
self.data_qudit_set = self.qecc.data_qudit_set
self.ancilla_qudit_set = self.qecc.ancilla_qudit_set
syn_ext = qecc.instruction('instr_syn_extract', **gate_params)
self.abstract_circuit = syn_ext.abstract_circuit.copy()
self.abstract_circuit.params.update(gate_params)
self.ancilla_x_check = syn_ext.ancilla_x_check
self.ancilla_z_check = syn_ext.ancilla_z_check
data_qudits = syn_ext.data_qudit_set
self.abstract_circuit.append('init |0>', locations=data_qudits, tick=0)
self.initial_logical_ops = [
{'X': None, 'Z': None},
]
self.final_logical_ops = [
{'Z': QuantumCircuit([{'Z': set(qecc.sides['top'])}]), 'X': QuantumCircuit([{'X': set(qecc.sides['left'])}])}
]
self.logical_signs = [0]
self.logical_stabilizers = ['Z']
self._compile_circuit(self.abstract_circuit)
self._stabs_destabs = {}
@property
def stabs_destabs(self):
if self._stabs_destabs:
return self._stabs_destabs
gate_params = self.gate_params
syn_ext = self.qecc.instruction('instr_syn_extract', **gate_params)
for name, rows in syn_ext.stabs_destabs.items():
self._stabs_destabs[name] = []
for row in rows:
self._stabs_destabs[name].append(set(row))
self._stabs_destabs['stabs_z'].append(set(self.qecc.sides['top']))
self._stabs_destabs['stabs_x'].append(set([]))
self._stabs_destabs['destabs_x'].append(set(self.qecc.sides['left']))
self._stabs_destabs['destabs_z'].append(set([]))
return self._stabs_destabs
class InstrInitPlus(LogicalInstruction):
def __init__(self, qecc, symbol, **gate_params):
super().__init__(qecc, symbol, **gate_params)
self.symbol = 'instr_init_plus'
self.data_qudit_set = self.qecc.data_qudit_set
self.ancilla_qudit_set = self.qecc.ancilla_qudit_set
syn_ext = qecc.instruction('instr_syn_extract', **gate_params)
self.abstract_circuit = syn_ext.abstract_circuit.copy()
self.abstract_circuit.params.update(gate_params)
self.ancilla_x_check = syn_ext.ancilla_x_check
self.ancilla_z_check = syn_ext.ancilla_z_check
data_qudits = syn_ext.data_qudit_set
self.abstract_circuit.append('init |0>', locations=data_qudits, tick=0)
self.abstract_circuit.append('H', locations=data_qudits, tick=1)
self.initial_logical_ops = [
{'X': None, 'Z': None},
]
self.final_logical_ops = [
{'X': QuantumCircuit([{'X': set(qecc.sides['left'])}]), 'Z': QuantumCircuit([{'Z': set(qecc.sides['top'])}])}
]
self.logical_signs = [0]
self.logical_stabilizers = ['X']
self._compile_circuit(self.abstract_circuit)
self._stabs_destabs = {}
@property
def stabs_destabs(self):
if self._stabs_destabs:
return self._stabs_destabs
gate_params = self.gate_params
syn_ext = self.qecc.instruction('instr_syn_extract', **gate_params)
for name, rows in syn_ext.stabs_destabs.items():
self._stabs_destabs[name] = []
for row in rows:
self._stabs_destabs[name].append(set(row))
self._stabs_destabs['stabs_x'].append(set(self.qecc.sides['left']))
self._stabs_destabs['stabs_z'].append(set([]))
self._stabs_destabs['destabs_z'].append(set(self.qecc.sides['top']))
        self._stabs_destabs['destabs_x'].append(set([]))
return self._stabs_destabs
| true
| true
|
790b655cc3ef0d1d5d0c0c49c63148f7322be5a8
| 23,975
|
py
|
Python
|
python/athena/gpu_ops/StreamExecutor.py
|
DMALab/TSplit
|
8f86f987163aa06521bfeeb174616eb4a0a81b47
|
[
"Apache-2.0"
] | 2
|
2021-05-29T11:18:14.000Z
|
2021-09-09T14:29:21.000Z
|
python/athena/gpu_ops/StreamExecutor.py
|
DMALab/TSplit
|
8f86f987163aa06521bfeeb174616eb4a0a81b47
|
[
"Apache-2.0"
] | null | null | null |
python/athena/gpu_ops/StreamExecutor.py
|
DMALab/TSplit
|
8f86f987163aa06521bfeeb174616eb4a0a81b47
|
[
"Apache-2.0"
] | 1
|
2021-05-01T16:34:37.000Z
|
2021-05-01T16:34:37.000Z
|
""" library to take autodiff and execute a computation graph """
from __future__ import absolute_import
import numpy as np
from .Node import Op
from .. import ndarray
from ..stream import *
import ctypes
import os
from pynvml import *
FLAG_SHOW_GRAPH = False
G_NODE_ID = 0
NAME_RULE = 1
def communicate_init(worker_num, worker_id, source_ip, target_ip):
global lib_communicate
# lib_communicate.DL_Connect_Init(2, 0, "*:4001", "localhost:4002")
# lib_communicate.DL_Connect_Init(2, 1, "*:4002", "localhost:4001")
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
lib_path = os.path.join(curr_path, '../../build/lib/')
path_to_so_file = os.path.join(lib_path, "lib_communication.so")
lib_communicate = ctypes.cdll.LoadLibrary(path_to_so_file)
lib_communicate.DL_Connect_Init(
worker_num, worker_id, source_ip, target_ip)
def communicate_finish():
lib_communicate.DL_Communicate_Close()
class Distributed_CommunicateOp(Op):
def __call__(self, nodeA):
new_node = Op.__call__(self)
new_node.inputs = [nodeA]
new_node.name = "Distributed_Communicate(%s)" % (nodeA.name)
# print nodeA.name
return new_node
def compute(self, node, input_vals, output_val, use_numpy=True):
after_reduce_gradient_cpu = ndarray.empty(
shape=output_val.shape, ctx=ndarray.cpu(0))
if use_numpy:
gradient_val_cpu = ndarray.array(input_vals[0], ctx=ndarray.cpu(0))
else:
gradient_val_cpu = ndarray.array(
input_vals[0].asnumpy(), ctx=ndarray.cpu(0))
# print gradient_val_cpu.asnumpy()
lib_communicate.DL_Communicate_Init(gradient_val_cpu.handle)
lib_communicate.DL_Communicate(
gradient_val_cpu.handle, after_reduce_gradient_cpu.handle)
# print after_reduce_gradient_cpu.asnumpy()
if use_numpy:
output_val[:] = after_reduce_gradient_cpu.asnumpy()
else:
after_reduce_gradient_cpu.copyto(output_val)
def gradient(self, node, output_grad):
raise NotImplementedError
def infer_shape(self, node, input_shapes):
return input_shapes[0]
distributed_communicate_op = Distributed_CommunicateOp()
class StreamExecutor(object):
"""Executor computes values for given set of nodes in computation graph."""
def __init__(self, eval_node_list, ctx = None, stream = None, policy = None):
"""
Parameters
----------
eval_node_list: list of nodes whose values need to be computed.
ctx: runtime DLContext, default is None which means np.ndarray on cpu
topo_order: list of nodes in topological order
node_to_shape_map: dict from node to shape of the node
node_to_arr_map: dict from node to ndarray.NDArray allocated for node
feed_shapes: shapes of feed_dict from last run(...)
"""
self.eval_node_list = eval_node_list
self.ctx = ctx
if stream is None:
self.stream = create_stream_handle(ctx)
else:
self.stream = stream
self.stream.sync()
self.topo_order = find_topo_sort(self.eval_node_list)
self.node_to_shape_map = None
self.node_to_arr_map = None
self.feed_shapes = None
self.policy = policy
if self.policy == 'swap':
self.swap_queue = []
def infer_shape(self, feed_shapes):
"""Given shapes of feed_dict nodes, infer shape for all nodes in graph.
Implementation note:
Iteratively calls node.op.infer_shape to infer shapes.
Node shapes stored in self.node_to_shape_map.
Parameters
----------
feed_shapes: node->shapes mapping for feed_dict nodes.
"""
"""TODO: Your code here"""
self.node_to_shape_map = {}
for node in self.topo_order:
if node in feed_shapes:
self.node_to_shape_map[node] = feed_shapes[node]
else:
# print(node.name)
input_shapes = [self.node_to_shape_map[n] for n in node.inputs]
self.node_to_shape_map[node] = node.op.infer_shape(
node, input_shapes)
def memory_plan(self, feed_shapes):
"""Allocates ndarray.NDArray for every node except feed_dict nodes.
Implementation note:
Option 1: Alloc a ndarray.NDArray per node that persists across run()
Option 2: Implement a memory pool to reuse memory for nodes of same
shapes. More details see Lecture 7.
For both options, self.node_to_arr_map stores node->NDArray mapping to
allow mapping to persist across multiple executor.run().
Hint: use ndarray.empty(shape, ctx=self.ctx) to allocate NDArray.
Parameters
----------
feed_shapes: node->shapes mapping for feed_dict nodes.
"""
"""TODO: Your code here"""
assert (self.ctx is not None)
# self.infer_shape(feed_shapes)
self.node_to_arr_map = {}
for node, shape in self.node_to_shape_map.items():
if self.policy == 'swap':
if not node.swap:
self.node_to_arr_map[node] = ndarray.empty(
shape, ctx=self.ctx)
elif self.policy == 'vdnn':
self.node_to_arr_map[node] = np.empty(shape)
else:
self.node_to_arr_map[node] = ndarray.empty(shape, ctx=self.ctx)
def run(self, feed_dict, convert_to_numpy_ret_vals=False):
"""
Parameters
----------
feed_dict: a dictionary of node->np.ndarray supplied by user.
convert_to_numpy_ret_vals: whether to convert ret vals to np.array
Returns
-------
A list of values for nodes in eval_node_list. NDArray or np.ndarray.
"""
def are_feed_shapes_equal(sa, sb):
if (not isinstance(sa, dict)) or (not isinstance(sb, dict)):
return False
unmatched_item = set(sa.items()) ^ set(sb.items())
return len(unmatched_item) == 0
# Assume self.ctx is None implies numpy array and numpy ops.
use_numpy = self.ctx is None
node_to_val_map = {}
for node, value in feed_dict.items():
if use_numpy:
# all values passed in feed_dict must be np.ndarray
assert isinstance(value, np.ndarray)
node_to_val_map[node] = value
else:
# convert values to ndarray.NDArray if necessary
if isinstance(value, np.ndarray):
node_to_val_map[node] = ndarray.array(value, ctx=self.ctx)
elif isinstance(value, ndarray.NDArray):
node_to_val_map[node] = value
else:
assert False, "feed_dict value type not supported"
# print"xxxx"
# collect shapes for all placeholders
# infer shape if feed_shapes changed since last run
        # e.g. call run() on test data after training
# print feed_shapes
feed_shapes = {}
for node in node_to_val_map:
feed_shapes[node] = node_to_val_map[node].shape
if(not are_feed_shapes_equal(feed_shapes, self.feed_shapes)):
self.infer_shape(feed_shapes)
self.feed_shapes = feed_shapes
if (not use_numpy):
self.memory_plan(self.feed_shapes)
for node in self.topo_order:
if node in node_to_val_map:
continue
input_vals = [node_to_val_map[n] for n in node.inputs]
if use_numpy:
node_val = np.empty(shape=self.node_to_shape_map[node])
else:
node_val = self.node_to_arr_map[node]
# print(node.name)
node.op.compute(node, input_vals, node_val, use_numpy, self.stream)
node_to_val_map[node] = node_val
self.stream.sync()
if not use_numpy and convert_to_numpy_ret_vals:
return [node_to_val_map[n].asnumpy() for n in self.eval_node_list]
return [node_to_val_map[n] for n in self.eval_node_list]
# def run(self, feed_dict, convert_to_numpy_ret_vals=False):
# """
# Parameters
# ----------
# feed_dict: a dictionary of node->np.ndarray supplied by user.
# convert_to_numpy_ret_vals: whether to convert ret vals to np.array
# Returns
# -------
# A list of values for nodes in eval_node_list. NDArray or np.ndarray.
# """
# def are_feed_shapes_equal(sa, sb):
# if (not isinstance(sa, dict)) or (not isinstance(sb, dict)):
# return False
# unmatched_item = set(sa.items()) ^ set(sb.items())
# return len(unmatched_item) == 0
# # Assume self.ctx is None implies numpy array and numpy ops.
# use_numpy = self.ctx is None
# node_to_val_map = {}
# for node, value in feed_dict.items():
# if self.policy == 'vdnn':
# assert isinstance(value, np.ndarray)
# node_to_val_map[node] = value
# else:
# if use_numpy:
# # all values passed in feed_dict must be np.ndarray
# assert isinstance(value, np.ndarray)
# node_to_val_map[node] = value
# else:
# # convert values to ndarray.NDArray if necessary
# if isinstance(value, np.ndarray):
# if self.policy == 'swap':
# if node.swap == True:
# node_to_val_map[node] = value
# else:
# node_to_val_map[node] = ndarray.array(value, ctx=self.ctx)
# else:
# node_to_val_map[node] = ndarray.array(value, ctx=self.ctx)
# elif isinstance(value, ndarray.NDArray):
# node_to_val_map[node] = value
# else:
# assert False, "feed_dict value type not supported"
# # collect shapes for all placeholders
# feed_shapes = {}
# for node in node_to_val_map:
# feed_shapes[node] = node_to_val_map[node].shape
# # infer shape if feed_shapes changed since last run
    #     # e.g. call run() on test data after training
# # print feed_shapes
# if (not are_feed_shapes_equal(feed_shapes, self.feed_shapes)):
# self.infer_shape(feed_shapes)
# self.feed_shapes = feed_shapes
# if not self.policy == 'vdnn':
# # plan memory if using GPU
# if (not use_numpy):
# self.memory_plan(feed_shapes)
# # Traverse graph in topo order and compute values for all nodes.
# global FLAG_SHOW_GRAPH
# if self.policy == 'swap':
# # generate swap queue
# if not use_numpy:
# for node in self.topo_order:
# if node not in node_to_val_map:
# # variable in placeholder
# for input_node in node.inputs:
# if input_node.swap == True:
# self.swap_queue.append(input_node)
# # variable grad
# if node.swap == True:
# self.swap_queue.append(node)
# node_in_GPU = None
# if FLAG_SHOW_GRAPH:
# print "Show swap queue:"
# for node in self.swap_queue:
# print node
# elif self.policy == 'vdnn':
# # TODO traverse graph to select in-gpu window
# window = [0,0]
# if not use_numpy:
# nvmlInit()
# handle = nvmlDeviceGetHandleByIndex(0)
# info = nvmlDeviceGetMemoryInfo(handle)
# gpu_mem = info.free
# nvmlShutdown()
# loss_node = self.eval_node_list[0]
# window[1] = self.topo_order.index(loss_node)+1
# window[0] = self.topo_order.index(loss_node)+1
# for node in reversed(self.topo_order[:window[1]+1]):
# node_size = 4 # float32
# #print node, self.node_to_shape_map[node]
# for shape in self.node_to_shape_map[node]:
# node_size = node_size * shape
# if gpu_mem > node_size:
# gpu_mem = gpu_mem - node_size
# window[0] = window[0] - 1
# #print "gpu_mem:",gpu_mem
# # Traverse graph in topo order and compute values for all nodes.
# if FLAG_SHOW_GRAPH:
# print "run topo_order"
# # Show graph dependency
# if FLAG_SHOW_GRAPH:
# print "node:",node
# print "node.desc:",node.desc
# for node in self.topo_order:
# if self.policy == 'vdnn':
# # Skip placeholder nodes
# if node in node_to_val_map:
# continue
# # H2D before compute
# ## Collect inputs
# input_vals = []
# for n in node.inputs:
# if not use_numpy:
# if isinstance(node_to_val_map[n], np.ndarray):
# node_to_val_map[n] = ndarray.array(node_to_val_map[n], ctx=self.ctx)
# input_vals.append(node_to_val_map[n])
# ## Alloc node space
# if use_numpy:
# node_val = np.empty(shape=self.node_to_shape_map[node])
# else:
# node_val = ndarray.empty(shape=self.node_to_shape_map[node], ctx=self.ctx)
# # Compute
# # node_val is modified in-place whether np.ndarray or NDArray
# node.op.compute(node, input_vals, node_val, use_numpy)
# # D2H after compute
# if use_numpy:
# node_to_val_map[node] = node_val
# else:
# node_index = self.topo_order.index(node)
# if node_index > window[0] and node_index < window[1]:
# node_to_val_map[node] = node_val
# continue
# node_to_val_map[node] = node_val.asnumpy()
# del node_val
# for n in node.inputs:
# if isinstance(node_to_val_map[n], ndarray.NDArray):
# tmp_val = node_to_val_map[n].asnumpy()
# del node_to_val_map[n]
# node_to_val_map[n] = tmp_val
# elif self.policy == 'swap':
# # Switch in GPU
# if not use_numpy:
# if self.swap_queue and (node_in_GPU==None):
# swap_node = self.swap_queue[0]
# if swap_node in node_to_val_map:
# node_to_val_map[swap_node] = ndarray.array(node_to_val_map[swap_node], ctx=self.ctx)
# else:
# self.node_to_arr_map[swap_node] = ndarray.empty(self.node_to_shape_map[swap_node], ctx=self.ctx)
# node_in_GPU = swap_node.id
# if node in node_to_val_map:
# # Skip placeholder nodes. Values already provided by feed_dict.
# continue
# # Compute
# input_vals = [node_to_val_map[n] for n in node.inputs]
# if use_numpy:
# node_val = np.empty(shape=self.node_to_shape_map[node])
# else:
# node_val = self.node_to_arr_map[node]
# # node_val is modified in-place whether np.ndarray or NDArray
# node.op.compute(node, input_vals, node_val, use_numpy)
# if node.swap == True:
# node_to_val_map[node] = node_val.asnumpy()
# del node_val
# del self.node_to_arr_map[node]
# del self.swap_queue[0]
# node_in_GPU = None
# else:
# node_to_val_map[node] = node_val
# # Switch out GPU
# if not use_numpy:
# if self.swap_queue:
# if self.swap_queue[0] in node.inputs:
# out_node = self.swap_queue.pop(0)
# if self.swap_queue:
# if not self.swap_queue[0].id == node_in_GPU:
# tmp_array = node_to_val_map[out_node].asnumpy()
# del node_to_val_map[out_node]
# node_to_val_map[out_node] = tmp_array
# node_in_GPU = None
# else:
# if node in node_to_val_map:
# # Skip placeholder nodes. Values already provided by feed_dict.
# continue
# input_vals = [node_to_val_map[n] for n in node.inputs]
# # print self.node_to_shape_map[node]
# if use_numpy:
# node_val = np.empty(shape=self.node_to_shape_map[node])
# else:
# node_val = self.node_to_arr_map[node]
# # node_val is modified in-place whether np.ndarray or NDArray
# # if (len(node.inputs) == 1):
# # print "computs",node.inputs[0].name
# # else:
# # print "computs",node.inputs[0].name,node.inputs[1].name
# # print node.name
# # print node_val.shape
# # print "xxx"
# # print node.name
# node.op.compute(node, input_vals, node_val, use_numpy)
# # print "xxx"
# node_to_val_map[node] = node_val
# # print "xxx"
# if FLAG_SHOW_GRAPH:
# FLAG_SHOW_GRAPH = False
# # Collect node values.
# if not use_numpy and convert_to_numpy_ret_vals:
# if self.policy == 'swap':
# node_values = []
# for n in self.eval_node_list:
# if n.swap == True:
# node_values.append(node_to_val_map[n])
# else:
# node_values.append(node_to_val_map[n].asnumpy())
# return node_values
# elif self.policy == 'vdnn':
# return [node_to_val_map[n] for n in self.eval_node_list]
# else:
# return [node_to_val_map[n].asnumpy() for n in self.eval_node_list]
# return [node_to_val_map[n] for n in self.eval_node_list]
def gradients(output_node, node_list, scheduler_policy=None):
"""Take gradient of output node with respect to each node in node_list.
Parameters
----------
output_node: output node that we are taking derivative of.
node_list: list of nodes that we are taking derivative wrt.
Returns
-------
A list of gradient values, one for each node in node_list respectively.
"""
from . import OnesLike
node_to_output_grads_list = {}
node_to_output_grads_list[output_node] = [
OnesLike.oneslike_op(output_node)]
node_to_output_grad = {}
# Traverse forward graph in reverse topological order
reverse_topo_order = reversed(find_topo_sort([output_node]))
for node in reverse_topo_order:
output_grad = sum_node_list(node_to_output_grads_list[node])
node_to_output_grad[node] = output_grad
input_grads_list = node.op.gradient(node, output_grad)
#print len(node.name)
#print len(node.inputs)
#raw_input("\n\nPress the enter key to exit.")
for i in range(len(node.inputs)):
if node.inputs[i] not in node_to_output_grads_list:
node_to_output_grads_list[node.inputs[i]] = []
# Calculate partial adjoint for input nodes.
# print node.name
node_to_output_grads_list[node.inputs[i]].append(
input_grads_list[i])
if scheduler_policy == 'swap':
for node in node_list:
if node.swap:
node_to_output_grad[node].swap = True
grad_node_list = [node_to_output_grad[node] for node in node_list]
# grad_node_list = [distributed_communicate_op(node_to_output_grad[node]) for node in node_list]
return grad_node_list
def distributed_gradients(output_node, node_list, scheduler_policy=None):
"""Take gradient of output node with respect to each node in node_list.
Parameters
----------
output_node: output node that we are taking derivative of.
node_list: list of nodes that we are taking derivative wrt.
Returns
-------
A list of gradient values, one for each node in node_list respectively.
"""
from .OnesLike import oneslike_op
node_to_output_grads_list = {}
node_to_output_grads_list[output_node] = [oneslike_op(output_node)]
node_to_output_grad = {}
# Traverse forward graph in reverse topological order
reverse_topo_order = reversed(find_topo_sort([output_node]))
for node in reverse_topo_order:
output_grad = sum_node_list(node_to_output_grads_list[node])
node_to_output_grad[node] = output_grad
input_grads_list = node.op.gradient(node, output_grad)
#print len(node.name)
#print len(node.inputs)
#raw_input("\n\nPress the enter key to exit.")
for i in range(len(node.inputs)):
if node.inputs[i] not in node_to_output_grads_list:
node_to_output_grads_list[node.inputs[i]] = []
# Calculate partial adjoint for input nodes.
node_to_output_grads_list[node.inputs[i]].append(
input_grads_list[i])
if scheduler_policy == 'swap':
for node in node_list:
if node.swap:
node_to_output_grad[node].swap = True
# grad_node_list = [node_to_output_grad[node] for node in node_list]
grad_node_list = [distributed_communicate_op(
node_to_output_grad[node]) for node in node_list]
return grad_node_list
##################
# Helper Methods #
##################
def find_topo_sort(node_list):
"""Given a list of nodes, return a topo ordering of nodes ending in them.
A simple algorithm is to do a post-order DFS traversal on the given nodes,
going backwards based on input edges. Since a node is added to the ordering
after all its predecessors are traversed due to post-order DFS, we get a
topological sort.
"""
visited = set()
topo_order = []
for node in node_list:
topo_sort_dfs(node, visited, topo_order)
return topo_order
def topo_sort_dfs(node, visited, topo_order):
"""Post-order DFS"""
if node in visited:
return
visited.add(node)
for n in node.inputs:
topo_sort_dfs(n, visited, topo_order)
topo_order.append(node)
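# Editor's illustration (hypothetical stub, not part of the original module):
# find_topo_sort only needs nodes exposing an `inputs` attribute, so a tiny
# fake node is enough to see the post-order guarantee that every input
# precedes its consumers.
class _DemoNode(object):
    def __init__(self, name, inputs=()):
        self.name = name
        self.inputs = list(inputs)

    def __repr__(self):
        return self.name

_a = _DemoNode('a')
_b = _DemoNode('b', [_a])
_c = _DemoNode('c', [_a, _b])
assert [n.name for n in find_topo_sort([_c])] == ['a', 'b', 'c']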
def sum_node_list(node_list):
"""Custom sum func to avoid creating redundant nodes in Python sum func."""
from operator import add
from functools import reduce
return reduce(add, node_list)
def broadcast_rule(shape_a, shape_b):
"""Return output shape of broadcast shape_a, shape_b.
e.g. broadcast_rule((3,2), (4,3,2))
returns output_shape = (4,3,2)
Check out explanations and more examples at
https://docs.scipy.org/doc/numpy-1.10.0/user/basics.broadcasting.html
http://eli.thegreenplace.net/2015/broadcasting-arrays-in-numpy/
"""
assert(isinstance(shape_a, tuple))
assert(isinstance(shape_b, tuple))
if len(shape_a) > len(shape_b):
longer_shape, shorter_shape = shape_a, shape_b
else:
longer_shape, shorter_shape = shape_b, shape_a
len_diff = len(longer_shape) - len(shorter_shape)
for i in range(len_diff):
# pad with leading 1s
shorter_shape = (1,) + shorter_shape
assert len(shorter_shape) == len(longer_shape)
output_shape = list(longer_shape)
for i in range(len(output_shape)):
assert (shorter_shape[i] == longer_shape[i]) \
or (shorter_shape[i] == 1) \
or (longer_shape[i] == 1)
output_shape[i] = max(shorter_shape[i], longer_shape[i])
return tuple(output_shape)
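# Editor's illustration (not part of the original module): quick checks of
# the broadcasting rule above -- shapes are aligned from the trailing axes,
# and size-1 axes stretch to match the longer shape.
assert broadcast_rule((3, 2), (4, 3, 2)) == (4, 3, 2)
assert broadcast_rule((1, 5), (3, 1)) == (3, 5)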
| 40.091973
| 116
| 0.591074
|
from __future__ import absolute_import
import numpy as np
from .Node import Op
from .. import ndarray
from ..stream import *
import ctypes
import os
from pynvml import *
FLAG_SHOW_GRAPH = False
G_NODE_ID = 0
NAME_RULE = 1
def communicate_init(worker_num, worker_id, source_ip, target_ip):
global lib_communicate
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
lib_path = os.path.join(curr_path, '../../build/lib/')
path_to_so_file = os.path.join(lib_path, "lib_communication.so")
lib_communicate = ctypes.cdll.LoadLibrary(path_to_so_file)
lib_communicate.DL_Connect_Init(
worker_num, worker_id, source_ip, target_ip)
def communicate_finish():
lib_communicate.DL_Communicate_Close()
class Distributed_CommunicateOp(Op):
def __call__(self, nodeA):
new_node = Op.__call__(self)
new_node.inputs = [nodeA]
new_node.name = "Distributed_Communicate(%s)" % (nodeA.name)
return new_node
def compute(self, node, input_vals, output_val, use_numpy=True):
after_reduce_gradient_cpu = ndarray.empty(
shape=output_val.shape, ctx=ndarray.cpu(0))
if use_numpy:
gradient_val_cpu = ndarray.array(input_vals[0], ctx=ndarray.cpu(0))
else:
gradient_val_cpu = ndarray.array(
input_vals[0].asnumpy(), ctx=ndarray.cpu(0))
lib_communicate.DL_Communicate_Init(gradient_val_cpu.handle)
lib_communicate.DL_Communicate(
gradient_val_cpu.handle, after_reduce_gradient_cpu.handle)
if use_numpy:
output_val[:] = after_reduce_gradient_cpu.asnumpy()
else:
after_reduce_gradient_cpu.copyto(output_val)
def gradient(self, node, output_grad):
raise NotImplementedError
def infer_shape(self, node, input_shapes):
return input_shapes[0]
distributed_communicate_op = Distributed_CommunicateOp()
class StreamExecutor(object):
def __init__(self, eval_node_list, ctx = None, stream = None, policy = None):
self.eval_node_list = eval_node_list
self.ctx = ctx
if stream is None:
self.stream = create_stream_handle(ctx)
else:
self.stream = stream
self.stream.sync()
self.topo_order = find_topo_sort(self.eval_node_list)
self.node_to_shape_map = None
self.node_to_arr_map = None
self.feed_shapes = None
self.policy = policy
if self.policy == 'swap':
self.swap_queue = []
def infer_shape(self, feed_shapes):
self.node_to_shape_map = {}
for node in self.topo_order:
if node in feed_shapes:
self.node_to_shape_map[node] = feed_shapes[node]
else:
input_shapes = [self.node_to_shape_map[n] for n in node.inputs]
self.node_to_shape_map[node] = node.op.infer_shape(
node, input_shapes)
def memory_plan(self, feed_shapes):
assert (self.ctx is not None)
self.node_to_arr_map = {}
for node, shape in self.node_to_shape_map.items():
if self.policy == 'swap':
if not node.swap:
self.node_to_arr_map[node] = ndarray.empty(
shape, ctx=self.ctx)
elif self.policy == 'vdnn':
self.node_to_arr_map[node] = np.empty(shape)
else:
self.node_to_arr_map[node] = ndarray.empty(shape, ctx=self.ctx)
def run(self, feed_dict, convert_to_numpy_ret_vals=False):
def are_feed_shapes_equal(sa, sb):
if (not isinstance(sa, dict)) or (not isinstance(sb, dict)):
return False
unmatched_item = set(sa.items()) ^ set(sb.items())
return len(unmatched_item) == 0
use_numpy = self.ctx is None
node_to_val_map = {}
for node, value in feed_dict.items():
if use_numpy:
assert isinstance(value, np.ndarray)
node_to_val_map[node] = value
else:
if isinstance(value, np.ndarray):
node_to_val_map[node] = ndarray.array(value, ctx=self.ctx)
elif isinstance(value, ndarray.NDArray):
node_to_val_map[node] = value
else:
assert False, "feed_dict value type not supported"
feed_shapes = {}
for node in node_to_val_map:
feed_shapes[node] = node_to_val_map[node].shape
if(not are_feed_shapes_equal(feed_shapes, self.feed_shapes)):
self.infer_shape(feed_shapes)
self.feed_shapes = feed_shapes
if (not use_numpy):
self.memory_plan(self.feed_shapes)
for node in self.topo_order:
if node in node_to_val_map:
continue
input_vals = [node_to_val_map[n] for n in node.inputs]
if use_numpy:
node_val = np.empty(shape=self.node_to_shape_map[node])
else:
node_val = self.node_to_arr_map[node]
node.op.compute(node, input_vals, node_val, use_numpy, self.stream)
node_to_val_map[node] = node_val
self.stream.sync()
if not use_numpy and convert_to_numpy_ret_vals:
return [node_to_val_map[n].asnumpy() for n in self.eval_node_list]
return [node_to_val_map[n] for n in self.eval_node_list]
def gradients(output_node, node_list, scheduler_policy=None):
    from . import OnesLike
    node_to_output_grads_list = {}
    node_to_output_grads_list[output_node] = [
        OnesLike.oneslike_op(output_node)]
    node_to_output_grad = {}
    reverse_topo_order = reversed(find_topo_sort([output_node]))
    for node in reverse_topo_order:
        output_grad = sum_node_list(node_to_output_grads_list[node])
        node_to_output_grad[node] = output_grad
input_grads_list = node.op.gradient(node, output_grad)
for i in range(len(node.inputs)):
if node.inputs[i] not in node_to_output_grads_list:
node_to_output_grads_list[node.inputs[i]] = []
node_to_output_grads_list[node.inputs[i]].append(
input_grads_list[i])
if scheduler_policy == 'swap':
for node in node_list:
if node.swap:
node_to_output_grad[node].swap = True
grad_node_list = [node_to_output_grad[node] for node in node_list]
return grad_node_list
def distributed_gradients(output_node, node_list, scheduler_policy=None):
from .OnesLike import oneslike_op
node_to_output_grads_list = {}
node_to_output_grads_list[output_node] = [oneslike_op(output_node)]
node_to_output_grad = {}
reverse_topo_order = reversed(find_topo_sort([output_node]))
for node in reverse_topo_order:
output_grad = sum_node_list(node_to_output_grads_list[node])
node_to_output_grad[node] = output_grad
input_grads_list = node.op.gradient(node, output_grad)
for i in range(len(node.inputs)):
if node.inputs[i] not in node_to_output_grads_list:
node_to_output_grads_list[node.inputs[i]] = []
node_to_output_grads_list[node.inputs[i]].append(
input_grads_list[i])
if scheduler_policy == 'swap':
for node in node_list:
if node.swap:
node_to_output_grad[node].swap = True
grad_node_list = [distributed_communicate_op(
node_to_output_grad[node]) for node in node_list]
return grad_node_list
def find_topo_sort(node_list):
    visited = set()
    topo_order = []
    for node in node_list:
        topo_sort_dfs(node, visited, topo_order)
    return topo_order
def topo_sort_dfs(node, visited, topo_order):
    if node in visited:
        return
    visited.add(node)
    for n in node.inputs:
topo_sort_dfs(n, visited, topo_order)
topo_order.append(node)
def sum_node_list(node_list):
from operator import add
from functools import reduce
return reduce(add, node_list)
def broadcast_rule(shape_a, shape_b):
assert(isinstance(shape_a, tuple))
assert(isinstance(shape_b, tuple))
if len(shape_a) > len(shape_b):
longer_shape, shorter_shape = shape_a, shape_b
else:
longer_shape, shorter_shape = shape_b, shape_a
len_diff = len(longer_shape) - len(shorter_shape)
for i in range(len_diff):
shorter_shape = (1,) + shorter_shape
assert len(shorter_shape) == len(longer_shape)
output_shape = list(longer_shape)
for i in range(len(output_shape)):
assert (shorter_shape[i] == longer_shape[i]) \
or (shorter_shape[i] == 1) \
or (longer_shape[i] == 1)
output_shape[i] = max(shorter_shape[i], longer_shape[i])
return tuple(output_shape)
| true
| true
|
790b663ab3c11448db059f1eb8ab6cd56ba588e3
| 502
|
py
|
Python
|
djcommerce/models/profile.py
|
tdsprogramming/djcommerce
|
dc7f16a876b6c9cdd323a097a0b411e45b029373
|
[
"MIT"
] | 1
|
2019-06-26T19:35:01.000Z
|
2019-06-26T19:35:01.000Z
|
djcommerce/models/profile.py
|
tdsprogramming/djcommerce
|
dc7f16a876b6c9cdd323a097a0b411e45b029373
|
[
"MIT"
] | 9
|
2019-06-28T01:29:25.000Z
|
2022-02-10T12:19:32.000Z
|
djcommerce/models/profile.py
|
tdsprogramming/djcommerce
|
dc7f16a876b6c9cdd323a097a0b411e45b029373
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.conf import settings
from django_extensions.db.models import TimeStampedModel
from djcommerce.utils import get_address_model
Address = get_address_model()
class Profile(TimeStampedModel):
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete = models.CASCADE
)
addresses = models.ManyToManyField(Address)
class Meta:
abstract = False
if hasattr(settings,"PROFILE_MODEL"):
abstract = True
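The conditional Meta above makes Profile abstract whenever the project defines PROFILE_MODEL, so a site can supply its own concrete profile table. A minimal sketch of that pattern, assuming a hypothetical app named myapp (all names below are illustrative, not from this repo):

# settings.py (hypothetical)
PROFILE_MODEL = 'myapp.CustomProfile'

# myapp/models.py (hypothetical): with PROFILE_MODEL set, the base Profile
# is abstract, so this subclass provides the single concrete table.
from django.db import models
from djcommerce.models.profile import Profile

class CustomProfile(Profile):
    loyalty_points = models.IntegerField(default=0)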
| 23.904762
| 56
| 0.723108
|
from django.db import models
from django.conf import settings
from django_extensions.db.models import TimeStampedModel
from djcommerce.utils import get_address_model
Address = get_address_model()
class Profile(TimeStampedModel):
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete = models.CASCADE
)
addresses = models.ManyToManyField(Address)
class Meta:
abstract = False
if hasattr(settings,"PROFILE_MODEL"):
abstract = True
| true
| true
|
790b665cda7be12dcf2d030c0739d514b867053b
| 2,838
|
py
|
Python
|
netbox/ipam/tests/test_models.py
|
0xAalaoui/netbox
|
07364abf9e9ff193bad49b790e657382cf186f0c
|
[
"Apache-2.0"
] | 1
|
2018-07-31T06:54:02.000Z
|
2018-07-31T06:54:02.000Z
|
netbox/ipam/tests/test_models.py
|
0xAalaoui/netbox
|
07364abf9e9ff193bad49b790e657382cf186f0c
|
[
"Apache-2.0"
] | null | null | null |
netbox/ipam/tests/test_models.py
|
0xAalaoui/netbox
|
07364abf9e9ff193bad49b790e657382cf186f0c
|
[
"Apache-2.0"
] | 1
|
2021-04-09T06:08:21.000Z
|
2021-04-09T06:08:21.000Z
|
from __future__ import unicode_literals
import netaddr
from django.core.exceptions import ValidationError
from django.test import TestCase, override_settings
from ipam.models import IPAddress, Prefix, VRF
class TestPrefix(TestCase):
@override_settings(ENFORCE_GLOBAL_UNIQUE=False)
def test_duplicate_global(self):
Prefix.objects.create(prefix=netaddr.IPNetwork('192.0.2.0/24'))
duplicate_prefix = Prefix(prefix=netaddr.IPNetwork('192.0.2.0/24'))
self.assertIsNone(duplicate_prefix.clean())
@override_settings(ENFORCE_GLOBAL_UNIQUE=True)
def test_duplicate_global_unique(self):
Prefix.objects.create(prefix=netaddr.IPNetwork('192.0.2.0/24'))
duplicate_prefix = Prefix(prefix=netaddr.IPNetwork('192.0.2.0/24'))
self.assertRaises(ValidationError, duplicate_prefix.clean)
def test_duplicate_vrf(self):
vrf = VRF.objects.create(name='Test', rd='1:1', enforce_unique=False)
Prefix.objects.create(vrf=vrf, prefix=netaddr.IPNetwork('192.0.2.0/24'))
duplicate_prefix = Prefix(vrf=vrf, prefix=netaddr.IPNetwork('192.0.2.0/24'))
self.assertIsNone(duplicate_prefix.clean())
def test_duplicate_vrf_unique(self):
vrf = VRF.objects.create(name='Test', rd='1:1', enforce_unique=True)
Prefix.objects.create(vrf=vrf, prefix=netaddr.IPNetwork('192.0.2.0/24'))
duplicate_prefix = Prefix(vrf=vrf, prefix=netaddr.IPNetwork('192.0.2.0/24'))
self.assertRaises(ValidationError, duplicate_prefix.clean)
class TestIPAddress(TestCase):
@override_settings(ENFORCE_GLOBAL_UNIQUE=False)
def test_duplicate_global(self):
IPAddress.objects.create(address=netaddr.IPNetwork('192.0.2.1/24'))
duplicate_ip = IPAddress(address=netaddr.IPNetwork('192.0.2.1/24'))
self.assertIsNone(duplicate_ip.clean())
@override_settings(ENFORCE_GLOBAL_UNIQUE=True)
def test_duplicate_global_unique(self):
IPAddress.objects.create(address=netaddr.IPNetwork('192.0.2.1/24'))
duplicate_ip = IPAddress(address=netaddr.IPNetwork('192.0.2.1/24'))
self.assertRaises(ValidationError, duplicate_ip.clean)
def test_duplicate_vrf(self):
vrf = VRF.objects.create(name='Test', rd='1:1', enforce_unique=False)
IPAddress.objects.create(vrf=vrf, address=netaddr.IPNetwork('192.0.2.1/24'))
duplicate_ip = IPAddress(vrf=vrf, address=netaddr.IPNetwork('192.0.2.1/24'))
self.assertIsNone(duplicate_ip.clean())
def test_duplicate_vrf_unique(self):
vrf = VRF.objects.create(name='Test', rd='1:1', enforce_unique=True)
IPAddress.objects.create(vrf=vrf, address=netaddr.IPNetwork('192.0.2.1/24'))
duplicate_ip = IPAddress(vrf=vrf, address=netaddr.IPNetwork('192.0.2.1/24'))
self.assertRaises(ValidationError, duplicate_ip.clean)
| 45.774194
| 84
| 0.72093
|
comment-removal flags: true, true

hexsha: 790b683e58ce703e032cd4ecb9667601e4fdd9dd
size: 48363 | ext: py | lang: Python
repo path: tensorflow/contrib/distribute/python/keras_test.py
repo name: unnir/tensorflow (head hexsha 656b2fe018a7940595121ea08d4a1ddf29fa65d0)
licenses: ["Apache-2.0"]
(the stars, issues, and forks repo fields repeat the same path, name, head hexsha, and licenses; all counts and event datetimes are null)
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.keras models using DistributionStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
import numpy as np
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import mirrored_strategy
from tensorflow.contrib.distribute.python import tpu_strategy
from tensorflow.contrib.distribute.python import values
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.estimator import keras as keras_lib
from tensorflow.python.estimator import run_config as run_config_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import distributed_training_utils
from tensorflow.python.ops.parsing_ops import gen_parsing_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import rmsprop
_RANDOM_SEED = 1337
_TRAIN_SIZE = 200
_INPUT_SIZE = (10,)
_NUM_CLASS = 2
# TODO(anjalisridhar): Add a decorator that will allow us to run these tests as
# part of the tf.keras unit test suite.
def simple_sequential_model():
model = keras.models.Sequential()
model.add(keras.layers.Dense(16, activation='relu', input_shape=_INPUT_SIZE))
model.add(keras.layers.Dropout(0.1))
model.add(keras.layers.Dense(_NUM_CLASS, activation='softmax'))
return model
def simple_functional_model():
a = keras.layers.Input(shape=_INPUT_SIZE)
b = keras.layers.Dense(16, activation='relu')(a)
b = keras.layers.Dropout(0.1)(b)
b = keras.layers.Dense(_NUM_CLASS, activation='softmax')(b)
model = keras.models.Model(inputs=[a], outputs=[b])
return model
def multi_inputs_multi_outputs_model():
input_a = keras.layers.Input(shape=(16,), name='input_a')
input_b = keras.layers.Input(shape=(16,), name='input_b')
input_m = keras.layers.Input(shape=(8,), dtype='string', name='input_m')
dense = keras.layers.Dense(8, name='dense_1')
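  # Note: the same Dense instance is applied to both input_a and input_b
  # below, so those two branches share weights.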
interm_a = dense(input_a)
  # Convert the string input m to numeric values so it can be combined with
  # the dense branch.
interm_m = keras.layers.Lambda(gen_parsing_ops.string_to_number)(input_m)
interm_s = keras.layers.Lambda(lambda k: k[0] * k[1])([interm_m, interm_a])
interm_b = dense(input_b)
merged = keras.layers.concatenate([interm_s, interm_b], name='merge')
output_c = keras.layers.Dense(3, activation='softmax', name='dense_2')(merged)
output_d = keras.layers.Dense(2, activation='softmax', name='dense_3')(merged)
model = keras.models.Model(
inputs=[input_a, input_b, input_m], outputs=[output_c, output_d])
model.compile(
loss='categorical_crossentropy',
optimizer=gradient_descent.GradientDescentOptimizer(0.001),
metrics={
'dense_2': 'categorical_accuracy',
'dense_3': 'categorical_accuracy'
})
return model
def get_ds_train_input_fn():
np.random.seed(_RANDOM_SEED)
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=_TRAIN_SIZE,
test_samples=50,
input_shape=_INPUT_SIZE,
num_classes=_NUM_CLASS)
y_train = keras.utils.to_categorical(y_train)
dataset = dataset_ops.Dataset.from_tensor_slices((x_train, y_train))
dataset = dataset.batch(32)
return dataset
def get_ds_test_input_fn():
np.random.seed(_RANDOM_SEED)
_, (x_test, y_test) = testing_utils.get_test_data(
train_samples=_TRAIN_SIZE,
test_samples=50,
input_shape=_INPUT_SIZE,
num_classes=_NUM_CLASS)
y_test = keras.utils.to_categorical(y_test)
dataset = dataset_ops.Dataset.from_tensor_slices((x_test, y_test))
dataset = dataset.batch(32)
return dataset
def get_multi_inputs_multi_outputs_data():
(a_train, c_train), (a_test, c_test) = testing_utils.get_test_data(
train_samples=_TRAIN_SIZE,
test_samples=50,
input_shape=(16,),
num_classes=3,
random_seed=_RANDOM_SEED)
(b_train, d_train), (b_test, d_test) = testing_utils.get_test_data(
train_samples=_TRAIN_SIZE,
test_samples=50,
input_shape=(16,),
num_classes=2,
random_seed=_RANDOM_SEED)
(m_train, _), (m_test, _) = testing_utils.get_test_data(
train_samples=_TRAIN_SIZE,
test_samples=50,
input_shape=(8,),
num_classes=2,
random_seed=_RANDOM_SEED)
c_train = keras.utils.to_categorical(c_train)
c_test = keras.utils.to_categorical(c_test)
d_train = keras.utils.to_categorical(d_train)
d_test = keras.utils.to_categorical(d_test)
train_data = {
'input_a': a_train,
'input_b': b_train,
'input_m': m_train,
'output_c': c_train,
'output_d': d_train
}
test_data = {
'input_a': a_test,
'input_b': b_test,
'input_m': m_test,
'output_c': c_test,
'output_d': d_test
}
return (train_data, test_data)
def batch_wrapper(dataset, batch_size, distribution):
  # TPUs currently require fully defined input shapes; drop_remainder ensures
  # that the input will have fully defined shapes.
if isinstance(distribution, tpu_strategy.TPUStrategy):
return dataset.batch(batch_size, drop_remainder=True)
else:
return dataset.batch(batch_size)
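# Illustration (not part of the original tests): with 10 elements and a batch
# size of 3,
#   dataset.batch(3)                      -> batches of 3, 3, 3, 1 (batch dim not static)
#   dataset.batch(3, drop_remainder=True) -> batches of 3, 3, 3 (static batch dim of 3)
# so TPU compilation sees fully defined shapes at the cost of the final
# partial batch.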
def get_model():
x = keras.layers.Input(shape=(3,), name='input')
y = keras.layers.Dense(4, name='dense')(x)
model = keras.Model(x, y)
return model
def get_dataset(distribution):
inputs = np.zeros((10, 3), dtype=np.float32)
targets = np.zeros((10, 4), dtype=np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
dataset = batch_wrapper(dataset, 10, distribution)
return dataset
def get_predict_dataset(distribution):
inputs = np.zeros((10, 3), dtype=np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices(inputs)
dataset = dataset.repeat(100)
dataset = batch_wrapper(dataset, 10, distribution)
return dataset
def multi_input_output_model():
a = keras.layers.Input(shape=(3,), name='input_a')
b = keras.layers.Input(shape=(5,), name='input_b')
# TODO(anjalisridhar): Change the output dimension of the second Dense layer
# once the iterator output validation issue has been fixed.
dense_1 = keras.layers.Dense(7, name='dense_1')
dense_2 = keras.layers.Dense(7, name='dense_2')
c = dense_1(a)
d = dense_2(b)
e = keras.layers.Dropout(0.5, name='dropout')(c)
model = keras.models.Model([a, b], [d, e])
return model
def get_correctness_test_inputs(use_numpy, with_distribution,
x_train, y_train, x_predict):
"""Generates the inputs for correctness check when enable Keras with DS."""
global_batch_size = 64
batch_size = global_batch_size
# TODO(b/118776054): Use global batch size for Keras/DS support.
use_per_core_batch_size = (
with_distribution and
with_distribution.__class__.__name__ != 'TPUStrategy')
if use_per_core_batch_size:
batch_size //= with_distribution.num_replicas_in_sync
if use_numpy:
training_inputs = {
'batch_size': batch_size,
'x': x_train,
'y': y_train,
'epochs': 1,
'shuffle': False,
}
eval_inputs = {
'batch_size': batch_size,
'x': x_train,
'y': y_train,
}
# TODO(b/119318587): We should not require batch_size when distribution
# is enabled.
if with_distribution:
if use_per_core_batch_size:
predict_batch_size = (
len(x_predict) // with_distribution.num_replicas_in_sync)
else:
predict_batch_size = len(x_predict)
else:
predict_batch_size = None
predict_inputs = {
'batch_size': predict_batch_size,
'x': np.array(x_predict, dtype=np.float32),
}
else:
# For dataset inputs, we do not pass batch_size to
# keras.fit/evaluate/predict. The batch size is part of the dataset.
train_dataset = dataset_ops.Dataset.from_tensor_slices(
(x_train, y_train))
x = batch_wrapper(train_dataset, batch_size, with_distribution)
training_inputs = {
'batch_size': None,
'x': x,
'y': None,
'epochs': 1,
'shuffle': False,
'steps_per_epoch': len(x_train) // global_batch_size,
}
eval_inputs = {
'batch_size': None,
'x': x,
'y': None,
'steps': 20,
}
predict_batch_size = len(x_predict)
if use_per_core_batch_size:
predict_batch_size //= with_distribution.num_replicas_in_sync
predict_dataset = dataset_ops.Dataset.from_tensor_slices(x_predict)
predict_dataset = batch_wrapper(predict_dataset,
predict_batch_size, with_distribution)
predict_inputs = {
'batch_size': None,
'steps': 1,
'x': predict_dataset,
}
return training_inputs, eval_inputs, predict_inputs
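# Worked example (hypothetical values, not part of the original file): with
# use_numpy=True, len(x_predict) == 4, and a two-replica MirroredStrategy,
# use_per_core_batch_size is True, so predict_batch_size = 4 // 2 = 2 and each
# replica receives half the samples; on TPUStrategy the per-core adjustment is
# skipped and predict_batch_size is the full len(x_predict).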
strategies = [combinations.default_strategy,
combinations.one_device_strategy,
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.mirrored_strategy_with_two_gpus,
combinations.tpu_strategy, # steps_per_run=2
combinations.tpu_strategy_one_step]
def strategy_minus_tpu_combinations():
return combinations.combine(
distribution=[combinations.default_strategy,
combinations.one_device_strategy,
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.mirrored_strategy_with_two_gpus],
mode=['graph'])
def strategy_combinations():
return combinations.combine(
distribution=strategies,
mode=['graph'])
def strategy_and_optimizer_combinations():
return combinations.combine(
distribution=strategies,
optimizer=[combinations.adagrad_optimizer_v1_fn,
combinations.adam_optimizer_v1_fn,
combinations.gradient_descent_optimizer_v1_fn,
combinations.rmsprop_optimizer_v1_fn],
mode=['graph'])
def strategy_and_inputs():
return combinations.combine(
distribution=strategies,
use_numpy=[True, False],
mode=['graph'])
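# Each combinations.combine call above expands into the cross product of its
# keyword arguments, one parameterized test case per element; for example,
# strategy_and_inputs() yields len(strategies) * 2 cases (use_numpy True/False).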
class TestEstimatorDistributionStrategy(test_util.TensorFlowTestCase):
def setUp(self):
self._base_dir = os.path.join(self.get_temp_dir(),
'keras_mirrored_strategy_test')
gfile.MakeDirs(self._base_dir)
self._config = run_config_lib.RunConfig(
tf_random_seed=_RANDOM_SEED, model_dir=self._base_dir)
self._dist = mirrored_strategy.MirroredStrategy(
devices=['/device:GPU:0', '/device:GPU:1'])
def tearDown(self):
writer_cache.FileWriterCache.clear()
if os.path.isdir(self._base_dir):
gfile.DeleteRecursively(self._base_dir)
def test_train_functional_with_distribution_strategy(self):
dist = mirrored_strategy.MirroredStrategy(
devices=['/device:GPU:0', '/device:GPU:1'])
keras_model = simple_functional_model()
keras_model.compile(
loss='categorical_crossentropy',
metrics=[keras.metrics.CategoricalAccuracy()],
optimizer=rmsprop.RMSPropOptimizer(learning_rate=0.01))
config = run_config_lib.RunConfig(tf_random_seed=_RANDOM_SEED,
model_dir=self._base_dir,
train_distribute=dist,
eval_distribute=dist)
with self.cached_session():
est_keras = keras_lib.model_to_estimator(
keras_model=keras_model, config=config)
before_eval_results = est_keras.evaluate(
input_fn=get_ds_test_input_fn, steps=1)
      est_keras.train(input_fn=get_ds_train_input_fn, steps=_TRAIN_SIZE // 16)
after_eval_results = est_keras.evaluate(input_fn=get_ds_test_input_fn,
steps=1)
self.assertLess(after_eval_results['loss'], before_eval_results['loss'])
writer_cache.FileWriterCache.clear()
gfile.DeleteRecursively(self._config.model_dir)
def test_train_sequential_with_distribution_strategy(self):
dist = mirrored_strategy.MirroredStrategy(
devices=['/device:GPU:0', '/device:GPU:1'])
keras_model = simple_sequential_model()
keras_model.compile(
loss='categorical_crossentropy',
metrics=[keras.metrics.CategoricalAccuracy()],
optimizer=rmsprop.RMSPropOptimizer(learning_rate=0.01))
config = run_config_lib.RunConfig(tf_random_seed=_RANDOM_SEED,
model_dir=self._base_dir,
train_distribute=dist)
with self.cached_session():
est_keras = keras_lib.model_to_estimator(
keras_model=keras_model, config=config)
before_eval_results = est_keras.evaluate(
input_fn=get_ds_test_input_fn, steps=1)
      est_keras.train(input_fn=get_ds_train_input_fn, steps=_TRAIN_SIZE // 16)
after_eval_results = est_keras.evaluate(input_fn=get_ds_test_input_fn,
steps=1)
self.assertLess(after_eval_results['loss'], before_eval_results['loss'])
writer_cache.FileWriterCache.clear()
gfile.DeleteRecursively(self._config.model_dir)
def test_multi_inputs_multi_outputs_with_input_fn_as_dict(self):
train_data, test_data = get_multi_inputs_multi_outputs_data()
def train_input_fn():
input_dict = {
'input_a': train_data['input_a'],
'input_b': train_data['input_b'],
'input_m': train_data['input_m'].astype(np.str)
}
output_dict = {
'dense_2': train_data['output_c'],
'dense_3': train_data['output_d']
}
return dataset_ops.Dataset.from_tensor_slices((input_dict,
output_dict)).batch(16)
def eval_input_fn():
input_dict = {
'input_a': test_data['input_a'],
'input_b': test_data['input_b'],
'input_m': test_data['input_m'].astype(np.str)
}
output_dict = {
'dense_2': test_data['output_c'],
'dense_3': test_data['output_d']
}
return dataset_ops.Dataset.from_tensor_slices((input_dict,
output_dict)).batch(16)
self.do_test_multi_inputs_multi_outputs_with_input_fn(
train_input_fn, eval_input_fn)
def do_test_multi_inputs_multi_outputs_with_input_fn(self, train_input_fn,
eval_input_fn):
config = run_config_lib.RunConfig(
tf_random_seed=_RANDOM_SEED,
model_dir=self._base_dir,
train_distribute=self._dist)
with self.cached_session():
model = multi_inputs_multi_outputs_model()
est_keras = keras_lib.model_to_estimator(keras_model=model, config=config)
baseline_eval_results = est_keras.evaluate(
input_fn=eval_input_fn, steps=1)
      est_keras.train(input_fn=train_input_fn, steps=_TRAIN_SIZE // 16)
eval_results = est_keras.evaluate(input_fn=eval_input_fn, steps=1)
self.assertLess(eval_results['loss'], baseline_eval_results['loss'])
def test_keras_optimizer_with_distribution_strategy(self):
dist = mirrored_strategy.MirroredStrategy(
devices=['/device:GPU:0', '/device:GPU:1'])
keras_model = simple_sequential_model()
keras_model.compile(
loss='categorical_crossentropy',
optimizer=keras.optimizers.rmsprop(lr=0.01))
config = run_config_lib.RunConfig(tf_random_seed=_RANDOM_SEED,
model_dir=self._base_dir,
train_distribute=dist)
with self.cached_session():
est_keras = keras_lib.model_to_estimator(keras_model=keras_model,
config=config)
with self.assertRaisesRegexp(ValueError,
'Only TensorFlow native optimizers are '
'supported with DistributionStrategy.'):
        est_keras.train(input_fn=get_ds_train_input_fn, steps=_TRAIN_SIZE // 16)
writer_cache.FileWriterCache.clear()
gfile.DeleteRecursively(self._config.model_dir)
class TestDistributionStrategyWithNumpyArrays(test.TestCase,
parameterized.TestCase):
@combinations.generate(strategy_combinations())
def test_creating_var_with_numpy_arrays(self, distribution):
with self.cached_session():
x = np.asarray(np.random.random((64, 3)), dtype=np.float32)
var_x = distributed_training_utils.get_var_for_numpy(distribution, x)
val = self.evaluate(var_x.value())
# Verify that the numpy value is copied to the variable.
self.assertAllEqual(x, val)
def test_calculating_batch_params(self):
# This verifies that we calculate the number of steps when the batch size
# is specified.
with self.cached_session():
# 64 is the number of input samples.
inputs = np.zeros((64, 3), dtype=np.float32)
# The number of replicas is equal to 3.
strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:0',
'/device:CPU:0',
'/device:GPU:1'])
with self.assertRaisesRegexp(ValueError, 'Please specify a batch_size '
'that is smaller than'):
# The batch size(128) is larger than the number of input
# samples(64).
distributed_training_utils.get_input_batch_params(inputs,
128,
strategy)
with self.assertRaisesRegexp(ValueError, 'is smaller than the number '
'of replicas'):
# The batch size(32) * num_replicas_in_sync(3) is 96 which is greater
# than the number of input samples(64).
distributed_training_utils.get_input_batch_params(inputs,
32,
strategy)
      # The number of replicas is now equal to 2.
strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:0',
'/device:CPU:0'])
# 32 is the batch size per replica.
steps = distributed_training_utils.get_input_batch_params(inputs,
32,
strategy)
      # The number of batches is the ratio of input samples (64) to the
      # batch size (32), which is 2. The number of steps (1) is the ratio of
      # the number of batches (2) to the number of replicas (2).
self.assertEqual(steps, 1)
# 16 is the batch size per replica.
steps = distributed_training_utils.get_input_batch_params(inputs,
16,
strategy)
      # The number of batches is the ratio of input samples (64) to the
      # batch size (16), which is 4. The number of steps (2) is the ratio of
      # the number of batches (4) to the number of replicas (2).
self.assertEqual(steps, 2)
def test_calculating_batch_size(self):
with self.cached_session():
# 64 is the number of input samples.
inputs = np.zeros((64, 3), dtype=np.float32)
targets = np.zeros((64, 4), dtype=np.float32)
model = get_model()
optimizer = gradient_descent.GradientDescentOptimizer(0.001)
loss = 'mse'
strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:0',
'/device:CPU:0'])
strategy._require_static_shapes = True
model.compile(optimizer, loss, distribute=strategy)
iterator = model._distribution_standardize_user_data(inputs,
targets,
batch_size=None,
check_steps=True,
steps_name='steps',
steps=3)
      # The global batch size (21) across all replicas is the input sample
      # count (64) divided by the number of steps (3), and the per-device
      # batch size (10) is the global batch size (21) divided by the number
      # of replicas (2). Both divisions are rounded down to integers.
self.assertEqual(10, distributed_training_utils.get_batch_dimension(
iterator._iterator))
@combinations.generate(strategy_combinations())
def test_calling_model_with_numpy_arrays(self, distribution):
with self.cached_session():
model = get_model()
optimizer = gradient_descent.GradientDescentOptimizer(0.001)
loss = 'mse'
metrics = ['mae']
model.compile(optimizer, loss, metrics=metrics, distribute=distribution)
inputs = np.zeros((64, 3), dtype=np.float32)
targets = np.zeros((64, 4), dtype=np.float32)
# Call fit with validation data
model.fit(inputs, targets, epochs=1, batch_size=2, verbose=0,
validation_data=(inputs, targets))
      # TODO(anjalisridhar): We need tests for when the batch size and steps
      # are smaller and result in a batch_size and steps value of 0.
model.evaluate(inputs, targets)
# with steps
model.evaluate(inputs, targets, steps=2)
# with batch_size
model.evaluate(inputs, targets, batch_size=8)
model.predict(inputs)
# with steps
model.predict(inputs, steps=2)
# with batch_size
model.predict(inputs, batch_size=8)
@combinations.generate(strategy_combinations())
def test_calling_model_with_nested_numpy_arrays(self, distribution):
with self.cached_session():
model = multi_input_output_model()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=0.001)
loss = 'mse'
model.compile(optimizer, loss, distribute=distribution)
input_a_np = np.asarray(np.random.random((64, 3)), dtype=np.float32)
input_b_np = np.asarray(np.random.random((64, 5)), dtype=np.float32)
inputs = [input_a_np, input_b_np]
output_d_np = np.asarray(np.random.random((64, 7)), dtype=np.float32)
output_e_np = np.asarray(np.random.random((64, 7)), dtype=np.float32)
targets = [output_d_np, output_e_np]
# Call fit with validation data
model.fit(inputs, targets, epochs=1, batch_size=8, verbose=0)
      # TODO(anjalisridhar): We need tests for when the batch size and steps
      # are smaller and result in a batch_size and steps value of 0.
model.evaluate(inputs, targets)
# with steps
model.evaluate(inputs, targets, steps=2)
# with batch_size
model.evaluate(inputs, targets, batch_size=8)
model.predict(inputs)
# with steps
model.predict(inputs, steps=2)
# with batch_size
model.predict(inputs, batch_size=8)
@combinations.generate(strategy_minus_tpu_combinations())
def test_numpy_with_sample_weights(self, distribution):
model = get_model()
optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
model.compile(optimizer, loss, distribute=distribution)
inputs = np.zeros((10, 3), np.float32)
targets = np.zeros((10, 4), np.float32)
sample_weights = np.ones((10), np.float32)
model.fit(inputs, targets, sample_weight=sample_weights, epochs=1,
steps_per_epoch=2, verbose=1)
@combinations.generate(strategy_combinations())
def test_flatten_predict_outputs(self, distribution):
with self.cached_session():
model = multi_input_output_model()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=0.001)
loss = 'mse'
model.compile(optimizer, loss, distribute=distribution)
# We take 6 input samples with each input having a dimension of 3 or 5.
input_a_np = np.asarray(np.random.random((6, 3)), dtype=np.float32)
input_b_np = np.asarray(np.random.random((6, 5)), dtype=np.float32)
inputs = [input_a_np, input_b_np]
outs = model.predict(inputs, steps=1)
      # `predict` returns a list that is equal in length to the number of model
      # outputs. In this test our model has two outputs, and each element of
      # `outs` corresponds to all the samples of one of the model outputs.
self.assertEqual(2, len(outs))
      # Each of the output samples has a dimension of 7. We should process all
      # the available input samples (6).
self.assertAllEqual([6, 7], outs[0].shape)
self.assertAllEqual([6, 7], outs[1].shape)
class TestDistributionStrategyWithDatasets(test.TestCase,
parameterized.TestCase):
@combinations.generate(strategy_combinations())
def test_calling_model_on_same_dataset(self, distribution):
with self.cached_session():
model = get_model()
optimizer = gradient_descent.GradientDescentOptimizer(0.001)
loss = 'mse'
metrics = ['mae', keras.metrics.CategoricalAccuracy()]
model.compile(optimizer, loss, metrics=metrics, distribute=distribution)
dataset = get_dataset(distribution)
# Call fit with validation data
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
validation_data=dataset, validation_steps=2)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
validation_data=dataset, validation_steps=2)
model.predict(get_predict_dataset(distribution), steps=2)
@combinations.generate(strategy_combinations())
def test_model_interleaved_eval_same_as_direct_eval(self, distribution):
with self.cached_session():
user_controlled_model = get_model()
user_controlled_model.compile(
gradient_descent.GradientDescentOptimizer(0.001),
loss='mse',
metrics=['mae', keras.metrics.CategoricalAccuracy()],
distribute=distribution)
interleaved_model = get_model()
interleaved_model.set_weights(user_controlled_model.get_weights())
interleaved_model.compile(
gradient_descent.GradientDescentOptimizer(0.001),
loss='mse',
metrics=['mae', keras.metrics.CategoricalAccuracy()],
distribute=distribution)
dataset = get_dataset(distribution)
# Call fit with validation interleaved
interleaved_output = interleaved_model.fit(
dataset, epochs=2, steps_per_epoch=2, verbose=1,
validation_data=dataset, validation_steps=2, shuffle=False)
# Manually control the validation running after each epoch.
user_controlled_output = []
for _ in range(2):
user_controlled_model.fit(
dataset, epochs=1, steps_per_epoch=2, verbose=1, shuffle=False)
user_controlled_output.append(
user_controlled_model.evaluate(dataset, steps=2))
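      # evaluate() returns results in compile order -- [loss,
      # mean_absolute_error, categorical_accuracy] here -- so indices 0/1/2
      # below pick out the matching metric from each user-controlled run.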
self.assertEqual(interleaved_output.history['val_loss'],
[x[0] for x in user_controlled_output])
self.assertEqual(interleaved_output.history['val_mean_absolute_error'],
[x[1] for x in user_controlled_output])
self.assertEqual(interleaved_output.history['val_categorical_accuracy'],
[x[2] for x in user_controlled_output])
  # TODO(priyag): Enable this test for TPU. Currently tuples/dicts don't work,
  # as clone_model's input_tensors argument only seems to accept lists and not
  # tuples or dicts.
def test_fit_with_tuple_and_dict_dataset_inputs(self):
with self.cached_session():
model = multi_input_output_model()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=0.001)
loss = 'mse'
metrics = ['mae', keras.metrics.CategoricalAccuracy()]
strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:0',
'/device:CPU:0'])
model.compile(optimizer, loss, metrics=metrics, distribute=strategy)
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 5))
output_d_np = np.random.random((10, 7))
output_e_np = np.random.random((10, 7))
# Test with tuples
dataset_tuple = dataset_ops.Dataset.from_tensor_slices((
(input_a_np, input_b_np), (output_d_np, output_e_np)))
dataset_tuple = dataset_tuple.repeat(100)
dataset_tuple = dataset_tuple.batch(10)
model.fit(dataset_tuple, epochs=1, steps_per_epoch=2, verbose=1)
# Test with dict
dataset_dict = dataset_ops.Dataset.from_tensor_slices((
{'input_a': input_a_np, 'input_b': input_b_np},
(output_d_np, output_e_np)))
dataset_dict = dataset_dict.repeat(100)
dataset_dict = dataset_dict.batch(10)
model.fit(dataset_dict, epochs=1, steps_per_epoch=2, verbose=1)
@combinations.generate(strategy_combinations())
def test_fit_eval_and_predict_methods_on_dataset(self, distribution):
with self.cached_session():
model = get_model()
optimizer = gradient_descent.GradientDescentOptimizer(0.001)
loss = 'mse'
metrics = ['mae', keras.metrics.CategoricalAccuracy()]
model.compile(optimizer, loss, metrics=metrics, distribute=distribution)
dataset = get_dataset(distribution)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
model.evaluate(dataset, steps=2, verbose=1)
model.predict(get_predict_dataset(distribution), steps=2)
@combinations.generate(strategy_and_optimizer_combinations())
def test_fit_eval_and_predict_with_optimizer(self, distribution, optimizer):
with self.cached_session():
model = get_model()
loss = 'mse'
model.compile(optimizer(), loss, distribute=distribution)
dataset = get_dataset(distribution)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
model.evaluate(dataset, steps=2, verbose=1)
model.predict(get_predict_dataset(distribution), steps=2)
@combinations.generate(strategy_minus_tpu_combinations())
def test_dataset_with_sample_weights(self, distribution):
model = get_model()
optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
model.compile(optimizer, loss, distribute=distribution)
inputs = np.zeros((10, 3), np.float32)
targets = np.zeros((10, 4), np.float32)
sample_weights = np.ones((10), np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets,
sample_weights))
dataset = dataset.repeat()
dataset = dataset.batch(10)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
model.evaluate(dataset, steps=2, verbose=1)
model.predict(dataset, steps=2)
def test_dataset_input_shape_validation(self):
with self.cached_session():
model = get_model()
optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:1',
'/device:GPU:0'])
model.compile(optimizer, loss, distribute=strategy)
# User forgets to batch the dataset
inputs = np.zeros((10, 3), dtype=np.float32)
targets = np.zeros((10, 4), dtype=np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
with self.assertRaisesRegexp(ValueError, 'expected input to have shape'):
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0)
# Wrong input shape
inputs = np.zeros((10, 5), dtype=np.float32)
targets = np.zeros((10, 4), dtype=np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
with self.assertRaisesRegexp(ValueError,
'expected input to have shape'):
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0)
@combinations.generate(combinations.combine(
distribution=[combinations.tpu_strategy_one_step],
mode=['graph']))
def test_dataset_input_shape_fully_defined(self, distribution):
with self.cached_session():
model = get_model()
optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
model.compile(optimizer, loss, distribute=distribution)
dataset = get_dataset(distribution)
      # Input shapes are not fully known: the batch dimension is unknown
      # because we are not using the drop_remainder argument.
dataset = dataset.repeat(100).batch(10)
with self.assertRaisesRegexp(ValueError, 'requires fully defined shapes'):
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0)
def test_learning_phase_value(self):
# TODO(anjalisridhar): Modify this test to use Lambdas since we can compare
# meaningful values. Currently we don't pass the learning phase if the
# Lambda layer uses the learning phase.
with self.cached_session():
x = keras.layers.Input(shape=(1,), name='input')
y = keras.layers.Dense(1, kernel_initializer='ones')(x)
z = keras.layers.Dropout(0.9999)(y)
model = keras.Model(x, z)
initial_weights = model.get_weights()
optimizer = gradient_descent.GradientDescentOptimizer(0.005)
loss = 'mse'
metrics = ['acc']
strategy = mirrored_strategy.MirroredStrategy(
['/device:GPU:0', '/device:GPU:1'])
model.compile(optimizer, loss, metrics=metrics, distribute=strategy)
inputs = np.ones((10, 1), dtype=np.float32)
targets = np.ones((10, 1), dtype=np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat().batch(8)
hist = model.fit(dataset, epochs=1, steps_per_epoch=20, verbose=1)
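      # With a dropout rate of 0.9999 nearly every activation is zeroed during
      # training, so the training accuracy should be ~0; dropout is disabled
      # at inference time, which the predict check below relies on.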
self.assertAlmostEqual(hist.history['acc'][0], 0, 0)
model.set_weights(initial_weights)
# TODO(psv/anjalisridhar): Enable these lines after we fix b/117431185.
# evaluate_output = model.evaluate(dataset, steps=20)
# self.assertAlmostEqual(evaluate_output[1], 1, 0)
inputs = np.ones((10, 1), dtype=np.float32)
predict_dataset = dataset_ops.Dataset.from_tensor_slices(inputs)
predict_dataset = predict_dataset.repeat().batch(5)
output = model.predict(predict_dataset, steps=10)
      # `predict` runs for 10 steps and each step processes a batch of 5 on
      # each of the 2 replicas, i.e. 10 samples per step and 100 in total.
ref_output = np.ones((100, 1), dtype=np.float32)
self.assertArrayNear(output, ref_output, 1e-1)
class TestDistributionStrategyErrorCases(test.TestCase, parameterized.TestCase):
def test_validating_dataset_input_tensors_with_shape_mismatch(self):
with self.cached_session():
strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:0',
'/device:CPU:0'])
a = constant_op.constant([1, 2], shape=(1, 2))
b = constant_op.constant([[1, 2], [1, 2]], shape=(2, 2))
x = values.DistributedValues({'/device:CPU:0': a, '/device:GPU:0': b})
y = values.DistributedValues({'/device:CPU:0': a, '/device:GPU:0': a})
with strategy.scope():
# Removed device and input tensor shape details from the error message
# since the order of the device and the corresponding input tensor shape
# is not deterministic over different runs.
with self.assertRaisesRegexp(ValueError,
'Input tensor shapes do not match for '
'distributed tensor inputs '
'DistributedValues:.+'):
distributed_training_utils.validate_distributed_dataset_inputs(
strategy, x, y)
def test_validating_dataset_input_tensors_with_dtype_mismatch(self):
with self.cached_session():
strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:0',
'/device:CPU:0'])
a = constant_op.constant([1, 2], shape=(1, 2), dtype=dtypes.int32)
b = constant_op.constant([1, 2], shape=(1, 2), dtype=dtypes.float64)
x = values.DistributedValues({'/device:CPU:0': a, '/device:GPU:0': b})
y = values.DistributedValues({'/device:CPU:0': a, '/device:GPU:0': a})
with strategy.scope():
# Removed device and input tensor dtype details from the error message
# since the order of the device and the corresponding input tensor dtype
# is not deterministic over different runs.
with self.assertRaisesRegexp(ValueError,
'Input tensor dtypes do not match for '
'distributed tensor inputs '
'DistributedValues:.+'):
distributed_training_utils.validate_distributed_dataset_inputs(
strategy, x, y)
def test_unsupported_features(self):
with self.cached_session():
model = get_model()
optimizer = gradient_descent.GradientDescentOptimizer(0.001)
loss = 'mse'
metrics = ['mae']
strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:1',
'/device:GPU:0'])
model.compile(optimizer, loss, metrics=metrics, distribute=strategy)
dataset = get_dataset(strategy)
# Test with validation split
with self.assertRaisesRegexp(
ValueError, '`validation_split` argument is not '
'supported when input `x` is a dataset or a '
'dataset iterator.+'):
model.fit(dataset,
epochs=1, steps_per_epoch=2, verbose=0,
validation_split=0.5, validation_steps=2)
# Test with sample weight.
sample_weight = np.random.random((10,))
with self.assertRaisesRegexp(
ValueError, '`sample_weight` argument is not supported when input '
'`x` is a dataset or a dataset iterator.'):
model.fit(
dataset,
epochs=1,
steps_per_epoch=2,
verbose=0,
sample_weight=sample_weight)
# Test with not specifying the `steps` argument.
with self.assertRaisesRegexp(
ValueError, 'you should specify the `steps_per_epoch` argument'):
model.fit(dataset, epochs=1, verbose=0)
with self.assertRaisesRegexp(ValueError,
'you should specify the `steps` argument'):
model.evaluate(dataset, verbose=0)
with self.assertRaisesRegexp(ValueError,
'you should specify the `steps` argument'):
model.predict(dataset, verbose=0)
def test_calling_with_unsupported_predefined_callbacks(self):
with self.cached_session():
model = get_model()
optimizer = gradient_descent.GradientDescentOptimizer(0.001)
loss = 'mse'
metrics = ['mae']
strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:1',
'/device:GPU:0'])
model.compile(optimizer, loss, metrics=metrics, distribute=strategy)
dataset = get_dataset(strategy)
def schedule(_):
return 0.001
with self.assertRaisesRegexp(ValueError,
'LearningRateScheduler callback is not '
'supported with DistributionStrategy.'):
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
callbacks=[keras.callbacks.LearningRateScheduler(schedule)])
with self.assertRaisesRegexp(ValueError,
'ReduceLROnPlateau callback is not '
'supported with DistributionStrategy.'):
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
callbacks=[keras.callbacks.ReduceLROnPlateau()])
with self.assertRaisesRegexp(ValueError,
'histogram_freq in the TensorBoard callback '
'is not supported when using '
'DistributionStrategy.'):
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
callbacks=[keras.callbacks.TensorBoard(histogram_freq=10)])
class TestDistributionStrategyWithLossMasking(test.TestCase):
# TODO(priyag): Enable all strategies for this test. Currently it does not
# work for TPU due to some invalid datatype.
def test_masking(self):
with self.cached_session():
np.random.seed(1337)
x = np.array([[[1], [1]], [[0], [0]]])
model = keras.models.Sequential()
model.add(keras.layers.Masking(mask_value=0, input_shape=(2, 1)))
model.add(
keras.layers.TimeDistributed(
keras.layers.Dense(1, kernel_initializer='one')))
strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:1',
'/device:GPU:0'])
model.compile(loss='mse',
optimizer=gradient_descent.GradientDescentOptimizer(0.01),
distribute=strategy)
y = np.array([[[1], [1]], [[1], [1]]])
dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
hist = model.fit(x=dataset, epochs=1, steps_per_epoch=2)
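      # The ones-initialized Dense maps the unmasked inputs ([1]) exactly onto
      # their targets ([1]), and the all-zero sample is excluded by the
      # Masking layer, so the masked MSE loss is exactly 0.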
self.assertEqual(hist.history['loss'][0], 0)
class TestDistributionStrategyWithNormalizationLayer(
test.TestCase, parameterized.TestCase):
@combinations.generate(strategy_combinations())
def test_batchnorm_correctness(self, distribution):
with self.cached_session():
model = keras.models.Sequential()
norm = keras.layers.BatchNormalization(input_shape=(10,), momentum=0.8)
model.add(norm)
model.compile(loss='mse',
optimizer=gradient_descent.GradientDescentOptimizer(0.01),
distribute=distribution)
      # centered on 5.0, with a standard deviation of 10.0
x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 10))
x = x.astype('float32')
dataset = dataset_ops.Dataset.from_tensor_slices((x, x))
dataset = dataset.repeat(100)
dataset = batch_wrapper(dataset, 32, distribution)
predict_dataset = dataset_ops.Dataset.from_tensor_slices(x)
predict_dataset = predict_dataset.repeat(100)
predict_dataset = batch_wrapper(predict_dataset, 32, distribution)
model.fit(dataset, epochs=4, verbose=0, steps_per_epoch=10)
out = model.predict(predict_dataset, steps=2)
out -= keras.backend.eval(norm.beta)
out /= keras.backend.eval(norm.gamma)
np.testing.assert_allclose(out.mean(), 0.0, atol=1e-1)
np.testing.assert_allclose(out.std(), 1.0, atol=1e-1)
class TestDistributionStrategyCorrectness(test.TestCase,
parameterized.TestCase):
@combinations.generate(strategy_combinations())
def test_metric_correctness(self, distribution):
with self.cached_session():
keras.backend.set_image_data_format('channels_last')
num_samples = 10000
x_train = np.random.randint(0, 2, num_samples)
x_train = np.reshape(x_train, (num_samples, 1))
y_train = x_train
x_train = x_train.astype('float32')
y_train = y_train.astype('float32')
# Create identity model.
model = keras.Sequential()
model.add(
keras.layers.Dense(1, input_shape=(1,), kernel_initializer='ones'))
model.compile(
loss=keras.losses.mean_squared_error,
optimizer=gradient_descent.GradientDescentOptimizer(0.5),
metrics=[keras.metrics.BinaryAccuracy()],
distribute=distribution)
batch_size = 64
batch_size //= distribution.num_replicas_in_sync
train_dataset = dataset_ops.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = batch_wrapper(train_dataset, batch_size, distribution)
history = model.fit(x=train_dataset, epochs=1, steps_per_epoch=10)
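      # The ones-initialized single-weight model is the identity map and the
      # labels equal the inputs, so every prediction matches its target and
      # binary accuracy is exactly 1.0.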
self.assertEqual(history.history['binary_accuracy'], [1.0])
@combinations.generate(strategy_and_inputs())
def test_correctness(self, distribution, use_numpy):
with self.cached_session():
tolerance = 1e-5
if isinstance(distribution, mirrored_strategy.MirroredStrategy):
        # TODO(b/119257215): use the default one once the flakiness is fixed.
tolerance = 1e-4
keras.backend.set_image_data_format('channels_last')
np.random.seed(_RANDOM_SEED)
random_seed.set_random_seed(_RANDOM_SEED)
# Train, eval, and predict datasets are created with the same input numpy
# arrays.
# TODO(xiejw): Change this back to 10000, once we support final partial
# batch.
num_samples = 9984
x_train = np.random.rand(num_samples, 1)
y_train = 3 * x_train
x_train = x_train.astype('float32')
y_train = y_train.astype('float32')
x_predict = [[1.], [2.], [3.], [4.]]
      # The model is built once and the initial weights are saved. These are
      # used to initialize the model for both the distribution and
      # non-distribution runs. In addition, we add a few non-linear layers to
      # make it non-trivial.
model = keras.Sequential()
model.add(keras.layers.Dense(10, activation='relu', input_shape=(1,)))
model.add(keras.layers.Dense(10, activation='relu'))
model.add(keras.layers.Dense(10, activation='relu'))
model.add(keras.layers.Dense(1))
initial_weights = model.get_weights()
def fit_and_predict(with_distribution=None):
        # We have initialized the model with the same weights for the
        # distribution and non-distribution runs.
model.set_weights(initial_weights)
model.compile(
loss=keras.losses.mean_squared_error,
optimizer=gradient_descent.GradientDescentOptimizer(0.5),
distribute=with_distribution)
training_inputs, eval_inputs, predict_inputs = (
get_correctness_test_inputs(use_numpy, with_distribution,
x_train, y_train, x_predict))
model.fit(**training_inputs)
eval_result = model.evaluate(**eval_inputs)
weights = model.get_weights()
predict_result = model.predict(**predict_inputs)
return weights, eval_result, predict_result
wts_with_ds, eval_with_ds, predict_with_ds = fit_and_predict(
with_distribution=distribution)
wts_without_ds, eval_without_ds, predict_without_ds = fit_and_predict(
with_distribution=None)
      # Verify that the weights, eval results, and predict outputs are the
      # same within some limit of tolerance.
self.assertAllClose(
wts_with_ds, wts_without_ds, atol=tolerance, rtol=tolerance)
self.assertAllClose(
eval_with_ds, eval_without_ds, atol=tolerance, rtol=tolerance)
self.assertAllClose(
predict_with_ds, predict_without_ds, atol=tolerance, rtol=tolerance)
# TODO(priyag): Add a test for TPUStrategy with steps_per_run > 1.
if __name__ == '__main__':
test.main()
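# Running this file directly invokes test.main() above, which discovers and
# runs all of the test classes defined in this module, e.g.:
#   python keras_test.py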
avg_line_length: 40.847128 | max_line_length: 80 | alphanum_fraction: 0.657507
validation_data=dataset, validation_steps=2)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
validation_data=dataset, validation_steps=2)
model.predict(get_predict_dataset(distribution), steps=2)
@combinations.generate(strategy_combinations())
def test_model_interleaved_eval_same_as_direct_eval(self, distribution):
with self.cached_session():
user_controlled_model = get_model()
user_controlled_model.compile(
gradient_descent.GradientDescentOptimizer(0.001),
loss='mse',
metrics=['mae', keras.metrics.CategoricalAccuracy()],
distribute=distribution)
interleaved_model = get_model()
interleaved_model.set_weights(user_controlled_model.get_weights())
interleaved_model.compile(
gradient_descent.GradientDescentOptimizer(0.001),
loss='mse',
metrics=['mae', keras.metrics.CategoricalAccuracy()],
distribute=distribution)
dataset = get_dataset(distribution)
interleaved_output = interleaved_model.fit(
dataset, epochs=2, steps_per_epoch=2, verbose=1,
validation_data=dataset, validation_steps=2, shuffle=False)
user_controlled_output = []
for _ in range(2):
user_controlled_model.fit(
dataset, epochs=1, steps_per_epoch=2, verbose=1, shuffle=False)
user_controlled_output.append(
user_controlled_model.evaluate(dataset, steps=2))
self.assertEqual(interleaved_output.history['val_loss'],
[x[0] for x in user_controlled_output])
self.assertEqual(interleaved_output.history['val_mean_absolute_error'],
[x[1] for x in user_controlled_output])
self.assertEqual(interleaved_output.history['val_categorical_accuracy'],
[x[2] for x in user_controlled_output])
  # TODO(priyag): Enable this test for TPU. Currently tuples/dict don't work
  # as clone_model's input_tensors argument only seems to accept list and not
  # tuples or dict.
def test_fit_with_tuple_and_dict_dataset_inputs(self):
with self.cached_session():
model = multi_input_output_model()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=0.001)
loss = 'mse'
metrics = ['mae', keras.metrics.CategoricalAccuracy()]
strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:0',
'/device:CPU:0'])
model.compile(optimizer, loss, metrics=metrics, distribute=strategy)
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 5))
output_d_np = np.random.random((10, 7))
output_e_np = np.random.random((10, 7))
dataset_tuple = dataset_ops.Dataset.from_tensor_slices((
(input_a_np, input_b_np), (output_d_np, output_e_np)))
dataset_tuple = dataset_tuple.repeat(100)
dataset_tuple = dataset_tuple.batch(10)
model.fit(dataset_tuple, epochs=1, steps_per_epoch=2, verbose=1)
dataset_dict = dataset_ops.Dataset.from_tensor_slices((
{'input_a': input_a_np, 'input_b': input_b_np},
(output_d_np, output_e_np)))
dataset_dict = dataset_dict.repeat(100)
dataset_dict = dataset_dict.batch(10)
model.fit(dataset_dict, epochs=1, steps_per_epoch=2, verbose=1)
@combinations.generate(strategy_combinations())
def test_fit_eval_and_predict_methods_on_dataset(self, distribution):
with self.cached_session():
model = get_model()
optimizer = gradient_descent.GradientDescentOptimizer(0.001)
loss = 'mse'
metrics = ['mae', keras.metrics.CategoricalAccuracy()]
model.compile(optimizer, loss, metrics=metrics, distribute=distribution)
dataset = get_dataset(distribution)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
model.evaluate(dataset, steps=2, verbose=1)
model.predict(get_predict_dataset(distribution), steps=2)
@combinations.generate(strategy_and_optimizer_combinations())
def test_fit_eval_and_predict_with_optimizer(self, distribution, optimizer):
with self.cached_session():
model = get_model()
loss = 'mse'
model.compile(optimizer(), loss, distribute=distribution)
dataset = get_dataset(distribution)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
model.evaluate(dataset, steps=2, verbose=1)
model.predict(get_predict_dataset(distribution), steps=2)
@combinations.generate(strategy_minus_tpu_combinations())
def test_dataset_with_sample_weights(self, distribution):
model = get_model()
optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
model.compile(optimizer, loss, distribute=distribution)
inputs = np.zeros((10, 3), np.float32)
targets = np.zeros((10, 4), np.float32)
sample_weights = np.ones((10), np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets,
sample_weights))
dataset = dataset.repeat()
dataset = dataset.batch(10)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
model.evaluate(dataset, steps=2, verbose=1)
model.predict(dataset, steps=2)
def test_dataset_input_shape_validation(self):
with self.cached_session():
model = get_model()
optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:1',
'/device:GPU:0'])
model.compile(optimizer, loss, distribute=strategy)
inputs = np.zeros((10, 3), dtype=np.float32)
targets = np.zeros((10, 4), dtype=np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
with self.assertRaisesRegexp(ValueError, 'expected input to have shape'):
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0)
inputs = np.zeros((10, 5), dtype=np.float32)
targets = np.zeros((10, 4), dtype=np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
with self.assertRaisesRegexp(ValueError,
'expected input to have shape'):
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0)
@combinations.generate(combinations.combine(
distribution=[combinations.tpu_strategy_one_step],
mode=['graph']))
def test_dataset_input_shape_fully_defined(self, distribution):
with self.cached_session():
model = get_model()
optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
model.compile(optimizer, loss, distribute=distribution)
dataset = get_dataset(distribution)
dataset = dataset.repeat(100).batch(10)
with self.assertRaisesRegexp(ValueError, 'requires fully defined shapes'):
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0)
def test_learning_phase_value(self):
# Lambda layer uses the learning phase.
with self.cached_session():
x = keras.layers.Input(shape=(1,), name='input')
y = keras.layers.Dense(1, kernel_initializer='ones')(x)
z = keras.layers.Dropout(0.9999)(y)
model = keras.Model(x, z)
initial_weights = model.get_weights()
optimizer = gradient_descent.GradientDescentOptimizer(0.005)
loss = 'mse'
metrics = ['acc']
strategy = mirrored_strategy.MirroredStrategy(
['/device:GPU:0', '/device:GPU:1'])
model.compile(optimizer, loss, metrics=metrics, distribute=strategy)
inputs = np.ones((10, 1), dtype=np.float32)
targets = np.ones((10, 1), dtype=np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat().batch(8)
hist = model.fit(dataset, epochs=1, steps_per_epoch=20, verbose=1)
self.assertAlmostEqual(hist.history['acc'][0], 0, 0)
model.set_weights(initial_weights)
# TODO(psv/anjalisridhar): Enable these lines after we fix b/117431185.
# evaluate_output = model.evaluate(dataset, steps=20)
# self.assertAlmostEqual(evaluate_output[1], 1, 0)
inputs = np.ones((10, 1), dtype=np.float32)
predict_dataset = dataset_ops.Dataset.from_tensor_slices(inputs)
predict_dataset = predict_dataset.repeat().batch(5)
output = model.predict(predict_dataset, steps=10)
# `predict` runs for 10 steps and in each step you process 10 samples.
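      # In inference mode dropout is a no-op, so each prediction is simply
      # input (1.0) * kernel (ones) = 1.0, giving the all-ones reference.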
ref_output = np.ones((100, 1), dtype=np.float32)
self.assertArrayNear(output, ref_output, 1e-1)
class TestDistributionStrategyErrorCases(test.TestCase, parameterized.TestCase):
def test_validating_dataset_input_tensors_with_shape_mismatch(self):
with self.cached_session():
strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:0',
'/device:CPU:0'])
a = constant_op.constant([1, 2], shape=(1, 2))
b = constant_op.constant([[1, 2], [1, 2]], shape=(2, 2))
x = values.DistributedValues({'/device:CPU:0': a, '/device:GPU:0': b})
y = values.DistributedValues({'/device:CPU:0': a, '/device:GPU:0': a})
with strategy.scope():
# Removed device and input tensor shape details from the error message
# since the order of the device and the corresponding input tensor shape
# is not deterministic over different runs.
with self.assertRaisesRegexp(ValueError,
'Input tensor shapes do not match for '
'distributed tensor inputs '
'DistributedValues:.+'):
distributed_training_utils.validate_distributed_dataset_inputs(
strategy, x, y)
def test_validating_dataset_input_tensors_with_dtype_mismatch(self):
with self.cached_session():
strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:0',
'/device:CPU:0'])
a = constant_op.constant([1, 2], shape=(1, 2), dtype=dtypes.int32)
b = constant_op.constant([1, 2], shape=(1, 2), dtype=dtypes.float64)
x = values.DistributedValues({'/device:CPU:0': a, '/device:GPU:0': b})
y = values.DistributedValues({'/device:CPU:0': a, '/device:GPU:0': a})
with strategy.scope():
# Removed device and input tensor dtype details from the error message
# since the order of the device and the corresponding input tensor dtype
# is not deterministic over different runs.
with self.assertRaisesRegexp(ValueError,
'Input tensor dtypes do not match for '
'distributed tensor inputs '
'DistributedValues:.+'):
distributed_training_utils.validate_distributed_dataset_inputs(
strategy, x, y)
def test_unsupported_features(self):
with self.cached_session():
model = get_model()
optimizer = gradient_descent.GradientDescentOptimizer(0.001)
loss = 'mse'
metrics = ['mae']
strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:1',
'/device:GPU:0'])
model.compile(optimizer, loss, metrics=metrics, distribute=strategy)
dataset = get_dataset(strategy)
# Test with validation split
with self.assertRaisesRegexp(
ValueError, '`validation_split` argument is not '
'supported when input `x` is a dataset or a '
'dataset iterator.+'):
model.fit(dataset,
epochs=1, steps_per_epoch=2, verbose=0,
validation_split=0.5, validation_steps=2)
# Test with sample weight.
sample_weight = np.random.random((10,))
with self.assertRaisesRegexp(
ValueError, '`sample_weight` argument is not supported when input '
'`x` is a dataset or a dataset iterator.'):
model.fit(
dataset,
epochs=1,
steps_per_epoch=2,
verbose=0,
sample_weight=sample_weight)
# Test with not specifying the `steps` argument.
with self.assertRaisesRegexp(
ValueError, 'you should specify the `steps_per_epoch` argument'):
model.fit(dataset, epochs=1, verbose=0)
with self.assertRaisesRegexp(ValueError,
'you should specify the `steps` argument'):
model.evaluate(dataset, verbose=0)
with self.assertRaisesRegexp(ValueError,
'you should specify the `steps` argument'):
model.predict(dataset, verbose=0)
def test_calling_with_unsupported_predefined_callbacks(self):
with self.cached_session():
model = get_model()
optimizer = gradient_descent.GradientDescentOptimizer(0.001)
loss = 'mse'
metrics = ['mae']
strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:1',
'/device:GPU:0'])
model.compile(optimizer, loss, metrics=metrics, distribute=strategy)
dataset = get_dataset(strategy)
def schedule(_):
return 0.001
with self.assertRaisesRegexp(ValueError,
'LearningRateScheduler callback is not '
'supported with DistributionStrategy.'):
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
callbacks=[keras.callbacks.LearningRateScheduler(schedule)])
with self.assertRaisesRegexp(ValueError,
'ReduceLROnPlateau callback is not '
'supported with DistributionStrategy.'):
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
callbacks=[keras.callbacks.ReduceLROnPlateau()])
with self.assertRaisesRegexp(ValueError,
'histogram_freq in the TensorBoard callback '
'is not supported when using '
'DistributionStrategy.'):
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
callbacks=[keras.callbacks.TensorBoard(histogram_freq=10)])
class TestDistributionStrategyWithLossMasking(test.TestCase):
# TODO(priyag): Enable all strategies for this test. Currently it does not
# work for TPU due to some invalid datatype.
def test_masking(self):
with self.cached_session():
np.random.seed(1337)
x = np.array([[[1], [1]], [[0], [0]]])
model = keras.models.Sequential()
model.add(keras.layers.Masking(mask_value=0, input_shape=(2, 1)))
model.add(
keras.layers.TimeDistributed(
keras.layers.Dense(1, kernel_initializer='one')))
strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:1',
'/device:GPU:0'])
model.compile(loss='mse',
optimizer=gradient_descent.GradientDescentOptimizer(0.01),
distribute=strategy)
y = np.array([[[1], [1]], [[1], [1]]])
dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
hist = model.fit(x=dataset, epochs=1, steps_per_epoch=2)
self.assertEqual(hist.history['loss'][0], 0)
class TestDistributionStrategyWithNormalizationLayer(
test.TestCase, parameterized.TestCase):
@combinations.generate(strategy_combinations())
def test_batchnorm_correctness(self, distribution):
with self.cached_session():
model = keras.models.Sequential()
norm = keras.layers.BatchNormalization(input_shape=(10,), momentum=0.8)
model.add(norm)
model.compile(loss='mse',
optimizer=gradient_descent.GradientDescentOptimizer(0.01),
distribute=distribution)
# centered on 5.0, variance 10.0
x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 10))
x = x.astype('float32')
dataset = dataset_ops.Dataset.from_tensor_slices((x, x))
dataset = dataset.repeat(100)
dataset = batch_wrapper(dataset, 32, distribution)
predict_dataset = dataset_ops.Dataset.from_tensor_slices(x)
predict_dataset = predict_dataset.repeat(100)
predict_dataset = batch_wrapper(predict_dataset, 32, distribution)
model.fit(dataset, epochs=4, verbose=0, steps_per_epoch=10)
out = model.predict(predict_dataset, steps=2)
out -= keras.backend.eval(norm.beta)
out /= keras.backend.eval(norm.gamma)
np.testing.assert_allclose(out.mean(), 0.0, atol=1e-1)
np.testing.assert_allclose(out.std(), 1.0, atol=1e-1)
class TestDistributionStrategyCorrectness(test.TestCase,
parameterized.TestCase):
@combinations.generate(strategy_combinations())
def test_metric_correctness(self, distribution):
with self.cached_session():
keras.backend.set_image_data_format('channels_last')
num_samples = 10000
x_train = np.random.randint(0, 2, num_samples)
x_train = np.reshape(x_train, (num_samples, 1))
y_train = x_train
x_train = x_train.astype('float32')
y_train = y_train.astype('float32')
# Create identity model.
model = keras.Sequential()
model.add(
keras.layers.Dense(1, input_shape=(1,), kernel_initializer='ones'))
model.compile(
loss=keras.losses.mean_squared_error,
optimizer=gradient_descent.GradientDescentOptimizer(0.5),
metrics=[keras.metrics.BinaryAccuracy()],
distribute=distribution)
batch_size = 64
batch_size //= distribution.num_replicas_in_sync
train_dataset = dataset_ops.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = batch_wrapper(train_dataset, batch_size, distribution)
history = model.fit(x=train_dataset, epochs=1, steps_per_epoch=10)
self.assertEqual(history.history['binary_accuracy'], [1.0])
@combinations.generate(strategy_and_inputs())
def test_correctness(self, distribution, use_numpy):
with self.cached_session():
tolerance = 1e-5
if isinstance(distribution, mirrored_strategy.MirroredStrategy):
# TODO(b/119257215): use the default one once the flakyness is fixed.
tolerance = 1e-4
keras.backend.set_image_data_format('channels_last')
np.random.seed(_RANDOM_SEED)
random_seed.set_random_seed(_RANDOM_SEED)
# Train, eval, and predict datasets are created with the same input numpy
# arrays.
# TODO(xiejw): Change this back to 10000, once we support final partial
# batch.
num_samples = 9984
x_train = np.random.rand(num_samples, 1)
y_train = 3 * x_train
x_train = x_train.astype('float32')
y_train = y_train.astype('float32')
x_predict = [[1.], [2.], [3.], [4.]]
# The model is built once and the initial weights are saved.
# This is used to initialize the model for both the distribution and
# non-distribution run. In addition, we add few non-linear layers to make
# it non-trivial.
model = keras.Sequential()
model.add(keras.layers.Dense(10, activation='relu', input_shape=(1,)))
model.add(keras.layers.Dense(10, activation='relu'))
model.add(keras.layers.Dense(10, activation='relu'))
model.add(keras.layers.Dense(1))
initial_weights = model.get_weights()
def fit_and_predict(with_distribution=None):
# We have initialized the model to the same weight for the distribution
# and non-distribution run.
model.set_weights(initial_weights)
model.compile(
loss=keras.losses.mean_squared_error,
optimizer=gradient_descent.GradientDescentOptimizer(0.5),
distribute=with_distribution)
training_inputs, eval_inputs, predict_inputs = (
get_correctness_test_inputs(use_numpy, with_distribution,
x_train, y_train, x_predict))
model.fit(**training_inputs)
eval_result = model.evaluate(**eval_inputs)
weights = model.get_weights()
predict_result = model.predict(**predict_inputs)
return weights, eval_result, predict_result
wts_with_ds, eval_with_ds, predict_with_ds = fit_and_predict(
with_distribution=distribution)
wts_without_ds, eval_without_ds, predict_without_ds = fit_and_predict(
with_distribution=None)
# Verify that the weights, eval results, predict outputs are the same
# within some limits of tolerance.
self.assertAllClose(
wts_with_ds, wts_without_ds, atol=tolerance, rtol=tolerance)
self.assertAllClose(
eval_with_ds, eval_without_ds, atol=tolerance, rtol=tolerance)
self.assertAllClose(
predict_with_ds, predict_without_ds, atol=tolerance, rtol=tolerance)
# TODO(priyag): Add a test for TPUStrategy with steps_per_run > 1.
if __name__ == '__main__':
test.main()
scripts/json_validate.py | KhronosGroup/VulkanSC-Docs | Python | 2,942 bytes | ECL-2.0, Apache-2.0, CC-BY-4.0, MIT
#!/usr/bin/python3 -i
#
# Copyright (c) 2020 The Khronos Group Inc.
#
# SPDX-License-Identifier: Apache-2.0
# Description:
# -----------
# This script validates a json pipeline file against the schema files.
import os, sys
import re
import argparse
import json
import jsonschema
base_schema_filename = os.path.join("..", "json", "vk.json")
vkpcc_schema_filename = os.path.join("..", "json", "vkpcc.json")
# Parses input arguments
def ParseArgs():
parser = argparse.ArgumentParser()
parser.add_argument('json_file', help='The json file to validate')
return parser.parse_args()
def main():
args = ParseArgs()
jsonText = ""
baseSchemaText = ""
vkSchemaText = ""
# Exit with error if json or schema files do not exist
if not os.path.exists(args.json_file):
print('Error: json file \"%s\" does not exist.' % args.json_file)
sys.exit(1)
elif not os.path.exists(base_schema_filename):
print('Error: json file \"%s\" does not exist.' % base_schema_filename)
sys.exit(1)
elif not os.path.exists(vkpcc_schema_filename):
print('Error: json file \"%s\" does not exist.' % vkpcc_schema_filename)
sys.exit(1)
# Read the json schemas files in as text
with open(base_schema_filename) as baseSchemaFile:
baseSchemaText = baseSchemaFile.read()
with open(vkpcc_schema_filename) as vkSchemaFile:
vkSchemaText = vkSchemaFile.read()
with open(args.json_file) as jsonFile:
jsonText = jsonFile.read()
baseSchema = json.loads(baseSchemaText)
vkSchema = json.loads(vkSchemaText)
jsonData = json.loads(jsonText)
# Ensure that the generated vk.json schema is a valid schema
try:
jsonschema.Draft4Validator.check_schema(baseSchema)
print(base_schema_filename, "is valid")
except jsonschema.SchemaError as e:
print(base_schema_filename, "error: " + str(e))
# Ensure that vkpcc.json is also a valid schema
try:
jsonschema.Draft4Validator.check_schema(vkSchema)
print(vkpcc_schema_filename, "schema is valid")
except jsonschema.exceptions.SchemaError as e:
print(vkpcc_schema_filename, "schema error: " + str(e))
# Construct a schema validator object from the two schema files
schemaRefStore = {
baseSchema["id"] : baseSchema,
vkSchema["id"] : vkSchema
}
resolver = jsonschema.RefResolver.from_schema(baseSchema, store=schemaRefStore)
validator = jsonschema.Draft4Validator(vkSchema, resolver=resolver)
# Validate the input .json file using the schemas
for error in sorted(validator.iter_errors(jsonData), key=str):
print(error.message)
print(list(error.path))
for suberror in sorted(error.context, key=lambda e: e.schema_path):
print(list(suberror.path), suberror.message, sep="\n")
print("\n")
if __name__ == '__main__':
main()
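# Usage sketch (illustrative; the pipeline file name below is hypothetical,
# and ../json/vk.json and ../json/vkpcc.json must exist relative to the
# working directory):
#
#     python3 json_validate.py graphics_pipeline.json
#     ../json/vk.json is valid
#     ../json/vkpcc.json schema is valid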
common/models/mixins/description.py | uktrade/tamato | Python | 2,035 bytes | MIT
from django.db.models.fields import Field
from django.urls import NoReverseMatch
from django.urls import reverse
from polymorphic.managers import PolymorphicManager
from common.business_rules import NoBlankDescription
from common.business_rules import UpdateValidity
from common.models.mixins.validity import ValidityStartMixin
from common.models.mixins.validity import ValidityStartQueryset
from common.models.records import TrackedModelQuerySet
from common.util import classproperty
class DescriptionQueryset(ValidityStartQueryset, TrackedModelQuerySet):
pass
class DescriptionMixin(ValidityStartMixin):
objects = PolymorphicManager.from_queryset(DescriptionQueryset)()
business_rules = (
NoBlankDescription,
UpdateValidity,
)
@classproperty
def described_object_field(cls) -> Field:
for rel in cls.relations.keys():
if rel.name.startswith("described_"):
return rel
raise TypeError(f"{cls} should have a described field.")
@classproperty
def validity_over(cls):
return cls.described_object_field.name
def get_described_object(self):
return getattr(self, self.described_object_field.name)
def get_url(self, action="detail"):
kwargs = {}
if action != "list":
kwargs = self.get_identifying_fields()
described_object = self.get_described_object()
for field, value in described_object.get_identifying_fields().items():
kwargs[f"{self.described_object_field.name}__{field}"] = value
try:
return reverse(
f"{self.get_url_pattern_name_prefix()}-ui-{action}",
kwargs=kwargs,
)
except NoReverseMatch:
return
def __str__(self):
return self.identifying_fields_to_string(
identifying_fields=(
self.described_object_field.name,
"validity_start",
),
)
class Meta:
abstract = True
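# Illustrative sketch (not part of this module): a concrete description model
# is expected to expose a relation whose name starts with "described_", which
# described_object_field discovers by prefix. Hypothetical example:
#
#     class FootnoteDescription(DescriptionMixin, TrackedModel):
#         described_footnote = models.ForeignKey(
#             "footnotes.Footnote",
#             on_delete=models.PROTECT,
#             related_name="descriptions",
#         )
#         description = models.TextField()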
tests/layers/test_layers_normalization.py | OliverZijia/tensorlayer2 | Python | 6,711 bytes | Apache-2.0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import unittest
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
import tensorlayer as tl
from tensorlayer.layers import *
from tensorlayer.models import Model
from tests.utils import CustomTestCase
class Laye_BatchNorm_Test(CustomTestCase):
@classmethod
def setUpClass(cls):
        x_1_input_shape = [None, 100, 1]
        x_2_input_shape = [None, 100, 100, 3]
        x_3_input_shape = [None, 100, 100, 100, 3]
batchsize = 2
cls.x1 = tf.random.normal([batchsize] + x_1_input_shape[1:])
cls.x2 = tf.random.normal([batchsize] + x_2_input_shape[1:])
cls.x3 = tf.random.normal([batchsize] + x_3_input_shape[1:])
## Base
ni_1 = Input(x_1_input_shape, name='test_ni1')
nn_1 = Conv1d(
n_filter=32, filter_size=5, stride=2, name='test_conv1d'
)(ni_1)
n1_b = BatchNorm(name='test_conv')(nn_1)
cls.n1_b = n1_b
cls.base_1d = Model(inputs=ni_1, outputs=n1_b, name='test_base_1d')
ni_2 = Input(x_2_input_shape, name='test_ni2')
nn_2 = Conv2d(
n_filter=32, filter_size=(3, 3), strides=(2, 2), name='test_conv2d'
)(ni_2)
n2_b = BatchNorm2d(name='test_bn2d')(nn_2)
cls.n2_b = n2_b
cls.base_2d = Model(inputs=ni_2, outputs=n2_b, name='test_base_2d')
ni_3 = Input(x_3_input_shape, name='test_ni2')
nn_3 = Conv3d(
n_filter=32, filter_size=(3, 3, 3), strides=(2, 2, 2), name='test_conv3d'
)(ni_3)
n3_b = BatchNorm3d(name='test_bn3d')(nn_3)
cls.n3_b = n3_b
cls.base_3d = Model(inputs=ni_3, outputs=n3_b, name='test_base_3d')
## 1D ========================================================================
nin_1 = Input(x_1_input_shape, name='test_in1')
n1 = Conv1d(
n_filter=32, filter_size=5, stride=2, name='test_conv1d'
)(nin_1)
n1 = BatchNorm1d(name='test_bn1d')(n1)
cls.n1 = n1
cls.static_1d = Model(inputs=nin_1, outputs=n1)
class bn_1d_model(Model):
def __init__(self):
super(bn_1d_model, self).__init__(name='test_bn_1d_model')
self.conv = Conv1d(n_filter=32, filter_size=5, stride=2, name='test_conv1d', in_channels=1)
self.bn = BatchNorm1d(num_features=32, name='test_bn1d')
def forward(self, x):
x = self.bn(self.conv(x))
return x
cls.dynamic_1d = bn_1d_model()
print("Printing BatchNorm1d")
print(cls.static_1d)
print(cls.dynamic_1d)
## 2D ========================================================================
nin_2 = Input(x_2_input_shape, name='test_in2')
n2 = Conv2d(
n_filter=32, filter_size=(3, 3), strides=(2, 2), name='test_conv2d'
)(nin_2)
n2 = BatchNorm2d(name='test_bn2d')(n2)
cls.n2 = n2
cls.static_2d = Model(inputs=nin_2, outputs=n2)
class bn_2d_model(Model):
def __init__(self):
super(bn_2d_model, self).__init__(name='test_bn_2d_model')
self.conv = Conv2d(n_filter=32, filter_size=(3, 3), strides=(2, 2), name='test_conv2d', in_channels=3)
self.bn = BatchNorm2d(num_features=32, name='test_bn2d')
def forward(self, x):
x = self.bn(self.conv(x))
return x
cls.dynamic_2d = bn_2d_model()
print("Printing BatchNorm1d")
print(cls.static_2d)
print(cls.dynamic_2d)
## 3D ========================================================================
nin_3 = Input(x_3_input_shape, name='test_in3')
n3 = Conv3d(
n_filter=32, filter_size=(3, 3, 3), strides=(2, 2, 2), name='test_conv3d'
)(nin_3)
n3 = BatchNorm3d(name='test_bn3d', act=tf.nn.relu)(n3)
cls.n3 = n3
cls.static_3d = Model(inputs=nin_3, outputs=n3)
class bn_3d_model(Model):
def __init__(self):
super(bn_3d_model, self).__init__(name='test_bn_3d_model')
self.conv = Conv3d(n_filter=32, filter_size=(3, 3, 3), strides=(2, 2, 2), name='test_conv3d', in_channels=3)
self.bn = BatchNorm3d(num_features=32, name='test_bn3d')
def forward(self, x):
x = self.bn(self.conv(x))
return x
cls.dynamic_3d = bn_3d_model()
print("Printing BatchNorm1d")
print(cls.static_3d)
print(cls.dynamic_3d)
@classmethod
def tearDownClass(cls):
pass
# tf.reset_default_graph()
def test_BatchNorm(self):
self.assertEqual(self.n1_b.shape[1:], (50, 32))
out = self.base_1d(self.x1, is_train=True)
self.assertEqual(self.n2_b.shape[1:], (50, 50, 32))
out = self.base_2d(self.x2, is_train=True)
self.assertEqual(self.n3_b.shape[1:], (50, 50, 50, 32))
out = self.base_3d(self.x3, is_train=True)
def test_BatchNorm1d(self):
self.assertEqual(self.n1.shape[1:], (50, 32))
out = self.static_1d(self.x1, is_train=True)
out = self.dynamic_1d(self.x1, is_train=True)
def test_BatchNorm2d(self):
self.assertEqual(self.n2.shape[1:], (50, 50, 32))
out = self.static_2d(self.x2, is_train=True)
out = self.dynamic_2d(self.x2, is_train=True)
out = self.dynamic_2d(self.x2, is_train=False)
def test_BatchNorm3d(self):
self.assertEqual(self.n3.shape[1:], (50, 50, 50, 32))
out = self.static_3d(self.x3, is_train=True)
out = self.dynamic_3d(self.x3, is_train=True)
def test_dataformat(self):
bn1d = BatchNorm1d(data_format='channels_first', num_features=32)
bn2d = BatchNorm2d(data_format='channels_first', num_features=32)
bn3d = BatchNorm3d(data_format='channels_first', num_features=32)
bn = BatchNorm(data_format='channels_first')
try:
bn_fail = BatchNorm1d(data_format='xyz', num_features=32)
except Exception as e:
self.assertIsInstance(e, ValueError)
print(e)
def test_exception(self):
try:
bn = BatchNorm(num_features=32)
except Exception as e:
self.assertIsInstance(e, ValueError)
print(e)
try:
ni = Input([None, 100, 1], name='test_ni1')
bn = BatchNorm(decay=1.5)(ni)
except Exception as e:
self.assertIsInstance(e, ValueError)
print(e)
if __name__ == '__main__':
tl.logging.set_verbosity(tl.logging.DEBUG)
unittest.main()
studies/migrations/0016_auto_20170710_1438.py | enrobyn/lookit-api | Python | 1,256 bytes | MIT
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-10 14:38
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('studies', '0015_auto_20170707_1820'),
]
operations = [
migrations.AlterModelOptions(
name='study',
options={'ordering': ['name'], 'permissions': (('can_view_study', 'Can View Study'), ('can_create_study', 'Can Create Study'), ('can_edit_study', 'Can Edit Study'), ('can_remove_study', 'Can Remove Study'), ('can_activate_study', 'Can Activate Study'), ('can_deactivate_study', 'Can Deactivate Study'), ('can_pause_study', 'Can Pause Study'), ('can_resume_study', 'Can Resume Study'), ('can_approve_study', 'Can Approve Study'), ('can_submit_study', 'Can Submit Study'), ('can_retract_study', 'Can Retract Study'), ('can_resubmit_study', 'Can Resubmit Study'), ('can_edit_study_permissions', 'Can Edit Study Permissions'), ('can_view_study_permissions', 'Can View Study Permissions'), ('can_view_study_responses', 'Can View Study Responses'), ('can_view_study_video_responses', 'Can View Study Video Responses'), ('can_view_study_demographics', 'Can View Study Demographics'))},
),
]
trimesh/exchange/dae.py | jengvi/trimesh | Python | 13,247 bytes | MIT
import io
import copy
import uuid
import numpy as np
try:
# pip install pycollada
import collada
except BaseException:
collada = None
try:
import PIL.Image
except ImportError:
pass
from .. import util
from .. import visual
from ..constants import log
def load_collada(file_obj, resolver=None, **kwargs):
"""
Load a COLLADA (.dae) file into a list of trimesh kwargs.
Parameters
----------
file_obj : file object
Containing a COLLADA file
resolver : trimesh.visual.Resolver or None
For loading referenced files, like texture images
kwargs : **
Passed to trimesh.Trimesh.__init__
Returns
-------
loaded : list of dict
kwargs for Trimesh constructor
"""
# load scene using pycollada
c = collada.Collada(file_obj)
# Create material map from Material ID to trimesh material
material_map = {}
for m in c.materials:
effect = m.effect
material_map[m.id] = _parse_material(effect, resolver)
# name : kwargs
meshes = {}
# list of dict
graph = []
for node in c.scene.nodes:
_parse_node(node=node,
parent_matrix=np.eye(4),
material_map=material_map,
meshes=meshes,
graph=graph,
resolver=resolver)
# create kwargs for load_kwargs
result = {'class': 'Scene',
'graph': graph,
'geometry': meshes}
return result
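# Usage sketch (hypothetical file name; normally reached via trimesh.load):
#
#     with open('model.dae', 'rb') as f:
#         kwargs = load_collada(f)
#     # kwargs['geometry'] maps '<geometry id>.<primitive index>' keys to
#     # Trimesh constructor kwargs; kwargs['graph'] holds the scene edges.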
def export_collada(mesh, **kwargs):
"""
Export a mesh or a list of meshes as a COLLADA .dae file.
Parameters
-----------
mesh: Trimesh object or list of Trimesh objects
The mesh(es) to export.
Returns
-----------
export: str, string of COLLADA format output
"""
meshes = mesh
if not isinstance(mesh, (list, tuple, set, np.ndarray)):
meshes = [mesh]
c = collada.Collada()
nodes = []
for i, m in enumerate(meshes):
# Load uv, colors, materials
uv = None
colors = None
mat = _unparse_material(None)
if m.visual.defined:
if m.visual.kind == 'texture':
mat = _unparse_material(m.visual.material)
uv = m.visual.uv
elif m.visual.kind == 'vertex':
colors = (m.visual.vertex_colors / 255.0)[:, :3]
c.effects.append(mat.effect)
c.materials.append(mat)
# Create geometry object
vertices = collada.source.FloatSource(
'verts-array', m.vertices.flatten(), ('X', 'Y', 'Z'))
normals = collada.source.FloatSource(
'normals-array', m.vertex_normals.flatten(), ('X', 'Y', 'Z'))
input_list = collada.source.InputList()
input_list.addInput(0, 'VERTEX', '#verts-array')
input_list.addInput(1, 'NORMAL', '#normals-array')
arrays = [vertices, normals]
if uv is not None:
texcoords = collada.source.FloatSource(
'texcoords-array', uv.flatten(), ('U', 'V'))
input_list.addInput(2, 'TEXCOORD', '#texcoords-array')
arrays.append(texcoords)
if colors is not None:
idx = 2
            if uv is not None:
                idx = 3
colors = collada.source.FloatSource('colors-array',
colors.flatten(), ('R', 'G', 'B'))
input_list.addInput(idx, 'COLOR', '#colors-array')
arrays.append(colors)
geom = collada.geometry.Geometry(
c, uuid.uuid4().hex, uuid.uuid4().hex, arrays
)
indices = np.repeat(m.faces.flatten(), len(arrays))
matref = u'material{}'.format(i)
triset = geom.createTriangleSet(indices, input_list, matref)
geom.primitives.append(triset)
c.geometries.append(geom)
matnode = collada.scene.MaterialNode(matref, mat, inputs=[])
geomnode = collada.scene.GeometryNode(geom, [matnode])
node = collada.scene.Node(u'node{}'.format(i), children=[geomnode])
nodes.append(node)
scene = collada.scene.Scene('scene', nodes)
c.scenes.append(scene)
c.scene = scene
b = io.BytesIO()
c.write(b)
b.seek(0)
return b.read()
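# Usage sketch (hypothetical names): the function returns raw bytes, so a
# caller would typically write them straight to disk:
#
#     data = export_collada(mesh)
#     with open('exported.dae', 'wb') as f:
#         f.write(data)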
def _parse_node(node,
parent_matrix,
material_map,
meshes,
graph,
resolver=None):
"""
Recursively parse COLLADA scene nodes.
"""
# Parse mesh node
if isinstance(node, collada.scene.GeometryNode):
geometry = node.geometry
# Create local material map from material symbol to actual material
local_material_map = {}
for mn in node.materials:
symbol = mn.symbol
m = mn.target
if m.id in material_map:
local_material_map[symbol] = material_map[m.id]
else:
local_material_map[symbol] = _parse_material(m, resolver)
# Iterate over primitives of geometry
for i, primitive in enumerate(geometry.primitives):
if isinstance(primitive, collada.polylist.Polylist):
primitive = primitive.triangleset()
if isinstance(primitive, collada.triangleset.TriangleSet):
vertex = primitive.vertex
vertex_index = primitive.vertex_index
vertices = vertex[vertex_index].reshape(
len(vertex_index) * 3, 3)
# Get normals if present
normals = None
if primitive.normal is not None:
normal = primitive.normal
normal_index = primitive.normal_index
normals = normal[normal_index].reshape(
len(normal_index) * 3, 3)
# Get colors if present
colors = None
s = primitive.sources
if ('COLOR' in s and len(s['COLOR'])
> 0 and len(primitive.index) > 0):
color = s['COLOR'][0][4].data
color_index = primitive.index[:, :, s['COLOR'][0][0]]
colors = color[color_index].reshape(
len(color_index) * 3, 3)
faces = np.arange(
vertices.shape[0]).reshape(
vertices.shape[0] // 3, 3)
# Get UV coordinates if possible
vis = None
if primitive.material in local_material_map:
material = copy.copy(
local_material_map[primitive.material])
uv = None
if len(primitive.texcoordset) > 0:
texcoord = primitive.texcoordset[0]
texcoord_index = primitive.texcoord_indexset[0]
uv = texcoord[texcoord_index].reshape(
(len(texcoord_index) * 3, 2))
vis = visual.texture.TextureVisuals(
uv=uv, material=material)
primid = u'{}.{}'.format(geometry.id, i)
meshes[primid] = {
'vertices': vertices,
'faces': faces,
'vertex_normals': normals,
'vertex_colors': colors,
'visual': vis}
graph.append({'frame_to': primid,
'matrix': parent_matrix,
'geometry': primid})
# recurse down tree for nodes with children
elif isinstance(node, collada.scene.Node):
if node.children is not None:
for child in node.children:
# create the new matrix
matrix = np.dot(parent_matrix, node.matrix)
# parse the child node
_parse_node(
node=child,
parent_matrix=matrix,
material_map=material_map,
meshes=meshes,
graph=graph,
resolver=resolver)
elif isinstance(node, collada.scene.CameraNode):
# TODO: convert collada cameras to trimesh cameras
pass
elif isinstance(node, collada.scene.LightNode):
# TODO: convert collada lights to trimesh lights
pass
def _load_texture(file_name, resolver):
"""
Load a texture from a file into a PIL image.
"""
file_data = resolver.get(file_name)
image = PIL.Image.open(util.wrap_as_stream(file_data))
return image
def _parse_material(effect, resolver):
"""
Turn a COLLADA effect into a trimesh material.
"""
# Compute base color
baseColorFactor = np.ones(4)
baseColorTexture = None
if isinstance(effect.diffuse, collada.material.Map):
try:
baseColorTexture = _load_texture(
effect.diffuse.sampler.surface.image.path, resolver)
except BaseException:
log.warning('unable to load base texture',
exc_info=True)
elif effect.diffuse is not None:
baseColorFactor = effect.diffuse
# Compute emission color
emissiveFactor = np.zeros(3)
emissiveTexture = None
if isinstance(effect.emission, collada.material.Map):
try:
            emissiveTexture = _load_texture(
                effect.emission.sampler.surface.image.path, resolver)
except BaseException:
log.warning('unable to load emissive texture',
exc_info=True)
elif effect.emission is not None:
emissiveFactor = effect.emission[:3]
# Compute roughness
roughnessFactor = 1.0
if (not isinstance(effect.shininess, collada.material.Map)
and effect.shininess is not None):
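        # approximate Blinn-Phong shininess -> PBR roughness conversion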
roughnessFactor = np.sqrt(2.0 / (2.0 + effect.shininess))
# Compute metallic factor
metallicFactor = 0.0
# Compute normal texture
normalTexture = None
if effect.bumpmap is not None:
try:
normalTexture = _load_texture(
effect.bumpmap.sampler.surface.image.path, resolver)
except BaseException:
log.warning('unable to load bumpmap',
exc_info=True)
# Compute opacity
if (effect.transparent is not None
and not isinstance(effect.transparent, collada.material.Map)):
baseColorFactor = tuple(np.append(baseColorFactor[:3], effect.transparent[3]))
return visual.material.PBRMaterial(
emissiveFactor=emissiveFactor,
emissiveTexture=emissiveTexture,
normalTexture=normalTexture,
baseColorTexture=baseColorTexture,
baseColorFactor=baseColorFactor,
metallicFactor=metallicFactor,
roughnessFactor=roughnessFactor)
def _unparse_material(material):
"""
Turn a trimesh material into a COLLADA material.
"""
# TODO EXPORT TEXTURES
if isinstance(material, visual.material.PBRMaterial):
diffuse = material.baseColorFactor
if diffuse is not None:
diffuse = list(diffuse)
emission = material.emissiveFactor
if emission is not None:
emission = [float(emission[0]), float(emission[1]),
float(emission[2]), 1.0]
shininess = material.roughnessFactor
if shininess is not None:
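            # invert roughness = sqrt(2 / (2 + shininess)):
            # shininess = 2 / roughness**2 - 2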
shininess = 2.0 / shininess**2 - 2.0
effect = collada.material.Effect(
uuid.uuid4().hex, params=[], shadingtype='phong',
diffuse=diffuse, emission=emission,
specular=[1.0, 1.0, 1.0, 1.0], shininess=float(shininess)
)
material = collada.material.Material(
uuid.uuid4().hex, 'pbrmaterial', effect
)
else:
effect = collada.material.Effect(
uuid.uuid4().hex, params=[], shadingtype='phong'
)
material = collada.material.Material(
uuid.uuid4().hex, 'defaultmaterial', effect
)
return material
def load_zae(file_obj, resolver=None, **kwargs):
"""
Load a ZAE file, which is just a zipped DAE file.
Parameters
-------------
file_obj : file object
Contains ZAE data
resolver : trimesh.visual.Resolver
Resolver to load additional assets
kwargs : dict
Passed to load_collada
Returns
------------
loaded : dict
Results of loading
"""
# a dict, {file name : file object}
archive = util.decompress(file_obj,
file_type='zip')
# load the first file with a .dae extension
file_name = next(i for i in archive.keys()
if i.lower().endswith('.dae'))
# a resolver so the loader can load textures / etc
resolver = visual.resolvers.ZipResolver(archive)
# run the regular collada loader
loaded = load_collada(archive[file_name],
resolver=resolver,
**kwargs)
return loaded
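# Usage sketch (hypothetical archive name):
#
#     with open('model.zae', 'rb') as f:
#         kwargs = load_zae(f)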
# only provide loaders if `pycollada` is installed
_collada_loaders = {}
_collada_exporters = {}
if collada is not None:
_collada_loaders['dae'] = load_collada
_collada_loaders['zae'] = load_zae
_collada_exporters['dae'] = export_collada
| 32.231144
| 86
| 0.566619
|
import io
import copy
import uuid
import numpy as np
try:
import collada
except BaseException:
collada = None
try:
import PIL.Image
except ImportError:
pass
from .. import util
from .. import visual
from ..constants import log
def load_collada(file_obj, resolver=None, **kwargs):
c = collada.Collada(file_obj)
material_map = {}
for m in c.materials:
effect = m.effect
material_map[m.id] = _parse_material(effect, resolver)
meshes = {}
graph = []
for node in c.scene.nodes:
_parse_node(node=node,
parent_matrix=np.eye(4),
material_map=material_map,
meshes=meshes,
graph=graph,
resolver=resolver)
result = {'class': 'Scene',
'graph': graph,
'geometry': meshes}
return result
def export_collada(mesh, **kwargs):
meshes = mesh
if not isinstance(mesh, (list, tuple, set, np.ndarray)):
meshes = [mesh]
c = collada.Collada()
nodes = []
for i, m in enumerate(meshes):
uv = None
colors = None
mat = _unparse_material(None)
if m.visual.defined:
if m.visual.kind == 'texture':
mat = _unparse_material(m.visual.material)
uv = m.visual.uv
elif m.visual.kind == 'vertex':
colors = (m.visual.vertex_colors / 255.0)[:, :3]
c.effects.append(mat.effect)
c.materials.append(mat)
vertices = collada.source.FloatSource(
'verts-array', m.vertices.flatten(), ('X', 'Y', 'Z'))
normals = collada.source.FloatSource(
'normals-array', m.vertex_normals.flatten(), ('X', 'Y', 'Z'))
input_list = collada.source.InputList()
input_list.addInput(0, 'VERTEX', '#verts-array')
input_list.addInput(1, 'NORMAL', '#normals-array')
arrays = [vertices, normals]
if uv is not None:
texcoords = collada.source.FloatSource(
'texcoords-array', uv.flatten(), ('U', 'V'))
input_list.addInput(2, 'TEXCOORD', '#texcoords-array')
arrays.append(texcoords)
if colors is not None:
idx = 2
if uv:
idx = 3
colors = collada.source.FloatSource('colors-array',
colors.flatten(), ('R', 'G', 'B'))
input_list.addInput(idx, 'COLOR', '#colors-array')
arrays.append(colors)
geom = collada.geometry.Geometry(
c, uuid.uuid4().hex, uuid.uuid4().hex, arrays
)
indices = np.repeat(m.faces.flatten(), len(arrays))
matref = u'material{}'.format(i)
triset = geom.createTriangleSet(indices, input_list, matref)
geom.primitives.append(triset)
c.geometries.append(geom)
matnode = collada.scene.MaterialNode(matref, mat, inputs=[])
geomnode = collada.scene.GeometryNode(geom, [matnode])
node = collada.scene.Node(u'node{}'.format(i), children=[geomnode])
nodes.append(node)
scene = collada.scene.Scene('scene', nodes)
c.scenes.append(scene)
c.scene = scene
b = io.BytesIO()
c.write(b)
b.seek(0)
return b.read()
def _parse_node(node,
parent_matrix,
material_map,
meshes,
graph,
resolver=None):
if isinstance(node, collada.scene.GeometryNode):
geometry = node.geometry
local_material_map = {}
for mn in node.materials:
symbol = mn.symbol
m = mn.target
if m.id in material_map:
local_material_map[symbol] = material_map[m.id]
else:
local_material_map[symbol] = _parse_material(m, resolver)
for i, primitive in enumerate(geometry.primitives):
if isinstance(primitive, collada.polylist.Polylist):
primitive = primitive.triangleset()
if isinstance(primitive, collada.triangleset.TriangleSet):
vertex = primitive.vertex
vertex_index = primitive.vertex_index
vertices = vertex[vertex_index].reshape(
len(vertex_index) * 3, 3)
normals = None
if primitive.normal is not None:
normal = primitive.normal
normal_index = primitive.normal_index
normals = normal[normal_index].reshape(
len(normal_index) * 3, 3)
colors = None
s = primitive.sources
            if ('COLOR' in s and len(s['COLOR']) > 0
                    and len(primitive.index) > 0):
color = s['COLOR'][0][4].data
color_index = primitive.index[:, :, s['COLOR'][0][0]]
colors = color[color_index].reshape(
len(color_index) * 3, 3)
faces = np.arange(
vertices.shape[0]).reshape(
vertices.shape[0] // 3, 3)
vis = None
if primitive.material in local_material_map:
material = copy.copy(
local_material_map[primitive.material])
uv = None
if len(primitive.texcoordset) > 0:
texcoord = primitive.texcoordset[0]
texcoord_index = primitive.texcoord_indexset[0]
uv = texcoord[texcoord_index].reshape(
(len(texcoord_index) * 3, 2))
vis = visual.texture.TextureVisuals(
uv=uv, material=material)
primid = u'{}.{}'.format(geometry.id, i)
meshes[primid] = {
'vertices': vertices,
'faces': faces,
'vertex_normals': normals,
'vertex_colors': colors,
'visual': vis}
graph.append({'frame_to': primid,
'matrix': parent_matrix,
'geometry': primid})
elif isinstance(node, collada.scene.Node):
if node.children is not None:
for child in node.children:
matrix = np.dot(parent_matrix, node.matrix)
_parse_node(
node=child,
parent_matrix=matrix,
material_map=material_map,
meshes=meshes,
graph=graph,
resolver=resolver)
elif isinstance(node, collada.scene.CameraNode):
pass
elif isinstance(node, collada.scene.LightNode):
pass
def _load_texture(file_name, resolver):
file_data = resolver.get(file_name)
image = PIL.Image.open(util.wrap_as_stream(file_data))
return image
def _parse_material(effect, resolver):
baseColorFactor = np.ones(4)
baseColorTexture = None
if isinstance(effect.diffuse, collada.material.Map):
try:
baseColorTexture = _load_texture(
effect.diffuse.sampler.surface.image.path, resolver)
except BaseException:
log.warning('unable to load base texture',
exc_info=True)
elif effect.diffuse is not None:
baseColorFactor = effect.diffuse
emissiveFactor = np.zeros(3)
emissiveTexture = None
if isinstance(effect.emission, collada.material.Map):
try:
emissiveTexture = _load_texture(
                effect.emission.sampler.surface.image.path, resolver)
except BaseException:
log.warning('unable to load emissive texture',
exc_info=True)
elif effect.emission is not None:
emissiveFactor = effect.emission[:3]
roughnessFactor = 1.0
if (not isinstance(effect.shininess, collada.material.Map)
and effect.shininess is not None):
roughnessFactor = np.sqrt(2.0 / (2.0 + effect.shininess))
metallicFactor = 0.0
normalTexture = None
if effect.bumpmap is not None:
try:
normalTexture = _load_texture(
effect.bumpmap.sampler.surface.image.path, resolver)
except BaseException:
log.warning('unable to load bumpmap',
exc_info=True)
if (effect.transparent is not None
and not isinstance(effect.transparent, collada.material.Map)):
baseColorFactor = tuple(np.append(baseColorFactor[:3], effect.transparent[3]))
return visual.material.PBRMaterial(
emissiveFactor=emissiveFactor,
emissiveTexture=emissiveTexture,
normalTexture=normalTexture,
baseColorTexture=baseColorTexture,
baseColorFactor=baseColorFactor,
metallicFactor=metallicFactor,
roughnessFactor=roughnessFactor)
def _unparse_material(material):
if isinstance(material, visual.material.PBRMaterial):
diffuse = material.baseColorFactor
if diffuse is not None:
diffuse = list(diffuse)
emission = material.emissiveFactor
if emission is not None:
emission = [float(emission[0]), float(emission[1]),
float(emission[2]), 1.0]
shininess = material.roughnessFactor
if shininess is not None:
shininess = 2.0 / shininess**2 - 2.0
effect = collada.material.Effect(
uuid.uuid4().hex, params=[], shadingtype='phong',
diffuse=diffuse, emission=emission,
specular=[1.0, 1.0, 1.0, 1.0], shininess=float(shininess)
)
material = collada.material.Material(
uuid.uuid4().hex, 'pbrmaterial', effect
)
else:
effect = collada.material.Effect(
uuid.uuid4().hex, params=[], shadingtype='phong'
)
material = collada.material.Material(
uuid.uuid4().hex, 'defaultmaterial', effect
)
return material
def load_zae(file_obj, resolver=None, **kwargs):
archive = util.decompress(file_obj,
file_type='zip')
file_name = next(i for i in archive.keys()
if i.lower().endswith('.dae'))
resolver = visual.resolvers.ZipResolver(archive)
loaded = load_collada(archive[file_name],
resolver=resolver,
**kwargs)
return loaded
_collada_loaders = {}
_collada_exporters = {}
if collada is not None:
_collada_loaders['dae'] = load_collada
_collada_loaders['zae'] = load_zae
_collada_exporters['dae'] = export_collada
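# --- Worked example (added for illustration; not in the original source) ---
# `_parse_material` maps Blinn/Phong shininess to PBR roughness with
# roughness = sqrt(2 / (2 + shininess)), and `_unparse_material` inverts it
# with shininess = 2 / roughness**2 - 2. The sketch below checks that the two
# conversions round-trip for an arbitrary shininess value.
def _demo_shininess_roundtrip(shininess=32.0):
    roughness = np.sqrt(2.0 / (2.0 + shininess))
    recovered = 2.0 / roughness ** 2 - 2.0
    assert np.isclose(recovered, shininess)
    return roughness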
| true
| true
|
790b6d46830f4335c39e55e0db3124f4a021de64
| 7,161
|
py
|
Python
|
tests/syft/grid/messages/group_msg_test.py
|
divinit7/PySyft
|
261869e50852a24b2d76f3b44a5819050acd9eb8
|
[
"Apache-2.0"
] | null | null | null |
tests/syft/grid/messages/group_msg_test.py
|
divinit7/PySyft
|
261869e50852a24b2d76f3b44a5819050acd9eb8
|
[
"Apache-2.0"
] | null | null | null |
tests/syft/grid/messages/group_msg_test.py
|
divinit7/PySyft
|
261869e50852a24b2d76f3b44a5819050acd9eb8
|
[
"Apache-2.0"
] | null | null | null |
# stdlib
from typing import Any
from typing import Dict
# syft absolute
import syft as sy
from syft import serialize
from syft.core.io.address import Address
from syft.grid.messages.group_messages import CreateGroupMessage
from syft.grid.messages.group_messages import CreateGroupResponse
from syft.grid.messages.group_messages import DeleteGroupMessage
from syft.grid.messages.group_messages import DeleteGroupResponse
from syft.grid.messages.group_messages import GetGroupMessage
from syft.grid.messages.group_messages import GetGroupResponse
from syft.grid.messages.group_messages import GetGroupsMessage
from syft.grid.messages.group_messages import GetGroupsResponse
from syft.grid.messages.group_messages import UpdateGroupMessage
from syft.grid.messages.group_messages import UpdateGroupResponse
def test_create_group_message_serde() -> None:
bob_vm = sy.VirtualMachine(name="Bob")
target = Address(name="Alice")
request_content = {
"group-name": "Heart diseases group",
"members": ["user-id1", "user-id2", "user-id3"],
"data": [
{"id": "264632213", "permissions": "read"},
{"id": "264613232", "permissions": "write"},
{"id": "896632213", "permissions": "read"},
],
}
msg = CreateGroupMessage(
address=target,
content=request_content,
reply_to=bob_vm.address,
)
blob = serialize(msg)
msg2 = sy.deserialize(blob=blob)
assert msg.id == msg2.id
assert msg.address == target
assert msg.content == msg2.content
assert msg == msg2
def test_create_group_response_serde() -> None:
target = Address(name="Alice")
request_content = {"msg": "Group Created Successfully!"}
msg = CreateGroupResponse(
address=target,
status_code=200,
content=request_content,
)
blob = serialize(msg)
msg2 = sy.deserialize(blob=blob)
assert msg.id == msg2.id
assert msg.address == target
assert msg.content == msg2.content
assert msg == msg2
def test_delete_group_message_serde() -> None:
bob_vm = sy.VirtualMachine(name="Bob")
target = Address(name="Alice")
request_content = {"group_id": "f2a6as5d16fasd"}
msg = DeleteGroupMessage(
address=target,
content=request_content,
reply_to=bob_vm.address,
)
blob = serialize(msg)
msg2 = sy.deserialize(blob=blob)
assert msg.id == msg2.id
assert msg.address == target
assert msg.content == msg2.content
assert msg == msg2
def test_delete_group_response_serde() -> None:
target = Address(name="Alice")
content = {"msg": "Group deleted Successfully!"}
msg = DeleteGroupResponse(
status_code=200,
address=target,
content=content,
)
blob = serialize(msg)
msg2 = sy.deserialize(blob=blob)
assert msg.id == msg2.id
assert msg.address == target
assert msg.content == msg2.content
assert msg == msg2
def test_update_group_message_serde() -> None:
bob_vm = sy.VirtualMachine(name="Bob")
target = Address(name="Alice")
content = {
"group-id": "eqw9e4a5d846",
"group-name": "Brain diseases group",
"members": ["user-id1", "user-id2", "user-id3"],
"data": [
{"id": "264632213", "permissions": "read"},
{"id": "264613232", "permissions": "write"},
{"id": "896632213", "permissions": "read"},
],
}
msg = UpdateGroupMessage(
address=target,
content=content,
reply_to=bob_vm.address,
)
blob = serialize(msg)
msg2 = sy.deserialize(blob=blob)
assert msg.id == msg2.id
assert msg.address == target
assert msg.content == msg2.content
assert msg == msg2
def test_update_group_response_serde() -> None:
target = Address(name="Alice")
request_content = {"msg": "Group updated successfully!"}
msg = UpdateGroupResponse(
address=target,
status_code=200,
content=request_content,
)
blob = serialize(msg)
msg2 = sy.deserialize(blob=blob)
assert msg.id == msg2.id
assert msg.address == target
assert msg.content == msg2.content
assert msg == msg2
def test_get_group_message_serde() -> None:
bob_vm = sy.VirtualMachine(name="Bob")
target = Address(name="Alice")
content = {"group-id": "eqw9e4a5d846"}
msg = GetGroupMessage(
address=target,
content=content,
reply_to=bob_vm.address,
)
blob = serialize(msg)
msg2 = sy.deserialize(blob=blob)
assert msg.id == msg2.id
assert msg.address == target
assert msg.content == msg2.content
assert msg == msg2
def test_get_group_response_serde() -> None:
target = Address(name="Alice")
content = {
"group-id": "eqw9e4a5d846",
"group-name": "Heart diseases group",
"members": ["user-id1", "user-id2", "user-id3"],
"data": [
{"id": "264632213", "permissions": "read"},
{"id": "264613232", "permissions": "write"},
{"id": "896632213", "permissions": "read"},
],
}
msg = GetGroupResponse(
address=target,
status_code=200,
content=content,
)
blob = serialize(msg)
msg2 = sy.deserialize(blob=blob)
assert msg.id == msg2.id
assert msg.address == target
assert msg.content == msg2.content
assert msg == msg2
def test_get_all_groups_message_serde() -> None:
bob_vm = sy.VirtualMachine(name="Bob")
target = Address(name="Alice")
content: Dict[Any, Any] = {}
msg = GetGroupsMessage(
address=target,
content=content,
reply_to=bob_vm.address,
)
blob = serialize(msg)
msg2 = sy.deserialize(blob=blob)
assert msg.id == msg2.id
assert msg.address == target
assert msg.content == msg2.content
assert msg == msg2
def test_get_all_groups_response_serde() -> None:
target = Address(name="Alice")
request_content = {
"groups": {
"626sadaf631": {
"group-name": "Heart diseases group",
"members": ["user-id1", "user-id2", "user-id3"],
"data": [
{"id": "264632213", "permissions": "read"},
{"id": "264613232", "permissions": "write"},
{"id": "896632213", "permissions": "read"},
],
},
"a84ew64wq6e": {
"group-name": "Brain diseases group",
"members": ["user-id5", "user-id7", "user-id9"],
"data": [
{"id": "26463afasd", "permissions": "read"},
{"id": "264613dafeqwe", "permissions": "write"},
{"id": "896632sdfsf", "permissions": "read"},
],
},
}
}
msg = GetGroupsResponse(
address=target,
status_code=200,
content=request_content,
)
blob = serialize(msg)
msg2 = sy.deserialize(blob=blob)
assert msg.id == msg2.id
assert msg.address == target
assert msg.content == msg2.content
assert msg == msg2
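# --- Optional refactor sketch (added; not part of the original tests) ------
# Every test above repeats the same serialize/deserialize/compare sequence.
# A hypothetical helper like the one below could factor that pattern out; it
# relies only on `serialize` and `sy.deserialize` already imported above.
def _assert_serde_roundtrip(msg, target: Address) -> None:
    blob = serialize(msg)
    msg2 = sy.deserialize(blob=blob)
    assert msg.id == msg2.id
    assert msg.address == target
    assert msg.content == msg2.content
    assert msg == msg2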
| 27.228137
| 68
| 0.606898
|
from typing import Any
from typing import Dict
import syft as sy
from syft import serialize
from syft.core.io.address import Address
from syft.grid.messages.group_messages import CreateGroupMessage
from syft.grid.messages.group_messages import CreateGroupResponse
from syft.grid.messages.group_messages import DeleteGroupMessage
from syft.grid.messages.group_messages import DeleteGroupResponse
from syft.grid.messages.group_messages import GetGroupMessage
from syft.grid.messages.group_messages import GetGroupResponse
from syft.grid.messages.group_messages import GetGroupsMessage
from syft.grid.messages.group_messages import GetGroupsResponse
from syft.grid.messages.group_messages import UpdateGroupMessage
from syft.grid.messages.group_messages import UpdateGroupResponse
def test_create_group_message_serde() -> None:
bob_vm = sy.VirtualMachine(name="Bob")
target = Address(name="Alice")
request_content = {
"group-name": "Heart diseases group",
"members": ["user-id1", "user-id2", "user-id3"],
"data": [
{"id": "264632213", "permissions": "read"},
{"id": "264613232", "permissions": "write"},
{"id": "896632213", "permissions": "read"},
],
}
msg = CreateGroupMessage(
address=target,
content=request_content,
reply_to=bob_vm.address,
)
blob = serialize(msg)
msg2 = sy.deserialize(blob=blob)
assert msg.id == msg2.id
assert msg.address == target
assert msg.content == msg2.content
assert msg == msg2
def test_create_group_response_serde() -> None:
target = Address(name="Alice")
request_content = {"msg": "Group Created Successfully!"}
msg = CreateGroupResponse(
address=target,
status_code=200,
content=request_content,
)
blob = serialize(msg)
msg2 = sy.deserialize(blob=blob)
assert msg.id == msg2.id
assert msg.address == target
assert msg.content == msg2.content
assert msg == msg2
def test_delete_group_message_serde() -> None:
bob_vm = sy.VirtualMachine(name="Bob")
target = Address(name="Alice")
request_content = {"group_id": "f2a6as5d16fasd"}
msg = DeleteGroupMessage(
address=target,
content=request_content,
reply_to=bob_vm.address,
)
blob = serialize(msg)
msg2 = sy.deserialize(blob=blob)
assert msg.id == msg2.id
assert msg.address == target
assert msg.content == msg2.content
assert msg == msg2
def test_delete_group_response_serde() -> None:
target = Address(name="Alice")
content = {"msg": "Group deleted Successfully!"}
msg = DeleteGroupResponse(
status_code=200,
address=target,
content=content,
)
blob = serialize(msg)
msg2 = sy.deserialize(blob=blob)
assert msg.id == msg2.id
assert msg.address == target
assert msg.content == msg2.content
assert msg == msg2
def test_update_group_message_serde() -> None:
bob_vm = sy.VirtualMachine(name="Bob")
target = Address(name="Alice")
content = {
"group-id": "eqw9e4a5d846",
"group-name": "Brain diseases group",
"members": ["user-id1", "user-id2", "user-id3"],
"data": [
{"id": "264632213", "permissions": "read"},
{"id": "264613232", "permissions": "write"},
{"id": "896632213", "permissions": "read"},
],
}
msg = UpdateGroupMessage(
address=target,
content=content,
reply_to=bob_vm.address,
)
blob = serialize(msg)
msg2 = sy.deserialize(blob=blob)
assert msg.id == msg2.id
assert msg.address == target
assert msg.content == msg2.content
assert msg == msg2
def test_update_group_response_serde() -> None:
target = Address(name="Alice")
request_content = {"msg": "Group updated successfully!"}
msg = UpdateGroupResponse(
address=target,
status_code=200,
content=request_content,
)
blob = serialize(msg)
msg2 = sy.deserialize(blob=blob)
assert msg.id == msg2.id
assert msg.address == target
assert msg.content == msg2.content
assert msg == msg2
def test_get_group_message_serde() -> None:
bob_vm = sy.VirtualMachine(name="Bob")
target = Address(name="Alice")
content = {"group-id": "eqw9e4a5d846"}
msg = GetGroupMessage(
address=target,
content=content,
reply_to=bob_vm.address,
)
blob = serialize(msg)
msg2 = sy.deserialize(blob=blob)
assert msg.id == msg2.id
assert msg.address == target
assert msg.content == msg2.content
assert msg == msg2
def test_get_group_response_serde() -> None:
target = Address(name="Alice")
content = {
"group-id": "eqw9e4a5d846",
"group-name": "Heart diseases group",
"members": ["user-id1", "user-id2", "user-id3"],
"data": [
{"id": "264632213", "permissions": "read"},
{"id": "264613232", "permissions": "write"},
{"id": "896632213", "permissions": "read"},
],
}
msg = GetGroupResponse(
address=target,
status_code=200,
content=content,
)
blob = serialize(msg)
msg2 = sy.deserialize(blob=blob)
assert msg.id == msg2.id
assert msg.address == target
assert msg.content == msg2.content
assert msg == msg2
def test_get_all_groups_message_serde() -> None:
bob_vm = sy.VirtualMachine(name="Bob")
target = Address(name="Alice")
content: Dict[Any, Any] = {}
msg = GetGroupsMessage(
address=target,
content=content,
reply_to=bob_vm.address,
)
blob = serialize(msg)
msg2 = sy.deserialize(blob=blob)
assert msg.id == msg2.id
assert msg.address == target
assert msg.content == msg2.content
assert msg == msg2
def test_get_all_groups_response_serde() -> None:
target = Address(name="Alice")
request_content = {
"groups": {
"626sadaf631": {
"group-name": "Heart diseases group",
"members": ["user-id1", "user-id2", "user-id3"],
"data": [
{"id": "264632213", "permissions": "read"},
{"id": "264613232", "permissions": "write"},
{"id": "896632213", "permissions": "read"},
],
},
"a84ew64wq6e": {
"group-name": "Brain diseases group",
"members": ["user-id5", "user-id7", "user-id9"],
"data": [
{"id": "26463afasd", "permissions": "read"},
{"id": "264613dafeqwe", "permissions": "write"},
{"id": "896632sdfsf", "permissions": "read"},
],
},
}
}
msg = GetGroupsResponse(
address=target,
status_code=200,
content=request_content,
)
blob = serialize(msg)
msg2 = sy.deserialize(blob=blob)
assert msg.id == msg2.id
assert msg.address == target
assert msg.content == msg2.content
assert msg == msg2
| true
| true
|
790b6dcdb1277a0e88026226689352073e63443c
| 3,531
|
py
|
Python
|
qaseio/tests/qaseio/services/test_test_run_result.py
|
aleksandr-kotlyar/qase-python
|
3e6916eb4bf3518651e0a8e2e62281bfe0bfa464
|
[
"Apache-2.0"
] | null | null | null |
qaseio/tests/qaseio/services/test_test_run_result.py
|
aleksandr-kotlyar/qase-python
|
3e6916eb4bf3518651e0a8e2e62281bfe0bfa464
|
[
"Apache-2.0"
] | null | null | null |
qaseio/tests/qaseio/services/test_test_run_result.py
|
aleksandr-kotlyar/qase-python
|
3e6916eb4bf3518651e0a8e2e62281bfe0bfa464
|
[
"Apache-2.0"
] | null | null | null |
import json
import pytest
import requests_mock
from apitist.constructor import converter
from tests.data import _list, _status_true, _test_run_result
from qaseio.client.models import (
TestRunResultCreate,
TestRunResultCreated,
TestRunResultFilters,
TestRunResultInfo,
TestRunResultList,
TestRunResultStatus,
TestRunResultUpdate,
)
@pytest.mark.parametrize(
"params, query",
[
(
(
10,
30,
TestRunResultFilters(status=[TestRunResultStatus.FAILED]),
),
"?limit=10&offset=30&filters%5Bstatus%5D=failed",
),
((None, 30, None), "?offset=30"),
((10, None, None), "?limit=10"),
(
(
None,
None,
TestRunResultFilters(status=[TestRunResultStatus.FAILED]),
),
"?filters%5Bstatus%5D=failed",
),
],
)
def test_get_all_test_run_results(client, params, query):
response = _status_true(_list(_test_run_result()))
with requests_mock.Mocker() as m:
m.get(client._path("result/CODE"), json=response)
data = client.results.get_all("CODE", *params)
assert data == converter.structure(
response.get("result"), TestRunResultList
)
res = client.results._last_res
assert res.url == client._path("result/CODE" + query)
def test_get_specific_test_run_result(client):
response = _status_true(_test_run_result())
with requests_mock.Mocker() as m:
m.get(client._path("result/CODE/6efce6e4"), json=response)
data = client.results.get("CODE", "6efce6e4")
assert data == converter.structure(
response.get("result"), TestRunResultInfo
)
res = client.results._last_res
assert res.url == client._path("result/CODE/6efce6e4")
def test_create_new_test_run_result(client):
response = _status_true({"hash": "6efce6e4"})
with requests_mock.Mocker() as m:
m.post(client._path("result/CODE/123"), json=response)
create_data = TestRunResultCreate(123, TestRunResultStatus.BLOCKED)
data = client.results.create("CODE", 123, create_data)
assert data == converter.structure(
response.get("result"), TestRunResultCreated
)
res = client.results._last_res
assert json.loads(res.request.body) == converter.unstructure(
create_data
)
def test_update_test_run_result(client):
response = _status_true({"hash": "6efce6e4"})
with requests_mock.Mocker() as m:
m.patch(client._path("result/CODE/123/6efce6e4"), json=response)
update_data = TestRunResultUpdate(TestRunResultStatus.BLOCKED)
data = client.results.update("CODE", 123, "6efce6e4", update_data)
assert data == converter.structure(
response.get("result"), TestRunResultCreated
)
res = client.results._last_res
assert res.url == client._path("result/CODE/123/6efce6e4")
assert json.loads(res.request.body) == converter.unstructure(
update_data
)
def test_delete_test_run_result(client):
with requests_mock.Mocker() as m:
m.delete(
client._path("result/CODE/123/6efce6e4"), json={"status": True}
)
data = client.results.delete("CODE", 123, "6efce6e4")
assert data is None
res = client.results._last_res
assert res.url == client._path("result/CODE/123/6efce6e4")
| 33
| 75
| 0.629283
|
import json
import pytest
import requests_mock
from apitist.constructor import converter
from tests.data import _list, _status_true, _test_run_result
from qaseio.client.models import (
TestRunResultCreate,
TestRunResultCreated,
TestRunResultFilters,
TestRunResultInfo,
TestRunResultList,
TestRunResultStatus,
TestRunResultUpdate,
)
@pytest.mark.parametrize(
"params, query",
[
(
(
10,
30,
TestRunResultFilters(status=[TestRunResultStatus.FAILED]),
),
"?limit=10&offset=30&filters%5Bstatus%5D=failed",
),
((None, 30, None), "?offset=30"),
((10, None, None), "?limit=10"),
(
(
None,
None,
TestRunResultFilters(status=[TestRunResultStatus.FAILED]),
),
"?filters%5Bstatus%5D=failed",
),
],
)
def test_get_all_test_run_results(client, params, query):
response = _status_true(_list(_test_run_result()))
with requests_mock.Mocker() as m:
m.get(client._path("result/CODE"), json=response)
data = client.results.get_all("CODE", *params)
assert data == converter.structure(
response.get("result"), TestRunResultList
)
res = client.results._last_res
assert res.url == client._path("result/CODE" + query)
def test_get_specific_test_run_result(client):
response = _status_true(_test_run_result())
with requests_mock.Mocker() as m:
m.get(client._path("result/CODE/6efce6e4"), json=response)
data = client.results.get("CODE", "6efce6e4")
assert data == converter.structure(
response.get("result"), TestRunResultInfo
)
res = client.results._last_res
assert res.url == client._path("result/CODE/6efce6e4")
def test_create_new_test_run_result(client):
response = _status_true({"hash": "6efce6e4"})
with requests_mock.Mocker() as m:
m.post(client._path("result/CODE/123"), json=response)
create_data = TestRunResultCreate(123, TestRunResultStatus.BLOCKED)
data = client.results.create("CODE", 123, create_data)
assert data == converter.structure(
response.get("result"), TestRunResultCreated
)
res = client.results._last_res
assert json.loads(res.request.body) == converter.unstructure(
create_data
)
def test_update_test_run_result(client):
response = _status_true({"hash": "6efce6e4"})
with requests_mock.Mocker() as m:
m.patch(client._path("result/CODE/123/6efce6e4"), json=response)
update_data = TestRunResultUpdate(TestRunResultStatus.BLOCKED)
data = client.results.update("CODE", 123, "6efce6e4", update_data)
assert data == converter.structure(
response.get("result"), TestRunResultCreated
)
res = client.results._last_res
assert res.url == client._path("result/CODE/123/6efce6e4")
assert json.loads(res.request.body) == converter.unstructure(
update_data
)
def test_delete_test_run_result(client):
with requests_mock.Mocker() as m:
m.delete(
client._path("result/CODE/123/6efce6e4"), json={"status": True}
)
data = client.results.delete("CODE", 123, "6efce6e4")
assert data is None
res = client.results._last_res
assert res.url == client._path("result/CODE/123/6efce6e4")
| true
| true
|
790b6eb990f7c1e45ffe1e637b1c478df59fb388
| 5,598
|
py
|
Python
|
Lib/distutils/command/build.py
|
arvindm95/unladen-swallow
|
8175e37eaea7ca66ed03283b46bc1d2db0d3f9c3
|
[
"PSF-2.0"
] | 2,293
|
2015-01-02T12:46:10.000Z
|
2022-03-29T09:45:43.000Z
|
python/src/Lib/distutils/command/build.py
|
weiqiangzheng/sl4a
|
d3c17dca978cbeee545e12ea240a9dbf2a6999e9
|
[
"Apache-2.0"
] | 315
|
2015-05-31T11:55:46.000Z
|
2022-01-12T08:36:37.000Z
|
python/src/Lib/distutils/command/build.py
|
weiqiangzheng/sl4a
|
d3c17dca978cbeee545e12ea240a9dbf2a6999e9
|
[
"Apache-2.0"
] | 1,033
|
2015-01-04T07:48:40.000Z
|
2022-03-24T09:34:37.000Z
|
"""distutils.command.build
Implements the Distutils 'build' command."""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: build.py 62197 2008-04-07 01:53:39Z mark.hammond $"
import sys, os
from distutils.core import Command
from distutils.errors import DistutilsOptionError
from distutils.util import get_platform
def show_compilers ():
from distutils.ccompiler import show_compilers
show_compilers()
class build (Command):
description = "build everything needed to install"
user_options = [
('build-base=', 'b',
"base directory for build library"),
('build-purelib=', None,
"build directory for platform-neutral distributions"),
('build-platlib=', None,
"build directory for platform-specific distributions"),
('build-lib=', None,
"build directory for all distribution (defaults to either " +
"build-purelib or build-platlib"),
('build-scripts=', None,
"build directory for scripts"),
('build-temp=', 't',
"temporary build directory"),
('plat-name=', 'p',
"platform name to build for, if supported "
"(default: %s)" % get_platform()),
('compiler=', 'c',
"specify the compiler type"),
('debug', 'g',
"compile extensions and libraries with debugging information"),
('force', 'f',
"forcibly build everything (ignore file timestamps)"),
('executable=', 'e',
"specify final destination interpreter path (build.py)"),
]
boolean_options = ['debug', 'force']
help_options = [
('help-compiler', None,
"list available compilers", show_compilers),
]
def initialize_options (self):
self.build_base = 'build'
# these are decided only after 'build_base' has its final value
# (unless overridden by the user or client)
self.build_purelib = None
self.build_platlib = None
self.build_lib = None
self.build_temp = None
self.build_scripts = None
self.compiler = None
self.plat_name = None
self.debug = None
self.force = 0
self.executable = None
def finalize_options (self):
if self.plat_name is None:
self.plat_name = get_platform()
else:
# plat-name only supported for windows (other platforms are
# supported via ./configure flags, if at all). Avoid misleading
# other platforms.
if os.name != 'nt':
raise DistutilsOptionError(
"--plat-name only supported on Windows (try "
"using './configure --help' on your platform)")
plat_specifier = ".%s-%s" % (self.plat_name, sys.version[0:3])
# Make it so Python 2.x and Python 2.x with --with-pydebug don't
# share the same build directories. Doing so confuses the build
# process for C modules
if hasattr(sys, 'gettotalrefcount'):
plat_specifier += '-pydebug'
# 'build_purelib' and 'build_platlib' just default to 'lib' and
# 'lib.<plat>' under the base build directory. We only use one of
        # them for a given distribution, though -- whichever one is picked
        # as 'build_lib' below.
if self.build_purelib is None:
self.build_purelib = os.path.join(self.build_base, 'lib')
if self.build_platlib is None:
self.build_platlib = os.path.join(self.build_base,
'lib' + plat_specifier)
# 'build_lib' is the actual directory that we will use for this
# particular module distribution -- if user didn't supply it, pick
# one of 'build_purelib' or 'build_platlib'.
if self.build_lib is None:
if self.distribution.ext_modules:
self.build_lib = self.build_platlib
else:
self.build_lib = self.build_purelib
# 'build_temp' -- temporary directory for compiler turds,
# "build/temp.<plat>"
if self.build_temp is None:
self.build_temp = os.path.join(self.build_base,
'temp' + plat_specifier)
if self.build_scripts is None:
self.build_scripts = os.path.join(self.build_base,
'scripts-' + sys.version[0:3])
if self.executable is None:
self.executable = os.path.normpath(sys.executable)
# finalize_options ()
def run (self):
# Run all relevant sub-commands. This will be some subset of:
# - build_py - pure Python modules
# - build_clib - standalone C libraries
# - build_ext - Python extensions
# - build_scripts - (Python) scripts
for cmd_name in self.get_sub_commands():
self.run_command(cmd_name)
# -- Predicates for the sub-command list ---------------------------
def has_pure_modules (self):
return self.distribution.has_pure_modules()
def has_c_libraries (self):
return self.distribution.has_c_libraries()
def has_ext_modules (self):
return self.distribution.has_ext_modules()
def has_scripts (self):
return self.distribution.has_scripts()
sub_commands = [('build_py', has_pure_modules),
('build_clib', has_c_libraries),
('build_ext', has_ext_modules),
('build_scripts', has_scripts),
]
# class build
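# --- Worked example (added for illustration; not part of distutils) --------
# The platform specifier computed in finalize_options() combines plat_name
# and the Python version, plus a '-pydebug' suffix on debug interpreters:
#   plat_name='linux-x86_64', sys.version[0:3]='2.6'  ->  '.linux-x86_64-2.6'
# so build_platlib becomes 'build/lib.linux-x86_64-2.6[-pydebug]'. The values
# below are hypothetical.
def _demo_plat_specifier(plat_name='linux-x86_64', version='2.6', pydebug=False):
    specifier = ".%s-%s" % (plat_name, version)
    if pydebug:
        specifier = specifier + '-pydebug'
    return os.path.join('build', 'lib' + specifier)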
| 35.207547
| 76
| 0.58771
|
__revision__ = "$Id: build.py 62197 2008-04-07 01:53:39Z mark.hammond $"
import sys, os
from distutils.core import Command
from distutils.errors import DistutilsOptionError
from distutils.util import get_platform
def show_compilers ():
from distutils.ccompiler import show_compilers
show_compilers()
class build (Command):
description = "build everything needed to install"
user_options = [
('build-base=', 'b',
"base directory for build library"),
('build-purelib=', None,
"build directory for platform-neutral distributions"),
('build-platlib=', None,
"build directory for platform-specific distributions"),
('build-lib=', None,
"build directory for all distribution (defaults to either " +
"build-purelib or build-platlib"),
('build-scripts=', None,
"build directory for scripts"),
('build-temp=', 't',
"temporary build directory"),
('plat-name=', 'p',
"platform name to build for, if supported "
"(default: %s)" % get_platform()),
('compiler=', 'c',
"specify the compiler type"),
('debug', 'g',
"compile extensions and libraries with debugging information"),
('force', 'f',
"forcibly build everything (ignore file timestamps)"),
('executable=', 'e',
"specify final destination interpreter path (build.py)"),
]
boolean_options = ['debug', 'force']
help_options = [
('help-compiler', None,
"list available compilers", show_compilers),
]
def initialize_options (self):
self.build_base = 'build'
self.build_purelib = None
self.build_platlib = None
self.build_lib = None
self.build_temp = None
self.build_scripts = None
self.compiler = None
self.plat_name = None
self.debug = None
self.force = 0
self.executable = None
def finalize_options (self):
if self.plat_name is None:
self.plat_name = get_platform()
else:
if os.name != 'nt':
raise DistutilsOptionError(
"--plat-name only supported on Windows (try "
"using './configure --help' on your platform)")
plat_specifier = ".%s-%s" % (self.plat_name, sys.version[0:3])
        # Make it so Python 2.x and Python 2.x with --with-pydebug don't
        # share the same build directories. Doing so confuses the build
        # process for C modules
if hasattr(sys, 'gettotalrefcount'):
plat_specifier += '-pydebug'
# 'build_purelib' and 'build_platlib' just default to 'lib' and
# 'lib.<plat>' under the base build directory. We only use one of
        # them for a given distribution, though -- whichever one is picked
        # as 'build_lib' below.
if self.build_purelib is None:
self.build_purelib = os.path.join(self.build_base, 'lib')
if self.build_platlib is None:
self.build_platlib = os.path.join(self.build_base,
'lib' + plat_specifier)
        # 'build_lib' is the actual directory that we will use for this
        # particular module distribution -- if user didn't supply it, pick
        # one of 'build_purelib' or 'build_platlib'.
if self.build_lib is None:
if self.distribution.ext_modules:
self.build_lib = self.build_platlib
else:
self.build_lib = self.build_purelib
if self.build_temp is None:
self.build_temp = os.path.join(self.build_base,
'temp' + plat_specifier)
if self.build_scripts is None:
self.build_scripts = os.path.join(self.build_base,
'scripts-' + sys.version[0:3])
if self.executable is None:
self.executable = os.path.normpath(sys.executable)
def run (self):
for cmd_name in self.get_sub_commands():
self.run_command(cmd_name)
def has_pure_modules (self):
return self.distribution.has_pure_modules()
def has_c_libraries (self):
return self.distribution.has_c_libraries()
def has_ext_modules (self):
return self.distribution.has_ext_modules()
def has_scripts (self):
return self.distribution.has_scripts()
sub_commands = [('build_py', has_pure_modules),
('build_clib', has_c_libraries),
('build_ext', has_ext_modules),
('build_scripts', has_scripts),
]
| true
| true
|
790b6ebad99b78cef2085b5391f29db429f6fbbd
| 14,610
|
py
|
Python
|
src/python/pants/java/nailgun_executor.py
|
revl/pants
|
8ad83e4ca80c095d44efceafd8b41e575da39c65
|
[
"Apache-2.0"
] | null | null | null |
src/python/pants/java/nailgun_executor.py
|
revl/pants
|
8ad83e4ca80c095d44efceafd8b41e575da39c65
|
[
"Apache-2.0"
] | null | null | null |
src/python/pants/java/nailgun_executor.py
|
revl/pants
|
8ad83e4ca80c095d44efceafd8b41e575da39c65
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import hashlib
import logging
import os
import re
import selectors
import threading
import time
from contextlib import closing
from pants.base.build_environment import get_buildroot
from pants.java.executor import Executor, SubprocessExecutor
from pants.java.nailgun_client import NailgunClient
from pants.pantsd.process_manager import FingerprintedProcessManager, ProcessGroup
from pants.util.collections import ensure_str_list
from pants.util.dirutil import read_file, safe_file_dump, safe_open
from pants.util.memo import memoized_classproperty
logger = logging.getLogger(__name__)
class NailgunProcessGroup(ProcessGroup):
_NAILGUN_KILL_LOCK = threading.Lock()
def __init__(self, metadata_base_dir=None):
super().__init__(name="nailgun", metadata_base_dir=metadata_base_dir)
# TODO: this should enumerate the .pids dir first, then fallback to ps enumeration (& warn).
def _iter_nailgun_instances(self, everywhere=False):
def predicate(proc):
if proc.name() == NailgunExecutor._PROCESS_NAME:
if not everywhere:
return NailgunExecutor._PANTS_NG_BUILDROOT_ARG in proc.cmdline()
else:
return any(
arg.startswith(NailgunExecutor._PANTS_NG_ARG_PREFIX)
for arg in proc.cmdline()
)
return self.iter_instances(predicate)
def killall(self, everywhere=False):
"""Kills all nailgun servers started by pants.
:param bool everywhere: If ``True``, kills all pants-started nailguns on this machine;
otherwise restricts the nailguns killed to those started for the
current build root.
"""
with self._NAILGUN_KILL_LOCK:
for proc in self._iter_nailgun_instances(everywhere):
logger.info("killing nailgun server pid={pid}".format(pid=proc.pid))
proc.terminate()
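# --- Hedged usage sketch (added; not in the original module) ---------------
# How the process group is meant to be driven: kill the nailguns owned by the
# current build root, or pass everywhere=True to sweep all pants-started
# nailguns on the machine (see the killall() docstring above).
def _demo_killall(everywhere=False):
    NailgunProcessGroup().killall(everywhere=everywhere)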
# TODO: Once we integrate standard logging into our reporting framework, we can consider making
# some of the log.debug() below into log.info(). Right now it just looks wrong on the console.
class NailgunExecutor(Executor, FingerprintedProcessManager):
"""Executes java programs by launching them in nailgun server.
If a nailgun is not available for a given set of jvm args and classpath, one is launched and re-
used for the given jvm args and classpath on subsequent runs.
"""
# 'NGServer 0.9.1 started on 127.0.0.1, port 53785.'
_NG_PORT_REGEX = re.compile(r".*\s+port\s+(\d+)\.$")
# Used to identify if we own a given nailgun server.
FINGERPRINT_CMD_KEY = "-Dpants.nailgun.fingerprint"
_PANTS_NG_ARG_PREFIX = "-Dpants.buildroot"
_PANTS_OWNER_ARG_PREFIX = "-Dpants.nailgun.owner"
@memoized_classproperty
def _PANTS_NG_BUILDROOT_ARG(cls):
return "=".join((cls._PANTS_NG_ARG_PREFIX, get_buildroot()))
_NAILGUN_SPAWN_LOCK = threading.Lock()
_PROCESS_NAME = "java"
def __init__(
self,
identity,
workdir,
nailgun_classpath,
distribution,
startup_timeout=10,
connect_timeout=10,
connect_attempts=5,
metadata_base_dir=None,
):
Executor.__init__(self, distribution=distribution)
FingerprintedProcessManager.__init__(
self,
name=identity,
process_name=self._PROCESS_NAME,
metadata_base_dir=metadata_base_dir,
)
if not isinstance(workdir, str):
raise ValueError(
"Workdir must be a path string, not: {workdir}".format(workdir=workdir)
)
self._identity = identity
self._workdir = workdir
self._ng_stdout = os.path.join(workdir, "stdout")
self._ng_stderr = os.path.join(workdir, "stderr")
self._nailgun_classpath = ensure_str_list(nailgun_classpath)
self._startup_timeout = startup_timeout
self._connect_timeout = connect_timeout
self._connect_attempts = connect_attempts
def __str__(self):
return "NailgunExecutor({identity}, dist={dist}, pid={pid} socket={socket})".format(
identity=self._identity, dist=self._distribution, pid=self.pid, socket=self.socket
)
def _create_owner_arg(self, workdir):
# Currently the owner is identified via the full path to the workdir.
return "=".join((self._PANTS_OWNER_ARG_PREFIX, workdir))
def _create_fingerprint_arg(self, fingerprint):
return "=".join((self.FINGERPRINT_CMD_KEY, fingerprint))
@staticmethod
def _fingerprint(jvm_options, classpath, java_version):
"""Compute a fingerprint for this invocation of a Java task.
:param list jvm_options: JVM options passed to the java invocation
:param list classpath: The -cp arguments passed to the java invocation
:param Revision java_version: return value from Distribution.version()
:return: a hexstring representing a fingerprint of the java invocation
"""
digest = hashlib.sha1()
# TODO(John Sirois): hash classpath contents?
encoded_jvm_options = [option.encode() for option in sorted(jvm_options)]
encoded_classpath = [cp.encode() for cp in sorted(classpath)]
encoded_java_version = repr(java_version).encode()
for item in (encoded_jvm_options, encoded_classpath, encoded_java_version):
digest.update(str(item).encode())
return digest.hexdigest()
def _runner(self, classpath, main, jvm_options, args):
"""Runner factory.
Called via Executor.execute().
"""
command = self._create_command(classpath, main, jvm_options, args)
class Runner(self.Runner):
@property
def executor(this):
return self
@property
def command(self):
return list(command)
def run(this, stdout=None, stderr=None, stdin=None, cwd=None):
nailgun = None
try:
nailgun = self._get_nailgun_client(
jvm_options, classpath, stdout, stderr, stdin
)
logger.debug(
"Executing via {ng_desc}: {cmd}".format(ng_desc=nailgun, cmd=this.cmd)
)
return nailgun.execute(main, cwd, *args)
except (NailgunClient.NailgunError, self.InitialNailgunConnectTimedOut) as e:
self.terminate()
raise self.Error(
"Problem launching via {ng_desc} command {main} {args}: {msg}".format(
ng_desc=nailgun or "<no nailgun connection>",
main=main,
args=" ".join(args),
msg=e,
)
)
return Runner()
def _check_nailgun_state(self, new_fingerprint):
running = self.is_alive()
updated = self.needs_restart(new_fingerprint)
        logger.debug(
"Nailgun {nailgun} state: updated={up!s} running={run!s} fingerprint={old_fp} "
"new_fingerprint={new_fp} distribution={old_dist} new_distribution={new_dist}".format(
nailgun=self._identity,
up=updated,
run=running,
old_fp=self.fingerprint,
new_fp=new_fingerprint,
old_dist=self.cmd,
new_dist=self._distribution.java,
)
)
return running, updated
def _get_nailgun_client(self, jvm_options, classpath, stdout, stderr, stdin):
"""This (somewhat unfortunately) is the main entrypoint to this class via the Runner.
It handles creation of the running nailgun server as well as creation of the client.
"""
classpath = self._nailgun_classpath + classpath
new_fingerprint = self._fingerprint(jvm_options, classpath, self._distribution.version)
with self._NAILGUN_SPAWN_LOCK:
running, updated = self._check_nailgun_state(new_fingerprint)
if running and updated:
logger.debug(
"Found running nailgun server that needs updating, killing {server}".format(
server=self._identity
)
)
self.terminate()
if (not running) or (running and updated):
return self._spawn_nailgun_server(
new_fingerprint, jvm_options, classpath, stdout, stderr, stdin
)
return self._create_ngclient(port=self.socket, stdout=stdout, stderr=stderr, stdin=stdin)
class InitialNailgunConnectTimedOut(Exception):
_msg_fmt = """Failed to read nailgun output after {timeout} seconds!
Stdout:
{stdout}
Stderr:
{stderr}"""
def __init__(self, timeout, stdout, stderr):
msg = self._msg_fmt.format(timeout=timeout, stdout=stdout, stderr=stderr)
super(NailgunExecutor.InitialNailgunConnectTimedOut, self).__init__(msg)
def _await_socket(self, timeout):
"""Blocks for the nailgun subprocess to bind and emit a listening port in the nailgun
stdout."""
start_time = time.time()
accumulated_stdout = ""
        def calculate_remaining_time():
            # Negative while still within the timeout window; positive once
            # the deadline has passed (the select() call below negates it).
            return time.time() - (start_time + timeout)
def possibly_raise_timeout(remaining_time):
if remaining_time > 0:
stderr = read_file(self._ng_stderr, binary_mode=True)
raise self.InitialNailgunConnectTimedOut(
timeout=timeout, stdout=accumulated_stdout, stderr=stderr,
)
# NB: We use PollSelector, rather than the more efficient DefaultSelector, because
# DefaultSelector results in using the epoll() syscall on Linux, which does not work with
# regular text files like ng_stdout. See https://stackoverflow.com/a/8645770.
with selectors.PollSelector() as selector, safe_open(self._ng_stdout, "r") as ng_stdout:
selector.register(ng_stdout, selectors.EVENT_READ)
while 1:
remaining_time = calculate_remaining_time()
possibly_raise_timeout(remaining_time)
events = selector.select(timeout=-1 * remaining_time)
if events:
line = ng_stdout.readline() # TODO: address deadlock risk here.
try:
return self._NG_PORT_REGEX.match(line).group(1)
except AttributeError:
pass
accumulated_stdout += line
def _create_ngclient(self, port, stdout, stderr, stdin):
return NailgunClient(port=port, ins=stdin, out=stdout, err=stderr)
def ensure_connectable(self, nailgun):
"""Ensures that a nailgun client is connectable or raises NailgunError."""
attempt_count = 1
while 1:
try:
with closing(nailgun.try_connect()) as sock:
logger.debug(
"Verified new ng server is connectable at {}".format(sock.getpeername())
)
return
except nailgun.NailgunConnectionError:
if attempt_count >= self._connect_attempts:
logger.debug(
"Failed to connect to ng after {} attempts".format(self._connect_attempts)
)
raise # Re-raise the NailgunConnectionError which provides more context to the user.
attempt_count += 1
time.sleep(self.WAIT_INTERVAL_SEC)
def _spawn_nailgun_server(self, fingerprint, jvm_options, classpath, stdout, stderr, stdin):
"""Synchronously spawn a new nailgun server."""
# Truncate the nailguns stdout & stderr.
safe_file_dump(self._ng_stdout, b"", mode="wb")
safe_file_dump(self._ng_stderr, b"", mode="wb")
jvm_options = jvm_options + [
self._PANTS_NG_BUILDROOT_ARG,
self._create_owner_arg(self._workdir),
self._create_fingerprint_arg(fingerprint),
]
post_fork_child_opts = dict(
fingerprint=fingerprint,
jvm_options=jvm_options,
classpath=classpath,
stdout=stdout,
stderr=stderr,
)
logger.debug(
"Spawning nailgun server {i} with fingerprint={f}, jvm_options={j}, classpath={cp}".format(
i=self._identity, f=fingerprint, j=jvm_options, cp=classpath
)
)
self.daemon_spawn(post_fork_child_opts=post_fork_child_opts)
# Wait for and write the port information in the parent so we can bail on exception/timeout.
self.await_pid(self._startup_timeout)
self.write_socket(self._await_socket(self._connect_timeout))
logger.debug(
"Spawned nailgun server {i} with fingerprint={f}, pid={pid} port={port}".format(
i=self._identity, f=fingerprint, pid=self.pid, port=self.socket
)
)
client = self._create_ngclient(port=self.socket, stdout=stdout, stderr=stderr, stdin=stdin)
self.ensure_connectable(client)
return client
def _check_process_buildroot(self, process):
"""Matches only processes started from the current buildroot."""
return self._PANTS_NG_BUILDROOT_ARG in process.cmdline()
def is_alive(self):
"""A ProcessManager.is_alive() override that ensures buildroot flags are present in the
process command line arguments."""
return super().is_alive(self._check_process_buildroot)
def post_fork_child(self, fingerprint, jvm_options, classpath, stdout, stderr):
"""Post-fork() child callback for ProcessManager.daemon_spawn()."""
java = SubprocessExecutor(self._distribution)
subproc = java.spawn(
classpath=classpath,
main="com.martiansoftware.nailgun.NGServer",
jvm_options=jvm_options,
args=[":0"],
stdin=safe_open("/dev/null", "r"),
stdout=safe_open(self._ng_stdout, "w"),
stderr=safe_open(self._ng_stderr, "w"),
close_fds=True,
)
self.write_pid(subproc.pid)
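# --- Verification sketches (added for illustration; not in the original) ---
# 1) _NG_PORT_REGEX extracts the port from the sample NGServer startup line
#    quoted in the class comment above.
# 2) _fingerprint() sorts its inputs, so option/classpath ordering does not
#    change the fingerprint (the jar names and version are hypothetical).
def _demo_nailgun_helpers():
    line = "NGServer 0.9.1 started on 127.0.0.1, port 53785."
    assert NailgunExecutor._NG_PORT_REGEX.match(line).group(1) == "53785"
    fp_a = NailgunExecutor._fingerprint(
        ["-Xmx1g", "-Dfoo=1"], ["a.jar", "b.jar"], "1.8.0_231")
    fp_b = NailgunExecutor._fingerprint(
        ["-Dfoo=1", "-Xmx1g"], ["b.jar", "a.jar"], "1.8.0_231")
    assert fp_a == fp_b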
| 40.470914
| 105
| 0.624298
|
import hashlib
import logging
import os
import re
import selectors
import threading
import time
from contextlib import closing
from pants.base.build_environment import get_buildroot
from pants.java.executor import Executor, SubprocessExecutor
from pants.java.nailgun_client import NailgunClient
from pants.pantsd.process_manager import FingerprintedProcessManager, ProcessGroup
from pants.util.collections import ensure_str_list
from pants.util.dirutil import read_file, safe_file_dump, safe_open
from pants.util.memo import memoized_classproperty
logger = logging.getLogger(__name__)
class NailgunProcessGroup(ProcessGroup):
_NAILGUN_KILL_LOCK = threading.Lock()
def __init__(self, metadata_base_dir=None):
super().__init__(name="nailgun", metadata_base_dir=metadata_base_dir)
def _iter_nailgun_instances(self, everywhere=False):
def predicate(proc):
if proc.name() == NailgunExecutor._PROCESS_NAME:
if not everywhere:
return NailgunExecutor._PANTS_NG_BUILDROOT_ARG in proc.cmdline()
else:
return any(
arg.startswith(NailgunExecutor._PANTS_NG_ARG_PREFIX)
for arg in proc.cmdline()
)
return self.iter_instances(predicate)
def killall(self, everywhere=False):
with self._NAILGUN_KILL_LOCK:
for proc in self._iter_nailgun_instances(everywhere):
logger.info("killing nailgun server pid={pid}".format(pid=proc.pid))
proc.terminate()
class NailgunExecutor(Executor, FingerprintedProcessManager):
_NG_PORT_REGEX = re.compile(r".*\s+port\s+(\d+)\.$")
FINGERPRINT_CMD_KEY = "-Dpants.nailgun.fingerprint"
_PANTS_NG_ARG_PREFIX = "-Dpants.buildroot"
_PANTS_OWNER_ARG_PREFIX = "-Dpants.nailgun.owner"
@memoized_classproperty
def _PANTS_NG_BUILDROOT_ARG(cls):
return "=".join((cls._PANTS_NG_ARG_PREFIX, get_buildroot()))
_NAILGUN_SPAWN_LOCK = threading.Lock()
_PROCESS_NAME = "java"
def __init__(
self,
identity,
workdir,
nailgun_classpath,
distribution,
startup_timeout=10,
connect_timeout=10,
connect_attempts=5,
metadata_base_dir=None,
):
Executor.__init__(self, distribution=distribution)
FingerprintedProcessManager.__init__(
self,
name=identity,
process_name=self._PROCESS_NAME,
metadata_base_dir=metadata_base_dir,
)
if not isinstance(workdir, str):
raise ValueError(
"Workdir must be a path string, not: {workdir}".format(workdir=workdir)
)
self._identity = identity
self._workdir = workdir
self._ng_stdout = os.path.join(workdir, "stdout")
self._ng_stderr = os.path.join(workdir, "stderr")
self._nailgun_classpath = ensure_str_list(nailgun_classpath)
self._startup_timeout = startup_timeout
self._connect_timeout = connect_timeout
self._connect_attempts = connect_attempts
def __str__(self):
return "NailgunExecutor({identity}, dist={dist}, pid={pid} socket={socket})".format(
identity=self._identity, dist=self._distribution, pid=self.pid, socket=self.socket
)
def _create_owner_arg(self, workdir):
return "=".join((self._PANTS_OWNER_ARG_PREFIX, workdir))
def _create_fingerprint_arg(self, fingerprint):
return "=".join((self.FINGERPRINT_CMD_KEY, fingerprint))
@staticmethod
def _fingerprint(jvm_options, classpath, java_version):
digest = hashlib.sha1()
encoded_jvm_options = [option.encode() for option in sorted(jvm_options)]
encoded_classpath = [cp.encode() for cp in sorted(classpath)]
encoded_java_version = repr(java_version).encode()
for item in (encoded_jvm_options, encoded_classpath, encoded_java_version):
digest.update(str(item).encode())
return digest.hexdigest()
def _runner(self, classpath, main, jvm_options, args):
command = self._create_command(classpath, main, jvm_options, args)
class Runner(self.Runner):
@property
def executor(this):
return self
@property
def command(self):
return list(command)
def run(this, stdout=None, stderr=None, stdin=None, cwd=None):
nailgun = None
try:
nailgun = self._get_nailgun_client(
jvm_options, classpath, stdout, stderr, stdin
)
logger.debug(
"Executing via {ng_desc}: {cmd}".format(ng_desc=nailgun, cmd=this.cmd)
)
return nailgun.execute(main, cwd, *args)
except (NailgunClient.NailgunError, self.InitialNailgunConnectTimedOut) as e:
self.terminate()
raise self.Error(
"Problem launching via {ng_desc} command {main} {args}: {msg}".format(
ng_desc=nailgun or "<no nailgun connection>",
main=main,
args=" ".join(args),
msg=e,
)
)
return Runner()
def _check_nailgun_state(self, new_fingerprint):
running = self.is_alive()
updated = self.needs_restart(new_fingerprint)
        logger.debug(
"Nailgun {nailgun} state: updated={up!s} running={run!s} fingerprint={old_fp} "
"new_fingerprint={new_fp} distribution={old_dist} new_distribution={new_dist}".format(
nailgun=self._identity,
up=updated,
run=running,
old_fp=self.fingerprint,
new_fp=new_fingerprint,
old_dist=self.cmd,
new_dist=self._distribution.java,
)
)
return running, updated
def _get_nailgun_client(self, jvm_options, classpath, stdout, stderr, stdin):
classpath = self._nailgun_classpath + classpath
new_fingerprint = self._fingerprint(jvm_options, classpath, self._distribution.version)
with self._NAILGUN_SPAWN_LOCK:
running, updated = self._check_nailgun_state(new_fingerprint)
if running and updated:
logger.debug(
"Found running nailgun server that needs updating, killing {server}".format(
server=self._identity
)
)
self.terminate()
if (not running) or (running and updated):
return self._spawn_nailgun_server(
new_fingerprint, jvm_options, classpath, stdout, stderr, stdin
)
return self._create_ngclient(port=self.socket, stdout=stdout, stderr=stderr, stdin=stdin)
class InitialNailgunConnectTimedOut(Exception):
_msg_fmt = """Failed to read nailgun output after {timeout} seconds!
Stdout:
{stdout}
Stderr:
{stderr}"""
def __init__(self, timeout, stdout, stderr):
msg = self._msg_fmt.format(timeout=timeout, stdout=stdout, stderr=stderr)
super(NailgunExecutor.InitialNailgunConnectTimedOut, self).__init__(msg)
def _await_socket(self, timeout):
start_time = time.time()
accumulated_stdout = ""
def calculate_remaining_time():
return time.time() - (start_time + timeout)
def possibly_raise_timeout(remaining_time):
if remaining_time > 0:
stderr = read_file(self._ng_stderr, binary_mode=True)
raise self.InitialNailgunConnectTimedOut(
timeout=timeout, stdout=accumulated_stdout, stderr=stderr,
)
with selectors.PollSelector() as selector, safe_open(self._ng_stdout, "r") as ng_stdout:
selector.register(ng_stdout, selectors.EVENT_READ)
while 1:
remaining_time = calculate_remaining_time()
possibly_raise_timeout(remaining_time)
events = selector.select(timeout=-1 * remaining_time)
if events:
line = ng_stdout.readline()
try:
return self._NG_PORT_REGEX.match(line).group(1)
except AttributeError:
pass
accumulated_stdout += line
def _create_ngclient(self, port, stdout, stderr, stdin):
return NailgunClient(port=port, ins=stdin, out=stdout, err=stderr)
def ensure_connectable(self, nailgun):
attempt_count = 1
while 1:
try:
with closing(nailgun.try_connect()) as sock:
logger.debug(
"Verified new ng server is connectable at {}".format(sock.getpeername())
)
return
except nailgun.NailgunConnectionError:
if attempt_count >= self._connect_attempts:
logger.debug(
"Failed to connect to ng after {} attempts".format(self._connect_attempts)
)
raise
attempt_count += 1
time.sleep(self.WAIT_INTERVAL_SEC)
def _spawn_nailgun_server(self, fingerprint, jvm_options, classpath, stdout, stderr, stdin):
safe_file_dump(self._ng_stdout, b"", mode="wb")
safe_file_dump(self._ng_stderr, b"", mode="wb")
jvm_options = jvm_options + [
self._PANTS_NG_BUILDROOT_ARG,
self._create_owner_arg(self._workdir),
self._create_fingerprint_arg(fingerprint),
]
post_fork_child_opts = dict(
fingerprint=fingerprint,
jvm_options=jvm_options,
classpath=classpath,
stdout=stdout,
stderr=stderr,
)
logger.debug(
"Spawning nailgun server {i} with fingerprint={f}, jvm_options={j}, classpath={cp}".format(
i=self._identity, f=fingerprint, j=jvm_options, cp=classpath
)
)
self.daemon_spawn(post_fork_child_opts=post_fork_child_opts)
self.await_pid(self._startup_timeout)
self.write_socket(self._await_socket(self._connect_timeout))
logger.debug(
"Spawned nailgun server {i} with fingerprint={f}, pid={pid} port={port}".format(
i=self._identity, f=fingerprint, pid=self.pid, port=self.socket
)
)
client = self._create_ngclient(port=self.socket, stdout=stdout, stderr=stderr, stdin=stdin)
self.ensure_connectable(client)
return client
def _check_process_buildroot(self, process):
return self._PANTS_NG_BUILDROOT_ARG in process.cmdline()
def is_alive(self):
return super().is_alive(self._check_process_buildroot)
def post_fork_child(self, fingerprint, jvm_options, classpath, stdout, stderr):
java = SubprocessExecutor(self._distribution)
subproc = java.spawn(
classpath=classpath,
main="com.martiansoftware.nailgun.NGServer",
jvm_options=jvm_options,
args=[":0"],
stdin=safe_open("/dev/null", "r"),
stdout=safe_open(self._ng_stdout, "w"),
stderr=safe_open(self._ng_stderr, "w"),
close_fds=True,
)
self.write_pid(subproc.pid)
| true
| true
|
790b6f9d96e763bc5221f516f7bd3579d162c531
| 6,968
|
py
|
Python
|
src/python/pants/backend/python/util_rules/local_dists.py
|
chebbyChefNEQ/pants
|
a53b9d29a160f36f9af1d1a2c43a693b6a55fa55
|
[
"Apache-2.0"
] | 1
|
2016-04-27T15:35:42.000Z
|
2016-04-27T15:35:42.000Z
|
src/python/pants/backend/python/util_rules/local_dists.py
|
chebbyChefNEQ/pants
|
a53b9d29a160f36f9af1d1a2c43a693b6a55fa55
|
[
"Apache-2.0"
] | null | null | null |
src/python/pants/backend/python/util_rules/local_dists.py
|
chebbyChefNEQ/pants
|
a53b9d29a160f36f9af1d1a2c43a693b6a55fa55
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import logging
import os
import zipfile
from dataclasses import dataclass
from io import BytesIO
from typing import Iterable
from pants.backend.python.subsystems.setuptools import PythonDistributionFieldSet
from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints
from pants.backend.python.util_rules.pex import Pex, PexRequest, PexRequirements
from pants.backend.python.util_rules.pex import rules as pex_rules
from pants.backend.python.util_rules.python_sources import PythonSourceFiles
from pants.build_graph.address import Address
from pants.core.goals.package import BuiltPackage, PackageFieldSet
from pants.core.util_rules.source_files import SourceFiles
from pants.engine.addresses import Addresses
from pants.engine.fs import (
EMPTY_SNAPSHOT,
Digest,
DigestContents,
DigestSubset,
MergeDigests,
PathGlobs,
Snapshot,
)
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.target import TransitiveTargets, TransitiveTargetsRequest
from pants.util.docutil import doc_url
from pants.util.meta import frozen_after_init
logger = logging.getLogger(__name__)
@frozen_after_init
@dataclass(unsafe_hash=True)
class LocalDistsPexRequest:
"""Request to build the local dists from the dependency closure of a set of addresses."""
addresses: Addresses
interpreter_constraints: InterpreterConstraints
# The result will return these with the sources provided by the dists subtracted out.
# This will help the caller prevent sources from appearing twice on sys.path.
sources: PythonSourceFiles
def __init__(
self,
addresses: Iterable[Address],
*,
interpreter_constraints: InterpreterConstraints = InterpreterConstraints(),
sources: PythonSourceFiles = PythonSourceFiles(
SourceFiles(EMPTY_SNAPSHOT, tuple()), tuple()
),
) -> None:
self.addresses = Addresses(addresses)
self.interpreter_constraints = interpreter_constraints
self.sources = sources
@dataclass(frozen=True)
class LocalDistsPex:
"""A PEX file containing locally-built dists.
Can be consumed from another PEX, e.g., by adding to PEX_PATH.
Lists the files provided by the dists on sys.path, so they can be subtracted from
sources digests, to prevent the same file ending up on sys.path twice.
"""
pex: Pex
# The sources from the request, but with any files provided by the local dists subtracted out.
remaining_sources: PythonSourceFiles
@rule(desc="Building local distributions")
async def build_local_dists(
request: LocalDistsPexRequest,
) -> LocalDistsPex:
transitive_targets = await Get(TransitiveTargets, TransitiveTargetsRequest(request.addresses))
applicable_targets = [
tgt for tgt in transitive_targets.closure if PythonDistributionFieldSet.is_applicable(tgt)
]
python_dist_field_sets = [
PythonDistributionFieldSet.create(target) for target in applicable_targets
]
dists = await MultiGet(
[Get(BuiltPackage, PackageFieldSet, field_set) for field_set in python_dist_field_sets]
)
# The primary use-case of the "local dists" feature is to support consuming native extensions
# as wheels without having to publish them first.
# It doesn't seem very useful to consume locally-built sdists, and it makes it hard to
# reason about possible sys.path collisions between the in-repo sources and whatever the
# sdist will place on the sys.path when it's installed.
# So for now we simply ignore sdists, with a warning if necessary.
provided_files = set()
wheels = []
all_contents = await MultiGet(Get(DigestContents, Digest, dist.digest) for dist in dists)
for dist, contents, tgt in zip(dists, all_contents, applicable_targets):
artifacts = set((a.relpath or "") for a in dist.artifacts)
# A given local dist might build a wheel and an sdist (and maybe other artifacts -
# we don't know what setup command was run...)
# As long as there is a wheel, we can ignore the other artifacts.
wheel = next((art for art in artifacts if art.endswith(".whl")), None)
if wheel:
wheel_content = next(content for content in contents if content.path == wheel)
wheels.append(wheel)
buf = BytesIO()
buf.write(wheel_content.content)
buf.seek(0)
with zipfile.ZipFile(buf) as zf:
provided_files.update(zf.namelist())
else:
logger.warning(
f"Encountered a dependency on the {tgt.alias} target at {tgt.address.spec}, but "
"this target does not produce a Python wheel artifact. Therefore this target's "
"code will be used directly from sources, without a distribution being built, "
"and therefore any native extensions in it will not be built.\n\n"
f"See {doc_url('python-distributions')} for details on how to set up a {tgt.alias} "
"target to produce a wheel."
)
dists_digest = await Get(Digest, MergeDigests([dist.digest for dist in dists]))
wheels_digest = await Get(Digest, DigestSubset(dists_digest, PathGlobs(["**/*.whl"])))
dists_pex = await Get(
Pex,
PexRequest(
output_filename="local_dists.pex",
requirements=PexRequirements(wheels),
interpreter_constraints=request.interpreter_constraints,
additional_inputs=wheels_digest,
internal_only=True,
),
)
# We check source roots in reverse lexicographic order,
# so we'll find the innermost root that matches.
source_roots = list(reversed(sorted(request.sources.source_roots)))
remaining_sources = set(request.sources.source_files.files)
unrooted_files_set = set(request.sources.source_files.unrooted_files)
for source in request.sources.source_files.files:
if source not in unrooted_files_set:
for source_root in source_roots:
if (
source.startswith(source_root)
and os.path.relpath(source, source_root) in provided_files
):
remaining_sources.remove(source)
remaining_sources_snapshot = await Get(
Snapshot,
DigestSubset(
request.sources.source_files.snapshot.digest, PathGlobs(sorted(remaining_sources))
),
)
subtracted_sources = PythonSourceFiles(
SourceFiles(remaining_sources_snapshot, request.sources.source_files.unrooted_files),
request.sources.source_roots,
)
return LocalDistsPex(dists_pex, subtracted_sources)
def rules():
return (*collect_rules(), *pex_rules())
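# A minimal usage sketch, not part of the original file: a hypothetical
# downstream @rule requesting the locally-built dists PEX. The rule name,
# result type, and interpreter-constraint string below are illustrative
# assumptions only.
#
# @rule
# async def use_local_dists(addresses: Addresses) -> SomeResult:
#     local_dists = await Get(
#         LocalDistsPex,
#         LocalDistsPexRequest(
#             addresses,
#             interpreter_constraints=InterpreterConstraints(["CPython>=3.7"]),
#         ),
#     )
#     # local_dists.pex can be layered onto another PEX via PEX_PATH, and
#     # local_dists.remaining_sources avoids duplicating files on sys.path.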
| 40.045977
| 100
| 0.708668
|
from __future__ import annotations
import logging
import os
import zipfile
from dataclasses import dataclass
from io import BytesIO
from typing import Iterable
from pants.backend.python.subsystems.setuptools import PythonDistributionFieldSet
from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints
from pants.backend.python.util_rules.pex import Pex, PexRequest, PexRequirements
from pants.backend.python.util_rules.pex import rules as pex_rules
from pants.backend.python.util_rules.python_sources import PythonSourceFiles
from pants.build_graph.address import Address
from pants.core.goals.package import BuiltPackage, PackageFieldSet
from pants.core.util_rules.source_files import SourceFiles
from pants.engine.addresses import Addresses
from pants.engine.fs import (
EMPTY_SNAPSHOT,
Digest,
DigestContents,
DigestSubset,
MergeDigests,
PathGlobs,
Snapshot,
)
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.target import TransitiveTargets, TransitiveTargetsRequest
from pants.util.docutil import doc_url
from pants.util.meta import frozen_after_init
logger = logging.getLogger(__name__)
@frozen_after_init
@dataclass(unsafe_hash=True)
class LocalDistsPexRequest:
addresses: Addresses
interpreter_constraints: InterpreterConstraints
sources: PythonSourceFiles
def __init__(
self,
addresses: Iterable[Address],
*,
interpreter_constraints: InterpreterConstraints = InterpreterConstraints(),
sources: PythonSourceFiles = PythonSourceFiles(
SourceFiles(EMPTY_SNAPSHOT, tuple()), tuple()
),
) -> None:
self.addresses = Addresses(addresses)
self.interpreter_constraints = interpreter_constraints
self.sources = sources
@dataclass(frozen=True)
class LocalDistsPex:
pex: Pex
remaining_sources: PythonSourceFiles
@rule(desc="Building local distributions")
async def build_local_dists(
request: LocalDistsPexRequest,
) -> LocalDistsPex:
transitive_targets = await Get(TransitiveTargets, TransitiveTargetsRequest(request.addresses))
applicable_targets = [
tgt for tgt in transitive_targets.closure if PythonDistributionFieldSet.is_applicable(tgt)
]
python_dist_field_sets = [
PythonDistributionFieldSet.create(target) for target in applicable_targets
]
dists = await MultiGet(
[Get(BuiltPackage, PackageFieldSet, field_set) for field_set in python_dist_field_sets]
)
# reason about possible sys.path collisions between the in-repo sources and whatever the
# sdist will place on the sys.path when it's installed.
provided_files = set()
wheels = []
all_contents = await MultiGet(Get(DigestContents, Digest, dist.digest) for dist in dists)
for dist, contents, tgt in zip(dists, all_contents, applicable_targets):
artifacts = set((a.relpath or "") for a in dist.artifacts)
# As long as there is a wheel, we can ignore the other artifacts.
wheel = next((art for art in artifacts if art.endswith(".whl")), None)
if wheel:
wheel_content = next(content for content in contents if content.path == wheel)
wheels.append(wheel)
buf = BytesIO()
buf.write(wheel_content.content)
buf.seek(0)
with zipfile.ZipFile(buf) as zf:
provided_files.update(zf.namelist())
else:
logger.warning(
f"Encountered a dependency on the {tgt.alias} target at {tgt.address.spec}, but "
"this target does not produce a Python wheel artifact. Therefore this target's "
"code will be used directly from sources, without a distribution being built, "
"and therefore any native extensions in it will not be built.\n\n"
f"See {doc_url('python-distributions')} for details on how to set up a {tgt.alias} "
"target to produce a wheel."
)
dists_digest = await Get(Digest, MergeDigests([dist.digest for dist in dists]))
wheels_digest = await Get(Digest, DigestSubset(dists_digest, PathGlobs(["**/*.whl"])))
dists_pex = await Get(
Pex,
PexRequest(
output_filename="local_dists.pex",
requirements=PexRequirements(wheels),
interpreter_constraints=request.interpreter_constraints,
additional_inputs=wheels_digest,
internal_only=True,
),
)
source_roots = list(reversed(sorted(request.sources.source_roots)))
remaining_sources = set(request.sources.source_files.files)
unrooted_files_set = set(request.sources.source_files.unrooted_files)
for source in request.sources.source_files.files:
if source not in unrooted_files_set:
for source_root in source_roots:
if (
source.startswith(source_root)
and os.path.relpath(source, source_root) in provided_files
):
remaining_sources.remove(source)
remaining_sources_snapshot = await Get(
Snapshot,
DigestSubset(
request.sources.source_files.snapshot.digest, PathGlobs(sorted(remaining_sources))
),
)
subtracted_sources = PythonSourceFiles(
SourceFiles(remaining_sources_snapshot, request.sources.source_files.unrooted_files),
request.sources.source_roots,
)
return LocalDistsPex(dists_pex, subtracted_sources)
def rules():
return (*collect_rules(), *pex_rules())
| true
| true
|
790b6ff16d3728d62ee998ddd8af602b0035ffcb
| 44,353
|
py
|
Python
|
tensorflow/contrib/learn/python/learn/monitors.py
|
topsun888/tensorflow
|
bad7c50b9dc9789ad7dd0a62daca40b7269841ed
|
[
"Apache-2.0"
] | 1
|
2016-09-29T14:08:17.000Z
|
2016-09-29T14:08:17.000Z
|
tensorflow/contrib/learn/python/learn/monitors.py
|
topsun888/tensorflow
|
bad7c50b9dc9789ad7dd0a62daca40b7269841ed
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/contrib/learn/python/learn/monitors.py
|
topsun888/tensorflow
|
bad7c50b9dc9789ad7dd0a62daca40b7269841ed
|
[
"Apache-2.0"
] | 1
|
2020-07-09T22:02:18.000Z
|
2020-07-09T22:02:18.000Z
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Monitors allow user instrumentation of the training process.
Monitors are useful to track training, report progress, request early
stopping and more. Monitors use the observer pattern and notify at the following
points:
- when training begins
- before a training step
- after a training step
- when training ends
Monitors are not intended to be reusable.
There are a few pre-defined monitors:
- CaptureVariable: saves a variable's values
- GraphDump: intended for debug only - saves all tensor values
- PrintTensor: outputs one or more tensor values to log
- SummarySaver: saves summaries to a summary writer
- ValidationMonitor: runs model validation, by periodically calculating eval
metrics on a separate data set; supports optional early stopping
For more specific needs, you can create custom monitors by extending one of the
following classes:
- BaseMonitor: the base class for all monitors
- EveryN: triggers a callback every N training steps
Example:
class ExampleMonitor(monitors.BaseMonitor):
def __init__(self):
      print('Init')
    def begin(self, max_steps):
      print('Starting run. Will train until step %d.' % max_steps)
    def end(self):
      print('Completed run.')
    def step_begin(self, step):
      print('About to run step %d...' % step)
      return ['loss_1:0']
    def step_end(self, step, outputs):
      print('Done running step %d. The value of "loss" tensor: %s' % (
          step, outputs['loss_1:0']))
linear_regressor = LinearRegressor()
example_monitor = ExampleMonitor()
linear_regressor.fit(
x, y, steps=2, batch_size=1, monitors=[example_monitor])
@@get_default_monitors
@@BaseMonitor
@@CaptureVariable
@@CheckpointSaver
@@EveryN
@@ExportMonitor
@@GraphDump
@@LoggingTrainable
@@NanLoss
@@PrintTensor
@@StepCounter
@@StopAtStep
@@SummarySaver
@@ValidationMonitor
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import os
import time
import numpy as np
import six
from tensorflow.contrib.framework import deprecated_arg_values
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.learn.python.learn import session_run_hook
from tensorflow.contrib.learn.python.learn.summary_writer_cache import SummaryWriterCache
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import summary_io
# TODO(ptucker): Split each monitor class into a separate file.
# TODO(ptucker): Fail if epoch or step does not monotonically increase?
class BaseMonitor(object):
"""Base class for Monitors.
Defines basic interfaces of Monitors.
Monitors can either be run on all workers or, more commonly, restricted
to run exclusively on the elected chief worker.
"""
def __init__(self):
self._begun = False
self._current_epoch = None
self._current_step = None
self._max_steps = None
self._estimator = None
self._estimator_locked = False
@property
def run_on_all_workers(self):
return False
def set_estimator(self, estimator):
"""A setter called automatically by the target estimator.
If the estimator is locked, this method does nothing.
Args:
estimator: the estimator that this monitor monitors.
Raises:
ValueError: if the estimator is None.
"""
if self._estimator_locked:
return
if estimator is None:
raise ValueError("Missing estimator.")
# TODO(mdan): This should fail if called twice with the same estimator.
self._estimator = estimator
def _lock_estimator(self):
"""Locks the estimator until _unlock_estimator is called."""
self._estimator_locked = True
def _unlock_estimator(self):
"""Unlocks the estimator."""
self._estimator_locked = False
def begin(self, max_steps=None):
"""Called at the beginning of training.
When called, the default graph is the one we are executing.
Args:
max_steps: `int`, the maximum global step this training will run until.
Raises:
ValueError: if we've already begun a run.
"""
if self._begun:
raise ValueError("begin called twice without end.")
self._max_steps = max_steps
self._begun = True
def end(self, session=None):
"""Callback at the end of training/evaluation.
Args:
session: A `tf.Session` object that can be used to run ops.
Raises:
ValueError: if we've not begun a run.
"""
_ = session
if not self._begun:
raise ValueError("end called without begin.")
self._max_steps = None
self._begun = False
def epoch_begin(self, epoch):
"""Begin epoch.
Args:
epoch: `int`, the epoch number.
Raises:
ValueError: if we've already begun an epoch, or `epoch` < 0.
"""
if self._current_epoch is not None:
raise ValueError("epoch_begin called twice without epoch_end.")
if epoch < 0:
raise ValueError("Invalid epoch %s." % epoch)
self._current_epoch = epoch
def epoch_end(self, epoch):
"""End epoch.
Args:
epoch: `int`, the epoch number.
Raises:
ValueError: if we've not begun an epoch, or `epoch` number does not match.
"""
if self._current_epoch != epoch:
    raise ValueError(
        "epoch_end expected %s but got %s." % (self._current_epoch, epoch))
self._current_epoch = None
def step_begin(self, step):
"""Callback before training step begins.
You may use this callback to request evaluation of additional tensors
in the graph.
Args:
step: `int`, the current value of the global step.
Returns:
List of `Tensor` objects or string tensor names to be run.
Raises:
ValueError: if we've already begun a step, or `step` < 0, or
`step` > `max_steps`.
"""
if (step < 0) or (
(self._max_steps is not None) and (step > self._max_steps)):
raise ValueError("Invalid step %s." % step)
self._current_step = step
return []
def step_end(self, step, output): # pylint: disable=unused-argument
"""Callback after training step finished.
This callback provides access to the tensors/ops evaluated at this step,
including the additional tensors for which evaluation was requested in
`step_begin`.
In addition, the callback has the opportunity to stop training by returning
`True`. This is useful for early stopping, for example.
Note that this method is not called if the call to `Session.run()` that
followed the last call to `step_begin()` failed.
Args:
step: `int`, the current value of the global step.
output: `dict` mapping `string` values representing tensor names to
the value resulted from running these tensors. Values may be either
scalars, for scalar tensors, or Numpy `array`, for non-scalar tensors.
Returns:
`bool`. True if training should stop.
Raises:
ValueError: if we've not begun a step, or `step` number does not match.
"""
if self._current_step != step:
      raise ValueError(
          "step_end expected %s but got %s." % (self._current_step, step))
self._current_step = None
return False
def post_step(self, step, session): # pylint: disable=unused-argument
"""Callback after the step is finished.
Called after step_end and receives session to perform extra session.run
calls. If failure occurred in the process, will be called as well.
Args:
step: `int`, global step of the model.
session: `Session` object.
"""
_ = step, session
def _extract_output(outputs, request):
if request in outputs:
return outputs[request]
return outputs[request.name]
class EveryN(BaseMonitor):
"""Base class for monitors that execute callbacks every N steps.
This class adds three new callbacks:
- every_n_step_begin
- every_n_step_end
- every_n_post_step
The callbacks are executed every n steps, or optionally every step for the
first m steps, where m and n can both be user-specified.
When extending this class, note that if you wish to use any of the
`BaseMonitor` callbacks, you must call their respective super implementation:
def step_begin(self, step):
super(ExampleMonitor, self).step_begin(step)
return []
  Failing to call the super implementation will cause unpredictable behavior.
The `every_n_post_step()` callback is also called after the last step if it
was not already called through the regular conditions. Note that
`every_n_step_begin()` and `every_n_step_end()` do not receive that special
treatment.
"""
# TODO(ipolosukhin): Add also every n seconds.
def __init__(self, every_n_steps=100, first_n_steps=1):
"""Initializes an `EveryN` monitor.
Args:
every_n_steps: `int`, the number of steps to allow between callbacks.
first_n_steps: `int`, specifying the number of initial steps during
which the callbacks will always be executed, regardless of the value
        of `every_n_steps`. Note that this value is relative to the global step.
"""
super(EveryN, self).__init__()
self._every_n_steps = every_n_steps
self._first_n_steps = first_n_steps
# Last step in the model.
self._last_successful_step = None
# Last step at which we called one of the every_n methods
self._last_active_step = 0
self._every_n_step_begin_called = False
def every_n_step_begin(self, step): # pylint: disable=unused-argument
"""Callback before every n'th step begins.
Args:
step: `int`, the current value of the global step.
Returns:
A `list` of tensors that will be evaluated at this step.
"""
return []
def every_n_step_end(self, step, outputs): # pylint: disable=unused-argument
"""Callback after every n'th step finished.
This callback provides access to the tensors/ops evaluated at this step,
including the additional tensors for which evaluation was requested in
`step_begin`.
In addition, the callback has the opportunity to stop training by returning
`True`. This is useful for early stopping, for example.
Args:
step: `int`, the current value of the global step.
outputs: `dict` mapping `string` values representing tensor names to
the value resulted from running these tensors. Values may be either
scalars, for scalar tensors, or Numpy `array`, for non-scalar tensors.
Returns:
`bool`. True if training should stop.
"""
return False
def every_n_post_step(self, step, session):
"""Callback after a step is finished or `end()` is called.
Args:
step: `int`, the current value of the global step.
session: `Session` object.
"""
pass
def step_begin(self, step):
"""Overrides `BaseMonitor.step_begin`.
When overriding this method, you must call the super implementation.
Args:
step: `int`, the current value of the global step.
Returns:
A `list`, the result of every_n_step_begin, if that was called this step,
or an empty list otherwise.
Raises:
ValueError: if called more than once during a step.
"""
super(EveryN, self).step_begin(step)
if (step <= self._first_n_steps or
step >= (self._every_n_steps + self._last_active_step) or
step == self._max_steps): # Note: max_steps can be None here.
self._every_n_step_begin_called = True
return self.every_n_step_begin(step)
self._every_n_step_begin_called = False
return []
def step_end(self, step, output):
"""Overrides `BaseMonitor.step_end`.
When overriding this method, you must call the super implementation.
Args:
step: `int`, the current value of the global step.
output: `dict` mapping `string` values representing tensor names to
the value resulted from running these tensors. Values may be either
scalars, for scalar tensors, or Numpy `array`, for non-scalar tensors.
Returns:
`bool`, the result of every_n_step_end, if that was called this step,
or `False` otherwise.
"""
super(EveryN, self).step_end(step, output)
if self._every_n_step_begin_called:
return self.every_n_step_end(step, output)
return False
def post_step(self, step, session):
super(EveryN, self).post_step(step, session)
if self._every_n_step_begin_called:
self.every_n_post_step(step, session)
self._last_active_step = step
self._last_successful_step = step
def end(self, session=None):
super(EveryN, self).end(session=session)
if self._last_successful_step != self._last_active_step:
self.every_n_post_step(self._last_successful_step, session)
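# Illustrative sketch, not part of the original module: a minimal EveryN
# subclass that logs wall-clock time every N steps. The class name and its
# behavior are assumptions for demonstration; note the required super() calls
# described in the EveryN docstring.
class _ExampleTimerMonitor(EveryN):
  """Logs elapsed wall-clock time every N steps (illustrative only)."""

  def __init__(self, every_n_steps=100):
    super(_ExampleTimerMonitor, self).__init__(every_n_steps=every_n_steps)
    self._start_time = None

  def begin(self, max_steps=None):
    super(_ExampleTimerMonitor, self).begin(max_steps=max_steps)
    self._start_time = time.time()

  def every_n_step_end(self, step, outputs):
    super(_ExampleTimerMonitor, self).every_n_step_end(step, outputs)
    logging.info("Step %d: %.1fs elapsed.", step,
                 time.time() - self._start_time)
    return False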
class StopAtStep(BaseMonitor):
"""Monitor to request stop at a specified step."""
def __init__(self, num_steps=None, last_step=None):
"""Create a StopAtStep monitor.
This monitor requests stop after either a number of steps have been
    executed or a last step has been reached. Only one of the two options can
    be specified.
    If `num_steps` is specified, it indicates the number of steps to execute
after `begin()` is called. If instead `last_step` is specified, it
indicates the last step we want to execute, as passed to the `step_begin()`
call.
Args:
num_steps: Number of steps to execute.
last_step: Step after which to stop.
Raises:
ValueError: If one of the arguments is invalid.
"""
super(StopAtStep, self).__init__()
if num_steps is None and last_step is None:
raise ValueError("One of num_steps or last_step must be specified.")
if num_steps is not None and last_step is not None:
raise ValueError("Only one of num_steps or last_step can be specified.")
self._num_steps = num_steps
self._last_step = last_step
@property
def run_on_all_workers(self):
return True
def step_begin(self, step):
super(StopAtStep, self).step_begin(step)
if self._last_step is None:
self._last_step = step + self._num_steps - 1
return []
def step_end(self, step, output):
super(StopAtStep, self).step_end(step, output)
return step >= self._last_step
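# Usage sketch (illustrative, not part of the original module): stop either a
# fixed number of steps after begin(), or at an absolute global step, never
# both:
#   StopAtStep(num_steps=1000)    # stop 1000 steps after training begins
#   StopAtStep(last_step=50000)   # stop once global step 50000 is reached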
# TODO(ptucker): Rename to LoggingTensor since it's not writing to stdout.
class PrintTensor(EveryN):
"""Prints given tensors every N steps.
This is an `EveryN` monitor and has consistent semantic for `every_n`
and `first_n`.
The tensors will be printed to the log, with `INFO` severity.
"""
def __init__(self, tensor_names, every_n=100, first_n=1):
"""Initializes a PrintTensor monitor.
Args:
tensor_names: `dict` of tag to tensor names or
`iterable` of tensor names (strings).
      every_n: `int`, print every N steps. See `EveryN`.
      first_n: `int`, also print the first N steps. See `EveryN`.
"""
super(PrintTensor, self).__init__(every_n, first_n)
if not isinstance(tensor_names, dict):
tensor_names = {item: item for item in tensor_names}
self._tensor_names = tensor_names
def every_n_step_begin(self, step):
super(PrintTensor, self).every_n_step_begin(step)
return list(self._tensor_names.values())
def every_n_step_end(self, step, outputs):
super(PrintTensor, self).every_n_step_end(step, outputs)
stats = []
for tag, tensor_name in six.iteritems(self._tensor_names):
if tensor_name in outputs:
stats.append("%s = %s" % (tag,
str(_extract_output(outputs, tensor_name))))
logging.info("Step %d: %s", step, ", ".join(stats))
class LoggingTrainable(EveryN):
"""Writes trainable variable values into log every N steps.
Write the tensors in trainable variables `every_n` steps,
starting with the `first_n`th step.
"""
def __init__(self, scope=None, every_n=100, first_n=1):
"""Initializes LoggingTrainable monitor.
Args:
scope: An optional string to match variable names using re.match.
every_n: Print every N steps.
first_n: Print first N steps.
"""
super(LoggingTrainable, self).__init__(every_n, first_n)
self._scope = scope
def every_n_step_begin(self, step):
super(LoggingTrainable, self).every_n_step_begin(step)
    # Get a list of trainable variables at the beginning of every N steps.
# We cannot get this in __init__ because train_op has not been generated.
trainables = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES,
scope=self._scope)
self._names = {}
for var in trainables:
self._names[var.name] = var.value().name
return list(self._names.values())
def every_n_step_end(self, step, outputs):
super(LoggingTrainable, self).every_n_step_end(step, outputs)
stats = []
for tag, tensor_name in six.iteritems(self._names):
if tensor_name in outputs:
stats.append("%s = %s" % (tag,
str(_extract_output(outputs, tensor_name))))
logging.info("Logging Trainable: Step %d: %s", step, ", ".join(stats))
class SummarySaver(EveryN):
"""Saves summaries every N steps."""
def __init__(self,
summary_op,
save_steps=100,
output_dir=None,
summary_writer=None,
scaffold=None):
"""Initializes a `SummarySaver` monitor.
Args:
summary_op: `Tensor` of type `string`. A serialized `Summary` protocol
buffer, as output by TF summary methods like `scalar_summary` or
`merge_all_summaries`.
save_steps: `int`, save summaries every N steps. See `EveryN`.
output_dir: `string`, the directory to save the summaries to. Only used
if no `summary_writer` is supplied.
summary_writer: `SummaryWriter`. If `None` and an `output_dir` was passed,
one will be created accordingly.
scaffold: `Scaffold` to get summary_op if it's not provided.
"""
# TODO(ipolosukhin): Implement every N seconds.
super(SummarySaver, self).__init__(every_n_steps=save_steps)
self._summary_op = summary_op
self._summary_writer = summary_writer
if summary_writer is None and output_dir:
self._summary_writer = summary_io.SummaryWriter(output_dir)
self._scaffold = scaffold
# TODO(mdan): Throw an error if output_dir and summary_writer are None.
def set_estimator(self, estimator):
super(SummarySaver, self).set_estimator(estimator)
# TODO(mdan): This line looks redundant.
if self._summary_writer is None:
self._summary_writer = summary_io.SummaryWriter(estimator.model_dir)
def every_n_step_begin(self, step):
super(SummarySaver, self).every_n_step_begin(step)
if self._summary_op is None and self._scaffold is not None:
self._summary_op = self._scaffold.summary_op
if self._summary_op is not None:
return [self._summary_op]
return []
def every_n_step_end(self, step, outputs):
super(SummarySaver, self).every_n_step_end(step, outputs)
if self._summary_op is not None:
summary_strs = _extract_output(outputs, self._summary_op)
if self._summary_writer:
self._summary_writer.add_summary(summary_strs, step)
return False
def end(self, session=None):
super(SummarySaver, self).end(session=session)
if self._summary_writer:
self._summary_writer.flush()
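# Usage sketch (illustrative, not part of the original module): save merged
# summaries every 100 steps; a SummaryWriter is created from output_dir when
# none is supplied. The tensor and path below are assumptions:
#   SummarySaver(summary_op=merged_summaries, save_steps=100,
#                output_dir="/tmp/model")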
class ValidationMonitor(EveryN):
"""Runs evaluation of a given estimator, at most every N steps.
Note that the evaluation is done based on the saved checkpoint, which will
usually be older than the current step.
Can do early stopping on validation metrics if `early_stopping_rounds` is
provided.
"""
def __init__(self, x=None, y=None, input_fn=None, batch_size=None,
eval_steps=None,
every_n_steps=100, metrics=None, early_stopping_rounds=None,
early_stopping_metric="loss",
early_stopping_metric_minimize=True, name=None):
"""Initializes a ValidationMonitor.
Args:
x: See `BaseEstimator.evaluate`.
y: See `BaseEstimator.evaluate`.
input_fn: See `BaseEstimator.evaluate`.
batch_size: See `BaseEstimator.evaluate`.
eval_steps: See `BaseEstimator.evaluate`.
every_n_steps: Check for new checkpoints to evaluate every N steps. If a
new checkpoint is found, it is evaluated. See `EveryN`.
metrics: See `BaseEstimator.evaluate`.
early_stopping_rounds: `int`. If the metric indicated by
`early_stopping_metric` does not change according to
`early_stopping_metric_minimize` for this many steps, then training
will be stopped.
early_stopping_metric: `string`, name of the metric to check for early
stopping.
early_stopping_metric_minimize: `bool`, True if `early_stopping_metric` is
expected to decrease (thus early stopping occurs when this metric
stops decreasing), False if `early_stopping_metric` is expected to
increase. Typically, `early_stopping_metric_minimize` is True for
loss metrics like mean squared error, and False for performance
metrics like accuracy.
name: See `BaseEstimator.evaluate`.
Raises:
ValueError: If both x and input_fn are provided.
"""
super(ValidationMonitor, self).__init__(every_n_steps=every_n_steps,
first_n_steps=-1)
# TODO(mdan): Checks like this are already done by evaluate.
if x is None and input_fn is None:
raise ValueError("Either x or input_fn should be provided.")
self.x = x
self.y = y
self.input_fn = input_fn
self.batch_size = batch_size
self.eval_steps = eval_steps
self.metrics = metrics
self.early_stopping_rounds = early_stopping_rounds
self.early_stopping_metric = early_stopping_metric
self.early_stopping_metric_minimize = early_stopping_metric_minimize
self.name = name
self._best_value_step = None
self._best_value = None
self._early_stopped = False
self._latest_path = None
self._latest_path_step = None
@property
def early_stopped(self):
"""Returns True if this monitor caused an early stop."""
return self._early_stopped
@property
def best_step(self):
"""Returns the step at which the best early stopping metric was found."""
return self._best_value_step
@property
def best_value(self):
"""Returns the best early stopping metric value found so far."""
return self._best_value
def every_n_step_end(self, step, outputs):
super(ValidationMonitor, self).every_n_step_end(step, outputs)
# TODO(mdan): The use of step below is probably misleading.
# The code should probably use the step from the checkpoint, because
# that's what is being evaluated.
if self._estimator is None:
raise ValueError("Missing call to set_estimator.")
# Check that we are not running evaluation on the same checkpoint.
latest_path = saver_lib.latest_checkpoint(self._estimator.model_dir)
if latest_path is None:
logging.debug("Skipping evaluation since model has not been saved yet "
"at step %d.", step)
return False
if latest_path is not None and latest_path == self._latest_path:
logging.debug("Skipping evaluation due to same checkpoint %s for step %d "
"as for step %d.", latest_path, step,
self._latest_path_step)
return False
self._latest_path = latest_path
self._latest_path_step = step
# Run evaluation and log it.
validation_outputs = self._estimator.evaluate(
x=self.x, y=self.y, input_fn=self.input_fn, batch_size=self.batch_size,
steps=self.eval_steps, metrics=self.metrics, name=self.name)
stats = []
for name in validation_outputs:
stats.append("%s = %s" % (name, str(validation_outputs[name])))
logging.info("Validation (step %d): %s", step, ", ".join(stats))
# Early stopping logic.
if self.early_stopping_rounds is not None:
if self.early_stopping_metric not in validation_outputs:
raise ValueError("Metric %s missing from outputs %s." % (
self.early_stopping_metric, set(validation_outputs.keys())))
current_value = validation_outputs[self.early_stopping_metric]
if (self._best_value is None or (self.early_stopping_metric_minimize and
(current_value < self._best_value)) or
(not self.early_stopping_metric_minimize and
(current_value > self._best_value))):
self._best_value = current_value
self._best_value_step = step
stop_now = (step - self._best_value_step >= self.early_stopping_rounds)
if stop_now:
logging.info("Stopping. Best step: {} with {} = {}."
.format(self._best_value_step,
self.early_stopping_metric, self._best_value))
self._early_stopped = True
return True
return False
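# Illustrative configuration sketch (not part of the original module):
# periodic evaluation with early stopping on a non-improving loss. The
# `eval_input_fn` and `train_input_fn` names are placeholder assumptions:
#   validation_monitor = ValidationMonitor(
#       input_fn=eval_input_fn,
#       every_n_steps=500,
#       early_stopping_rounds=2000,
#       early_stopping_metric="loss",
#       early_stopping_metric_minimize=True)
#   estimator.fit(input_fn=train_input_fn, steps=100000,
#                 monitors=[validation_monitor])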
# TODO(ptucker): This really reads any tensor, not just vars, and requires the
# ':0' suffix on var_name.
class CaptureVariable(EveryN):
"""Captures a variable's values into a collection.
This monitor is useful for unit testing. You should exercise caution when
using this monitor in production, since it never discards values.
This is an `EveryN` monitor and has consistent semantic for `every_n`
and `first_n`.
"""
def __init__(self, var_name, every_n=100, first_n=1):
"""Initializes a CaptureVariable monitor.
Args:
var_name: `string`. The variable name, including suffix (typically ":0").
      every_n: `int`, capture every N steps. See `EveryN`.
      first_n: `int`, also capture the first N steps. See `EveryN`.
"""
super(CaptureVariable, self).__init__(every_n, first_n)
self._var_name = var_name
self._var_values = {}
@property
def values(self):
"""Returns the values captured so far.
Returns:
      `dict` mapping `int` step numbers to the values of the variable at the
respective step.
"""
return self._var_values
def every_n_step_begin(self, step):
super(CaptureVariable, self).every_n_step_begin(step)
return [self._var_name]
def every_n_step_end(self, step, outputs):
super(CaptureVariable, self).every_n_step_end(step, outputs)
self._var_values[step] = _extract_output(outputs, self._var_name)
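# Usage sketch (illustrative, not part of the original module; intended for
# tests). The variable name is an assumption:
#   monitor = CaptureVariable("my_var:0", every_n=10)
#   ...fit with monitors=[monitor]...
#   captured = monitor.values  # dict of step -> captured value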
def get_default_monitors(loss_op=None, summary_op=None, save_summary_steps=100,
output_dir=None, summary_writer=None):
"""Returns a default set of typically-used monitors.
Args:
loss_op: `Tensor`, the loss tensor. This will be printed using `PrintTensor`
at the default interval.
summary_op: See `SummarySaver`.
save_summary_steps: See `SummarySaver`.
output_dir: See `SummarySaver`.
summary_writer: See `SummarySaver`.
Returns:
`list` of monitors.
"""
monitors = []
if loss_op is not None:
monitors.append(PrintTensor(tensor_names={"loss": loss_op.name}))
if summary_op is not None:
monitors.append(SummarySaver(summary_op, save_steps=save_summary_steps,
output_dir=output_dir,
summary_writer=summary_writer))
return monitors
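# Usage sketch (illustrative, not part of the original module), assuming
# `loss` and `merged_summaries` tensors exist in the caller's graph:
#   monitors = get_default_monitors(loss_op=loss,
#                                   summary_op=merged_summaries,
#                                   save_summary_steps=100,
#                                   output_dir="/tmp/model")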
class GraphDump(BaseMonitor):
"""Dumps almost all tensors in the graph at every step.
Note, this is very expensive, prefer `PrintTensor` in production.
"""
IGNORE_OPS = ["Const", "Assign", "Identity", "Placeholder",
"RandomUniform", "Cast", "RestoreSlice"]
def __init__(self, ignore_ops=None):
"""Initializes GraphDump monitor.
Args:
ignore_ops: `list` of `string`. Names of ops to ignore.
If None, `GraphDump.IGNORE_OPS` is used.
"""
super(GraphDump, self).__init__()
self._ignore_ops = ignore_ops or GraphDump.IGNORE_OPS
self._data = {}
def begin(self, max_steps=None):
super(GraphDump, self).begin(max_steps=max_steps)
self._tensors = []
graph = ops.get_default_graph()
graph_def = graph.as_graph_def()
for node in graph_def.node:
if node.op in self._ignore_ops:
continue
logging.info("op=%s name=%s.", node.op, node.name)
try:
self._tensors.append(graph.get_tensor_by_name(node.name + ":0"))
except KeyError:
pass
def step_begin(self, step):
super(GraphDump, self).step_begin(step)
return self._tensors
def step_end(self, step, output):
super(GraphDump, self).step_end(step, output)
self._data[step] = output
@property
def data(self):
return self._data
# TODO(ptucker): Handle keys that are in one but not the other.
def compare(self, other_dump, step, atol=1e-06):
"""Compares two `GraphDump` monitors and returns differences.
Args:
other_dump: Another `GraphDump` monitor.
step: `int`, step to compare on.
atol: `float`, absolute tolerance in comparison of floating arrays.
Returns:
Returns tuple:
matched: `list` of keys that matched.
non_matched: `dict` of keys to tuple of 2 mismatched values.
Raises:
ValueError: if a key in `data` is missing from `other_dump` at `step`.
"""
non_matched = {}
matched = []
this_output = self.data[step] if step in self.data else {}
other_output = other_dump.data[step] if step in other_dump.data else {}
for key in this_output:
      if not isinstance(key, six.string_types):
continue
if key not in other_output:
raise ValueError("%s missing at step %s.", (key, step))
value1 = _extract_output(this_output, key)
value2 = _extract_output(other_output, key)
if isinstance(value1, str):
continue
if isinstance(value1, np.ndarray):
if not np.allclose(value1, value2, atol=atol):
non_matched[key] = value1 - value2
else:
matched.append(key)
else:
if value1 != value2:
non_matched[key] = (value1, value2)
else:
matched.append(key)
return matched, non_matched
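# Usage sketch (illustrative, not part of the original module): dump two runs
# with separate GraphDump monitors, then diff their tensors at a given step:
#   dump_a, dump_b = GraphDump(), GraphDump()
#   ...fit the two models, passing monitors=[dump_a] and monitors=[dump_b]...
#   matched, non_matched = dump_a.compare(dump_b, step=100, atol=1e-6)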
class ExportMonitor(EveryN):
"""Monitor that exports Estimator every N steps."""
# TODO(philstahlfeld): Investigate switching export.export_estimator
# configuration values to **kwargs so that updates to the export_estimator
# function don't have to be reflected here.
@deprecated_arg_values(
"2016-09-23",
"The signature of the input_fn accepted by export is changing to be "
"consistent with what's used by tf.Learn Estimator's train/evaluate. "
"input_fn (and in most cases, input_feature_key) will both become "
"required args.",
input_fn=None)
def __init__(self,
every_n_steps,
export_dir,
input_fn=None,
input_feature_key=None,
exports_to_keep=5,
signature_fn=None,
default_batch_size=1):
"""Initializes ExportMonitor.
Args:
every_n_steps: Run monitor every N steps.
export_dir: str, folder to export.
input_fn: A function that takes no argument and returns a tuple of
(features, targets), where features is a dict of string key to `Tensor`
and targets is a `Tensor` that's currently not used (and so can be
`None`).
input_feature_key: String key into the features dict returned by
`input_fn` that corresponds to the raw `Example` strings `Tensor` that
the exported model will take as input. Can only be `None` if you're
using a custom `signature_fn` that does not use the first arg
(examples).
exports_to_keep: int, number of exports to keep.
signature_fn: Function that returns a default signature and a named
signature map, given `Tensor` of `Example` strings, `dict` of `Tensor`s
for features and `dict` of `Tensor`s for predictions.
default_batch_size: Default batch size of the `Example` placeholder.
Raises:
ValueError: If `input_fn` and `input_feature_key` are not both defined or
are not both `None`.
"""
super(ExportMonitor, self).__init__(every_n_steps=every_n_steps)
self._export_dir = export_dir
self._input_fn = input_fn
self._input_feature_key = input_feature_key
self._use_deprecated_input_fn = input_fn is None
self._exports_to_keep = exports_to_keep
self._signature_fn = signature_fn
self._default_batch_size = default_batch_size
self._last_export_dir = None
@property
def export_dir(self):
return self._export_dir
@property
def exports_to_keep(self):
return self._exports_to_keep
@property
def signature_fn(self):
return self._signature_fn
@property
def last_export_dir(self):
"""Returns the directory containing the last completed export.
Returns:
The string path to the exported directory. NB: this functionality was
added on 2016/09/25; clients that depend on the return value may need
to handle the case where this function returns None because the
estimator being fitted does not yet return a value during export.
"""
return self._last_export_dir
def every_n_step_end(self, step, outputs):
super(ExportMonitor, self).every_n_step_end(step, outputs)
try:
self._last_export_dir = self._estimator.export(
self.export_dir,
exports_to_keep=self.exports_to_keep,
signature_fn=self.signature_fn,
input_fn=self._input_fn,
default_batch_size=self._default_batch_size,
input_feature_key=self._input_feature_key,
use_deprecated_input_fn=self._use_deprecated_input_fn)
except RuntimeError:
      # Currently we are not synchronized with saving checkpoints, which leads to
# runtime errors when we are calling export on the same global step.
# Exports depend on saved checkpoints for constructing the graph and
# getting the global step from the graph instance saved in the checkpoint.
# If the checkpoint is stale with respect to current step, the global step
# is taken to be the last saved checkpoint's global step and exporter
# doesn't export the same checkpoint again with the following error.
logging.info("Skipping exporting because the existing checkpoint has "
"already been exported. "
"Consider exporting less frequently.")
def end(self, session=None):
super(ExportMonitor, self).end(session=session)
latest_path = saver_lib.latest_checkpoint(self._estimator.model_dir)
if latest_path is None:
logging.info("Skipping export at the end since model has not been saved "
"yet.")
return
try:
self._last_export_dir = self._estimator.export(
self.export_dir,
exports_to_keep=self.exports_to_keep,
signature_fn=self.signature_fn,
input_fn=self._input_fn,
default_batch_size=self._default_batch_size,
input_feature_key=self._input_feature_key,
use_deprecated_input_fn=self._use_deprecated_input_fn)
except RuntimeError:
logging.info("Skipping exporting for the same step.")
class CheckpointSaver(BaseMonitor):
"""Saves checkpoints every N steps."""
def __init__(self,
checkpoint_dir,
save_secs=None,
save_steps=None,
saver=None,
checkpoint_basename="model.ckpt",
scaffold=None):
"""Initialize CheckpointSaver monitor.
Args:
checkpoint_dir: `str`, base directory for the checkpoint files.
save_secs: `int`, save every N secs.
save_steps: `int`, save every N steps.
saver: `Saver` object, used for saving.
checkpoint_basename: `str`, base name for the checkpoint files.
scaffold: `Scaffold`, use to get saver object.
Raises:
ValueError: If both `save_steps` and `save_secs` are not `None`.
ValueError: If both `save_steps` and `save_secs` are `None`.
"""
logging.info("Create CheckpointSaver.")
super(CheckpointSaver, self).__init__()
self._saver = saver
self._summary_writer = SummaryWriterCache.get(checkpoint_dir)
self._save_path = os.path.join(checkpoint_dir, checkpoint_basename)
self._scaffold = scaffold
self._save_secs = save_secs
self._save_steps = save_steps
self._last_saved_time = None
self._last_begin_step = None
self._last_saved_step = None
if save_steps is None and save_secs is None:
raise ValueError("Either save_steps or save_secs should be provided")
if (save_steps is not None) and (save_secs is not None):
raise ValueError("Can not provide both save_steps and save_secs.")
def begin(self, max_steps=None):
super(CheckpointSaver, self).begin(max_steps)
self._last_saved_time = None
self._last_begin_step = None
self._last_saved_step = None
def step_begin(self, step):
super(CheckpointSaver, self).step_begin(step)
self._last_begin_step = step
def post_step(self, step, session):
super(CheckpointSaver, self).post_step(step, session)
if self._last_saved_time is None:
self._save(step, session)
if self._save_steps is not None:
if step >= self._last_saved_step + self._save_steps:
self._save(step, session)
if self._save_secs is not None:
if time.time() >= self._last_saved_time + self._save_secs:
self._save(step, session)
def end(self, session=None):
super(CheckpointSaver, self).end(session)
self._save(self._last_begin_step, session)
def _save(self, step, session):
"""Saves the latest checkpoint."""
if step == self._last_saved_step:
return
logging.info("Saving checkpoints for %d into %s.", step, self._save_path)
self._last_saved_time = time.time()
self._last_saved_step = step
if self._saver is None:
self._scaffold.saver.save(session, self._save_path, global_step=step)
else:
self._saver.save(session, self._save_path, global_step=step)
self._summary_writer.add_session_log(
SessionLog(
status=SessionLog.CHECKPOINT, checkpoint_path=self._save_path),
step)
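# Usage sketch (illustrative, not part of the original module): checkpoint on
# a step interval or a wall-clock interval, never both. The path is an
# assumption:
#   CheckpointSaver("/tmp/model", save_steps=1000)
#   CheckpointSaver("/tmp/model", save_secs=600)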
class StepCounter(EveryN):
"""Steps per second monitor."""
def __init__(self, every_n_steps=100, output_dir=None,
summary_writer=None):
super(StepCounter, self).__init__(every_n_steps=every_n_steps)
self._summary_tag = "global_step/sec"
self._last_reported_step = None
self._last_reported_time = None
self._summary_writer = summary_writer
if summary_writer is None and output_dir:
self._summary_writer = SummaryWriterCache.get(output_dir)
def set_estimator(self, estimator):
super(StepCounter, self).set_estimator(estimator)
if self._summary_writer is None:
self._summary_writer = SummaryWriterCache.get(estimator.model_dir)
def every_n_step_end(self, current_step, outputs):
current_time = time.time()
if self._last_reported_time is not None and self._summary_writer:
added_steps = current_step - self._last_reported_step
elapsed_time = current_time - self._last_reported_time
steps_per_sec = added_steps / elapsed_time
summary = Summary(value=[Summary.Value(tag=self._summary_tag,
simple_value=steps_per_sec)])
self._summary_writer.add_summary(summary, current_step)
self._last_reported_step = current_step
self._last_reported_time = current_time
class NanLossDuringTrainingError(RuntimeError):
def __str__(self):
return "NaN loss during training."
class NanLoss(EveryN):
"""NaN Loss monitor.
Monitors loss and stops training if loss is NaN.
Can either fail with exception or just stop training.
"""
def __init__(self, loss_tensor, every_n_steps=100, fail_on_nan_loss=True):
"""Initializes NanLoss monitor.
Args:
loss_tensor: `Tensor`, the loss tensor.
every_n_steps: `int`, run check every this many steps.
fail_on_nan_loss: `bool`, whether to raise exception when loss is NaN.
"""
super(NanLoss, self).__init__(every_n_steps=every_n_steps)
self._loss_tensor = loss_tensor
self._fail_on_nan_loss = fail_on_nan_loss
def every_n_step_begin(self, step):
super(NanLoss, self).every_n_step_begin(step)
return [self._loss_tensor]
def every_n_step_end(self, step, outputs):
super(NanLoss, self).every_n_step_end(step, outputs)
if np.isnan(_extract_output(outputs, self._loss_tensor)):
failure_message = "Model diverged with loss = NaN."
if self._fail_on_nan_loss:
logging.error(failure_message)
raise NanLossDuringTrainingError
else:
logging.warning(failure_message)
# We don't raise an error but we return "should stop" so we stop, but
# without an exception.
return True
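# Usage sketch (illustrative, not part of the original module): watch a loss
# tensor and stop training, rather than raise, when it becomes NaN:
#   NanLoss(loss_tensor, every_n_steps=100, fail_on_nan_loss=False)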
class RunHookAdapterForMonitors(session_run_hook.SessionRunHook):
"""Wraps monitors into a SessionRunHook."""
def __init__(self, monitors):
self._monitors = monitors
def begin(self):
self._last_step = None
self._global_step_tensor = contrib_variables.get_global_step()
for m in self._monitors:
m.begin(max_steps=None)
def before_run(self, run_context):
if self._last_step is None:
self._last_step = run_context.session.run(self._global_step_tensor) + 1
request = {self._global_step_tensor: self._global_step_tensor}
monitor_fetches = []
for m in self._monitors:
monitor_requests = m.step_begin(self._last_step)
if monitor_requests:
if not isinstance(monitor_requests, list):
raise ValueError("Monitor.step_begin should return a list.")
monitor_fetches.extend(monitor_requests)
if monitor_fetches:
request["monitors"] = dict(
zip(monitor_fetches, [_as_graph_element(f) for f in monitor_fetches]))
return session_run_hook.SessionRunArgs(request)
def after_run(self, run_context, run_values):
result = run_values.results[
"monitors"] if "monitors" in run_values.results else {}
for m in self._monitors:
induce_stop = m.step_end(self._last_step, result)
if induce_stop:
run_context.request_stop()
for m in self._monitors:
m.post_step(self._last_step, run_context.session)
self._last_step = run_values.results[self._global_step_tensor] + 1
def end(self, session):
self._last_step = None
for m in self._monitors:
if "session" in inspect.getargspec(m.end).args:
m.end(session=session)
else:
m.end()
def _as_graph_element(obj):
"""Retrieves Graph element."""
graph = ops.get_default_graph()
if not isinstance(obj, six.string_types):
if not hasattr(obj, "graph") or obj.graph != graph:
raise ValueError("Passed %s should have graph attribute that is equal "
"to current graph %s." % (obj, graph))
return obj
if ":" in obj:
element = graph.as_graph_element(obj)
else:
element = graph.as_graph_element(obj + ":0")
  # Check that there is no :1 (i.e., the op has a single output).
try:
graph.as_graph_element(obj + ":1")
except (KeyError, ValueError):
pass
else:
raise ValueError("Name %s is ambiguous, "
"as this `Operation` has multiple outputs "
"(at least 2)." % obj)
return element
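# A minimal wiring sketch, not part of the original file: adapting a list of
# monitors into a SessionRunHook for a MonitoredSession-style loop. The hook
# consumer shown (tf.train.MonitoredTrainingSession) is an assumption about
# the caller's environment, as are `loss_tensor` and `train_op`:
#   hook = RunHookAdapterForMonitors(
#       [StepCounter(every_n_steps=100), NanLoss(loss_tensor)])
#   with tf.train.MonitoredTrainingSession(hooks=[hook]) as sess:
#       while not sess.should_stop():
#           sess.run(train_op)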
| 35.425719
| 89
| 0.69116
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import os
import time
import numpy as np
import six
from tensorflow.contrib.framework import deprecated_arg_values
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.learn.python.learn import session_run_hook
from tensorflow.contrib.learn.python.learn.summary_writer_cache import SummaryWriterCache
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import summary_io
class BaseMonitor(object):
def __init__(self):
self._begun = False
self._current_epoch = None
self._current_step = None
self._max_steps = None
self._estimator = None
self._estimator_locked = False
@property
def run_on_all_workers(self):
return False
def set_estimator(self, estimator):
if self._estimator_locked:
return
if estimator is None:
raise ValueError("Missing estimator.")
self._estimator = estimator
def _lock_estimator(self):
self._estimator_locked = True
def _unlock_estimator(self):
self._estimator_locked = False
def begin(self, max_steps=None):
if self._begun:
raise ValueError("begin called twice without end.")
self._max_steps = max_steps
self._begun = True
def end(self, session=None):
_ = session
if not self._begun:
raise ValueError("end called without begin.")
self._max_steps = None
self._begun = False
def epoch_begin(self, epoch):
if self._current_epoch is not None:
raise ValueError("epoch_begin called twice without epoch_end.")
if epoch < 0:
raise ValueError("Invalid epoch %s." % epoch)
self._current_epoch = epoch
def epoch_end(self, epoch):
if self._current_epoch != epoch:
    raise ValueError(
        "epoch_end expected %s but got %s." % (self._current_epoch, epoch))
self._current_epoch = None
def step_begin(self, step):
if (step < 0) or (
(self._max_steps is not None) and (step > self._max_steps)):
raise ValueError("Invalid step %s." % step)
self._current_step = step
return []
def step_end(self, step, output):
if self._current_step != step:
      raise ValueError(
          "step_end expected %s but got %s." % (self._current_step, step))
self._current_step = None
return False
def post_step(self, step, session):
_ = step, session
def _extract_output(outputs, request):
if request in outputs:
return outputs[request]
return outputs[request.name]
class EveryN(BaseMonitor):
def __init__(self, every_n_steps=100, first_n_steps=1):
super(EveryN, self).__init__()
self._every_n_steps = every_n_steps
self._first_n_steps = first_n_steps
self._last_successful_step = None
self._last_active_step = 0
self._every_n_step_begin_called = False
def every_n_step_begin(self, step):
return []
def every_n_step_end(self, step, outputs):
return False
def every_n_post_step(self, step, session):
pass
def step_begin(self, step):
super(EveryN, self).step_begin(step)
if (step <= self._first_n_steps or
step >= (self._every_n_steps + self._last_active_step) or
step == self._max_steps):
self._every_n_step_begin_called = True
return self.every_n_step_begin(step)
self._every_n_step_begin_called = False
return []
def step_end(self, step, output):
super(EveryN, self).step_end(step, output)
if self._every_n_step_begin_called:
return self.every_n_step_end(step, output)
return False
def post_step(self, step, session):
super(EveryN, self).post_step(step, session)
if self._every_n_step_begin_called:
self.every_n_post_step(step, session)
self._last_active_step = step
self._last_successful_step = step
def end(self, session=None):
super(EveryN, self).end(session=session)
if self._last_successful_step != self._last_active_step:
self.every_n_post_step(self._last_successful_step, session)
class StopAtStep(BaseMonitor):
def __init__(self, num_steps=None, last_step=None):
super(StopAtStep, self).__init__()
if num_steps is None and last_step is None:
raise ValueError("One of num_steps or last_step must be specified.")
if num_steps is not None and last_step is not None:
raise ValueError("Only one of num_steps or last_step can be specified.")
self._num_steps = num_steps
self._last_step = last_step
@property
def run_on_all_workers(self):
return True
def step_begin(self, step):
super(StopAtStep, self).step_begin(step)
if self._last_step is None:
self._last_step = step + self._num_steps - 1
return []
def step_end(self, step, output):
super(StopAtStep, self).step_end(step, output)
return step >= self._last_step
class PrintTensor(EveryN):
def __init__(self, tensor_names, every_n=100, first_n=1):
super(PrintTensor, self).__init__(every_n, first_n)
if not isinstance(tensor_names, dict):
tensor_names = {item: item for item in tensor_names}
self._tensor_names = tensor_names
def every_n_step_begin(self, step):
super(PrintTensor, self).every_n_step_begin(step)
return list(self._tensor_names.values())
def every_n_step_end(self, step, outputs):
super(PrintTensor, self).every_n_step_end(step, outputs)
stats = []
for tag, tensor_name in six.iteritems(self._tensor_names):
if tensor_name in outputs:
stats.append("%s = %s" % (tag,
str(_extract_output(outputs, tensor_name))))
logging.info("Step %d: %s", step, ", ".join(stats))
class LoggingTrainable(EveryN):
def __init__(self, scope=None, every_n=100, first_n=1):
super(LoggingTrainable, self).__init__(every_n, first_n)
self._scope = scope
def every_n_step_begin(self, step):
super(LoggingTrainable, self).every_n_step_begin(step)
    # Get a list of trainable variables at the beginning of every N steps.
# We cannot get this in __init__ because train_op has not been generated.
trainables = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES,
scope=self._scope)
self._names = {}
for var in trainables:
self._names[var.name] = var.value().name
return list(self._names.values())
def every_n_step_end(self, step, outputs):
super(LoggingTrainable, self).every_n_step_end(step, outputs)
stats = []
for tag, tensor_name in six.iteritems(self._names):
if tensor_name in outputs:
stats.append("%s = %s" % (tag,
str(_extract_output(outputs, tensor_name))))
logging.info("Logging Trainable: Step %d: %s", step, ", ".join(stats))
class SummarySaver(EveryN):
def __init__(self,
summary_op,
save_steps=100,
output_dir=None,
summary_writer=None,
scaffold=None):
# TODO(ipolosukhin): Implement every N seconds.
super(SummarySaver, self).__init__(every_n_steps=save_steps)
self._summary_op = summary_op
self._summary_writer = summary_writer
if summary_writer is None and output_dir:
self._summary_writer = summary_io.SummaryWriter(output_dir)
self._scaffold = scaffold
# TODO(mdan): Throw an error if output_dir and summary_writer are None.
def set_estimator(self, estimator):
super(SummarySaver, self).set_estimator(estimator)
# TODO(mdan): This line looks redundant.
if self._summary_writer is None:
self._summary_writer = summary_io.SummaryWriter(estimator.model_dir)
def every_n_step_begin(self, step):
super(SummarySaver, self).every_n_step_begin(step)
if self._summary_op is None and self._scaffold is not None:
self._summary_op = self._scaffold.summary_op
if self._summary_op is not None:
return [self._summary_op]
return []
def every_n_step_end(self, step, outputs):
super(SummarySaver, self).every_n_step_end(step, outputs)
if self._summary_op is not None:
summary_strs = _extract_output(outputs, self._summary_op)
if self._summary_writer:
self._summary_writer.add_summary(summary_strs, step)
return False
def end(self, session=None):
super(SummarySaver, self).end(session=session)
if self._summary_writer:
self._summary_writer.flush()
class ValidationMonitor(EveryN):
def __init__(self, x=None, y=None, input_fn=None, batch_size=None,
eval_steps=None,
every_n_steps=100, metrics=None, early_stopping_rounds=None,
early_stopping_metric="loss",
early_stopping_metric_minimize=True, name=None):
super(ValidationMonitor, self).__init__(every_n_steps=every_n_steps,
first_n_steps=-1)
# TODO(mdan): Checks like this are already done by evaluate.
if x is None and input_fn is None:
raise ValueError("Either x or input_fn should be provided.")
self.x = x
self.y = y
self.input_fn = input_fn
self.batch_size = batch_size
self.eval_steps = eval_steps
self.metrics = metrics
self.early_stopping_rounds = early_stopping_rounds
self.early_stopping_metric = early_stopping_metric
self.early_stopping_metric_minimize = early_stopping_metric_minimize
self.name = name
self._best_value_step = None
self._best_value = None
self._early_stopped = False
self._latest_path = None
self._latest_path_step = None
@property
def early_stopped(self):
return self._early_stopped
@property
def best_step(self):
return self._best_value_step
@property
def best_value(self):
return self._best_value
def every_n_step_end(self, step, outputs):
super(ValidationMonitor, self).every_n_step_end(step, outputs)
# TODO(mdan): The use of step below is probably misleading.
# The code should probably use the step from the checkpoint, because
# that's what is being evaluated.
if self._estimator is None:
raise ValueError("Missing call to set_estimator.")
latest_path = saver_lib.latest_checkpoint(self._estimator.model_dir)
if latest_path is None:
logging.debug("Skipping evaluation since model has not been saved yet "
"at step %d.", step)
return False
if latest_path is not None and latest_path == self._latest_path:
logging.debug("Skipping evaluation due to same checkpoint %s for step %d "
"as for step %d.", latest_path, step,
self._latest_path_step)
return False
self._latest_path = latest_path
self._latest_path_step = step
validation_outputs = self._estimator.evaluate(
x=self.x, y=self.y, input_fn=self.input_fn, batch_size=self.batch_size,
steps=self.eval_steps, metrics=self.metrics, name=self.name)
stats = []
for name in validation_outputs:
stats.append("%s = %s" % (name, str(validation_outputs[name])))
logging.info("Validation (step %d): %s", step, ", ".join(stats))
if self.early_stopping_rounds is not None:
if self.early_stopping_metric not in validation_outputs:
raise ValueError("Metric %s missing from outputs %s." % (
self.early_stopping_metric, set(validation_outputs.keys())))
current_value = validation_outputs[self.early_stopping_metric]
if (self._best_value is None or (self.early_stopping_metric_minimize and
(current_value < self._best_value)) or
(not self.early_stopping_metric_minimize and
(current_value > self._best_value))):
self._best_value = current_value
self._best_value_step = step
stop_now = (step - self._best_value_step >= self.early_stopping_rounds)
if stop_now:
logging.info("Stopping. Best step: {} with {} = {}."
.format(self._best_value_step,
self.early_stopping_metric, self._best_value))
self._early_stopped = True
return True
return False
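# --- Editor's usage sketch; not part of the original module. ---
# Early stopping: evaluation runs every `every_n_steps`, and training stops
# once the eval loss has not improved for `early_stopping_rounds` steps. All
# parameters are hypothetical stand-ins for the caller's objects.
def _example_early_stopping(estimator, x_train, y_train, x_val, y_val):
  monitor = ValidationMonitor(x=x_val, y=y_val,
                              every_n_steps=50,
                              early_stopping_rounds=200,
                              early_stopping_metric="loss",
                              early_stopping_metric_minimize=True)
  estimator.fit(x=x_train, y=y_train, steps=10000, monitors=[monitor])
  return monitor.early_stopped, monitor.best_step, monitor.best_value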
class CaptureVariable(EveryN):
def __init__(self, var_name, every_n=100, first_n=1):
super(CaptureVariable, self).__init__(every_n, first_n)
self._var_name = var_name
self._var_values = {}
@property
def values(self):
return self._var_values
def every_n_step_begin(self, step):
super(CaptureVariable, self).every_n_step_begin(step)
return [self._var_name]
def every_n_step_end(self, step, outputs):
super(CaptureVariable, self).every_n_step_end(step, outputs)
self._var_values[step] = _extract_output(outputs, self._var_name)
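# --- Editor's usage sketch; not part of the original module. ---
# CaptureVariable records a named tensor every `every_n` steps (plus the
# first `first_n`); afterwards `values` maps global step -> captured value.
# The estimator, input_fn and tensor name are hypothetical stand-ins.
def _example_capture_variable(estimator, input_fn, var_name="my_var:0"):
  capture = CaptureVariable(var_name, every_n=100, first_n=1)
  estimator.fit(input_fn=input_fn, steps=500, monitors=[capture])
  return capture.values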
def get_default_monitors(loss_op=None, summary_op=None, save_summary_steps=100,
output_dir=None, summary_writer=None):
monitors = []
if loss_op is not None:
monitors.append(PrintTensor(tensor_names={"loss": loss_op.name}))
if summary_op is not None:
monitors.append(SummarySaver(summary_op, save_steps=save_summary_steps,
output_dir=output_dir,
summary_writer=summary_writer))
return monitors
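# --- Editor's usage sketch; not part of the original module. ---
# Bundles the standard loss-printing and summary-saving monitors in one
# call. `loss` and `summary_op` are assumed tensors from the caller's graph.
def _example_default_monitors(estimator, input_fn, loss, summary_op, logdir):
  monitors = get_default_monitors(loss_op=loss, summary_op=summary_op,
                                  save_summary_steps=100, output_dir=logdir)
  estimator.fit(input_fn=input_fn, steps=1000, monitors=monitors)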
class GraphDump(BaseMonitor):
IGNORE_OPS = ["Const", "Assign", "Identity", "Placeholder",
"RandomUniform", "Cast", "RestoreSlice"]
def __init__(self, ignore_ops=None):
super(GraphDump, self).__init__()
self._ignore_ops = ignore_ops or GraphDump.IGNORE_OPS
self._data = {}
def begin(self, max_steps=None):
super(GraphDump, self).begin(max_steps=max_steps)
self._tensors = []
graph = ops.get_default_graph()
graph_def = graph.as_graph_def()
for node in graph_def.node:
if node.op in self._ignore_ops:
continue
logging.info("op=%s name=%s.", node.op, node.name)
try:
self._tensors.append(graph.get_tensor_by_name(node.name + ":0"))
except KeyError:
pass
def step_begin(self, step):
super(GraphDump, self).step_begin(step)
return self._tensors
def step_end(self, step, output):
super(GraphDump, self).step_end(step, output)
self._data[step] = output
@property
def data(self):
return self._data
def compare(self, other_dump, step, atol=1e-06):
non_matched = {}
matched = []
this_output = self.data[step] if step in self.data else {}
other_output = other_dump.data[step] if step in other_dump.data else {}
for key in this_output:
if not isinstance(key, str) and not isinstance(key, unicode):
continue
if key not in other_output:
        raise ValueError("%s missing at step %s." % (key, step))
value1 = _extract_output(this_output, key)
value2 = _extract_output(other_output, key)
if isinstance(value1, str):
continue
if isinstance(value1, np.ndarray):
if not np.allclose(value1, value2, atol=atol):
non_matched[key] = value1 - value2
else:
matched.append(key)
else:
if value1 != value2:
non_matched[key] = (value1, value2)
else:
matched.append(key)
return matched, non_matched
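# --- Editor's usage sketch; not part of the original module. ---
# Diffing two training runs with GraphDump: each run captures its tensors,
# then compare() reports which ones diverged at a given step. The estimators
# and input_fn are hypothetical stand-ins.
def _example_graph_dump_diff(estimator_a, estimator_b, input_fn, step=5):
  dump_a, dump_b = GraphDump(), GraphDump()
  estimator_a.fit(input_fn=input_fn, steps=10, monitors=[dump_a])
  estimator_b.fit(input_fn=input_fn, steps=10, monitors=[dump_b])
  return dump_a.compare(dump_b, step, atol=1e-6)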
class ExportMonitor(EveryN):
@deprecated_arg_values(
"2016-09-23",
"The signature of the input_fn accepted by export is changing to be "
"consistent with what's used by tf.Learn Estimator's train/evaluate. "
"input_fn (and in most cases, input_feature_key) will both become "
"required args.",
input_fn=None)
def __init__(self,
every_n_steps,
export_dir,
input_fn=None,
input_feature_key=None,
exports_to_keep=5,
signature_fn=None,
default_batch_size=1):
super(ExportMonitor, self).__init__(every_n_steps=every_n_steps)
self._export_dir = export_dir
self._input_fn = input_fn
self._input_feature_key = input_feature_key
self._use_deprecated_input_fn = input_fn is None
self._exports_to_keep = exports_to_keep
self._signature_fn = signature_fn
self._default_batch_size = default_batch_size
self._last_export_dir = None
@property
def export_dir(self):
return self._export_dir
@property
def exports_to_keep(self):
return self._exports_to_keep
@property
def signature_fn(self):
return self._signature_fn
@property
def last_export_dir(self):
return self._last_export_dir
def every_n_step_end(self, step, outputs):
super(ExportMonitor, self).every_n_step_end(step, outputs)
try:
self._last_export_dir = self._estimator.export(
self.export_dir,
exports_to_keep=self.exports_to_keep,
signature_fn=self.signature_fn,
input_fn=self._input_fn,
default_batch_size=self._default_batch_size,
input_feature_key=self._input_feature_key,
use_deprecated_input_fn=self._use_deprecated_input_fn)
except RuntimeError:
      # Currently we are not synchronized with saving checkpoints, which
      # leads to runtime errors when we are calling export on the same
      # global step. Exports depend on saved checkpoints for constructing
      # the graph and getting the global step from the graph instance saved
      # in the checkpoint. If the checkpoint is stale with respect to the
      # current step, the global step is taken to be the last saved
      # checkpoint's global step, and the exporter skips exporting that
      # checkpoint again.
logging.info("Skipping exporting because the existing checkpoint has "
"already been exported. "
"Consider exporting less frequently.")
def end(self, session=None):
super(ExportMonitor, self).end(session=session)
latest_path = saver_lib.latest_checkpoint(self._estimator.model_dir)
if latest_path is None:
logging.info("Skipping export at the end since model has not been saved "
"yet.")
return
try:
self._last_export_dir = self._estimator.export(
self.export_dir,
exports_to_keep=self.exports_to_keep,
signature_fn=self.signature_fn,
input_fn=self._input_fn,
default_batch_size=self._default_batch_size,
input_feature_key=self._input_feature_key,
use_deprecated_input_fn=self._use_deprecated_input_fn)
except RuntimeError:
logging.info("Skipping exporting for the same step.")
class CheckpointSaver(BaseMonitor):
def __init__(self,
checkpoint_dir,
save_secs=None,
save_steps=None,
saver=None,
checkpoint_basename="model.ckpt",
scaffold=None):
logging.info("Create CheckpointSaver.")
super(CheckpointSaver, self).__init__()
self._saver = saver
self._summary_writer = SummaryWriterCache.get(checkpoint_dir)
self._save_path = os.path.join(checkpoint_dir, checkpoint_basename)
self._scaffold = scaffold
self._save_secs = save_secs
self._save_steps = save_steps
self._last_saved_time = None
self._last_begin_step = None
self._last_saved_step = None
if save_steps is None and save_secs is None:
raise ValueError("Either save_steps or save_secs should be provided")
if (save_steps is not None) and (save_secs is not None):
raise ValueError("Can not provide both save_steps and save_secs.")
def begin(self, max_steps=None):
super(CheckpointSaver, self).begin(max_steps)
self._last_saved_time = None
self._last_begin_step = None
self._last_saved_step = None
def step_begin(self, step):
super(CheckpointSaver, self).step_begin(step)
self._last_begin_step = step
def post_step(self, step, session):
super(CheckpointSaver, self).post_step(step, session)
if self._last_saved_time is None:
self._save(step, session)
if self._save_steps is not None:
if step >= self._last_saved_step + self._save_steps:
self._save(step, session)
if self._save_secs is not None:
if time.time() >= self._last_saved_time + self._save_secs:
self._save(step, session)
def end(self, session=None):
super(CheckpointSaver, self).end(session)
self._save(self._last_begin_step, session)
def _save(self, step, session):
if step == self._last_saved_step:
return
logging.info("Saving checkpoints for %d into %s.", step, self._save_path)
self._last_saved_time = time.time()
self._last_saved_step = step
if self._saver is None:
self._scaffold.saver.save(session, self._save_path, global_step=step)
else:
self._saver.save(session, self._save_path, global_step=step)
self._summary_writer.add_session_log(
SessionLog(
status=SessionLog.CHECKPOINT, checkpoint_path=self._save_path),
step)
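# --- Editor's usage sketch; not part of the original module. ---
# Checkpoints every 500 steps into `checkpoint_dir`; exactly one of
# save_steps/save_secs may be given (enforced in __init__ above). The
# estimator, input_fn and saver arguments are hypothetical stand-ins.
def _example_checkpoint_saver(estimator, input_fn, checkpoint_dir, saver):
  monitor = CheckpointSaver(checkpoint_dir, save_steps=500, saver=saver)
  estimator.fit(input_fn=input_fn, steps=2000, monitors=[monitor])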
class StepCounter(EveryN):
def __init__(self, every_n_steps=100, output_dir=None,
summary_writer=None):
super(StepCounter, self).__init__(every_n_steps=every_n_steps)
self._summary_tag = "global_step/sec"
self._last_reported_step = None
self._last_reported_time = None
self._summary_writer = summary_writer
if summary_writer is None and output_dir:
self._summary_writer = SummaryWriterCache.get(output_dir)
def set_estimator(self, estimator):
super(StepCounter, self).set_estimator(estimator)
if self._summary_writer is None:
self._summary_writer = SummaryWriterCache.get(estimator.model_dir)
def every_n_step_end(self, current_step, outputs):
current_time = time.time()
if self._last_reported_time is not None and self._summary_writer:
added_steps = current_step - self._last_reported_step
elapsed_time = current_time - self._last_reported_time
steps_per_sec = added_steps / elapsed_time
summary = Summary(value=[Summary.Value(tag=self._summary_tag,
simple_value=steps_per_sec)])
self._summary_writer.add_summary(summary, current_step)
self._last_reported_step = current_step
self._last_reported_time = current_time
class NanLossDuringTrainingError(RuntimeError):
def __str__(self):
return "NaN loss during training."
class NanLoss(EveryN):
def __init__(self, loss_tensor, every_n_steps=100, fail_on_nan_loss=True):
super(NanLoss, self).__init__(every_n_steps=every_n_steps)
self._loss_tensor = loss_tensor
self._fail_on_nan_loss = fail_on_nan_loss
def every_n_step_begin(self, step):
super(NanLoss, self).every_n_step_begin(step)
return [self._loss_tensor]
def every_n_step_end(self, step, outputs):
super(NanLoss, self).every_n_step_end(step, outputs)
if np.isnan(_extract_output(outputs, self._loss_tensor)):
failure_message = "Model diverged with loss = NaN."
if self._fail_on_nan_loss:
logging.error(failure_message)
raise NanLossDuringTrainingError
else:
logging.warning(failure_message)
        # We don't raise an error, but we return "should stop" so training
        # stops cleanly instead of raising an exception.
return True
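# --- Editor's usage sketch; not part of the original module. ---
# A NaN guard: with fail_on_nan_loss=True a NaN loss raises
# NanLossDuringTrainingError; with False it only logs and requests a clean
# stop. The estimator, input_fn and loss tensor are hypothetical stand-ins.
def _example_nan_guard(estimator, input_fn, loss_tensor):
  guard = NanLoss(loss_tensor, every_n_steps=100, fail_on_nan_loss=True)
  estimator.fit(input_fn=input_fn, steps=10000, monitors=[guard])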
class RunHookAdapterForMonitors(session_run_hook.SessionRunHook):
def __init__(self, monitors):
self._monitors = monitors
def begin(self):
self._last_step = None
self._global_step_tensor = contrib_variables.get_global_step()
for m in self._monitors:
m.begin(max_steps=None)
def before_run(self, run_context):
if self._last_step is None:
self._last_step = run_context.session.run(self._global_step_tensor) + 1
request = {self._global_step_tensor: self._global_step_tensor}
monitor_fetches = []
for m in self._monitors:
monitor_requests = m.step_begin(self._last_step)
if monitor_requests:
if not isinstance(monitor_requests, list):
raise ValueError("Monitor.step_begin should return a list.")
monitor_fetches.extend(monitor_requests)
if monitor_fetches:
request["monitors"] = dict(
zip(monitor_fetches, [_as_graph_element(f) for f in monitor_fetches]))
return session_run_hook.SessionRunArgs(request)
def after_run(self, run_context, run_values):
result = run_values.results[
"monitors"] if "monitors" in run_values.results else {}
for m in self._monitors:
induce_stop = m.step_end(self._last_step, result)
if induce_stop:
run_context.request_stop()
for m in self._monitors:
m.post_step(self._last_step, run_context.session)
self._last_step = run_values.results[self._global_step_tensor] + 1
def end(self, session):
self._last_step = None
for m in self._monitors:
if "session" in inspect.getargspec(m.end).args:
m.end(session=session)
else:
m.end()
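# --- Editor's usage sketch; not part of the original module. ---
# Wrapping legacy monitors in the adapter yields a SessionRunHook that can
# be passed wherever hooks are accepted. `loss_tensor` is a hypothetical
# stand-in.
def _example_monitor_hooks(loss_tensor):
  monitors = [StepCounter(every_n_steps=100), NanLoss(loss_tensor)]
  return [RunHookAdapterForMonitors(monitors)]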
def _as_graph_element(obj):
graph = ops.get_default_graph()
if not isinstance(obj, six.string_types):
if not hasattr(obj, "graph") or obj.graph != graph:
raise ValueError("Passed %s should have graph attribute that is equal "
"to current graph %s." % (obj, graph))
return obj
if ":" in obj:
element = graph.as_graph_element(obj)
else:
element = graph.as_graph_element(obj + ":0")
try:
graph.as_graph_element(obj + ":1")
except (KeyError, ValueError):
pass
else:
raise ValueError("Name %s is ambiguous, "
"as this `Operation` has multiple outputs "
"(at least 2)." % obj)
return element
| true
| true
|
790b7011c7cbd959fb9330d296ab2129185ff98d
| 2,670
|
py
|
Python
|
waves_gateway/model/polling_delay_config.py
|
NeolithEra/WavesGatewayFramework
|
e7ba892427e1d0444f2bfdc2922c45ff5f4c4add
|
[
"MIT"
] | 25
|
2018-03-04T07:49:21.000Z
|
2022-03-28T05:20:50.000Z
|
waves_gateway/model/polling_delay_config.py
|
NeolithEra/WavesGatewayFramework
|
e7ba892427e1d0444f2bfdc2922c45ff5f4c4add
|
[
"MIT"
] | 22
|
2018-03-25T13:19:45.000Z
|
2020-11-28T17:21:08.000Z
|
waves_gateway/model/polling_delay_config.py
|
NeolithEra/WavesGatewayFramework
|
e7ba892427e1d0444f2bfdc2922c45ff5f4c4add
|
[
"MIT"
] | 31
|
2018-03-25T09:45:13.000Z
|
2022-03-24T05:32:18.000Z
|
"""
PollingDelayConfig
"""
from typing import Any
class PollingDelayConfig(object):
"""
Summarized configuration for the polling_delay settings in the Gateway Application.
"""
DEFAULT_MIN_TRANSACTION_POLLING_DELAY_S = 0.0
DEFAULT_MAX_TRANSACTION_POLLING_DELAY_S = 60.0
DEFAULT_MIN_ATTEMPT_LIST_WORKER_DELAY_S = 0.1
DEFAULT_MAX_ATTEMPT_LIST_WORKER_DELAY_S = 60.0
def __init__(self,
coin_min_polling_delay_s: float = DEFAULT_MIN_TRANSACTION_POLLING_DELAY_S,
coin_max_polling_delay_s: float = DEFAULT_MAX_TRANSACTION_POLLING_DELAY_S,
waves_min_polling_delay_s: float = DEFAULT_MIN_TRANSACTION_POLLING_DELAY_S,
waves_max_polling_delay_s: float = DEFAULT_MAX_TRANSACTION_POLLING_DELAY_S,
attempt_list_worker_min_polling_delay_s: float = DEFAULT_MIN_ATTEMPT_LIST_WORKER_DELAY_S,
attempt_list_worker_max_polling_delay_s: float = DEFAULT_MAX_ATTEMPT_LIST_WORKER_DELAY_S) -> None:
self._coin_polling_delay_s_min = coin_min_polling_delay_s
self._coin_polling_delay_s_max = coin_max_polling_delay_s
self._waves_polling_delay_s_min = waves_min_polling_delay_s
self._waves_polling_delay_s_max = waves_max_polling_delay_s
self._attempt_list_worker_min_polling_delay_s = attempt_list_worker_min_polling_delay_s
self._attempt_list_worker_max_polling_delay_s = attempt_list_worker_max_polling_delay_s
@staticmethod
def from_single_polling_delay(polling_delay_s: float) -> Any:
return PollingDelayConfig(
coin_min_polling_delay_s=polling_delay_s,
coin_max_polling_delay_s=polling_delay_s,
waves_min_polling_delay_s=polling_delay_s,
waves_max_polling_delay_s=polling_delay_s,
attempt_list_worker_min_polling_delay_s=polling_delay_s,
attempt_list_worker_max_polling_delay_s=polling_delay_s)
@property
def waves_max_polling_delay_s(self) -> float:
return self._waves_polling_delay_s_max
@property
def waves_min_polling_delay_s(self) -> float:
return self._waves_polling_delay_s_min
@property
def coin_min_polling_delay_s(self) -> float:
return self._coin_polling_delay_s_min
@property
def coin_max_polling_delay_s(self) -> float:
return self._coin_polling_delay_s_max
@property
def attempt_list_worker_min_polling_delay_s(self) -> float:
return self._attempt_list_worker_min_polling_delay_s
@property
def attempt_list_worker_max_polling_delay_s(self) -> float:
return self._attempt_list_worker_max_polling_delay_s
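# --- Editor's usage sketch; not part of the original module. ---
# A uniform delay for every poller versus explicit per-poller bounds; any
# argument left out keeps the DEFAULT_* class constants.
_uniform = PollingDelayConfig.from_single_polling_delay(5.0)
assert _uniform.coin_min_polling_delay_s == 5.0
assert _uniform.attempt_list_worker_max_polling_delay_s == 5.0
_bounded = PollingDelayConfig(coin_min_polling_delay_s=1.0,
                              coin_max_polling_delay_s=30.0)
assert _bounded.coin_max_polling_delay_s == 30.0
assert _bounded.waves_max_polling_delay_s == 60.0  # default kept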
| 41.71875
| 115
| 0.762921
|
from typing import Any
class PollingDelayConfig(object):
DEFAULT_MIN_TRANSACTION_POLLING_DELAY_S = 0.0
DEFAULT_MAX_TRANSACTION_POLLING_DELAY_S = 60.0
DEFAULT_MIN_ATTEMPT_LIST_WORKER_DELAY_S = 0.1
DEFAULT_MAX_ATTEMPT_LIST_WORKER_DELAY_S = 60.0
def __init__(self,
coin_min_polling_delay_s: float = DEFAULT_MIN_TRANSACTION_POLLING_DELAY_S,
coin_max_polling_delay_s: float = DEFAULT_MAX_TRANSACTION_POLLING_DELAY_S,
waves_min_polling_delay_s: float = DEFAULT_MIN_TRANSACTION_POLLING_DELAY_S,
waves_max_polling_delay_s: float = DEFAULT_MAX_TRANSACTION_POLLING_DELAY_S,
attempt_list_worker_min_polling_delay_s: float = DEFAULT_MIN_ATTEMPT_LIST_WORKER_DELAY_S,
attempt_list_worker_max_polling_delay_s: float = DEFAULT_MAX_ATTEMPT_LIST_WORKER_DELAY_S) -> None:
self._coin_polling_delay_s_min = coin_min_polling_delay_s
self._coin_polling_delay_s_max = coin_max_polling_delay_s
self._waves_polling_delay_s_min = waves_min_polling_delay_s
self._waves_polling_delay_s_max = waves_max_polling_delay_s
self._attempt_list_worker_min_polling_delay_s = attempt_list_worker_min_polling_delay_s
self._attempt_list_worker_max_polling_delay_s = attempt_list_worker_max_polling_delay_s
@staticmethod
def from_single_polling_delay(polling_delay_s: float) -> Any:
return PollingDelayConfig(
coin_min_polling_delay_s=polling_delay_s,
coin_max_polling_delay_s=polling_delay_s,
waves_min_polling_delay_s=polling_delay_s,
waves_max_polling_delay_s=polling_delay_s,
attempt_list_worker_min_polling_delay_s=polling_delay_s,
attempt_list_worker_max_polling_delay_s=polling_delay_s)
@property
def waves_max_polling_delay_s(self) -> float:
return self._waves_polling_delay_s_max
@property
def waves_min_polling_delay_s(self) -> float:
return self._waves_polling_delay_s_min
@property
def coin_min_polling_delay_s(self) -> float:
return self._coin_polling_delay_s_min
@property
def coin_max_polling_delay_s(self) -> float:
return self._coin_polling_delay_s_max
@property
def attempt_list_worker_min_polling_delay_s(self) -> float:
return self._attempt_list_worker_min_polling_delay_s
@property
def attempt_list_worker_max_polling_delay_s(self) -> float:
return self._attempt_list_worker_max_polling_delay_s
| true
| true
|
790b701ad0a2e191a9ac54eb2d8a65fa9667669e
| 3,592
|
py
|
Python
|
leo/plugins/leowapp.py
|
leonidborisenko/leo-editor
|
db55bd00c94fb8501795284453891ad64ce12af9
|
[
"MIT"
] | 2
|
2020-01-19T18:11:05.000Z
|
2020-01-19T18:12:07.000Z
|
leo/plugins/leowapp.py
|
leonidborisenko/leo-editor
|
db55bd00c94fb8501795284453891ad64ce12af9
|
[
"MIT"
] | 1
|
2020-06-19T02:28:25.000Z
|
2020-06-19T02:28:25.000Z
|
leo/plugins/leowapp.py
|
leonidborisenko/leo-editor
|
db55bd00c94fb8501795284453891ad64ce12af9
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#@+leo-ver=5-thin
#@+node:ekr.20181028052650.1: * @file leowapp.py
#@@first
'''
This file is deprecated/obsolete. It may be removed soon.
leoflexx.py implements LeoWapp using flexx.
'''
#@+<< imports >>
#@+node:ekr.20181028052650.3: ** << imports >>
import leo.core.leoGlobals as g
import leo.core.leoFrame as leoFrame
import leo.core.leoGui as leoGui
import sys
try:
import websockets
assert websockets
except ImportError:
websockets = None
print('leowapp.py requires websockets')
print('>pip install websockets')
import xml.sax.saxutils as saxutils
#@-<< imports >>
#@+<< config >>
#@+node:ekr.20181029070405.1: ** << config >>
class Config:
# ip = g.app.config.getString("leowapp-ip") or '127.0.0.1'
# port = g.app.config.getInt("leowapp-port") or 8100
# timeout = g.app.config.getInt("leowapp-timeout") or 0
# if timeout > 0: timeout = timeout / 1000.0
ip = '127.0.0.1'
port = 5678
# port = 8100
timeout = 0
# Create a singleton instance.
# The initial values probably should not be changed.
config = Config()
#@-<< config >>
# browser_encoding = 'utf-8'
# To do: query browser: var x = document.characterSet;
#@+others
#@+node:ekr.20181030103048.2: ** escape
def escape(s):
'''
Do the standard xml escapes, and replace newlines and tabs.
'''
return saxutils.escape(s, {
'\n': '<br />',
'\t': ' ',
})
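# --- Editor's demo; not part of the original plugin. ---
# saxutils.escape converts &, < and > into entities first, then applies the
# extra replacement table above:
assert escape('a < b & c\nnext') == 'a &lt; b &amp; c<br />next'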
#@+node:ekr.20181028052650.5: ** init (leowapp.py)
def init():
'''Return True if the plugin has loaded successfully.'''
if not websockets:
return False
# ws_server hangs Leo!
# ws_server()
g.plugin_signon(__name__)
return True
#@+node:ekr.20181031162039.1: ** class BrowserGui (leoGui.LeoGui)
class BrowserGui(leoGui.NullGui):
#@+others
#@+node:ekr.20181031160042.1: *3* bg.__getattr__
def __getattr__ (self, attr):
        '''Handle a missing attribute.'''
if attr in (
'frameFactory',
'set_minibuffer_label',
):
# These are optional ivars.
raise AttributeError
return self.message(attr)
#@+node:ekr.20181031162620.1: *3* bg.__init__
def __init__(self):
g.trace('===== (BrowserGui)')
leoGui.NullGui.__init__(self, guiName='browser')
self.styleSheetManagerClass = g.NullObject
self.log = leoFrame.NullLog()
#@+node:ekr.20181101034427.1: *3* bg.createLeoFrame
def createLeoFrame(self, c, title):
return leoFrame.NullFrame(c, title='NullFrame', gui=self)
#@+node:ekr.20181101025053.1: *3* bg.message
def message (self, func):
'''
Send a message to the framework.
'''
g.trace('=====', func, g.callers())
#@+node:ekr.20181031162454.1: *3* bg.runMainLoop
def runMainLoop(self, fileName=None):
'''The main loop for the browser gui.'''
# pylint: disable=arguments-differ
if fileName:
print('LeoWapp running: %s...' % g.shortFileName(fileName))
else:
print('LeoWapp running...')
if 0: # Run all unit tests.
path = g.os_path_finalize_join(
g.app.loadDir, '..', 'test', 'unittest.leo')
c = g.openWithFileName(path, gui=self)
c.findCommands.ftm = g.NullObject()
# A hack. Maybe the NullGui should do this.
c.debugCommands.runAllUnitTestsLocally()
print('calling sys.exit(0)')
sys.exit(0)
#@-others
#@-others
#@@language python
#@@tabwidth -4
#@-leo
| 30.700855
| 71
| 0.609131
|
import leo.core.leoGlobals as g
import leo.core.leoFrame as leoFrame
import leo.core.leoGui as leoGui
import sys
try:
import websockets
assert websockets
except ImportError:
websockets = None
print('leowapp.py requires websockets')
print('>pip install websockets')
import xml.sax.saxutils as saxutils
class Config:
ip = '127.0.0.1'
port = 5678
timeout = 0
config = Config()
def escape(s):
return saxutils.escape(s, {
'\n': '<br />',
'\t': ' ',
})
def init():
if not websockets:
return False
g.plugin_signon(__name__)
return True
class BrowserGui(leoGui.NullGui):
def __getattr__ (self, attr):
if attr in (
'frameFactory',
'set_minibuffer_label',
):
raise AttributeError
return self.message(attr)
def __init__(self):
g.trace('===== (BrowserGui)')
leoGui.NullGui.__init__(self, guiName='browser')
self.styleSheetManagerClass = g.NullObject
self.log = leoFrame.NullLog()
def createLeoFrame(self, c, title):
return leoFrame.NullFrame(c, title='NullFrame', gui=self)
def message (self, func):
g.trace('=====', func, g.callers())
def runMainLoop(self, fileName=None):
if fileName:
print('LeoWapp running: %s...' % g.shortFileName(fileName))
else:
print('LeoWapp running...')
if 0:
path = g.os_path_finalize_join(
g.app.loadDir, '..', 'test', 'unittest.leo')
c = g.openWithFileName(path, gui=self)
c.findCommands.ftm = g.NullObject()
c.debugCommands.runAllUnitTestsLocally()
print('calling sys.exit(0)')
sys.exit(0)
| true
| true
|
790b7245570a2e902f9ea79ecfed6a8a92ec5c54
| 2,627
|
py
|
Python
|
python/html5lib/utils.py
|
gsnedders/html5lib
|
a426e4a96f0660b83f3f0bbe6c8160c6f625f199
|
[
"MIT"
] | 10
|
2015-02-27T14:06:15.000Z
|
2022-01-06T14:17:28.000Z
|
html5lib/utils.py
|
rcarmo/soup-strainer
|
78304a23bf3a40590e3c322861367594ea28ee32
|
[
"MIT"
] | null | null | null |
html5lib/utils.py
|
rcarmo/soup-strainer
|
78304a23bf3a40590e3c322861367594ea28ee32
|
[
"MIT"
] | 3
|
2015-05-23T04:49:48.000Z
|
2021-02-02T21:12:20.000Z
|
from __future__ import absolute_import
from types import ModuleType
class MethodDispatcher(dict):
u"""Dict with 2 special properties:
On initiation, keys that are lists, sets or tuples are converted to
multiple keys so accessing any one of the items in the original
list-like object returns the matching value
md = MethodDispatcher({("foo", "bar"):"baz"})
md["foo"] == "baz"
A default value which can be set through the default attribute.
"""
def __init__(self, items=()):
# Using _dictEntries instead of directly assigning to self is about
# twice as fast. Please do careful performance testing before changing
# anything here.
_dictEntries = []
for name,value in items:
if type(name) in (list, tuple, frozenset, set):
for item in name:
_dictEntries.append((item, value))
else:
_dictEntries.append((name, value))
dict.__init__(self, _dictEntries)
self.default = None
__init__.func_annotations = {}
def __getitem__(self, key):
return dict.get(self, key, self.default)
__getitem__.func_annotations = {}
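# --- Editor's usage sketch; not part of the original module. ---
# Tuple keys fan out to one entry per item, and unknown keys fall back to
# the mutable `default` attribute instead of raising KeyError.
_md = MethodDispatcher([((u"foo", u"bar"), u"baz"), (u"qux", u"quux")])
assert _md[u"foo"] == _md[u"bar"] == u"baz"
assert _md[u"missing"] is None
_md.default = u"fallback"
assert _md[u"missing"] == u"fallback"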
#Some utility functions to deal with weirdness around UCS2 vs UCS4
#python builds
def encodingType():
    # On a narrow (UCS2) build a single astral character is stored as a
    # surrogate pair, so this literal has length 2.
    if len(u"\U0010FFFF") == 2:
return u"UCS2"
else:
return u"UCS4"
encodingType.func_annotations = {}
def isSurrogatePair(data):
return (len(data) == 2 and
ord(data[0]) >= 0xD800 and ord(data[0]) <= 0xDBFF and
ord(data[1]) >= 0xDC00 and ord(data[1]) <= 0xDFFF)
isSurrogatePair.func_annotations = {}
def surrogatePairToCodepoint(data):
char_val = (0x10000 + (ord(data[0]) - 0xD800) * 0x400 +
(ord(data[1]) - 0xDC00))
return char_val
surrogatePairToCodepoint.func_annotations = {}
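# --- Editor's demo; not part of the original module. ---
# U+1F600 as the surrogate pair D83D/DE00:
# 0x10000 + (0xD83D - 0xD800) * 0x400 + (0xDE00 - 0xDC00) == 0x1F600.
_pair = u"\ud83d\ude00"
assert isSurrogatePair(_pair)
assert surrogatePairToCodepoint(_pair) == 0x1F600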
# Module Factory Factory (no, this isn't Java, I know)
# Here to stop this being duplicated all over the place.
def moduleFactoryFactory(factory):
moduleCache = {}
def moduleFactory(baseModule, *args, **kwargs):
if type(ModuleType.__name__) is unicode:
name = u"_%s_factory" % baseModule.__name__
else:
name = "_%s_factory" % baseModule.__name__
if name in moduleCache:
return moduleCache[name]
else:
mod = ModuleType(name)
objs = factory(baseModule, *args, **kwargs)
mod.__dict__.update(objs)
moduleCache[name] = mod
return mod
moduleFactory.func_annotations = {}
return moduleFactory
moduleFactoryFactory.func_annotations = {}
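# --- Editor's usage sketch; not part of the original module, and it assumes
# this file's Python 2 environment (the `unicode` check above). ---
# The wrapped factory builds a derived module once per base-module name and
# serves later calls from moduleCache.
def _demoFactory(baseModule):
    return {"baseName": baseModule.__name__}
_getDemoModule = moduleFactoryFactory(_demoFactory)
_demoBase = ModuleType("demoBase")
assert _getDemoModule(_demoBase) is _getDemoModule(_demoBase)  # cached
assert _getDemoModule(_demoBase).baseName == "demoBase"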
| 31.650602
| 78
| 0.633422
|
from __future__ import absolute_import
from types import ModuleType
class MethodDispatcher(dict):
def __init__(self, items=()):
_dictEntries = []
for name,value in items:
if type(name) in (list, tuple, frozenset, set):
for item in name:
_dictEntries.append((item, value))
else:
_dictEntries.append((name, value))
dict.__init__(self, _dictEntries)
self.default = None
__init__.func_annotations = {}
def __getitem__(self, key):
return dict.get(self, key, self.default)
__getitem__.func_annotations = {}
def encodingType():
    if len(u"\U0010FFFF") == 2:
return u"UCS2"
else:
return u"UCS4"
encodingType.func_annotations = {}
def isSurrogatePair(data):
return (len(data) == 2 and
ord(data[0]) >= 0xD800 and ord(data[0]) <= 0xDBFF and
ord(data[1]) >= 0xDC00 and ord(data[1]) <= 0xDFFF)
isSurrogatePair.func_annotations = {}
def surrogatePairToCodepoint(data):
char_val = (0x10000 + (ord(data[0]) - 0xD800) * 0x400 +
(ord(data[1]) - 0xDC00))
return char_val
surrogatePairToCodepoint.func_annotations = {}
# Here to stop this being duplicated all over the place.
def moduleFactoryFactory(factory):
moduleCache = {}
def moduleFactory(baseModule, *args, **kwargs):
if type(ModuleType.__name__) is unicode:
name = u"_%s_factory" % baseModule.__name__
else:
name = "_%s_factory" % baseModule.__name__
if name in moduleCache:
return moduleCache[name]
else:
mod = ModuleType(name)
objs = factory(baseModule, *args, **kwargs)
mod.__dict__.update(objs)
moduleCache[name] = mod
return mod
moduleFactory.func_annotations = {}
return moduleFactory
moduleFactoryFactory.func_annotations = {}
| true
| true
|
790b72b5977bc41bc1fa4f394888d33023e6e512
| 1,309
|
py
|
Python
|
array/bot/others/P_ex07.py
|
timkphd/examples
|
04c162ec890a1c9ba83498b275fbdc81a4704062
|
[
"Unlicense"
] | 5
|
2020-11-01T00:29:22.000Z
|
2022-01-24T19:09:47.000Z
|
array/bot/others/P_ex07.py
|
timkphd/examples
|
04c162ec890a1c9ba83498b275fbdc81a4704062
|
[
"Unlicense"
] | 1
|
2022-02-09T01:59:47.000Z
|
2022-02-09T01:59:47.000Z
|
array/bot/others/P_ex07.py
|
timkphd/examples
|
04c162ec890a1c9ba83498b275fbdc81a4704062
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python
#
# This program shows how to use MPI_Alltoall. Each processor
# sends/receives a different random number to/from every other processor.
#
# numpy is required
import numpy
from numpy import *
# mpi4py module
from mpi4py import MPI
import sys
def myquit(mes):
MPI.Finalize()
print(mes)
sys.exit()
# Initialize MPI and print out hello
comm=MPI.COMM_WORLD
myid=comm.Get_rank()
numprocs=comm.Get_size()
print("hello from ",myid," of ",numprocs)
# We are going to send/recv a single value to/from
# each processor. Here we allocate arrays
s_vals=zeros(numprocs,"i")
r_vals=zeros(numprocs,"i")
# Fill the send arrays with random numbers
random.seed(myid)
for i in range(0, numprocs):
s_vals[i]=random.randint(1,10)
print("myid=",myid,"s_vals=",s_vals)
# Send/recv to/from all
comm.Alltoall(s_vals, r_vals)
print("myid=",myid,"r_vals=",r_vals)
MPI.Finalize()
# Note, the sent values and the recv values are
# like a transpose of each other
#
# mpiexec -n 4 ./P_ex07.py | grep s_v | sort
# myid= 0 s_vals= [6 1 4 4]
# myid= 1 s_vals= [6 9 6 1]
# myid= 2 s_vals= [9 9 7 3]
# myid= 3 s_vals= [9 4 9 9]
# mpiexec -n 4 ./P_ex07.py | grep r_v | sort
# myid= 0 r_vals= [6 6 9 9]
# myid= 1 r_vals= [1 9 9 4]
# myid= 2 r_vals= [4 6 7 9]
# myid= 3 r_vals= [4 1 3 9]
| 20.453125
| 63
| 0.675325
|
import numpy
from numpy import *
from mpi4py import MPI
import sys
def myquit(mes):
MPI.Finalize()
print(mes)
sys.exit()
comm=MPI.COMM_WORLD
myid=comm.Get_rank()
numprocs=comm.Get_size()
print("hello from ",myid," of ",numprocs)
s_vals=zeros(numprocs,"i")
r_vals=zeros(numprocs,"i")
random.seed(myid)
for i in range(0, numprocs):
s_vals[i]=random.randint(1,10)
print("myid=",myid,"s_vals=",s_vals)
comm.Alltoall(s_vals, r_vals)
print("myid=",myid,"r_vals=",r_vals)
MPI.Finalize()
| true
| true
|
790b72d7f3ae9ae0651615ee50721bcffe98f9a0
| 22,370
|
py
|
Python
|
evennia/scripts/tickerhandler.py
|
pakhnu/my-world
|
405983dca81e70fc64d58d6a60126ffa5e8ada8c
|
[
"BSD-3-Clause"
] | null | null | null |
evennia/scripts/tickerhandler.py
|
pakhnu/my-world
|
405983dca81e70fc64d58d6a60126ffa5e8ada8c
|
[
"BSD-3-Clause"
] | null | null | null |
evennia/scripts/tickerhandler.py
|
pakhnu/my-world
|
405983dca81e70fc64d58d6a60126ffa5e8ada8c
|
[
"BSD-3-Clause"
] | null | null | null |
"""
TickerHandler
This implements an efficient Ticker which uses a subscription
model to 'tick' subscribed objects at regular intervals.
The ticker mechanism is used by importing and accessing
the instantiated TICKER_HANDLER instance in this module. This
instance is run by the server; it will save its status across
server reloads and be started automatically on boot.
Example:
```python
from evennia.scripts.tickerhandler import TICKER_HANDLER
# call tick myobj.at_tick(*args, **kwargs) every 15 seconds
TICKER_HANDLER.add(15, myobj.at_tick, *args, **kwargs)
```
You supply the interval to tick and a callable to call regularly
with any extra args/kwargs. The handler will transparently set
up and add new timers behind the scenes to tick at given intervals,
using a TickerPool - all callables with the same interval will share
the interval ticker.
To remove:
```python
TICKER_HANDLER.remove(15, myobj.at_tick)
```
Both interval and callable must be given since a single object can be subscribed
to many different tickers at the same time. You can also supply `idstring`
as an identifying string if you ever want to tick the callable at the same interval
but with different arguments (args/kwargs are not used for identifying the ticker). There
is also `persistent=False` for a ticker that should not survive a reload.
If either or both `idstring` or `persistent` has been changed from their defaults, they
must be supplied to the `TICKER_HANDLER.remove` call to properly identify the ticker
to remove.
The TickerHandler's functionality can be overloaded by modifying the
Ticker class and then changing TickerPool and TickerHandler to use the
custom classes
```python
class MyTicker(Ticker):
# [doing custom stuff]
class MyTickerPool(TickerPool):
ticker_class = MyTicker
class MyTickerHandler(TickerHandler):
ticker_pool_class = MyTickerPool
```
If one wants to duplicate TICKER_HANDLER's auto-saving feature in
a custom handler one can make a custom `AT_STARTSTOP_MODULE` entry to
call the handler's `save()` and `restore()` methods when the server reboots.
"""
import inspect
from builtins import object
from twisted.internet.defer import inlineCallbacks
from django.core.exceptions import ObjectDoesNotExist
from evennia.scripts.scripts import ExtendedLoopingCall
from evennia.server.models import ServerConfig
from evennia.utils.logger import log_trace, log_err
from evennia.utils.dbserialize import dbserialize, dbunserialize, pack_dbobj, unpack_dbobj
from evennia.utils import variable_from_module
_GA = object.__getattribute__
_SA = object.__setattr__
_ERROR_ADD_TICKER = \
"""TickerHandler: Tried to add an invalid ticker:
{store_key}
Ticker was not added."""
class Ticker(object):
"""
    Represents a repeating task that calls subscribed hooks at a
    fixed interval. Overload `_callback` to change the
way it operates.
"""
@inlineCallbacks
def _callback(self):
"""
This will be called repeatedly every `self.interval` seconds.
        `self.subscriptions` maps each store_key to the (args, kwargs)
        of a subscribing callable.
If overloading, this callback is expected to handle all
subscriptions when it is triggered. It should not return
anything and should not traceback on poorly designed hooks.
The callback should ideally work under @inlineCallbacks so it
can yield appropriately.
        The `_callback` entry, which is passed down through the handler via
        kwargs, is used here to identify which hook method to call.
"""
self._to_add = []
self._to_remove = []
self._is_ticking = True
for store_key, (args, kwargs) in self.subscriptions.iteritems():
callback = yield kwargs.pop("_callback", "at_tick")
obj = yield kwargs.pop("_obj", None)
try:
if callable(callback):
# call directly
yield callback(*args, **kwargs)
continue
# try object method
if not obj or not obj.pk:
# object was deleted between calls
self._to_remove.append(store_key)
continue
else:
yield _GA(obj, callback)(*args, **kwargs)
except ObjectDoesNotExist:
log_trace("Removing ticker.")
self._to_remove.append(store_key)
except Exception:
log_trace()
finally:
# make sure to re-store
kwargs["_callback"] = callback
kwargs["_obj"] = obj
# cleanup - we do this here to avoid changing the subscription dict while it loops
self._is_ticking = False
for store_key in self._to_remove:
self.remove(store_key)
for store_key, (args, kwargs) in self._to_add:
self.add(store_key, *args, **kwargs)
self._to_remove = []
self._to_add = []
def __init__(self, interval):
"""
Set up the ticker
Args:
interval (int): The stepping interval.
"""
self.interval = interval
self.subscriptions = {}
self._is_ticking = False
self._to_remove = []
self._to_add = []
# set up a twisted asynchronous repeat call
self.task = ExtendedLoopingCall(self._callback)
def validate(self, start_delay=None):
"""
Start/stop the task depending on how many subscribers we have
using it.
Args:
            start_delay (int): Time to wait before starting.
"""
subs = self.subscriptions
if self.task.running:
if not subs:
self.task.stop()
elif subs:
self.task.start(self.interval, now=False, start_delay=start_delay)
def add(self, store_key, *args, **kwargs):
"""
Sign up a subscriber to this ticker.
Args:
store_key (str): Unique storage hash for this ticker subscription.
args (any, optional): Arguments to call the hook method with.
Kwargs:
_start_delay (int): If set, this will be
used to delay the start of the trigger instead of
`interval`.
"""
if self._is_ticking:
# protects the subscription dict from
# updating while it is looping
            self._to_add.append((store_key, (args, kwargs)))
else:
start_delay = kwargs.pop("_start_delay", None)
self.subscriptions[store_key] = (args, kwargs)
self.validate(start_delay=start_delay)
def remove(self, store_key):
"""
Unsubscribe object from this ticker
Args:
store_key (str): Unique store key.
"""
if self._is_ticking:
# this protects the subscription dict from
# updating while it is looping
self._to_remove.append(store_key)
else:
self.subscriptions.pop(store_key, False)
self.validate()
def stop(self):
"""
Kill the Task, regardless of subscriptions.
"""
self.subscriptions = {}
self.validate()
class TickerPool(object):
"""
This maintains a pool of
`evennia.scripts.scripts.ExtendedLoopingCall` tasks for calling
subscribed objects at given times.
"""
ticker_class = Ticker
def __init__(self):
"""
Initialize the pool.
"""
self.tickers = {}
def add(self, store_key, *args, **kwargs):
"""
Add new ticker subscriber.
Args:
store_key (str): Unique storage hash.
args (any, optional): Arguments to send to the hook method.
"""
_, _, _, interval, _, _ = store_key
if not interval:
log_err(_ERROR_ADD_TICKER.format(store_key=store_key))
return
if interval not in self.tickers:
self.tickers[interval] = self.ticker_class(interval)
self.tickers[interval].add(store_key, *args, **kwargs)
def remove(self, store_key):
"""
Remove subscription from pool.
Args:
store_key (str): Unique storage hash to remove
"""
_, _, _, interval, _, _ = store_key
if interval in self.tickers:
self.tickers[interval].remove(store_key)
            if not self.tickers[interval].subscriptions:
del self.tickers[interval]
def stop(self, interval=None):
"""
Stop all scripts in pool. This is done at server reload since
restoring the pool will automatically re-populate the pool.
Args:
interval (int, optional): Only stop tickers with this
interval.
"""
if interval and interval in self.tickers:
self.tickers[interval].stop()
else:
for ticker in self.tickers.values():
ticker.stop()
class TickerHandler(object):
"""
The Tickerhandler maintains a pool of tasks for subscribing
objects to various tick rates. The pool maintains creation
    instructions and re-applies them at a server restart.
"""
ticker_pool_class = TickerPool
def __init__(self, save_name="ticker_storage"):
"""
Initialize handler
save_name (str, optional): The name of the ServerConfig
instance to store the handler state persistently.
"""
self.ticker_storage = {}
self.save_name = save_name
self.ticker_pool = self.ticker_pool_class()
def _get_callback(self, callback):
"""
        Analyze callback and determine its constituents.
Args:
callback (function or method): This is either a stand-alone
            function or a class method on a typeclassed entity (that is,
an entity that can be saved to the database).
Returns:
ret (tuple): This is a tuple of the form `(obj, path, callfunc)`,
where `obj` is the database object the callback is defined on
if it's a method (otherwise `None`) and vice-versa, `path` is
the python-path to the stand-alone function (`None` if a method).
The `callfunc` is either the name of the method to call or the
callable function object itself.
"""
outobj, outpath, outcallfunc = None, None, None
if callable(callback):
if inspect.ismethod(callback):
outobj = callback.im_self
outcallfunc = callback.im_func.func_name
elif inspect.isfunction(callback):
outpath = "%s.%s" % (callback.__module__, callback.func_name)
outcallfunc = callback
else:
raise TypeError("%s is not a callable function or method." % callback)
return outobj, outpath, outcallfunc
def _store_key(self, obj, path, interval, callfunc, idstring="", persistent=True):
"""
Tries to create a store_key for the object.
Args:
obj (Object, tuple or None): Subscribing object if any. If a tuple, this is
a packed_obj tuple from dbserialize.
path (str or None): Python-path to callable, if any.
interval (int): Ticker interval.
callfunc (callable or str): This is either the callable function or
the name of the method to call. Note that the callable is never
stored in the key; that is uniquely identified with the python-path.
idstring (str, optional): Additional separator between
different subscription types.
persistent (bool, optional): If this ticker should survive a system
shutdown or not.
Returns:
store_key (tuple): A tuple `(packed_obj, methodname, outpath, interval,
idstring, persistent)` that uniquely identifies the
ticker. Here, `packed_obj` is the unique string representation of the
object or `None`. The `methodname` is the string name of the method on
`packed_obj` to call, or `None` if `packed_obj` is unset. `path` is
the Python-path to a non-method callable, or `None`. Finally, `interval`
`idstring` and `persistent` are integers, strings and bools respectively.
"""
interval = int(interval)
persistent = bool(persistent)
packed_obj = pack_dbobj(obj)
methodname = callfunc if callfunc and isinstance(callfunc, basestring) else None
outpath = path if path and isinstance(path, basestring) else None
return (packed_obj, methodname, outpath, interval, idstring, persistent)
def save(self):
"""
Save ticker_storage as a serialized string into a temporary
        ServerConf field. Saving is done on the fly; when called by the
        server as it shuts down, the current timer of each ticker is also
        saved so it can resume from that point.
"""
if self.ticker_storage:
# get the current times so the tickers can be restarted with a delay later
start_delays = dict((interval, ticker.task.next_call_time())
for interval, ticker in self.ticker_pool.tickers.items())
# remove any subscriptions that lost its object in the interim
to_save = {store_key: (args, kwargs) for store_key, (args, kwargs) in self.ticker_storage.items()
if ((store_key[1] and ("_obj" in kwargs and kwargs["_obj"].pk) and
hasattr(kwargs["_obj"], store_key[1])) or # a valid method with existing obj
store_key[2])} # a path given
# update the timers for the tickers
for store_key, (args, kwargs) in to_save.items():
                interval = store_key[3]  # interval sits at index 3 of store_key
# this is a mutable, so it's updated in-place in ticker_storage
kwargs["_start_delay"] = start_delays.get(interval, None)
ServerConfig.objects.conf(key=self.save_name, value=dbserialize(to_save))
else:
# make sure we have nothing lingering in the database
ServerConfig.objects.conf(key=self.save_name, delete=True)
def restore(self, server_reload=True):
"""
Restore ticker_storage from database and re-initialize the
handler from storage. This is triggered by the server at
restart.
Args:
server_reload (bool, optional): If this is False, it means
the server went through a cold reboot and all
non-persistent tickers must be killed.
"""
# load stored command instructions and use them to re-initialize handler
restored_tickers = ServerConfig.objects.conf(key=self.save_name)
if restored_tickers:
# the dbunserialize will convert all serialized dbobjs to real objects
restored_tickers = dbunserialize(restored_tickers)
self.ticker_storage = {}
for store_key, (args, kwargs) in restored_tickers.iteritems():
try:
# at this point obj is the actual object (or None) due to how
# the dbunserialize works
obj, callfunc, path, interval, idstring, persistent = store_key
if not persistent and not server_reload:
# this ticker will not be restarted
continue
if isinstance(callfunc, basestring) and not obj:
# methods must have an existing object
continue
# we must rebuild the store_key here since obj must not be
# stored as the object itself for the store_key to be hashable.
store_key = self._store_key(obj, path, interval, callfunc, idstring, persistent)
if obj and callfunc:
kwargs["_callback"] = callfunc
kwargs["_obj"] = obj
elif path:
modname, varname = path.rsplit(".", 1)
callback = variable_from_module(modname, varname)
kwargs["_callback"] = callback
kwargs["_obj"] = None
else:
# Neither object nor path - discard this ticker
log_err("Tickerhandler: Removing malformed ticker: %s" % str(store_key))
continue
except Exception:
# this suggests a malformed save or missing objects
log_trace("Tickerhandler: Removing malformed ticker: %s" % str(store_key))
continue
# if we get here we should create a new ticker
self.ticker_storage[store_key] = (args, kwargs)
self.ticker_pool.add(store_key, *args, **kwargs)
def add(self, interval=60, callback=None, idstring="", persistent=True, *args, **kwargs):
"""
Add subscription to tickerhandler
Args:
interval (int, optional): Interval in seconds between calling
`callable(*args, **kwargs)`
callable (callable function or method, optional): This
should either be a stand-alone function or a method on a
typeclassed entity (that is, one that can be saved to the
database).
idstring (str, optional): Identifier for separating
this ticker-subscription from others with the same
interval. Allows for managing multiple calls with
the same time interval and callback.
persistent (bool, optional): A ticker will always survive
a server reload. If this is unset, the ticker will be
deleted by a server shutdown.
args, kwargs (optional): These will be passed into the
callback every time it is called.
Notes:
The callback will be identified by type and stored either as
            a combination of serialized database object + methodname or
            as a python-path to the module + funcname. These strings will
            be combined with `interval` and `idstring` to define a
unique storage key for saving. These must thus all be supplied
when wanting to modify/remove the ticker later.
"""
if isinstance(callback, int):
raise RuntimeError("TICKER_HANDLER.add has changed: "
"the interval is now the first argument, callback the second.")
obj, path, callfunc = self._get_callback(callback)
store_key = self._store_key(obj, path, interval, callfunc, idstring, persistent)
kwargs["_obj"] = obj
kwargs["_callback"] = callfunc # either method-name or callable
self.ticker_storage[store_key] = (args, kwargs)
self.ticker_pool.add(store_key, *args, **kwargs)
self.save()
def remove(self, interval=60, callback=None, idstring="", persistent=True):
"""
Remove object from ticker or only remove it from tickers with
a given interval.
Args:
interval (int, optional): Interval of ticker to remove.
callback (callable function or method): Either a function or
the method of a typeclassed object.
idstring (str, optional): Identifier id of ticker to remove.
"""
if isinstance(callback, int):
raise RuntimeError("TICKER_HANDLER.remove has changed: "
"the interval is now the first argument, callback the second.")
obj, path, callfunc = self._get_callback(callback)
store_key = self._store_key(obj, path, interval, callfunc, idstring, persistent)
to_remove = self.ticker_storage.pop(store_key, None)
if to_remove:
self.ticker_pool.remove(store_key)
self.save()
def clear(self, interval=None):
"""
Stop/remove tickers from handler.
Args:
interval (int): Only stop tickers with this interval.
Notes:
This is the only supported way to kill tickers related to
non-db objects.
"""
self.ticker_pool.stop(interval)
if interval:
            self.ticker_storage = dict((store_key, value)
                                       for store_key, value in self.ticker_storage.items()
                                       if store_key[3] != interval)
else:
self.ticker_storage = {}
self.save()
def all(self, interval=None):
"""
Get all subscriptions.
Args:
interval (int): Limit match to tickers with this interval.
Returns:
tickers (list): If `interval` was given, this is a list of
tickers using that interval.
tickerpool_layout (dict): If `interval` was *not* given,
this is a dict {interval1: [ticker1, ticker2, ...], ...}
"""
if interval is None:
# return dict of all, ordered by interval
return dict((interval, ticker.subscriptions)
for interval, ticker in self.ticker_pool.tickers.iteritems())
else:
# get individual interval
ticker = self.ticker_pool.tickers.get(interval, None)
if ticker:
return {interval: ticker.subscriptions}
def all_display(self):
"""
Get all tickers on an easily displayable form.
Returns:
            store_keys (list): A list of all store_keys.
"""
store_keys = []
for ticker in self.ticker_pool.tickers.itervalues():
for (objtup, callfunc, path, interval, idstring, persistent), (args, kwargs) in ticker.subscriptions.iteritems():
store_keys.append((kwargs.get("_obj", None), callfunc, path, interval, idstring, persistent))
return store_keys
# main tickerhandler
TICKER_HANDLER = TickerHandler()
| 38.568966
| 125
| 0.60903
|
import inspect
from builtins import object
from twisted.internet.defer import inlineCallbacks
from django.core.exceptions import ObjectDoesNotExist
from evennia.scripts.scripts import ExtendedLoopingCall
from evennia.server.models import ServerConfig
from evennia.utils.logger import log_trace, log_err
from evennia.utils.dbserialize import dbserialize, dbunserialize, pack_dbobj, unpack_dbobj
from evennia.utils import variable_from_module
_GA = object.__getattribute__
_SA = object.__setattr__
_ERROR_ADD_TICKER = \
"""TickerHandler: Tried to add an invalid ticker:
{store_key}
Ticker was not added."""
class Ticker(object):
@inlineCallbacks
def _callback(self):
self._to_add = []
self._to_remove = []
self._is_ticking = True
for store_key, (args, kwargs) in self.subscriptions.iteritems():
callback = yield kwargs.pop("_callback", "at_tick")
obj = yield kwargs.pop("_obj", None)
try:
if callable(callback):
yield callback(*args, **kwargs)
continue
if not obj or not obj.pk:
self._to_remove.append(store_key)
continue
else:
yield _GA(obj, callback)(*args, **kwargs)
except ObjectDoesNotExist:
log_trace("Removing ticker.")
self._to_remove.append(store_key)
except Exception:
log_trace()
finally:
kwargs["_callback"] = callback
kwargs["_obj"] = obj
self._is_ticking = False
for store_key in self._to_remove:
self.remove(store_key)
for store_key, (args, kwargs) in self._to_add:
self.add(store_key, *args, **kwargs)
self._to_remove = []
self._to_add = []
def __init__(self, interval):
self.interval = interval
self.subscriptions = {}
self._is_ticking = False
self._to_remove = []
self._to_add = []
self.task = ExtendedLoopingCall(self._callback)
def validate(self, start_delay=None):
subs = self.subscriptions
if self.task.running:
if not subs:
self.task.stop()
elif subs:
self.task.start(self.interval, now=False, start_delay=start_delay)
def add(self, store_key, *args, **kwargs):
if self._is_ticking:
            self._to_add.append((store_key, (args, kwargs)))
else:
start_delay = kwargs.pop("_start_delay", None)
self.subscriptions[store_key] = (args, kwargs)
self.validate(start_delay=start_delay)
def remove(self, store_key):
if self._is_ticking:
self._to_remove.append(store_key)
else:
self.subscriptions.pop(store_key, False)
self.validate()
def stop(self):
self.subscriptions = {}
self.validate()
class TickerPool(object):
ticker_class = Ticker
def __init__(self):
self.tickers = {}
def add(self, store_key, *args, **kwargs):
_, _, _, interval, _, _ = store_key
if not interval:
log_err(_ERROR_ADD_TICKER.format(store_key=store_key))
return
if interval not in self.tickers:
self.tickers[interval] = self.ticker_class(interval)
self.tickers[interval].add(store_key, *args, **kwargs)
def remove(self, store_key):
_, _, _, interval, _, _ = store_key
if interval in self.tickers:
self.tickers[interval].remove(store_key)
            if not self.tickers[interval].subscriptions:
del self.tickers[interval]
def stop(self, interval=None):
if interval and interval in self.tickers:
self.tickers[interval].stop()
else:
for ticker in self.tickers.values():
ticker.stop()
class TickerHandler(object):
ticker_pool_class = TickerPool
def __init__(self, save_name="ticker_storage"):
self.ticker_storage = {}
self.save_name = save_name
self.ticker_pool = self.ticker_pool_class()
def _get_callback(self, callback):
outobj, outpath, outcallfunc = None, None, None
if callable(callback):
if inspect.ismethod(callback):
outobj = callback.im_self
outcallfunc = callback.im_func.func_name
elif inspect.isfunction(callback):
outpath = "%s.%s" % (callback.__module__, callback.func_name)
outcallfunc = callback
else:
raise TypeError("%s is not a callable function or method." % callback)
return outobj, outpath, outcallfunc
def _store_key(self, obj, path, interval, callfunc, idstring="", persistent=True):
interval = int(interval)
persistent = bool(persistent)
packed_obj = pack_dbobj(obj)
methodname = callfunc if callfunc and isinstance(callfunc, basestring) else None
outpath = path if path and isinstance(path, basestring) else None
return (packed_obj, methodname, outpath, interval, idstring, persistent)
def save(self):
if self.ticker_storage:
start_delays = dict((interval, ticker.task.next_call_time())
for interval, ticker in self.ticker_pool.tickers.items())
to_save = {store_key: (args, kwargs) for store_key, (args, kwargs) in self.ticker_storage.items()
if ((store_key[1] and ("_obj" in kwargs and kwargs["_obj"].pk) and
hasattr(kwargs["_obj"], store_key[1])) or
store_key[2])}
for store_key, (args, kwargs) in to_save.items():
                interval = store_key[3]
kwargs["_start_delay"] = start_delays.get(interval, None)
ServerConfig.objects.conf(key=self.save_name, value=dbserialize(to_save))
else:
# make sure we have nothing lingering in the database
ServerConfig.objects.conf(key=self.save_name, delete=True)
def restore(self, server_reload=True):
# load stored command instructions and use them to re-initialize handler
restored_tickers = ServerConfig.objects.conf(key=self.save_name)
if restored_tickers:
# the dbunserialize will convert all serialized dbobjs to real objects
restored_tickers = dbunserialize(restored_tickers)
self.ticker_storage = {}
for store_key, (args, kwargs) in restored_tickers.iteritems():
try:
# at this point obj is the actual object (or None) due to how
# the dbunserialize works
obj, callfunc, path, interval, idstring, persistent = store_key
if not persistent and not server_reload:
# this ticker will not be restarted
continue
if isinstance(callfunc, basestring) and not obj:
# methods must have an existing object
continue
# we must rebuild the store_key here since obj must not be
# stored as the object itself for the store_key to be hashable.
store_key = self._store_key(obj, path, interval, callfunc, idstring, persistent)
if obj and callfunc:
kwargs["_callback"] = callfunc
kwargs["_obj"] = obj
elif path:
modname, varname = path.rsplit(".", 1)
callback = variable_from_module(modname, varname)
kwargs["_callback"] = callback
kwargs["_obj"] = None
else:
# Neither object nor path - discard this ticker
log_err("Tickerhandler: Removing malformed ticker: %s" % str(store_key))
continue
except Exception:
# this suggests a malformed save or missing objects
log_trace("Tickerhandler: Removing malformed ticker: %s" % str(store_key))
continue
# if we get here we should create a new ticker
self.ticker_storage[store_key] = (args, kwargs)
self.ticker_pool.add(store_key, *args, **kwargs)
def add(self, interval=60, callback=None, idstring="", persistent=True, *args, **kwargs):
if isinstance(callback, int):
raise RuntimeError("TICKER_HANDLER.add has changed: "
"the interval is now the first argument, callback the second.")
obj, path, callfunc = self._get_callback(callback)
store_key = self._store_key(obj, path, interval, callfunc, idstring, persistent)
kwargs["_obj"] = obj
kwargs["_callback"] = callfunc # either method-name or callable
self.ticker_storage[store_key] = (args, kwargs)
self.ticker_pool.add(store_key, *args, **kwargs)
self.save()
def remove(self, interval=60, callback=None, idstring="", persistent=True):
if isinstance(callback, int):
raise RuntimeError("TICKER_HANDLER.remove has changed: "
"the interval is now the first argument, callback the second.")
obj, path, callfunc = self._get_callback(callback)
store_key = self._store_key(obj, path, interval, callfunc, idstring, persistent)
to_remove = self.ticker_storage.pop(store_key, None)
if to_remove:
self.ticker_pool.remove(store_key)
self.save()
def clear(self, interval=None):
self.ticker_pool.stop(interval)
if interval:
            self.ticker_storage = dict((store_key, value)
                                   for store_key, value in self.ticker_storage.items()
                                   if store_key[3] != interval)
else:
self.ticker_storage = {}
self.save()
def all(self, interval=None):
if interval is None:
# return dict of all, ordered by interval
return dict((interval, ticker.subscriptions)
for interval, ticker in self.ticker_pool.tickers.iteritems())
else:
# get individual interval
ticker = self.ticker_pool.tickers.get(interval, None)
if ticker:
return {interval: ticker.subscriptions}
def all_display(self):
store_keys = []
for ticker in self.ticker_pool.tickers.itervalues():
for (objtup, callfunc, path, interval, idstring, persistent), (args, kwargs) in ticker.subscriptions.iteritems():
store_keys.append((kwargs.get("_obj", None), callfunc, path, interval, idstring, persistent))
return store_keys
# main tickerhandler
TICKER_HANDLER = TickerHandler()
| true
| true
|
790b73ae7c3db5599aebd76c699462b81c903bb3
| 3,134
|
py
|
Python
|
contrib/example.py
|
MScienceLLC/python-mscience-cachetclient
|
f7a96685c05fdfed5723ed912ea6cc6a685f8c38
|
[
"Apache-2.0"
] | 40
|
2016-07-28T04:09:47.000Z
|
2020-10-01T13:00:48.000Z
|
contrib/example.py
|
MScienceLLC/python-mscience-cachetclient
|
f7a96685c05fdfed5723ed912ea6cc6a685f8c38
|
[
"Apache-2.0"
] | 12
|
2016-08-15T23:17:20.000Z
|
2018-12-17T15:15:25.000Z
|
contrib/example.py
|
MScienceLLC/python-mscience-cachetclient
|
f7a96685c05fdfed5723ed912ea6cc6a685f8c38
|
[
"Apache-2.0"
] | 20
|
2016-07-28T04:09:57.000Z
|
2021-11-06T11:04:42.000Z
|
# Copyright Red Hat, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import cachetclient.cachet as cachet
import json
ENDPOINT = 'http://status.domain.tld/api/v1'
API_TOKEN = 'token'
# /ping
ping = cachet.Ping(endpoint=ENDPOINT)
print(ping.get())
# /version
version = cachet.Version(endpoint=ENDPOINT)
print(version.get())
# /components
components = cachet.Components(endpoint=ENDPOINT, api_token=API_TOKEN)
new_component = json.loads(components.post(name='Test component',
status=1,
description='Test component'))
print(components.get())
components.put(id=new_component['data']['id'], description='Updated component')
print(components.get(id=new_component['data']['id']))
components.delete(id=new_component['data']['id'])
# /components/groups
groups = cachet.Groups(endpoint=ENDPOINT, api_token=API_TOKEN)
new_group = json.loads(groups.post(name='Test group'))
print(groups.get())
groups.put(id=new_group['data']['id'], name='Updated group')
print(groups.get(id=new_group['data']['id']))
groups.delete(id=new_group['data']['id'])
# /incidents
incidents = cachet.Incidents(endpoint=ENDPOINT, api_token=API_TOKEN)
new_incident = json.loads(incidents.post(name='Test incident',
message='Houston, we have a problem.',
status=1))
print(incidents.get())
incidents.put(id=new_incident['data']['id'],
message="There's another problem, Houston.")
print(incidents.get(id=new_incident['data']['id']))
incidents.delete(id=new_incident['data']['id'])
# /metrics
# /metrics/points
metrics = cachet.Metrics(endpoint=ENDPOINT, api_token=API_TOKEN)
new_metric = json.loads(metrics.post(name='Test metric',
suffix='Numbers per hour',
description='How many numbers per hour',
default_value=0))
print(metrics.get())
print(metrics.get(id=new_metric['data']['id']))
points = cachet.Points(endpoint=ENDPOINT, api_token=API_TOKEN)
new_point = json.loads(points.post(id=new_metric['data']['id'], value=5))
print(points.get(metric_id=new_metric['data']['id']))
points.delete(metric_id=new_metric['data']['id'],
point_id=new_point['data']['id'])
metrics.delete(id=new_metric['data']['id'])
# /subscribers
subscribers = cachet.Subscribers(endpoint=ENDPOINT, api_token=API_TOKEN)
new_subscriber = json.loads(subscribers.post(email='test@test.org'))
subscribers.delete(id=new_subscriber['data']['id'])
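# Hedged helper sketch (ours, not part of the example): every endpoint call
# above returns a JSON string, hence the repeated json.loads noise. Only
# the {'data': {...}} response layout already used above is assumed.
def get_component_status(components_api, component_id):
    """Return the numeric status of one component, or None if absent."""
    data = json.loads(components_api.get(id=component_id))
    return data.get('data', {}).get('status')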
| 38.691358
| 79
| 0.674537
|
import cachetclient.cachet as cachet
import json
ENDPOINT = 'http://status.domain.tld/api/v1'
API_TOKEN = 'token'
ping = cachet.Ping(endpoint=ENDPOINT)
print(ping.get())
version = cachet.Version(endpoint=ENDPOINT)
print(version.get())
components = cachet.Components(endpoint=ENDPOINT, api_token=API_TOKEN)
new_component = json.loads(components.post(name='Test component',
status=1,
description='Test component'))
print(components.get())
components.put(id=new_component['data']['id'], description='Updated component')
print(components.get(id=new_component['data']['id']))
components.delete(id=new_component['data']['id'])
groups = cachet.Groups(endpoint=ENDPOINT, api_token=API_TOKEN)
new_group = json.loads(groups.post(name='Test group'))
print(groups.get())
groups.put(id=new_group['data']['id'], name='Updated group')
print(groups.get(id=new_group['data']['id']))
groups.delete(id=new_group['data']['id'])
incidents = cachet.Incidents(endpoint=ENDPOINT, api_token=API_TOKEN)
new_incident = json.loads(incidents.post(name='Test incident',
message='Houston, we have a problem.',
status=1))
print(incidents.get())
incidents.put(id=new_incident['data']['id'],
message="There's another problem, Houston.")
print(incidents.get(id=new_incident['data']['id']))
incidents.delete(id=new_incident['data']['id'])
# /metrics
# /metrics/points
metrics = cachet.Metrics(endpoint=ENDPOINT, api_token=API_TOKEN)
new_metric = json.loads(metrics.post(name='Test metric',
suffix='Numbers per hour',
description='How many numbers per hour',
default_value=0))
print(metrics.get())
print(metrics.get(id=new_metric['data']['id']))
points = cachet.Points(endpoint=ENDPOINT, api_token=API_TOKEN)
new_point = json.loads(points.post(id=new_metric['data']['id'], value=5))
print(points.get(metric_id=new_metric['data']['id']))
points.delete(metric_id=new_metric['data']['id'],
point_id=new_point['data']['id'])
metrics.delete(id=new_metric['data']['id'])
# /subscribers
subscribers = cachet.Subscribers(endpoint=ENDPOINT, api_token=API_TOKEN)
new_subscriber = json.loads(subscribers.post(email='test@test.org'))
subscribers.delete(id=new_subscriber['data']['id'])
| true
| true
|
790b760f746e75581d92be5781f464a378d95276
| 1,152
|
py
|
Python
|
spotdl/console/save.py
|
phcreery/spotdl-v4
|
3bd3768de10ae80b5e1ba3bbe6b792f7fc9f8dfc
|
[
"MIT"
] | null | null | null |
spotdl/console/save.py
|
phcreery/spotdl-v4
|
3bd3768de10ae80b5e1ba3bbe6b792f7fc9f8dfc
|
[
"MIT"
] | null | null | null |
spotdl/console/save.py
|
phcreery/spotdl-v4
|
3bd3768de10ae80b5e1ba3bbe6b792f7fc9f8dfc
|
[
"MIT"
] | null | null | null |
"""
Save module for the console.
"""
import json
from typing import List, Optional
from spotdl.utils.search import parse_query
from spotdl.utils.m3u import create_m3u_file
def save(
query: List[str],
save_path: str,
downloader,
m3u_file: Optional[str] = None,
) -> None:
"""
    Save metadata from Spotify to the disk.
    ### Arguments
    - query: list of strings to search for.
    - save_path: path to the file to save the metadata to.
    - downloader: Downloader instance (supplies the thread count, output
      settings and the progress logger).
    - m3u_file: optional path of an M3U playlist to write alongside.
### Notes
- This function is multi-threaded.
"""
# Parse the query
songs = parse_query(query, downloader.threads)
# Convert the songs to JSON
save_data = [song.json for song in songs]
# Save the songs to a file
with open(save_path, "w", encoding="utf-8") as save_file:
json.dump(save_data, save_file, indent=4, ensure_ascii=False)
if m3u_file:
create_m3u_file(
m3u_file, songs, downloader.output, downloader.output_format, False
)
downloader.progress_handler.log(
f"Saved {len(save_data)} song{'s' if len(save_data) > 1 else ''} to {save_path}"
)
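# Hedged companion sketch (ours): reading back the file written by save().
# Only the JSON layout produced above (a list of song dicts) is assumed.
def load_saved_songs(save_path: str) -> List[dict]:
    """Load the metadata list previously written by save()."""
    with open(save_path, "r", encoding="utf-8") as save_file:
        return json.load(save_file)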
| 23.510204
| 88
| 0.653646
|
import json
from typing import List, Optional
from spotdl.utils.search import parse_query
from spotdl.utils.m3u import create_m3u_file
def save(
query: List[str],
save_path: str,
downloader,
m3u_file: Optional[str] = None,
) -> None:
songs = parse_query(query, downloader.threads)
save_data = [song.json for song in songs]
with open(save_path, "w", encoding="utf-8") as save_file:
json.dump(save_data, save_file, indent=4, ensure_ascii=False)
if m3u_file:
create_m3u_file(
m3u_file, songs, downloader.output, downloader.output_format, False
)
downloader.progress_handler.log(
f"Saved {len(save_data)} song{'s' if len(save_data) > 1 else ''} to {save_path}"
)
| true
| true
|
790b767e6c41a97298f27ea96ca9c7f56d49ea7b
| 3,690
|
py
|
Python
|
epinet_fun/util.py
|
marmus12/CornerView
|
f76cd1cb4c402c59bafbf66b5e038c2d1ab9610b
|
[
"MIT"
] | 3
|
2020-03-27T13:36:18.000Z
|
2021-11-28T13:56:15.000Z
|
epinet_fun/util.py
|
marmus12/CornerView
|
f76cd1cb4c402c59bafbf66b5e038c2d1ab9610b
|
[
"MIT"
] | null | null | null |
epinet_fun/util.py
|
marmus12/CornerView
|
f76cd1cb4c402c59bafbf66b5e038c2d1ab9610b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 23 15:54:01 2018
@author: shinyonsei2
"""
import numpy as np
import imageio
def read_pfm(fpath, expected_identifier="Pf"):
# PFM format definition: http://netpbm.sourceforge.net/doc/pfm.html
def _get_next_line(f):
next_line = f.readline().decode('utf-8').rstrip()
# ignore comments
while next_line.startswith('#'):
            next_line = f.readline().decode('utf-8').rstrip()
return next_line
with open(fpath, 'rb') as f:
# header
identifier = _get_next_line(f)
if identifier != expected_identifier:
raise Exception('Unknown identifier. Expected: "%s", got: "%s".' % (expected_identifier, identifier))
try:
line_dimensions = _get_next_line(f)
dimensions = line_dimensions.split(' ')
width = int(dimensions[0].strip())
height = int(dimensions[1].strip())
except:
raise Exception('Could not parse dimensions: "%s". '
'Expected "width height", e.g. "512 512".' % line_dimensions)
try:
line_scale = _get_next_line(f)
scale = float(line_scale)
assert scale != 0
if scale < 0:
endianness = "<"
else:
endianness = ">"
except:
            raise Exception('Could not parse max value / endianness information: "%s". '
                            'Should be a non-zero number.' % line_scale)
try:
data = np.fromfile(f, "%sf" % endianness)
data = np.reshape(data, (height, width))
data = np.flipud(data)
with np.errstate(invalid="ignore"):
data *= abs(scale)
except:
raise Exception('Invalid binary values. Could not create %dx%d array from input.' % (height, width))
return data
def load_LFdata(dir_LFimages,hci_root):
traindata_all=np.zeros((len(dir_LFimages), 512, 512, 9, 9, 3),np.uint8)
traindata_label=np.zeros((len(dir_LFimages), 512, 512),np.float32)
image_id=0
for dir_LFimage in dir_LFimages:
print(dir_LFimage)
for i in range(81):
try:
tmp = np.float32(imageio.imread(hci_root + dir_LFimage+'/input_Cam0%.2d.png' % i)) # load LF images(9x9)
except:
print(hci_root + dir_LFimage+'/input_Cam0%.2d.png..does not exist' % i )
traindata_all[image_id,:,:,i//9,i-9*(i//9),:]=tmp
del tmp
try:
tmp = np.float32(read_pfm(hci_root +dir_LFimage+'/gt_disp_lowres.pfm')) # load LF disparity map
except:
                print(hci_root + dir_LFimage+'/gt_disp_lowres.pfm..does not exist')
traindata_label[image_id,:,:]=tmp
del tmp
image_id=image_id+1
return traindata_all, traindata_label
def load_depth_gts(gt_dir,dir_LFimages):
w_views = 9
n_views = w_views**2
traindata_label=np.zeros((len(dir_LFimages), 512, 512, n_views),np.float32)
image_id=0
for dir_LFimage in dir_LFimages:
sample_name = dir_LFimage.split('/')[-1]
print("loading additional gt.. " + sample_name)
for i in range(n_views):
# try: 0%.2d.png
tmp = np.float32(read_pfm(gt_dir +sample_name+'/gt_disp_lowres_Cam0%.2d.pfm' %i)) # load LF disparity map
# except:
# print(hci_root + dir_LFimage+'\gt_disp_lowres.pfm..does not exist' % i )
traindata_label[image_id,:,:,i]=tmp
del tmp
image_id=image_id+1
return traindata_label
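# Hedged helper sketch (ours): load_LFdata() maps the flat camera index i
# onto a 9x9 view grid with i//9 and i-9*(i//9); the latter is simply
# i % 9. A minimal named equivalent:
def view_coords(i, width=9):
    """Return (row, col) of flat view index i in a width x width grid."""
    return i // width, i % width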
| 34.485981
| 122
| 0.566938
|
import numpy as np
import imageio
def read_pfm(fpath, expected_identifier="Pf"):
def _get_next_line(f):
next_line = f.readline().decode('utf-8').rstrip()
while next_line.startswith('#'):
            next_line = f.readline().decode('utf-8').rstrip()
return next_line
with open(fpath, 'rb') as f:
identifier = _get_next_line(f)
if identifier != expected_identifier:
raise Exception('Unknown identifier. Expected: "%s", got: "%s".' % (expected_identifier, identifier))
try:
line_dimensions = _get_next_line(f)
dimensions = line_dimensions.split(' ')
width = int(dimensions[0].strip())
height = int(dimensions[1].strip())
except:
raise Exception('Could not parse dimensions: "%s". '
'Expected "width height", e.g. "512 512".' % line_dimensions)
try:
line_scale = _get_next_line(f)
scale = float(line_scale)
assert scale != 0
if scale < 0:
endianness = "<"
else:
endianness = ">"
except:
            raise Exception('Could not parse max value / endianness information: "%s". '
                            'Should be a non-zero number.' % line_scale)
try:
data = np.fromfile(f, "%sf" % endianness)
data = np.reshape(data, (height, width))
data = np.flipud(data)
with np.errstate(invalid="ignore"):
data *= abs(scale)
except:
raise Exception('Invalid binary values. Could not create %dx%d array from input.' % (height, width))
return data
def load_LFdata(dir_LFimages,hci_root):
traindata_all=np.zeros((len(dir_LFimages), 512, 512, 9, 9, 3),np.uint8)
traindata_label=np.zeros((len(dir_LFimages), 512, 512),np.float32)
image_id=0
for dir_LFimage in dir_LFimages:
print(dir_LFimage)
for i in range(81):
try:
tmp = np.float32(imageio.imread(hci_root + dir_LFimage+'/input_Cam0%.2d.png' % i))
except:
print(hci_root + dir_LFimage+'/input_Cam0%.2d.png..does not exist' % i )
traindata_all[image_id,:,:,i//9,i-9*(i//9),:]=tmp
del tmp
try:
tmp = np.float32(read_pfm(hci_root +dir_LFimage+'/gt_disp_lowres.pfm'))
except:
                print(hci_root + dir_LFimage+'/gt_disp_lowres.pfm..does not exist')
traindata_label[image_id,:,:]=tmp
del tmp
image_id=image_id+1
return traindata_all, traindata_label
def load_depth_gts(gt_dir,dir_LFimages):
w_views = 9
n_views = w_views**2
traindata_label=np.zeros((len(dir_LFimages), 512, 512, n_views),np.float32)
image_id=0
for dir_LFimage in dir_LFimages:
sample_name = dir_LFimage.split('/')[-1]
print("loading additional gt.. " + sample_name)
for i in range(n_views):
tmp = np.float32(read_pfm(gt_dir +sample_name+'/gt_disp_lowres_Cam0%.2d.pfm' %i))
traindata_label[image_id,:,:,i]=tmp
del tmp
image_id=image_id+1
return traindata_label
| true
| true
|
790b78db9dc64faf1a637d5079d688e7980bbbab
| 8,543
|
py
|
Python
|
env/lib/python3.8/site-packages/plotly/graph_objs/scatterternary/marker/colorbar/_tickfont.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 11,750
|
2015-10-12T07:03:39.000Z
|
2022-03-31T20:43:15.000Z
|
env/lib/python3.8/site-packages/plotly/graph_objs/scatterternary/marker/colorbar/_tickfont.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 2,951
|
2015-10-12T00:41:25.000Z
|
2022-03-31T22:19:26.000Z
|
env/lib/python3.8/site-packages/plotly/graph_objs/scatterternary/marker/colorbar/_tickfont.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 2,623
|
2015-10-15T14:40:27.000Z
|
2022-03-28T16:05:50.000Z
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Tickfont(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "scatterternary.marker.colorbar"
_path_str = "scatterternary.marker.colorbar.tickfont"
_valid_props = {"color", "family", "size"}
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The Chart Studio Cloud (at https://chart-
studio.plotly.com or on-premise) generates images on a server,
where only a select number of fonts are installed and
supported. These include "Arial", "Balto", "Courier New",
"Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
"""
def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
"""
Construct a new Tickfont object
Sets the color bar's tick label font
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.scatterternary
.marker.colorbar.Tickfont`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
Returns
-------
Tickfont
"""
super(Tickfont, self).__init__("tickfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scatterternary.marker.colorbar.Tickfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatterternary.marker.colorbar.Tickfont`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("family", None)
_v = family if family is not None else _v
if _v is not None:
self["family"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
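# Hedged usage sketch (ours): constructing the class defined above either
# from keyword arguments or from a dict; the property names come from
# _valid_props. Kept as comments since this module is library code.
#
#   tickfont = Tickfont(color="crimson", family="Open Sans, Arial", size=12)
#   same = Tickfont({"color": "crimson", "family": "Open Sans, Arial",
#                    "size": 12})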
| 37.469298
| 84
| 0.569004
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Tickfont(_BaseTraceHierarchyType):
_parent_path_str = "scatterternary.marker.colorbar"
_path_str = "scatterternary.marker.colorbar.tickfont"
_valid_props = {"color", "family", "size"}
@property
def color(self):
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def size(self):
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
"""
def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
super(Tickfont, self).__init__("tickfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scatterternary.marker.colorbar.Tickfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatterternary.marker.colorbar.Tickfont`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("family", None)
_v = family if family is not None else _v
if _v is not None:
self["family"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
| true
| true
|
790b792a2375b0c1f28dfbf256333d23569b151f
| 2,154
|
py
|
Python
|
piot/outputs/amqp.py
|
frantp/iot-sensor-reader
|
8e9b95f7d8a72b594bf121a626cc16ebe54d9d7d
|
[
"Apache-2.0"
] | null | null | null |
piot/outputs/amqp.py
|
frantp/iot-sensor-reader
|
8e9b95f7d8a72b594bf121a626cc16ebe54d9d7d
|
[
"Apache-2.0"
] | null | null | null |
piot/outputs/amqp.py
|
frantp/iot-sensor-reader
|
8e9b95f7d8a72b594bf121a626cc16ebe54d9d7d
|
[
"Apache-2.0"
] | null | null | null |
from queue import Queue, Empty, Full
from ..core import DriverBase, format_msg
import pika
class Driver(DriverBase):
def __init__(self, exchange, queue, routing_key=None, buffer_maxsize=None,
*args, **kwargs):
super().__init__()
self._args = args
self._kwargs = kwargs
self._exchange = exchange
self._queue = queue
self._routing_key = routing_key or queue
self._buffer = Queue(buffer_maxsize) \
if buffer_maxsize is not None else None
self._declared = False
def run(self, driver_id, ts, fields, tags):
if not fields:
return
msg = format_msg(ts, driver_id, tags, fields)
try:
with pika.BlockingConnection(
pika.ConnectionParameters(*self._args, **self._kwargs)) as c:
channel = c.channel()
self._publish(channel, msg)
# Flush buffer
if self._buffer is not None:
try:
while True:
msg = self._buffer.get_nowait()
self._publish(channel, msg)
except Empty:
pass
except pika.exceptions.AMQPError:
# Add to buffer
if self._buffer is not None:
try:
self._buffer.put_nowait(msg)
except Full:
pass
def _declare(self, channel):
if not self._declared:
channel.exchange_declare(exchange=self._exchange, durable=True)
channel.queue_declare(queue=self._queue, durable=True)
channel.queue_bind(
exchange=self._exchange,
queue=self._queue,
routing_key=self._routing_key
)
self._declared = True
def _publish(self, channel, msg):
self._declare(channel)
channel.basic_publish(
exchange=self._exchange,
routing_key=self._routing_key,
body=msg,
properties=pika.BasicProperties(delivery_mode=2)
)
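# Hedged usage sketch (ours): extra positional/keyword arguments feed
# straight into pika.ConnectionParameters, so `host` below is a standard
# pika parameter; the exchange/queue names and the sample reading are
# placeholders.
#
#   driver = Driver('sensors', 'readings', buffer_maxsize=100,
#                   host='localhost')
#   driver.run('thermo1', 1589000000, {'temp_c': 21.5}, {'room': 'lab'})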
| 33.65625
| 78
| 0.539926
|
from queue import Queue, Empty, Full
from ..core import DriverBase, format_msg
import pika
class Driver(DriverBase):
def __init__(self, exchange, queue, routing_key=None, buffer_maxsize=None,
*args, **kwargs):
super().__init__()
self._args = args
self._kwargs = kwargs
self._exchange = exchange
self._queue = queue
self._routing_key = routing_key or queue
self._buffer = Queue(buffer_maxsize) \
if buffer_maxsize is not None else None
self._declared = False
def run(self, driver_id, ts, fields, tags):
if not fields:
return
msg = format_msg(ts, driver_id, tags, fields)
try:
with pika.BlockingConnection(
pika.ConnectionParameters(*self._args, **self._kwargs)) as c:
channel = c.channel()
self._publish(channel, msg)
if self._buffer is not None:
try:
while True:
msg = self._buffer.get_nowait()
self._publish(channel, msg)
except Empty:
pass
except pika.exceptions.AMQPError:
if self._buffer is not None:
try:
self._buffer.put_nowait(msg)
except Full:
pass
def _declare(self, channel):
if not self._declared:
channel.exchange_declare(exchange=self._exchange, durable=True)
channel.queue_declare(queue=self._queue, durable=True)
channel.queue_bind(
exchange=self._exchange,
queue=self._queue,
routing_key=self._routing_key
)
self._declared = True
def _publish(self, channel, msg):
self._declare(channel)
channel.basic_publish(
exchange=self._exchange,
routing_key=self._routing_key,
body=msg,
properties=pika.BasicProperties(delivery_mode=2)
)
| true
| true
|
790b793a127e65afd85e24cd9976853a08eef6e2
| 11,584
|
py
|
Python
|
gammapy/data/data_store.py
|
qpiel/gammapy
|
cfb976909e63f4d5d578e1495245c0baad69482b
|
[
"BSD-3-Clause"
] | null | null | null |
gammapy/data/data_store.py
|
qpiel/gammapy
|
cfb976909e63f4d5d578e1495245c0baad69482b
|
[
"BSD-3-Clause"
] | 1
|
2020-10-29T19:55:46.000Z
|
2020-10-29T19:55:46.000Z
|
gammapy/data/data_store.py
|
qpiel/gammapy
|
cfb976909e63f4d5d578e1495245c0baad69482b
|
[
"BSD-3-Clause"
] | null | null | null |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import subprocess
from ..utils.scripts import make_path
from ..utils.testing import Checker
from .obs_table import ObservationTable
from .hdu_index_table import HDUIndexTable
from .obs_table import ObservationTableChecker
from .observations import DataStoreObservation, Observations, ObservationChecker
__all__ = ["DataStore"]
log = logging.getLogger(__name__)
class DataStore(object):
"""IACT data store.
The data selection and access happens using an observation
and an HDU index file as described at :ref:`gadf:iact-storage`.
See :gp-extra-notebook:`cta_1dc_introduction` for usage examples.
Parameters
----------
hdu_table : `~gammapy.data.HDUIndexTable`
HDU index table
obs_table : `~gammapy.data.ObservationTable`
Observation index table
Examples
--------
Here's an example how to create a `DataStore` to access H.E.S.S. data:
>>> from gammapy.data import DataStore
>>> data_store = DataStore.from_dir('$GAMMAPY_DATA/hess-dl3-dr1')
>>> data_store.info()
"""
DEFAULT_HDU_TABLE = "hdu-index.fits.gz"
"""Default HDU table filename."""
DEFAULT_OBS_TABLE = "obs-index.fits.gz"
"""Default observation table filename."""
def __init__(self, hdu_table=None, obs_table=None):
self.hdu_table = hdu_table
self.obs_table = obs_table
def __str__(self):
return self.info(show=False)
@classmethod
def from_file(cls, filename, hdu_hdu="HDU_INDEX", hdu_obs="OBS_INDEX"):
"""Create from a FITS file.
The FITS file must contain both index files.
Parameters
----------
filename : str, Path
FITS filename
hdu_hdu : str or int
FITS HDU name or number for the HDU index table
hdu_obs : str or int
FITS HDU name or number for the observation index table
"""
filename = make_path(filename)
hdu_table = HDUIndexTable.read(filename, hdu=hdu_hdu, format="fits")
obs_table = ObservationTable.read(filename, hdu=hdu_obs, format="fits")
return cls(hdu_table=hdu_table, obs_table=obs_table)
@classmethod
def from_dir(cls, base_dir, hdu_table_filename=None, obs_table_filename=None):
"""Create from a directory.
Parameters
----------
base_dir : str, Path
Base directory of the data files.
hdu_table_filename : str, Path
Filename of the HDU index file. May be specified either relative
to `base_dir` or as an absolute path. If None, the default filename
will be looked for.
obs_table_filename : str, Path
Filename of the observation index file. May be specified either relative
to `base_dir` or as an absolute path. If None, the default filename
will be looked for.
"""
base_dir = make_path(base_dir)
if hdu_table_filename:
hdu_table_filename = make_path(hdu_table_filename)
if (base_dir / hdu_table_filename).exists():
hdu_table_filename = base_dir / hdu_table_filename
else:
hdu_table_filename = base_dir / cls.DEFAULT_HDU_TABLE
if obs_table_filename:
obs_table_filename = make_path(obs_table_filename)
if (base_dir / obs_table_filename).exists():
obs_table_filename = base_dir / obs_table_filename
else:
obs_table_filename = base_dir / cls.DEFAULT_OBS_TABLE
if not hdu_table_filename.exists():
raise IOError("File not found: {}".format(hdu_table_filename))
log.debug("Reading {}".format(hdu_table_filename))
hdu_table = HDUIndexTable.read(str(hdu_table_filename), format="fits")
hdu_table.meta["BASE_DIR"] = str(base_dir)
if not obs_table_filename.exists():
raise IOError("File not found: {}".format(obs_table_filename))
log.debug("Reading {}".format(str(obs_table_filename)))
obs_table = ObservationTable.read(str(obs_table_filename), format="fits")
return cls(hdu_table=hdu_table, obs_table=obs_table)
@classmethod
def from_config(cls, config):
"""Create from a config dict."""
base_dir = config["base_dir"]
hdu_table_filename = config.get("hduindx", cls.DEFAULT_HDU_TABLE)
obs_table_filename = config.get("obsindx", cls.DEFAULT_OBS_TABLE)
hdu_table_filename = cls._find_file(hdu_table_filename, base_dir)
obs_table_filename = cls._find_file(obs_table_filename, base_dir)
        return cls.from_dir(
base_dir=base_dir,
hdu_table_filename=hdu_table_filename,
obs_table_filename=obs_table_filename,
)
@staticmethod
def _find_file(filename, dir):
"""Find a file at an absolute or relative location.
- First tries ``Path(filename)``
- Second tries ``Path(dir) / filename``
- Raises ``OSError`` if both don't exist.
"""
path1 = make_path(filename)
path2 = make_path(dir) / filename
if path1.is_file():
filename = path1
elif path2.is_file():
filename = path2
else:
raise OSError("File not found at {} or {}".format(path1, path2))
return filename
def info(self, show=True):
"""Print some info."""
s = "Data store:\n"
s += self.hdu_table.summary()
s += "\n\n"
s += self.obs_table.summary()
if show:
print(s)
else:
return s
def obs(self, obs_id):
"""Access a given `~gammapy.data.DataStoreObservation`.
Parameters
----------
obs_id : int
Observation ID.
Returns
-------
observation : `~gammapy.data.DataStoreObservation`
Observation container
"""
return DataStoreObservation(obs_id=int(obs_id), data_store=self)
def get_observations(self, obs_id, skip_missing=False):
"""Generate a `~gammapy.data.Observations`.
Parameters
----------
obs_id : list
Observation IDs.
skip_missing : bool, optional
Skip missing observations, default: False
Returns
-------
observations : `~gammapy.data.Observations`
Container holding a list of `~gammapy.data.DataStoreObservation`
"""
obs_list = []
for _ in obs_id:
try:
obs = self.obs(_)
except ValueError as err:
if skip_missing:
log.warning("Skipping missing obs_id: {!r}".format(_))
continue
else:
raise err
else:
obs_list.append(obs)
return Observations(obs_list)
def copy_obs(self, obs_id, outdir, hdu_class=None, verbose=False, overwrite=False):
"""Create a new `~gammapy.data.DataStore` containing a subset of observations.
Parameters
----------
obs_id : array-like, `~gammapy.data.ObservationTable`
List of observations to copy
outdir : str, Path
Directory for the new store
hdu_class : list of str
see :attr:`gammapy.data.HDUIndexTable.VALID_HDU_CLASS`
verbose : bool
Print copied files
overwrite : bool
Overwrite
"""
# TODO : Does rsync give any benefits here?
outdir = make_path(outdir)
if isinstance(obs_id, ObservationTable):
obs_id = obs_id["OBS_ID"].data
hdutable = self.hdu_table
hdutable.add_index("OBS_ID")
with hdutable.index_mode("discard_on_copy"):
subhdutable = hdutable.loc[obs_id]
if hdu_class is not None:
subhdutable.add_index("HDU_CLASS")
with subhdutable.index_mode("discard_on_copy"):
subhdutable = subhdutable.loc[hdu_class]
subobstable = self.obs_table.select_obs_id(obs_id)
for idx in range(len(subhdutable)):
# Changes to the file structure could be made here
loc = subhdutable.location_info(idx)
targetdir = outdir / loc.file_dir
targetdir.mkdir(exist_ok=True, parents=True)
cmd = ["cp", "-v"] if verbose else ["cp"]
if not overwrite:
cmd += ["-n"]
cmd += [str(loc.path()), str(targetdir)]
subprocess.call(cmd)
filename = str(outdir / self.DEFAULT_HDU_TABLE)
subhdutable.write(filename, format="fits", overwrite=overwrite)
filename = str(outdir / self.DEFAULT_OBS_TABLE)
subobstable.write(filename, format="fits", overwrite=overwrite)
def check(self, checks="all"):
"""Check index tables and data files.
This is a generator that yields a list of dicts.
"""
checker = DataStoreChecker(self)
return checker.run(checks=checks)
class DataStoreChecker(Checker):
"""Check data store.
Checks data format and a bit about the content.
"""
CHECKS = {
"obs_table": "check_obs_table",
"hdu_table": "check_hdu_table",
"observations": "check_observations",
"consistency": "check_consistency",
}
def __init__(self, data_store):
self.data_store = data_store
def check_obs_table(self):
"""Checks for the observation index table."""
checker = ObservationTableChecker(self.data_store.obs_table)
for record in checker.run():
yield record
def check_hdu_table(self):
"""Checks for the HDU index table."""
t = self.data_store.hdu_table
m = t.meta
if m.get("HDUCLAS1", "") != "INDEX":
yield {
"level": "error",
"hdu": "hdu-index",
"msg": "Invalid header key. Must have HDUCLAS1=INDEX",
}
if m.get("HDUCLAS2", "") != "HDU":
yield {
"level": "error",
"hdu": "hdu-index",
"msg": "Invalid header key. Must have HDUCLAS2=HDU",
}
# Check that all HDU in the data files exist
for idx in range(len(t)):
location_info = t.location_info(idx)
try:
location_info.get_hdu()
except KeyError:
yield {
"level": "error",
"msg": "HDU not found: {!r}".format(location_info.__dict__),
}
def check_consistency(self):
"""Consistency checks between multiple HDUs"""
# obs and HDU index should have the same OBS_ID
obs_table_obs_id = set(self.data_store.obs_table["OBS_ID"])
hdu_table_obs_id = set(self.data_store.hdu_table["OBS_ID"])
if not obs_table_obs_id == hdu_table_obs_id:
yield {
"level": "error",
"msg": "Inconsistent OBS_ID in obs and HDU index tables",
}
# TODO: obs table and events header should have the same times
def check_observations(self):
"""Perform some sanity checks for all observations."""
for obs_id in self.data_store.obs_table["OBS_ID"]:
obs = self.data_store.obs(obs_id)
for record in ObservationChecker(obs).run():
yield record
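# Hedged usage sketch (ours): the dataset path repeats the docstring
# example above; the observation IDs are placeholders.
#
#   data_store = DataStore.from_dir('$GAMMAPY_DATA/hess-dl3-dr1')
#   observations = data_store.get_observations([23523, 23526],
#                                              skip_missing=True)
#   for record in data_store.check():
#       print(record)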
| 33.871345
| 87
| 0.601865
|
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import subprocess
from ..utils.scripts import make_path
from ..utils.testing import Checker
from .obs_table import ObservationTable
from .hdu_index_table import HDUIndexTable
from .obs_table import ObservationTableChecker
from .observations import DataStoreObservation, Observations, ObservationChecker
__all__ = ["DataStore"]
log = logging.getLogger(__name__)
class DataStore(object):
DEFAULT_HDU_TABLE = "hdu-index.fits.gz"
DEFAULT_OBS_TABLE = "obs-index.fits.gz"
def __init__(self, hdu_table=None, obs_table=None):
self.hdu_table = hdu_table
self.obs_table = obs_table
def __str__(self):
return self.info(show=False)
@classmethod
def from_file(cls, filename, hdu_hdu="HDU_INDEX", hdu_obs="OBS_INDEX"):
filename = make_path(filename)
hdu_table = HDUIndexTable.read(filename, hdu=hdu_hdu, format="fits")
obs_table = ObservationTable.read(filename, hdu=hdu_obs, format="fits")
return cls(hdu_table=hdu_table, obs_table=obs_table)
@classmethod
def from_dir(cls, base_dir, hdu_table_filename=None, obs_table_filename=None):
base_dir = make_path(base_dir)
if hdu_table_filename:
hdu_table_filename = make_path(hdu_table_filename)
if (base_dir / hdu_table_filename).exists():
hdu_table_filename = base_dir / hdu_table_filename
else:
hdu_table_filename = base_dir / cls.DEFAULT_HDU_TABLE
if obs_table_filename:
obs_table_filename = make_path(obs_table_filename)
if (base_dir / obs_table_filename).exists():
obs_table_filename = base_dir / obs_table_filename
else:
obs_table_filename = base_dir / cls.DEFAULT_OBS_TABLE
if not hdu_table_filename.exists():
raise IOError("File not found: {}".format(hdu_table_filename))
log.debug("Reading {}".format(hdu_table_filename))
hdu_table = HDUIndexTable.read(str(hdu_table_filename), format="fits")
hdu_table.meta["BASE_DIR"] = str(base_dir)
if not obs_table_filename.exists():
raise IOError("File not found: {}".format(obs_table_filename))
log.debug("Reading {}".format(str(obs_table_filename)))
obs_table = ObservationTable.read(str(obs_table_filename), format="fits")
return cls(hdu_table=hdu_table, obs_table=obs_table)
@classmethod
def from_config(cls, config):
base_dir = config["base_dir"]
hdu_table_filename = config.get("hduindx", cls.DEFAULT_HDU_TABLE)
obs_table_filename = config.get("obsindx", cls.DEFAULT_OBS_TABLE)
hdu_table_filename = cls._find_file(hdu_table_filename, base_dir)
obs_table_filename = cls._find_file(obs_table_filename, base_dir)
        return cls.from_dir(
base_dir=base_dir,
hdu_table_filename=hdu_table_filename,
obs_table_filename=obs_table_filename,
)
@staticmethod
def _find_file(filename, dir):
path1 = make_path(filename)
path2 = make_path(dir) / filename
if path1.is_file():
filename = path1
elif path2.is_file():
filename = path2
else:
raise OSError("File not found at {} or {}".format(path1, path2))
return filename
def info(self, show=True):
s = "Data store:\n"
s += self.hdu_table.summary()
s += "\n\n"
s += self.obs_table.summary()
if show:
print(s)
else:
return s
def obs(self, obs_id):
return DataStoreObservation(obs_id=int(obs_id), data_store=self)
def get_observations(self, obs_id, skip_missing=False):
obs_list = []
for _ in obs_id:
try:
obs = self.obs(_)
except ValueError as err:
if skip_missing:
log.warning("Skipping missing obs_id: {!r}".format(_))
continue
else:
raise err
else:
obs_list.append(obs)
return Observations(obs_list)
def copy_obs(self, obs_id, outdir, hdu_class=None, verbose=False, overwrite=False):
outdir = make_path(outdir)
if isinstance(obs_id, ObservationTable):
obs_id = obs_id["OBS_ID"].data
hdutable = self.hdu_table
hdutable.add_index("OBS_ID")
with hdutable.index_mode("discard_on_copy"):
subhdutable = hdutable.loc[obs_id]
if hdu_class is not None:
subhdutable.add_index("HDU_CLASS")
with subhdutable.index_mode("discard_on_copy"):
subhdutable = subhdutable.loc[hdu_class]
subobstable = self.obs_table.select_obs_id(obs_id)
for idx in range(len(subhdutable)):
loc = subhdutable.location_info(idx)
targetdir = outdir / loc.file_dir
targetdir.mkdir(exist_ok=True, parents=True)
cmd = ["cp", "-v"] if verbose else ["cp"]
if not overwrite:
cmd += ["-n"]
cmd += [str(loc.path()), str(targetdir)]
subprocess.call(cmd)
filename = str(outdir / self.DEFAULT_HDU_TABLE)
subhdutable.write(filename, format="fits", overwrite=overwrite)
filename = str(outdir / self.DEFAULT_OBS_TABLE)
subobstable.write(filename, format="fits", overwrite=overwrite)
def check(self, checks="all"):
checker = DataStoreChecker(self)
return checker.run(checks=checks)
class DataStoreChecker(Checker):
CHECKS = {
"obs_table": "check_obs_table",
"hdu_table": "check_hdu_table",
"observations": "check_observations",
"consistency": "check_consistency",
}
def __init__(self, data_store):
self.data_store = data_store
def check_obs_table(self):
checker = ObservationTableChecker(self.data_store.obs_table)
for record in checker.run():
yield record
def check_hdu_table(self):
t = self.data_store.hdu_table
m = t.meta
if m.get("HDUCLAS1", "") != "INDEX":
yield {
"level": "error",
"hdu": "hdu-index",
"msg": "Invalid header key. Must have HDUCLAS1=INDEX",
}
if m.get("HDUCLAS2", "") != "HDU":
yield {
"level": "error",
"hdu": "hdu-index",
"msg": "Invalid header key. Must have HDUCLAS2=HDU",
}
for idx in range(len(t)):
location_info = t.location_info(idx)
try:
location_info.get_hdu()
except KeyError:
yield {
"level": "error",
"msg": "HDU not found: {!r}".format(location_info.__dict__),
}
def check_consistency(self):
obs_table_obs_id = set(self.data_store.obs_table["OBS_ID"])
hdu_table_obs_id = set(self.data_store.hdu_table["OBS_ID"])
if not obs_table_obs_id == hdu_table_obs_id:
yield {
"level": "error",
"msg": "Inconsistent OBS_ID in obs and HDU index tables",
}
def check_observations(self):
for obs_id in self.data_store.obs_table["OBS_ID"]:
obs = self.data_store.obs(obs_id)
for record in ObservationChecker(obs).run():
yield record
| true
| true
|
790b79aaea7d8319d1b7e5c77c5817e21eb1675a
| 296
|
py
|
Python
|
models/user/errors.py
|
nealwobuhei/pricing-service
|
5ed936f169f91f341be6863464833540f37ce071
|
[
"MIT"
] | null | null | null |
models/user/errors.py
|
nealwobuhei/pricing-service
|
5ed936f169f91f341be6863464833540f37ce071
|
[
"MIT"
] | null | null | null |
models/user/errors.py
|
nealwobuhei/pricing-service
|
5ed936f169f91f341be6863464833540f37ce071
|
[
"MIT"
] | null | null | null |
class UserError(Exception):
def __init__(self, message):
self.message = message
class UserNotFoundError(UserError):
pass
class UserAlreadyRegisteredError(UserError):
pass
class InvalidEmailError(UserError):
pass
class IncorrectPasswordError(UserError):
pass
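# Hedged usage sketch (ours): the hierarchy above lets callers catch the
# base class once; find_user() is a hypothetical lookup.
#
#   try:
#       user = find_user(email)
#   except UserNotFoundError as err:
#       print(err.message)
#   except UserError as err:   # any other user-related failure
#       print("user error:", err.message)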
| 13.454545
| 44
| 0.733108
|
class UserError(Exception):
def __init__(self, message):
self.message = message
class UserNotFoundError(UserError):
pass
class UserAlreadyRegisteredError(UserError):
pass
class InvalidEmailError(UserError):
pass
class IncorrectPasswordError(UserError):
pass
| true
| true
|
790b7b6ed4419196de8d4867231b93dc74b25ccd
| 163
|
py
|
Python
|
frontend/urls.py
|
yi-syong/Benga
|
ef8cff66b8853d03c9615ef36d566cec27b9d9fd
|
[
"MIT"
] | 2
|
2018-10-22T08:02:09.000Z
|
2022-03-15T10:58:34.000Z
|
frontend/urls.py
|
yi-syong/Benga
|
ef8cff66b8853d03c9615ef36d566cec27b9d9fd
|
[
"MIT"
] | 81
|
2018-10-26T09:02:49.000Z
|
2020-10-27T06:30:02.000Z
|
frontend/urls.py
|
yi-syong/Benga
|
ef8cff66b8853d03c9615ef36d566cec27b9d9fd
|
[
"MIT"
] | 4
|
2018-10-22T05:18:20.000Z
|
2018-10-26T02:13:55.000Z
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.IndexView.as_view()),
path('non-release/', views.NonRelease.as_view()),
]
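# Hedged usage sketch (ours): exercising the two routes with Django's test
# client; nothing about the views' responses is assumed beyond the paths.
#
#   from django.test import Client
#   client = Client()
#   client.get('/')              # dispatched to views.IndexView
#   client.get('/non-release/')  # dispatched to views.NonRelease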
| 20.375
| 53
| 0.680982
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.IndexView.as_view()),
path('non-release/', views.NonRelease.as_view()),
]
| true
| true
|
790b7d638e4db9ae17a0d926eacc5e9c422ba75b
| 17,404
|
py
|
Python
|
tests/helpers/test_entity_component.py
|
mhammo30/home-assistant
|
a7452c096847194e9df8d0f57106140871f00b2f
|
[
"Apache-2.0"
] | null | null | null |
tests/helpers/test_entity_component.py
|
mhammo30/home-assistant
|
a7452c096847194e9df8d0f57106140871f00b2f
|
[
"Apache-2.0"
] | null | null | null |
tests/helpers/test_entity_component.py
|
mhammo30/home-assistant
|
a7452c096847194e9df8d0f57106140871f00b2f
|
[
"Apache-2.0"
] | null | null | null |
"""The tests for the Entity component helper."""
# pylint: disable=protected-access
import asyncio
from collections import OrderedDict
import logging
import unittest
from unittest.mock import patch, Mock
from datetime import timedelta
import pytest
import homeassistant.core as ha
import homeassistant.loader as loader
from homeassistant.exceptions import PlatformNotReady
from homeassistant.components import group
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.setup import setup_component, async_setup_component
from homeassistant.helpers import discovery
import homeassistant.util.dt as dt_util
from tests.common import (
get_test_home_assistant, MockPlatform, MockModule, mock_coro,
async_fire_time_changed, MockEntity, MockConfigEntry,
mock_entity_platform, mock_integration)
_LOGGER = logging.getLogger(__name__)
DOMAIN = "test_domain"
class TestHelpersEntityComponent(unittest.TestCase):
"""Test homeassistant.helpers.entity_component module."""
def setUp(self): # pylint: disable=invalid-name
"""Initialize a test Home Assistant instance."""
self.hass = get_test_home_assistant()
def tearDown(self): # pylint: disable=invalid-name
"""Clean up the test Home Assistant instance."""
self.hass.stop()
def test_setting_up_group(self):
"""Set up the setting of a group."""
setup_component(self.hass, 'group', {'group': {}})
component = EntityComponent(_LOGGER, DOMAIN, self.hass,
group_name='everyone')
# No group after setup
assert len(self.hass.states.entity_ids()) == 0
component.add_entities([MockEntity()])
self.hass.block_till_done()
# group exists
assert len(self.hass.states.entity_ids()) == 2
assert self.hass.states.entity_ids('group') == ['group.everyone']
group = self.hass.states.get('group.everyone')
assert group.attributes.get('entity_id') == \
('test_domain.unnamed_device',)
# group extended
component.add_entities([MockEntity(name='goodbye')])
self.hass.block_till_done()
assert len(self.hass.states.entity_ids()) == 3
group = self.hass.states.get('group.everyone')
# Ordered in order of added to the group
assert group.attributes.get('entity_id') == \
('test_domain.goodbye', 'test_domain.unnamed_device')
def test_setup_loads_platforms(self):
"""Test the loading of the platforms."""
component_setup = Mock(return_value=True)
platform_setup = Mock(return_value=None)
mock_integration(self.hass,
MockModule('test_component', setup=component_setup))
# mock the dependencies
mock_integration(self.hass,
MockModule('mod2', dependencies=['test_component']))
mock_entity_platform(self.hass, 'test_domain.mod2',
MockPlatform(platform_setup))
component = EntityComponent(_LOGGER, DOMAIN, self.hass)
assert not component_setup.called
assert not platform_setup.called
component.setup({
DOMAIN: {
'platform': 'mod2',
}
})
self.hass.block_till_done()
assert component_setup.called
assert platform_setup.called
def test_setup_recovers_when_setup_raises(self):
"""Test the setup if exceptions are happening."""
platform1_setup = Mock(side_effect=Exception('Broken'))
platform2_setup = Mock(return_value=None)
mock_entity_platform(self.hass, 'test_domain.mod1',
MockPlatform(platform1_setup))
mock_entity_platform(self.hass, 'test_domain.mod2',
MockPlatform(platform2_setup))
component = EntityComponent(_LOGGER, DOMAIN, self.hass)
assert not platform1_setup.called
assert not platform2_setup.called
component.setup(OrderedDict([
(DOMAIN, {'platform': 'mod1'}),
("{} 2".format(DOMAIN), {'platform': 'non_exist'}),
("{} 3".format(DOMAIN), {'platform': 'mod2'}),
]))
self.hass.block_till_done()
assert platform1_setup.called
assert platform2_setup.called
@patch('homeassistant.helpers.entity_component.EntityComponent'
'._async_setup_platform', return_value=mock_coro())
@patch('homeassistant.setup.async_setup_component',
return_value=mock_coro(True))
def test_setup_does_discovery(self, mock_setup_component, mock_setup):
"""Test setup for discovery."""
component = EntityComponent(_LOGGER, DOMAIN, self.hass)
component.setup({})
discovery.load_platform(self.hass, DOMAIN, 'platform_test',
{'msg': 'discovery_info'}, {DOMAIN: {}})
self.hass.block_till_done()
assert mock_setup.called
assert ('platform_test', {}, {'msg': 'discovery_info'}) == \
mock_setup.call_args[0]
@patch('homeassistant.helpers.entity_platform.'
'async_track_time_interval')
def test_set_scan_interval_via_config(self, mock_track):
"""Test the setting of the scan interval via configuration."""
def platform_setup(hass, config, add_entities, discovery_info=None):
"""Test the platform setup."""
add_entities([MockEntity(should_poll=True)])
mock_entity_platform(self.hass, 'test_domain.platform',
MockPlatform(platform_setup))
component = EntityComponent(_LOGGER, DOMAIN, self.hass)
component.setup({
DOMAIN: {
'platform': 'platform',
'scan_interval': timedelta(seconds=30),
}
})
self.hass.block_till_done()
assert mock_track.called
assert timedelta(seconds=30) == mock_track.call_args[0][2]
def test_set_entity_namespace_via_config(self):
"""Test setting an entity namespace."""
def platform_setup(hass, config, add_entities, discovery_info=None):
"""Test the platform setup."""
add_entities([
MockEntity(name='beer'),
MockEntity(name=None),
])
platform = MockPlatform(platform_setup)
mock_entity_platform(self.hass, 'test_domain.platform', platform)
component = EntityComponent(_LOGGER, DOMAIN, self.hass)
component.setup({
DOMAIN: {
'platform': 'platform',
'entity_namespace': 'yummy'
}
})
self.hass.block_till_done()
assert sorted(self.hass.states.entity_ids()) == \
['test_domain.yummy_beer', 'test_domain.yummy_unnamed_device']
@asyncio.coroutine
def test_extract_from_service_available_device(hass):
"""Test the extraction of entity from service and device is available."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
yield from component.async_add_entities([
MockEntity(name='test_1'),
MockEntity(name='test_2', available=False),
MockEntity(name='test_3'),
MockEntity(name='test_4', available=False),
])
call_1 = ha.ServiceCall('test', 'service')
assert ['test_domain.test_1', 'test_domain.test_3'] == \
sorted(ent.entity_id for ent in
(yield from component.async_extract_from_service(call_1)))
call_2 = ha.ServiceCall('test', 'service', data={
'entity_id': ['test_domain.test_3', 'test_domain.test_4'],
})
assert ['test_domain.test_3'] == \
sorted(ent.entity_id for ent in
(yield from component.async_extract_from_service(call_2)))
@asyncio.coroutine
def test_platform_not_ready(hass):
"""Test that we retry when platform not ready."""
platform1_setup = Mock(side_effect=[PlatformNotReady, PlatformNotReady,
None])
loader.set_component(hass, 'mod1',
MockModule('mod1'))
loader.set_component(hass, 'mod1.test_domain',
MockPlatform(platform1_setup))
component = EntityComponent(_LOGGER, DOMAIN, hass)
yield from component.async_setup({
DOMAIN: {
'platform': 'mod1'
}
})
assert len(platform1_setup.mock_calls) == 1
assert 'test_domain.mod1' not in hass.config.components
utcnow = dt_util.utcnow()
with patch('homeassistant.util.dt.utcnow', return_value=utcnow):
# Should not trigger attempt 2
async_fire_time_changed(hass, utcnow + timedelta(seconds=29))
yield from hass.async_block_till_done()
assert len(platform1_setup.mock_calls) == 1
# Should trigger attempt 2
async_fire_time_changed(hass, utcnow + timedelta(seconds=30))
yield from hass.async_block_till_done()
assert len(platform1_setup.mock_calls) == 2
assert 'test_domain.mod1' not in hass.config.components
# This should not trigger attempt 3
async_fire_time_changed(hass, utcnow + timedelta(seconds=59))
yield from hass.async_block_till_done()
assert len(platform1_setup.mock_calls) == 2
# Trigger attempt 3, which succeeds
async_fire_time_changed(hass, utcnow + timedelta(seconds=60))
yield from hass.async_block_till_done()
assert len(platform1_setup.mock_calls) == 3
assert 'test_domain.mod1' in hass.config.components
@asyncio.coroutine
def test_extract_from_service_returns_all_if_no_entity_id(hass):
"""Test the extraction of everything from service."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
yield from component.async_add_entities([
MockEntity(name='test_1'),
MockEntity(name='test_2'),
])
call = ha.ServiceCall('test', 'service')
assert ['test_domain.test_1', 'test_domain.test_2'] == \
sorted(ent.entity_id for ent in
(yield from component.async_extract_from_service(call)))
@asyncio.coroutine
def test_extract_from_service_filter_out_non_existing_entities(hass):
"""Test the extraction of non existing entities from service."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
yield from component.async_add_entities([
MockEntity(name='test_1'),
MockEntity(name='test_2'),
])
call = ha.ServiceCall('test', 'service', {
'entity_id': ['test_domain.test_2', 'test_domain.non_exist']
})
assert ['test_domain.test_2'] == \
[ent.entity_id for ent
in (yield from component.async_extract_from_service(call))]
@asyncio.coroutine
def test_extract_from_service_no_group_expand(hass):
"""Test not expanding a group."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
test_group = yield from group.Group.async_create_group(
hass, 'test_group', ['light.Ceiling', 'light.Kitchen'])
yield from component.async_add_entities([test_group])
call = ha.ServiceCall('test', 'service', {
'entity_id': ['group.test_group']
})
extracted = yield from component.async_extract_from_service(
call, expand_group=False)
assert extracted == [test_group]
@asyncio.coroutine
def test_setup_dependencies_platform(hass):
"""Test we setup the dependencies of a platform.
    We're explicitly testing that we process dependencies even if a component
with the same name has already been loaded.
"""
loader.set_component(hass, 'test_component',
MockModule('test_component',
dependencies=['test_component2']))
loader.set_component(hass, 'test_component2',
MockModule('test_component2'))
loader.set_component(hass, 'test_component.test_domain', MockPlatform())
component = EntityComponent(_LOGGER, DOMAIN, hass)
yield from component.async_setup({
DOMAIN: {
'platform': 'test_component',
}
})
assert 'test_component' in hass.config.components
assert 'test_component2' in hass.config.components
assert 'test_domain.test_component' in hass.config.components
async def test_setup_entry(hass):
"""Test setup entry calls async_setup_entry on platform."""
mock_setup_entry = Mock(return_value=mock_coro(True))
mock_entity_platform(
hass, 'test_domain.entry_domain',
MockPlatform(async_setup_entry=mock_setup_entry,
scan_interval=timedelta(seconds=5)))
component = EntityComponent(_LOGGER, DOMAIN, hass)
entry = MockConfigEntry(domain='entry_domain')
assert await component.async_setup_entry(entry)
assert len(mock_setup_entry.mock_calls) == 1
p_hass, p_entry, p_add_entities = mock_setup_entry.mock_calls[0][1]
assert p_hass is hass
assert p_entry is entry
assert component._platforms[entry.entry_id].scan_interval == \
timedelta(seconds=5)
async def test_setup_entry_platform_not_exist(hass):
"""Test setup entry fails if platform doesnt exist."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
entry = MockConfigEntry(domain='non_existing')
assert (await component.async_setup_entry(entry)) is False
async def test_setup_entry_fails_duplicate(hass):
"""Test we don't allow setting up a config entry twice."""
mock_setup_entry = Mock(return_value=mock_coro(True))
mock_entity_platform(
hass, 'test_domain.entry_domain',
MockPlatform(async_setup_entry=mock_setup_entry))
component = EntityComponent(_LOGGER, DOMAIN, hass)
entry = MockConfigEntry(domain='entry_domain')
assert await component.async_setup_entry(entry)
with pytest.raises(ValueError):
await component.async_setup_entry(entry)
async def test_unload_entry_resets_platform(hass):
"""Test unloading an entry removes all entities."""
mock_setup_entry = Mock(return_value=mock_coro(True))
mock_entity_platform(
hass, 'test_domain.entry_domain',
MockPlatform(async_setup_entry=mock_setup_entry))
component = EntityComponent(_LOGGER, DOMAIN, hass)
entry = MockConfigEntry(domain='entry_domain')
assert await component.async_setup_entry(entry)
assert len(mock_setup_entry.mock_calls) == 1
add_entities = mock_setup_entry.mock_calls[0][1][2]
add_entities([MockEntity()])
await hass.async_block_till_done()
assert len(hass.states.async_entity_ids()) == 1
assert await component.async_unload_entry(entry)
assert len(hass.states.async_entity_ids()) == 0
async def test_unload_entry_fails_if_never_loaded(hass):
"""."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
entry = MockConfigEntry(domain='entry_domain')
with pytest.raises(ValueError):
await component.async_unload_entry(entry)
async def test_update_entity(hass):
"""Test that we can update an entity with the helper."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
entity = MockEntity()
entity.async_update_ha_state = Mock(return_value=mock_coro())
await component.async_add_entities([entity])
# Called as part of async_add_entities
assert len(entity.async_update_ha_state.mock_calls) == 1
await hass.helpers.entity_component.async_update_entity(entity.entity_id)
assert len(entity.async_update_ha_state.mock_calls) == 2
assert entity.async_update_ha_state.mock_calls[-1][1][0] is True
async def test_set_service_race(hass):
"""Test race condition on setting service."""
exception = False
def async_loop_exception_handler(_, _2) -> None:
"""Handle all exception inside the core loop."""
nonlocal exception
exception = True
hass.loop.set_exception_handler(async_loop_exception_handler)
await async_setup_component(hass, 'group', {})
component = EntityComponent(_LOGGER, DOMAIN, hass, group_name='yo')
for i in range(2):
hass.async_create_task(component.async_add_entities([MockEntity()]))
await hass.async_block_till_done()
assert not exception
async def test_extract_all_omit_entity_id(hass, caplog):
"""Test extract all with None and *."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
await component.async_add_entities([
MockEntity(name='test_1'),
MockEntity(name='test_2'),
])
call = ha.ServiceCall('test', 'service')
assert ['test_domain.test_1', 'test_domain.test_2'] == \
sorted(ent.entity_id for ent in
await component.async_extract_from_service(call))
assert ('Not passing an entity ID to a service to target all entities is '
'deprecated') in caplog.text
async def test_extract_all_use_match_all(hass, caplog):
"""Test extract all with None and *."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
await component.async_add_entities([
MockEntity(name='test_1'),
MockEntity(name='test_2'),
])
call = ha.ServiceCall('test', 'service', {'entity_id': 'all'})
assert ['test_domain.test_1', 'test_domain.test_2'] == \
sorted(ent.entity_id for ent in
await component.async_extract_from_service(call))
assert ('Not passing an entity ID to a service to target all entities is '
'deprecated') not in caplog.text
| 35.230769
| 78
| 0.673006
|
import asyncio
from collections import OrderedDict
import logging
import unittest
from unittest.mock import patch, Mock
from datetime import timedelta
import pytest
import homeassistant.core as ha
import homeassistant.loader as loader
from homeassistant.exceptions import PlatformNotReady
from homeassistant.components import group
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.setup import setup_component, async_setup_component
from homeassistant.helpers import discovery
import homeassistant.util.dt as dt_util
from tests.common import (
get_test_home_assistant, MockPlatform, MockModule, mock_coro,
async_fire_time_changed, MockEntity, MockConfigEntry,
mock_entity_platform, mock_integration)
_LOGGER = logging.getLogger(__name__)
DOMAIN = "test_domain"
class TestHelpersEntityComponent(unittest.TestCase):
def setUp(self):
self.hass = get_test_home_assistant()
def tearDown(self):
self.hass.stop()
def test_setting_up_group(self):
setup_component(self.hass, 'group', {'group': {}})
component = EntityComponent(_LOGGER, DOMAIN, self.hass,
group_name='everyone')
assert len(self.hass.states.entity_ids()) == 0
component.add_entities([MockEntity()])
self.hass.block_till_done()
assert len(self.hass.states.entity_ids()) == 2
assert self.hass.states.entity_ids('group') == ['group.everyone']
group = self.hass.states.get('group.everyone')
assert group.attributes.get('entity_id') == \
('test_domain.unnamed_device',)
component.add_entities([MockEntity(name='goodbye')])
self.hass.block_till_done()
assert len(self.hass.states.entity_ids()) == 3
group = self.hass.states.get('group.everyone')
assert group.attributes.get('entity_id') == \
('test_domain.goodbye', 'test_domain.unnamed_device')
def test_setup_loads_platforms(self):
component_setup = Mock(return_value=True)
platform_setup = Mock(return_value=None)
mock_integration(self.hass,
MockModule('test_component', setup=component_setup))
mock_integration(self.hass,
MockModule('mod2', dependencies=['test_component']))
mock_entity_platform(self.hass, 'test_domain.mod2',
MockPlatform(platform_setup))
component = EntityComponent(_LOGGER, DOMAIN, self.hass)
assert not component_setup.called
assert not platform_setup.called
component.setup({
DOMAIN: {
'platform': 'mod2',
}
})
self.hass.block_till_done()
assert component_setup.called
assert platform_setup.called
def test_setup_recovers_when_setup_raises(self):
platform1_setup = Mock(side_effect=Exception('Broken'))
platform2_setup = Mock(return_value=None)
mock_entity_platform(self.hass, 'test_domain.mod1',
MockPlatform(platform1_setup))
mock_entity_platform(self.hass, 'test_domain.mod2',
MockPlatform(platform2_setup))
component = EntityComponent(_LOGGER, DOMAIN, self.hass)
assert not platform1_setup.called
assert not platform2_setup.called
component.setup(OrderedDict([
(DOMAIN, {'platform': 'mod1'}),
("{} 2".format(DOMAIN), {'platform': 'non_exist'}),
("{} 3".format(DOMAIN), {'platform': 'mod2'}),
]))
self.hass.block_till_done()
assert platform1_setup.called
assert platform2_setup.called
@patch('homeassistant.helpers.entity_component.EntityComponent'
'._async_setup_platform', return_value=mock_coro())
@patch('homeassistant.setup.async_setup_component',
return_value=mock_coro(True))
def test_setup_does_discovery(self, mock_setup_component, mock_setup):
component = EntityComponent(_LOGGER, DOMAIN, self.hass)
component.setup({})
discovery.load_platform(self.hass, DOMAIN, 'platform_test',
{'msg': 'discovery_info'}, {DOMAIN: {}})
self.hass.block_till_done()
assert mock_setup.called
assert ('platform_test', {}, {'msg': 'discovery_info'}) == \
mock_setup.call_args[0]
@patch('homeassistant.helpers.entity_platform.'
'async_track_time_interval')
def test_set_scan_interval_via_config(self, mock_track):
def platform_setup(hass, config, add_entities, discovery_info=None):
add_entities([MockEntity(should_poll=True)])
mock_entity_platform(self.hass, 'test_domain.platform',
MockPlatform(platform_setup))
component = EntityComponent(_LOGGER, DOMAIN, self.hass)
component.setup({
DOMAIN: {
'platform': 'platform',
'scan_interval': timedelta(seconds=30),
}
})
self.hass.block_till_done()
assert mock_track.called
assert timedelta(seconds=30) == mock_track.call_args[0][2]
def test_set_entity_namespace_via_config(self):
def platform_setup(hass, config, add_entities, discovery_info=None):
add_entities([
MockEntity(name='beer'),
MockEntity(name=None),
])
platform = MockPlatform(platform_setup)
mock_entity_platform(self.hass, 'test_domain.platform', platform)
component = EntityComponent(_LOGGER, DOMAIN, self.hass)
component.setup({
DOMAIN: {
'platform': 'platform',
'entity_namespace': 'yummy'
}
})
self.hass.block_till_done()
assert sorted(self.hass.states.entity_ids()) == \
['test_domain.yummy_beer', 'test_domain.yummy_unnamed_device']
@asyncio.coroutine
def test_extract_from_service_available_device(hass):
component = EntityComponent(_LOGGER, DOMAIN, hass)
yield from component.async_add_entities([
MockEntity(name='test_1'),
MockEntity(name='test_2', available=False),
MockEntity(name='test_3'),
MockEntity(name='test_4', available=False),
])
call_1 = ha.ServiceCall('test', 'service')
assert ['test_domain.test_1', 'test_domain.test_3'] == \
sorted(ent.entity_id for ent in
(yield from component.async_extract_from_service(call_1)))
call_2 = ha.ServiceCall('test', 'service', data={
'entity_id': ['test_domain.test_3', 'test_domain.test_4'],
})
assert ['test_domain.test_3'] == \
sorted(ent.entity_id for ent in
(yield from component.async_extract_from_service(call_2)))
@asyncio.coroutine
def test_platform_not_ready(hass):
platform1_setup = Mock(side_effect=[PlatformNotReady, PlatformNotReady,
None])
loader.set_component(hass, 'mod1',
MockModule('mod1'))
loader.set_component(hass, 'mod1.test_domain',
MockPlatform(platform1_setup))
component = EntityComponent(_LOGGER, DOMAIN, hass)
yield from component.async_setup({
DOMAIN: {
'platform': 'mod1'
}
})
assert len(platform1_setup.mock_calls) == 1
assert 'test_domain.mod1' not in hass.config.components
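# PlatformNotReady causes the setup to be retried every 30 seconds:
# no retry yet at +29s, second attempt at +30s, third attempt at +60s.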
utcnow = dt_util.utcnow()
with patch('homeassistant.util.dt.utcnow', return_value=utcnow):
async_fire_time_changed(hass, utcnow + timedelta(seconds=29))
yield from hass.async_block_till_done()
assert len(platform1_setup.mock_calls) == 1
async_fire_time_changed(hass, utcnow + timedelta(seconds=30))
yield from hass.async_block_till_done()
assert len(platform1_setup.mock_calls) == 2
assert 'test_domain.mod1' not in hass.config.components
async_fire_time_changed(hass, utcnow + timedelta(seconds=59))
yield from hass.async_block_till_done()
assert len(platform1_setup.mock_calls) == 2
async_fire_time_changed(hass, utcnow + timedelta(seconds=60))
yield from hass.async_block_till_done()
assert len(platform1_setup.mock_calls) == 3
assert 'test_domain.mod1' in hass.config.components
@asyncio.coroutine
def test_extract_from_service_returns_all_if_no_entity_id(hass):
component = EntityComponent(_LOGGER, DOMAIN, hass)
yield from component.async_add_entities([
MockEntity(name='test_1'),
MockEntity(name='test_2'),
])
call = ha.ServiceCall('test', 'service')
assert ['test_domain.test_1', 'test_domain.test_2'] == \
sorted(ent.entity_id for ent in
(yield from component.async_extract_from_service(call)))
@asyncio.coroutine
def test_extract_from_service_filter_out_non_existing_entities(hass):
component = EntityComponent(_LOGGER, DOMAIN, hass)
yield from component.async_add_entities([
MockEntity(name='test_1'),
MockEntity(name='test_2'),
])
call = ha.ServiceCall('test', 'service', {
'entity_id': ['test_domain.test_2', 'test_domain.non_exist']
})
assert ['test_domain.test_2'] == \
[ent.entity_id for ent
in (yield from component.async_extract_from_service(call))]
@asyncio.coroutine
def test_extract_from_service_no_group_expand(hass):
component = EntityComponent(_LOGGER, DOMAIN, hass)
test_group = yield from group.Group.async_create_group(
hass, 'test_group', ['light.Ceiling', 'light.Kitchen'])
yield from component.async_add_entities([test_group])
call = ha.ServiceCall('test', 'service', {
'entity_id': ['group.test_group']
})
extracted = yield from component.async_extract_from_service(
call, expand_group=False)
assert extracted == [test_group]
@asyncio.coroutine
def test_setup_dependencies_platform(hass):
loader.set_component(hass, 'test_component',
MockModule('test_component',
dependencies=['test_component2']))
loader.set_component(hass, 'test_component2',
MockModule('test_component2'))
loader.set_component(hass, 'test_component.test_domain', MockPlatform())
component = EntityComponent(_LOGGER, DOMAIN, hass)
yield from component.async_setup({
DOMAIN: {
'platform': 'test_component',
}
})
assert 'test_component' in hass.config.components
assert 'test_component2' in hass.config.components
assert 'test_domain.test_component' in hass.config.components
async def test_setup_entry(hass):
mock_setup_entry = Mock(return_value=mock_coro(True))
mock_entity_platform(
hass, 'test_domain.entry_domain',
MockPlatform(async_setup_entry=mock_setup_entry,
scan_interval=timedelta(seconds=5)))
component = EntityComponent(_LOGGER, DOMAIN, hass)
entry = MockConfigEntry(domain='entry_domain')
assert await component.async_setup_entry(entry)
assert len(mock_setup_entry.mock_calls) == 1
p_hass, p_entry, p_add_entities = mock_setup_entry.mock_calls[0][1]
assert p_hass is hass
assert p_entry is entry
assert component._platforms[entry.entry_id].scan_interval == \
timedelta(seconds=5)
async def test_setup_entry_platform_not_exist(hass):
component = EntityComponent(_LOGGER, DOMAIN, hass)
entry = MockConfigEntry(domain='non_existing')
assert (await component.async_setup_entry(entry)) is False
async def test_setup_entry_fails_duplicate(hass):
mock_setup_entry = Mock(return_value=mock_coro(True))
mock_entity_platform(
hass, 'test_domain.entry_domain',
MockPlatform(async_setup_entry=mock_setup_entry))
component = EntityComponent(_LOGGER, DOMAIN, hass)
entry = MockConfigEntry(domain='entry_domain')
assert await component.async_setup_entry(entry)
with pytest.raises(ValueError):
await component.async_setup_entry(entry)
async def test_unload_entry_resets_platform(hass):
mock_setup_entry = Mock(return_value=mock_coro(True))
mock_entity_platform(
hass, 'test_domain.entry_domain',
MockPlatform(async_setup_entry=mock_setup_entry))
component = EntityComponent(_LOGGER, DOMAIN, hass)
entry = MockConfigEntry(domain='entry_domain')
assert await component.async_setup_entry(entry)
assert len(mock_setup_entry.mock_calls) == 1
add_entities = mock_setup_entry.mock_calls[0][1][2]
add_entities([MockEntity()])
await hass.async_block_till_done()
assert len(hass.states.async_entity_ids()) == 1
assert await component.async_unload_entry(entry)
assert len(hass.states.async_entity_ids()) == 0
async def test_unload_entry_fails_if_never_loaded(hass):
component = EntityComponent(_LOGGER, DOMAIN, hass)
entry = MockConfigEntry(domain='entry_domain')
with pytest.raises(ValueError):
await component.async_unload_entry(entry)
async def test_update_entity(hass):
component = EntityComponent(_LOGGER, DOMAIN, hass)
entity = MockEntity()
entity.async_update_ha_state = Mock(return_value=mock_coro())
await component.async_add_entities([entity])
assert len(entity.async_update_ha_state.mock_calls) == 1
await hass.helpers.entity_component.async_update_entity(entity.entity_id)
assert len(entity.async_update_ha_state.mock_calls) == 2
assert entity.async_update_ha_state.mock_calls[-1][1][0] is True
async def test_set_service_race(hass):
exception = False
def async_loop_exception_handler(_, _2) -> None:
nonlocal exception
exception = True
hass.loop.set_exception_handler(async_loop_exception_handler)
await async_setup_component(hass, 'group', {})
component = EntityComponent(_LOGGER, DOMAIN, hass, group_name='yo')
for i in range(2):
hass.async_create_task(component.async_add_entities([MockEntity()]))
await hass.async_block_till_done()
assert not exception
async def test_extract_all_omit_entity_id(hass, caplog):
component = EntityComponent(_LOGGER, DOMAIN, hass)
await component.async_add_entities([
MockEntity(name='test_1'),
MockEntity(name='test_2'),
])
call = ha.ServiceCall('test', 'service')
assert ['test_domain.test_1', 'test_domain.test_2'] == \
sorted(ent.entity_id for ent in
await component.async_extract_from_service(call))
assert ('Not passing an entity ID to a service to target all entities is '
'deprecated') in caplog.text
async def test_extract_all_use_match_all(hass, caplog):
component = EntityComponent(_LOGGER, DOMAIN, hass)
await component.async_add_entities([
MockEntity(name='test_1'),
MockEntity(name='test_2'),
])
call = ha.ServiceCall('test', 'service', {'entity_id': 'all'})
assert ['test_domain.test_1', 'test_domain.test_2'] == \
sorted(ent.entity_id for ent in
await component.async_extract_from_service(call))
assert ('Not passing an entity ID to a service to target all entities is '
'deprecated') not in caplog.text
| true
| true
|
790b7e5bb86de78ff18ece74dc22b85fd64ae3b3
| 1,581
|
py
|
Python
|
src/lab/split_names.py
|
decisionscients/Airbnb
|
f61bc76e4d2806bb827f625fd4acbf14e783b97e
|
[
"BSD-3-Clause"
] | null | null | null |
src/lab/split_names.py
|
decisionscients/Airbnb
|
f61bc76e4d2806bb827f625fd4acbf14e783b97e
|
[
"BSD-3-Clause"
] | null | null | null |
src/lab/split_names.py
|
decisionscients/Airbnb
|
f61bc76e4d2806bb827f625fd4acbf14e783b97e
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# ============================================================================ #
# Project : Airbnb #
# Version : 0.1.0 #
# File : split_names.py #
# Python : 3.8.0 #
# ---------------------------------------------------------------------------- #
# Author : John James #
# Company: DecisionScients #
# Email : jjames@decisionscients.com #
# ---------------------------------------------------------------------------- #
# Created : Tuesday, 7th January 2020 10:22:44 am #
# Last Modified: Tuesday, 7th January 2020 10:22:44 am #
# Modified By : John James (jjames@decisionscients.com)                        #
# ---------------------------------------------------------------------------- #
# License: BSD #
# Copyright (c) 2020 DecisionScients #
# ============================================================================ #
#%%
import os
directory = "./data/raw/"
filenames = os.listdir(directory)
for filename in filenames:
name = filename.split(".")[0]
print(name)
# %%
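# %%
# Alternative sketch (assuming the same ./data/raw/ layout): pathlib's
# Path.stem strips only the final extension, so "listings.csv.gz" becomes
# "listings.csv", whereas split(".")[0] truncates it to "listings".
from pathlib import Path

for path in Path("./data/raw/").iterdir():
    if path.is_file():
        print(path.stem)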
| 52.7
| 80
| 0.253004
|
import os
directory = "./data/raw/"
filenames = os.listdir(directory)
for filename in filenames:
name = filename.split(".")[0]
print(name)
| true
| true
|
790b7eb82afd42b7665db5d5d6e164b7173cb21d
| 1,215
|
py
|
Python
|
tests/test_reward.py
|
AlejandroCN7/sinergym
|
4e89e478b5c939323e7ddf6a6ecf25a9a13251c6
|
[
"MIT"
] | null | null | null |
tests/test_reward.py
|
AlejandroCN7/sinergym
|
4e89e478b5c939323e7ddf6a6ecf25a9a13251c6
|
[
"MIT"
] | null | null | null |
tests/test_reward.py
|
AlejandroCN7/sinergym
|
4e89e478b5c939323e7ddf6a6ecf25a9a13251c6
|
[
"MIT"
] | null | null | null |
import pytest
import sinergym.utils.rewards as R
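# Note: `simple_reward` below is a pytest fixture assumed to be provided by
# tests/conftest.py; it constructs the reward object whose calculate() method
# is exercised here.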
@pytest.mark.parametrize(
'power,temperatures,month,day,reward,reward_energy,reward_comfort',
[
# Input 1
(
186.5929171535975,
[22.16742570092868],
3,
31,
-0.009329645857679876,
-0.018659291715359752,
-0.0
),
# Input 2
(
688.0477550424935,
[26.7881162590194],
3,
30,
-1.6784605172618248,
-0.06880477550424935,
-3.2881162590194
),
# Input 3
(
23168.30752221127,
[20.37505026953311],
2,
25,
-1.1584153761105636,
-2.316830752221127,
-0.0
),
]
)
def test_calculate(
simple_reward,
power,
temperatures,
month,
day,
reward,
reward_energy,
reward_comfort):
result = simple_reward.calculate(power, temperatures, month, day)
assert result[0] == reward
assert result[1]['reward_energy'] == reward_energy
assert result[1]['reward_comfort'] == reward_comfort
| 22.5
| 71
| 0.498765
|
import pytest
import sinergym.utils.rewards as R
@pytest.mark.parametrize(
'power,temperatures,month,day,reward,reward_energy,reward_comfort',
[
(
186.5929171535975,
[22.16742570092868],
3,
31,
-0.009329645857679876,
-0.018659291715359752,
-0.0
),
(
688.0477550424935,
[26.7881162590194],
3,
30,
-1.6784605172618248,
-0.06880477550424935,
-3.2881162590194
),
(
23168.30752221127,
[20.37505026953311],
2,
25,
-1.1584153761105636,
-2.316830752221127,
-0.0
),
]
)
def test_calculate(
simple_reward,
power,
temperatures,
month,
day,
reward,
reward_energy,
reward_comfort):
result = simple_reward.calculate(power, temperatures, month, day)
assert result[0] == reward
assert result[1]['reward_energy'] == reward_energy
assert result[1]['reward_comfort'] == reward_comfort
| true
| true
|
790b7f3b5f7aa5eabe58e81bbe44ccabc0528705
| 2,728
|
py
|
Python
|
automatminer/utils/tests/test_pkg.py
|
sgbaird/automatminer
|
9a3996e37672b547f10645b53b816ee670940d56
|
[
"BSD-3-Clause-LBNL"
] | 92
|
2018-11-28T17:36:42.000Z
|
2022-03-26T07:45:22.000Z
|
automatminer/utils/tests/test_pkg.py
|
sgbaird/automatminer
|
9a3996e37672b547f10645b53b816ee670940d56
|
[
"BSD-3-Clause-LBNL"
] | 249
|
2018-11-30T22:09:15.000Z
|
2022-01-06T22:25:12.000Z
|
automatminer/utils/tests/test_pkg.py
|
sgbaird/automatminer
|
9a3996e37672b547f10645b53b816ee670940d56
|
[
"BSD-3-Clause-LBNL"
] | 41
|
2018-11-21T22:33:52.000Z
|
2022-03-03T02:24:32.000Z
|
"""
Tests for assorted package utils.
"""
import os
import unittest
import pandas as pd
from automatminer import __version__
from automatminer.base import DFTransformer
from automatminer.utils.pkg import (
AMM_SUPPORTED_EXTS,
check_fitted,
compare_columns,
get_version,
save_dict_to_file,
set_fitted,
)
from sklearn.exceptions import NotFittedError
class MyTransformer(DFTransformer):
def __init__(self):
super(MyTransformer, self).__init__()
@set_fitted
def fit(self, df, target):
return df
@check_fitted
def transform(self, df, target):
return df
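# Illustrative sketch only (an assumption, not automatminer's actual
# implementation): fit-state decorators like the imported set_fitted and
# check_fitted are commonly written as below -- fit() flips an ``is_fit``
# flag on success, and transform() raises NotFittedError until it is set.
def _sketch_set_fitted(func):
    def wrapper(self, *args, **kwargs):
        result = func(self, *args, **kwargs)
        self.is_fit = True  # mark the instance as fitted once fit() succeeds
        return result
    return wrapper


def _sketch_check_fitted(func):
    def wrapper(self, *args, **kwargs):
        if not getattr(self, "is_fit", False):  # fail fast if fit() never ran
            raise NotFittedError("Call fit() before transform().")
        return func(self, *args, **kwargs)
    return wrapper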
class TestPackageTools(unittest.TestCase):
def setUp(self) -> None:
self.remnant_base_path = os.path.dirname(__file__)
self.remnant_file_prefix = "saved"
def test_compare_columns(self):
df1 = pd.DataFrame({"a": [1, 2], "b": [2, 3]})
df2 = pd.DataFrame({"b": [3, 4], "c": [4, 5]})
comparison = compare_columns(df1, df2)
self.assertTrue(comparison["mismatch"])
self.assertListEqual(comparison["df1_not_in_df2"], ["a"])
self.assertListEqual(comparison["df2_not_in_df1"], ["c"])
comparison2 = compare_columns(df1, df1)
self.assertFalse(comparison2["mismatch"])
comparison3 = compare_columns(df1, df2, ignore=["c"])
self.assertTrue(comparison3["mismatch"])
self.assertListEqual(comparison3["df1_not_in_df2"], ["a"])
self.assertListEqual(comparison3["df2_not_in_df1"], [])
def test_fitting_decorations(self):
df = pd.DataFrame({"a": [1, 2], "b": [2, 3]})
mt = MyTransformer()
self.assertFalse(mt.is_fit)
mt.fit(df, "")
self.assertTrue(mt.is_fit)
df = mt.transform(df, "")
mt2 = MyTransformer()
self.assertRaises(NotFittedError, mt2.transform, [df, ""])
def test_save_dict_to_file(self):
test_dict = {"a": "A", "b": 1, "c": [1, "q"], "d": {"m": [3, 4]}}
for ext in AMM_SUPPORTED_EXTS:
filename = self._get_remnant_path(ext)
save_dict_to_file(test_dict, filename=filename)
self.assertTrue(os.path.isfile(filename))
def test_get_version(self):
v = get_version()
self.assertEqual(v, __version__)
def tearDown(self) -> None:
remnants = [self._get_remnant_path(ext) for ext in AMM_SUPPORTED_EXTS]
for remnant in remnants:
if os.path.exists(remnant):
os.remove(remnant)
def _get_remnant_path(self, ext):
relative_fname = self.remnant_file_prefix + ext
filename = os.path.join(self.remnant_base_path, relative_fname)
return filename
if __name__ == "__main__":
unittest.main()
| 29.652174
| 78
| 0.637097
|
import os
import unittest
import pandas as pd
from automatminer import __version__
from automatminer.base import DFTransformer
from automatminer.utils.pkg import (
AMM_SUPPORTED_EXTS,
check_fitted,
compare_columns,
get_version,
save_dict_to_file,
set_fitted,
)
from sklearn.exceptions import NotFittedError
class MyTransformer(DFTransformer):
def __init__(self):
super(MyTransformer, self).__init__()
@set_fitted
def fit(self, df, target):
return df
@check_fitted
def transform(self, df, target):
return df
class TestPackageTools(unittest.TestCase):
def setUp(self) -> None:
self.remnant_base_path = os.path.dirname(__file__)
self.remnant_file_prefix = "saved"
def test_compare_columns(self):
df1 = pd.DataFrame({"a": [1, 2], "b": [2, 3]})
df2 = pd.DataFrame({"b": [3, 4], "c": [4, 5]})
comparison = compare_columns(df1, df2)
self.assertTrue(comparison["mismatch"])
self.assertListEqual(comparison["df1_not_in_df2"], ["a"])
self.assertListEqual(comparison["df2_not_in_df1"], ["c"])
comparison2 = compare_columns(df1, df1)
self.assertFalse(comparison2["mismatch"])
comparison3 = compare_columns(df1, df2, ignore=["c"])
self.assertTrue(comparison3["mismatch"])
self.assertListEqual(comparison3["df1_not_in_df2"], ["a"])
self.assertListEqual(comparison3["df2_not_in_df1"], [])
def test_fitting_decorations(self):
df = pd.DataFrame({"a": [1, 2], "b": [2, 3]})
mt = MyTransformer()
self.assertFalse(mt.is_fit)
mt.fit(df, "")
self.assertTrue(mt.is_fit)
df = mt.transform(df, "")
mt2 = MyTransformer()
self.assertRaises(NotFittedError, mt2.transform, [df, ""])
def test_save_dict_to_file(self):
test_dict = {"a": "A", "b": 1, "c": [1, "q"], "d": {"m": [3, 4]}}
for ext in AMM_SUPPORTED_EXTS:
filename = self._get_remnant_path(ext)
save_dict_to_file(test_dict, filename=filename)
self.assertTrue(os.path.isfile(filename))
def test_get_version(self):
v = get_version()
self.assertEqual(v, __version__)
def tearDown(self) -> None:
remnants = [self._get_remnant_path(ext) for ext in AMM_SUPPORTED_EXTS]
for remnant in remnants:
if os.path.exists(remnant):
os.remove(remnant)
def _get_remnant_path(self, ext):
relative_fname = self.remnant_file_prefix + ext
filename = os.path.join(self.remnant_base_path, relative_fname)
return filename
if __name__ == "__main__":
unittest.main()
| true
| true
|
790b7f78f12dd92d1977e1fc6cc9107b15ec9c8d
| 8,916
|
py
|
Python
|
supar/structs/fn.py
|
zysite/parser
|
8ed9ccb8e542655fd6fd1b6f7faaf084d13a866e
|
[
"MIT"
] | 6
|
2018-10-26T14:08:45.000Z
|
2019-01-26T02:42:17.000Z
|
supar/structs/fn.py
|
zysite/parser
|
8ed9ccb8e542655fd6fd1b6f7faaf084d13a866e
|
[
"MIT"
] | 1
|
2019-01-16T14:44:29.000Z
|
2019-01-17T07:55:38.000Z
|
supar/structs/fn.py
|
zysite/parser
|
8ed9ccb8e542655fd6fd1b6f7faaf084d13a866e
|
[
"MIT"
] | 1
|
2018-12-31T07:43:16.000Z
|
2018-12-31T07:43:16.000Z
|
# -*- coding: utf-8 -*-
import torch
from supar.utils.common import MIN
from supar.utils.fn import pad
from torch.autograd import Function
def tarjan(sequence):
r"""
Tarjan algorithm for finding Strongly Connected Components (SCCs) of a graph.
Args:
sequence (list):
List of head indices.
Yields:
A list of indices making up a SCC. All self-loops are ignored.
Examples:
>>> next(tarjan([2, 5, 0, 3, 1])) # (1 -> 5 -> 2 -> 1) is a cycle
[2, 5, 1]
"""
sequence = [-1] + sequence
# record the search order, i.e., the timestep
dfn = [-1] * len(sequence)
# record the smallest timestep in a SCC
low = [-1] * len(sequence)
# push the visited into the stack
stack, onstack = [], [False] * len(sequence)
def connect(i, timestep):
dfn[i] = low[i] = timestep[0]
timestep[0] += 1
stack.append(i)
onstack[i] = True
for j, head in enumerate(sequence):
if head != i:
continue
if dfn[j] == -1:
yield from connect(j, timestep)
low[i] = min(low[i], low[j])
elif onstack[j]:
low[i] = min(low[i], dfn[j])
# a SCC is completed
if low[i] == dfn[i]:
cycle = [stack.pop()]
while cycle[-1] != i:
onstack[cycle[-1]] = False
cycle.append(stack.pop())
onstack[i] = False
# ignore the self-loop
if len(cycle) > 1:
yield cycle
timestep = [0]
for i in range(len(sequence)):
if dfn[i] == -1:
yield from connect(i, timestep)
def chuliu_edmonds(s):
r"""
ChuLiu/Edmonds algorithm for non-projective decoding :cite:`mcdonald-etal-2005-non`.
Some code is borrowed from `tdozat's implementation`_.
Descriptions of notations and formulas can be found in :cite:`mcdonald-etal-2005-non`.
Notes:
The algorithm does not guarantee a single-root tree.
Args:
s (~torch.Tensor): ``[seq_len, seq_len]``.
Scores of all dependent-head pairs.
Returns:
~torch.Tensor:
A tensor with shape ``[seq_len]`` for the resulting non-projective parse tree.
.. _tdozat's implementation:
https://github.com/tdozat/Parser-v3
"""
s[0, 1:] = MIN
# prevent self-loops
s.diagonal()[1:].fill_(MIN)
# select heads with highest scores
tree = s.argmax(-1)
# lazily return the first cycle found by the tarjan algorithm
cycle = next(tarjan(tree.tolist()[1:]), None)
# if the tree has no cycles, then it is an MST
if not cycle:
return tree
# indices of cycle in the original tree
cycle = torch.tensor(cycle)
# indices of noncycle in the original tree
noncycle = torch.ones(len(s)).index_fill_(0, cycle, 0)
noncycle = torch.where(noncycle.gt(0))[0]
def contract(s):
# heads of cycle in original tree
cycle_heads = tree[cycle]
# scores of cycle in original tree
s_cycle = s[cycle, cycle_heads]
# calculate the scores of cycle's potential dependents
# s(c->x) = max(s(x'->x)), x in noncycle and x' in cycle
s_dep = s[noncycle][:, cycle]
# find the best cycle head for each noncycle dependent
deps = s_dep.argmax(1)
# calculate the scores of cycle's potential heads
# s(x->c) = max(s(x'->x) - s(a(x')->x') + s(cycle)), x in noncycle and x' in cycle
# a(v) is the predecessor of v in cycle
# s(cycle) = sum(s(a(v)->v))
s_head = s[cycle][:, noncycle] - s_cycle.view(-1, 1) + s_cycle.sum()
# find the best noncycle head for each cycle dependent
heads = s_head.argmax(0)
contracted = torch.cat((noncycle, torch.tensor([-1])))
# calculate the scores of contracted graph
s = s[contracted][:, contracted]
# set the contracted graph scores of cycle's potential dependents
s[:-1, -1] = s_dep[range(len(deps)), deps]
# set the contracted graph scores of cycle's potential heads
s[-1, :-1] = s_head[heads, range(len(heads))]
return s, heads, deps
# keep track of the endpoints of the edges into and out of cycle for reconstruction later
s, heads, deps = contract(s)
# y is the contracted tree
y = chuliu_edmonds(s)
# exclude head of cycle from y
y, cycle_head = y[:-1], y[-1]
# fix the subtree with no heads coming from the cycle
# a head value equal to len(y) means the head is the contracted cycle node
subtree = y < len(y)
# add the nodes to the new tree
tree[noncycle[subtree]] = noncycle[y[subtree]]
# fix the subtree with heads coming from the cycle
subtree = ~subtree
# add the nodes to the tree
tree[noncycle[subtree]] = cycle[deps[subtree]]
# fix the root of the cycle
cycle_root = heads[cycle_head]
# break the cycle and add the root of the cycle to the tree
tree[cycle[cycle_root]] = noncycle[cycle_head]
return tree
def mst(scores, mask, multiroot=False):
r"""
MST algorithm for decoding non-projective trees.
This is a wrapper for ChuLiu/Edmonds algorithm.
The algorithm first runs ChuLiu/Edmonds to parse a tree and then checks for multiple roots.
If ``multiroot=False`` and the parsed tree has more than one root, the algorithm searches for the
best single-root tree by iterating over all candidate single-root trees parsed by ChuLiu/Edmonds.
Otherwise the resulting trees are taken directly as the final outputs.
Args:
scores (~torch.Tensor): ``[batch_size, seq_len, seq_len]``.
Scores of all dependent-head pairs.
mask (~torch.BoolTensor): ``[batch_size, seq_len]``.
The mask to avoid parsing over padding tokens.
The first column serving as pseudo words for roots should be ``False``.
multiroot (bool):
Ensures a single-root tree is parsed if ``False``.
Returns:
~torch.Tensor:
A tensor with shape ``[batch_size, seq_len]`` for the resulting non-projective parse trees.
Examples:
>>> scores = torch.tensor([[[-11.9436, -13.1464, -6.4789, -13.8917],
[-60.6957, -60.2866, -48.6457, -63.8125],
[-38.1747, -49.9296, -45.2733, -49.5571],
[-19.7504, -23.9066, -9.9139, -16.2088]]])
>>> scores[:, 0, 1:] = MIN
>>> scores.diagonal(0, 1, 2)[1:].fill_(MIN)
>>> mask = torch.tensor([[False, True, True, True]])
>>> mst(scores, mask)
tensor([[0, 2, 0, 2]])
"""
batch_size, seq_len, _ = scores.shape
scores = scores.cpu().unbind()
preds = []
for i, length in enumerate(mask.sum(1).tolist()):
s = scores[i][:length+1, :length+1]
tree = chuliu_edmonds(s)
roots = torch.where(tree[1:].eq(0))[0] + 1
if not multiroot and len(roots) > 1:
s_root = s[:, 0]
s_best = MIN
s = s.index_fill(1, torch.tensor(0), MIN)
for root in roots:
s[:, 0] = MIN
s[root, 0] = s_root[root]
t = chuliu_edmonds(s)
s_tree = s[1:].gather(1, t[1:].unsqueeze(-1)).sum()
if s_tree > s_best:
s_best, tree = s_tree, t
preds.append(tree)
return pad(preds, total_length=seq_len).to(mask.device)
class SampledLogsumexp(Function):
@staticmethod
def forward(ctx, x, dim=-1):
ctx.dim = dim
ctx.save_for_backward(x)
return x.logsumexp(dim=dim)
@staticmethod
def backward(ctx, grad_output):
from torch.distributions import OneHotCategorical
x, dim = ctx.saved_tensors, ctx.dim
if ctx.needs_input_grad[0]:
return grad_output.unsqueeze(dim).mul(OneHotCategorical(logits=x.movedim(dim, -1)).sample().movedim(-1, dim)), None
return None, None
class Sparsemax(Function):
@staticmethod
def forward(ctx, x, dim=-1):
ctx.dim = dim
sorted_x, _ = x.sort(dim, True)
z = sorted_x.cumsum(dim) - 1
k = x.new_tensor(range(1, sorted_x.size(dim) + 1)).view(-1, *[1] * (x.dim() - 1)).transpose(0, dim)
k = (k * sorted_x).gt(z).sum(dim, True)
tau = z.gather(dim, k - 1) / k
p = torch.clamp(x - tau, 0)
ctx.save_for_backward(k, p)
return p
@staticmethod
def backward(ctx, grad_output):
k, p, dim = *ctx.saved_tensors, ctx.dim
grad = grad_output.masked_fill(p.eq(0), 0)
grad = torch.where(p.ne(0), grad - grad.sum(dim, True) / k, grad)
return grad, None
sampled_logsumexp = SampledLogsumexp.apply
sparsemax = Sparsemax.apply
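# Usage sketch (illustrative, not part of the module API): sparsemax projects
# raw scores onto the probability simplex and zeroes out low-scoring entries,
# while sampled_logsumexp matches logsumexp in the forward pass but
# backpropagates through a single categorical sample.
if __name__ == '__main__':
    x = torch.tensor([0.5, 0.4, -1.0])
    p = sparsemax(x)
    print(p, p.sum())  # tensor([0.5500, 0.4500, 0.0000]), sums to 1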
| 34.55814
| 127
| 0.574361
|
import torch
from supar.utils.common import MIN
from supar.utils.fn import pad
from torch.autograd import Function
def tarjan(sequence):
sequence = [-1] + sequence
dfn = [-1] * len(sequence)
low = [-1] * len(sequence)
stack, onstack = [], [False] * len(sequence)
def connect(i, timestep):
dfn[i] = low[i] = timestep[0]
timestep[0] += 1
stack.append(i)
onstack[i] = True
for j, head in enumerate(sequence):
if head != i:
continue
if dfn[j] == -1:
yield from connect(j, timestep)
low[i] = min(low[i], low[j])
elif onstack[j]:
low[i] = min(low[i], dfn[j])
if low[i] == dfn[i]:
cycle = [stack.pop()]
while cycle[-1] != i:
onstack[cycle[-1]] = False
cycle.append(stack.pop())
onstack[i] = False
if len(cycle) > 1:
yield cycle
timestep = [0]
for i in range(len(sequence)):
if dfn[i] == -1:
yield from connect(i, timestep)
def chuliu_edmonds(s):
s[0, 1:] = MIN
s.diagonal()[1:].fill_(MIN)
tree = s.argmax(-1)
cycle = next(tarjan(tree.tolist()[1:]), None)
if not cycle:
return tree
cycle = torch.tensor(cycle)
noncycle = torch.ones(len(s)).index_fill_(0, cycle, 0)
noncycle = torch.where(noncycle.gt(0))[0]
def contract(s):
cycle_heads = tree[cycle]
s_cycle = s[cycle, cycle_heads]
# s(c->x) = max(s(x'->x)), x in noncycle and x' in cycle
s_dep = s[noncycle][:, cycle]
# find the best cycle head for each noncycle dependent
deps = s_dep.argmax(1)
# calculate the scores of cycle's potential heads
s_head = s[cycle][:, noncycle] - s_cycle.view(-1, 1) + s_cycle.sum()
heads = s_head.argmax(0)
contracted = torch.cat((noncycle, torch.tensor([-1])))
s = s[contracted][:, contracted]
s[:-1, -1] = s_dep[range(len(deps)), deps]
# set the contracted graph scores of cycle's potential heads
s[-1, :-1] = s_head[heads, range(len(heads))]
return s, heads, deps
s, heads, deps = contract(s)
y = chuliu_edmonds(s)
y, cycle_head = y[:-1], y[-1]
subtree = y < len(y)
tree[noncycle[subtree]] = noncycle[y[subtree]]
subtree = ~subtree
tree[noncycle[subtree]] = cycle[deps[subtree]]
cycle_root = heads[cycle_head]
tree[cycle[cycle_root]] = noncycle[cycle_head]
return tree
def mst(scores, mask, multiroot=False):
batch_size, seq_len, _ = scores.shape
scores = scores.cpu().unbind()
preds = []
for i, length in enumerate(mask.sum(1).tolist()):
s = scores[i][:length+1, :length+1]
tree = chuliu_edmonds(s)
roots = torch.where(tree[1:].eq(0))[0] + 1
if not multiroot and len(roots) > 1:
s_root = s[:, 0]
s_best = MIN
s = s.index_fill(1, torch.tensor(0), MIN)
for root in roots:
s[:, 0] = MIN
s[root, 0] = s_root[root]
t = chuliu_edmonds(s)
s_tree = s[1:].gather(1, t[1:].unsqueeze(-1)).sum()
if s_tree > s_best:
s_best, tree = s_tree, t
preds.append(tree)
return pad(preds, total_length=seq_len).to(mask.device)
class SampledLogsumexp(Function):
@staticmethod
def forward(ctx, x, dim=-1):
ctx.dim = dim
ctx.save_for_backward(x)
return x.logsumexp(dim=dim)
@staticmethod
def backward(ctx, grad_output):
from torch.distributions import OneHotCategorical
x, dim = ctx.saved_tensors, ctx.dim
if ctx.needs_input_grad[0]:
return grad_output.unsqueeze(dim).mul(OneHotCategorical(logits=x.movedim(dim, -1)).sample().movedim(-1, dim)), None
return None, None
class Sparsemax(Function):
@staticmethod
def forward(ctx, x, dim=-1):
ctx.dim = dim
sorted_x, _ = x.sort(dim, True)
z = sorted_x.cumsum(dim) - 1
k = x.new_tensor(range(1, sorted_x.size(dim) + 1)).view(-1, *[1] * (x.dim() - 1)).transpose(0, dim)
k = (k * sorted_x).gt(z).sum(dim, True)
tau = z.gather(dim, k - 1) / k
p = torch.clamp(x - tau, 0)
ctx.save_for_backward(k, p)
return p
@staticmethod
def backward(ctx, grad_output):
k, p, dim = *ctx.saved_tensors, ctx.dim
grad = grad_output.masked_fill(p.eq(0), 0)
grad = torch.where(p.ne(0), grad - grad.sum(dim, True) / k, grad)
return grad, None
sampled_logsumexp = SampledLogsumexp.apply
sparsemax = Sparsemax.apply
| true
| true
|
790b7fce83d886900a5fcc3446fe99b2c83b3bf4
| 9,579
|
py
|
Python
|
cbpro/order_book.py
|
ibraaaa/coinbasepro-python
|
0a9dbd86a25ae266d0e0eefeb112368c284b7dcc
|
[
"MIT"
] | 5
|
2019-05-04T01:30:34.000Z
|
2020-11-26T05:07:08.000Z
|
cbpro/order_book.py
|
ibraaaa/coinbasepro-python
|
0a9dbd86a25ae266d0e0eefeb112368c284b7dcc
|
[
"MIT"
] | 4
|
2021-03-19T02:41:55.000Z
|
2022-03-11T23:55:52.000Z
|
cbpro/order_book.py
|
ibraaaa/coinbasepro-python
|
0a9dbd86a25ae266d0e0eefeb112368c284b7dcc
|
[
"MIT"
] | 2
|
2018-01-15T23:37:49.000Z
|
2018-06-24T16:27:53.000Z
|
#
# cbpro/order_book.py
# David Caseria
#
# Live order book updated from the Coinbase Websocket Feed
from sortedcontainers import SortedDict
from decimal import Decimal
import pickle
from cbpro.public_client import PublicClient
from cbpro.websocket_client import WebsocketClient
class OrderBook(WebsocketClient):
def __init__(self, product_id='BTC-USD', log_to=None):
super(OrderBook, self).__init__(products=product_id)
self._asks = SortedDict()
self._bids = SortedDict()
self._client = PublicClient()
self._sequence = -1
self._log_to = log_to
if self._log_to:
assert hasattr(self._log_to, 'write')
self._current_ticker = None
@property
def product_id(self):
''' Currently OrderBook only supports a single product even though it is stored as a list of products. '''
return self.products[0]
def on_open(self):
self._sequence = -1
print("-- Subscribed to OrderBook! --\n")
def on_close(self):
print("\n-- OrderBook Socket Closed! --")
def reset_book(self):
self._asks = SortedDict()
self._bids = SortedDict()
res = self._client.get_product_order_book(product_id=self.product_id, level=3)
for bid in res['bids']:
self.add({
'id': bid[2],
'side': 'buy',
'price': Decimal(bid[0]),
'size': Decimal(bid[1])
})
for ask in res['asks']:
self.add({
'id': ask[2],
'side': 'sell',
'price': Decimal(ask[0]),
'size': Decimal(ask[1])
})
self._sequence = res['sequence']
def on_message(self, message):
if self._log_to:
pickle.dump(message, self._log_to)
sequence = message.get('sequence', -1)
if self._sequence == -1:
self.reset_book()
return
if sequence <= self._sequence:
# ignore older messages (e.g. before order book initialization from getProductOrderBook)
return
elif sequence > self._sequence + 1:
self.on_sequence_gap(self._sequence, sequence)
return
msg_type = message['type']
if msg_type == 'open':
self.add(message)
elif msg_type == 'done' and 'price' in message:
self.remove(message)
elif msg_type == 'match':
self.match(message)
self._current_ticker = message
elif msg_type == 'change':
self.change(message)
self._sequence = sequence
def on_sequence_gap(self, gap_start, gap_end):
self.reset_book()
print('Error: messages missing ({} - {}). Re-initializing book at sequence {}.'.format(
gap_start, gap_end, self._sequence))
def add(self, order):
order = {
'id': order.get('order_id') or order['id'],
'side': order['side'],
'price': Decimal(order['price']),
'size': Decimal(order.get('size') or order['remaining_size'])
}
if order['side'] == 'buy':
bids = self.get_bids(order['price'])
if bids is None:
bids = [order]
else:
bids.append(order)
self.set_bids(order['price'], bids)
else:
asks = self.get_asks(order['price'])
if asks is None:
asks = [order]
else:
asks.append(order)
self.set_asks(order['price'], asks)
def remove(self, order):
price = Decimal(order['price'])
if order['side'] == 'buy':
bids = self.get_bids(price)
if bids is not None:
bids = [o for o in bids if o['id'] != order['order_id']]
if len(bids) > 0:
self.set_bids(price, bids)
else:
self.remove_bids(price)
else:
asks = self.get_asks(price)
if asks is not None:
asks = [o for o in asks if o['id'] != order['order_id']]
if len(asks) > 0:
self.set_asks(price, asks)
else:
self.remove_asks(price)
def match(self, order):
size = Decimal(order['size'])
price = Decimal(order['price'])
if order['side'] == 'buy':
bids = self.get_bids(price)
if not bids:
return
assert bids[0]['id'] == order['maker_order_id']
if bids[0]['size'] == size:
self.set_bids(price, bids[1:])
else:
bids[0]['size'] -= size
self.set_bids(price, bids)
else:
asks = self.get_asks(price)
if not asks:
return
assert asks[0]['id'] == order['maker_order_id']
if asks[0]['size'] == size:
self.set_asks(price, asks[1:])
else:
asks[0]['size'] -= size
self.set_asks(price, asks)
def change(self, order):
try:
new_size = Decimal(order['new_size'])
except KeyError:
return
try:
price = Decimal(order['price'])
except KeyError:
return
if order['side'] == 'buy':
bids = self.get_bids(price)
if bids is None or not any(o['id'] == order['order_id'] for o in bids):
return
index = [b['id'] for b in bids].index(order['order_id'])
bids[index]['size'] = new_size
self.set_bids(price, bids)
else:
asks = self.get_asks(price)
if asks is None or not any(o['id'] == order['order_id'] for o in asks):
return
index = [a['id'] for a in asks].index(order['order_id'])
asks[index]['size'] = new_size
self.set_asks(price, asks)
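# Redundant final check below: the order was just updated above, so this
# lookup normally finds it and the early return is never taken.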
tree = self._asks if order['side'] == 'sell' else self._bids
node = tree.get(price)
if node is None or not any(o['id'] == order['order_id'] for o in node):
return
def get_current_ticker(self):
return self._current_ticker
def get_current_book(self):
result = {
'sequence': self._sequence,
'asks': [],
'bids': [],
}
for ask in self._asks:
try:
# There can be a race condition here, where a price point is removed
# between these two ops
this_ask = self._asks[ask]
except KeyError:
continue
for order in this_ask:
result['asks'].append([order['price'], order['size'], order['id']])
for bid in self._bids:
try:
# There can be a race condition here, where a price point is removed
# between these two ops
this_bid = self._bids[bid]
except KeyError:
continue
for order in this_bid:
result['bids'].append([order['price'], order['size'], order['id']])
return result
def get_ask(self):
return self._asks.peekitem(0)[0]
def get_asks(self, price):
return self._asks.get(price)
def remove_asks(self, price):
del self._asks[price]
def set_asks(self, price, asks):
self._asks[price] = asks
def get_bid(self):
return self._bids.peekitem(-1)[0]
def get_bids(self, price):
return self._bids.get(price)
def remove_bids(self, price):
del self._bids[price]
def set_bids(self, price, bids):
self._bids[price] = bids
if __name__ == '__main__':
import sys
import time
import datetime as dt
class OrderBookConsole(OrderBook):
''' Logs real-time changes to the bid-ask spread to the console '''
def __init__(self, product_id=None):
super(OrderBookConsole, self).__init__(product_id=product_id)
# latest values of bid-ask spread
self._bid = None
self._ask = None
self._bid_depth = None
self._ask_depth = None
def on_message(self, message):
super(OrderBookConsole, self).on_message(message)
# Calculate newest bid-ask spread
bid = self.get_bid()
bids = self.get_bids(bid)
bid_depth = sum([b['size'] for b in bids])
ask = self.get_ask()
asks = self.get_asks(ask)
ask_depth = sum([a['size'] for a in asks])
if self._bid == bid and self._ask == ask and self._bid_depth == bid_depth and self._ask_depth == ask_depth:
# If there are no changes to the bid-ask spread since the last update, no need to print
pass
else:
# If there are differences, update the cache
self._bid = bid
self._ask = ask
self._bid_depth = bid_depth
self._ask_depth = ask_depth
print('{} {} bid: {:.3f} @ {:.2f}\task: {:.3f} @ {:.2f}'.format(
dt.datetime.now(), self.product_id, bid_depth, bid, ask_depth, ask))
order_book = OrderBookConsole()
order_book.start()
try:
while True:
time.sleep(10)
except KeyboardInterrupt:
order_book.close()
if order_book.error:
sys.exit(1)
else:
sys.exit(0)
| 32.036789
| 119
| 0.524167
|
from sortedcontainers import SortedDict
from decimal import Decimal
import pickle
from cbpro.public_client import PublicClient
from cbpro.websocket_client import WebsocketClient
class OrderBook(WebsocketClient):
def __init__(self, product_id='BTC-USD', log_to=None):
super(OrderBook, self).__init__(products=product_id)
self._asks = SortedDict()
self._bids = SortedDict()
self._client = PublicClient()
self._sequence = -1
self._log_to = log_to
if self._log_to:
assert hasattr(self._log_to, 'write')
self._current_ticker = None
@property
def product_id(self):
return self.products[0]
def on_open(self):
self._sequence = -1
print("-- Subscribed to OrderBook! --\n")
def on_close(self):
print("\n-- OrderBook Socket Closed! --")
def reset_book(self):
self._asks = SortedDict()
self._bids = SortedDict()
res = self._client.get_product_order_book(product_id=self.product_id, level=3)
for bid in res['bids']:
self.add({
'id': bid[2],
'side': 'buy',
'price': Decimal(bid[0]),
'size': Decimal(bid[1])
})
for ask in res['asks']:
self.add({
'id': ask[2],
'side': 'sell',
'price': Decimal(ask[0]),
'size': Decimal(ask[1])
})
self._sequence = res['sequence']
def on_message(self, message):
if self._log_to:
pickle.dump(message, self._log_to)
sequence = message.get('sequence', -1)
if self._sequence == -1:
self.reset_book()
return
if sequence <= self._sequence:
return
elif sequence > self._sequence + 1:
self.on_sequence_gap(self._sequence, sequence)
return
msg_type = message['type']
if msg_type == 'open':
self.add(message)
elif msg_type == 'done' and 'price' in message:
self.remove(message)
elif msg_type == 'match':
self.match(message)
self._current_ticker = message
elif msg_type == 'change':
self.change(message)
self._sequence = sequence
def on_sequence_gap(self, gap_start, gap_end):
self.reset_book()
print('Error: messages missing ({} - {}). Re-initializing book at sequence {}.'.format(
gap_start, gap_end, self._sequence))
def add(self, order):
order = {
'id': order.get('order_id') or order['id'],
'side': order['side'],
'price': Decimal(order['price']),
'size': Decimal(order.get('size') or order['remaining_size'])
}
if order['side'] == 'buy':
bids = self.get_bids(order['price'])
if bids is None:
bids = [order]
else:
bids.append(order)
self.set_bids(order['price'], bids)
else:
asks = self.get_asks(order['price'])
if asks is None:
asks = [order]
else:
asks.append(order)
self.set_asks(order['price'], asks)
def remove(self, order):
price = Decimal(order['price'])
if order['side'] == 'buy':
bids = self.get_bids(price)
if bids is not None:
bids = [o for o in bids if o['id'] != order['order_id']]
if len(bids) > 0:
self.set_bids(price, bids)
else:
self.remove_bids(price)
else:
asks = self.get_asks(price)
if asks is not None:
asks = [o for o in asks if o['id'] != order['order_id']]
if len(asks) > 0:
self.set_asks(price, asks)
else:
self.remove_asks(price)
def match(self, order):
size = Decimal(order['size'])
price = Decimal(order['price'])
if order['side'] == 'buy':
bids = self.get_bids(price)
if not bids:
return
assert bids[0]['id'] == order['maker_order_id']
if bids[0]['size'] == size:
self.set_bids(price, bids[1:])
else:
bids[0]['size'] -= size
self.set_bids(price, bids)
else:
asks = self.get_asks(price)
if not asks:
return
assert asks[0]['id'] == order['maker_order_id']
if asks[0]['size'] == size:
self.set_asks(price, asks[1:])
else:
asks[0]['size'] -= size
self.set_asks(price, asks)
def change(self, order):
try:
new_size = Decimal(order['new_size'])
except KeyError:
return
try:
price = Decimal(order['price'])
except KeyError:
return
if order['side'] == 'buy':
bids = self.get_bids(price)
if bids is None or not any(o['id'] == order['order_id'] for o in bids):
return
index = [b['id'] for b in bids].index(order['order_id'])
bids[index]['size'] = new_size
self.set_bids(price, bids)
else:
asks = self.get_asks(price)
if asks is None or not any(o['id'] == order['order_id'] for o in asks):
return
index = [a['id'] for a in asks].index(order['order_id'])
asks[index]['size'] = new_size
self.set_asks(price, asks)
tree = self._asks if order['side'] == 'sell' else self._bids
node = tree.get(price)
if node is None or not any(o['id'] == order['order_id'] for o in node):
return
def get_current_ticker(self):
return self._current_ticker
def get_current_book(self):
result = {
'sequence': self._sequence,
'asks': [],
'bids': [],
}
for ask in self._asks:
try:
this_ask = self._asks[ask]
except KeyError:
continue
for order in this_ask:
result['asks'].append([order['price'], order['size'], order['id']])
for bid in self._bids:
try:
this_bid = self._bids[bid]
except KeyError:
continue
for order in this_bid:
result['bids'].append([order['price'], order['size'], order['id']])
return result
def get_ask(self):
return self._asks.peekitem(0)[0]
def get_asks(self, price):
return self._asks.get(price)
def remove_asks(self, price):
del self._asks[price]
def set_asks(self, price, asks):
self._asks[price] = asks
def get_bid(self):
return self._bids.peekitem(-1)[0]
def get_bids(self, price):
return self._bids.get(price)
def remove_bids(self, price):
del self._bids[price]
def set_bids(self, price, bids):
self._bids[price] = bids
if __name__ == '__main__':
import sys
import time
import datetime as dt
class OrderBookConsole(OrderBook):
def __init__(self, product_id=None):
super(OrderBookConsole, self).__init__(product_id=product_id)
self._bid = None
self._ask = None
self._bid_depth = None
self._ask_depth = None
def on_message(self, message):
super(OrderBookConsole, self).on_message(message)
bid = self.get_bid()
bids = self.get_bids(bid)
bid_depth = sum([b['size'] for b in bids])
ask = self.get_ask()
asks = self.get_asks(ask)
ask_depth = sum([a['size'] for a in asks])
if self._bid == bid and self._ask == ask and self._bid_depth == bid_depth and self._ask_depth == ask_depth:
pass
else:
self._bid = bid
self._ask = ask
self._bid_depth = bid_depth
self._ask_depth = ask_depth
print('{} {} bid: {:.3f} @ {:.2f}\task: {:.3f} @ {:.2f}'.format(
dt.datetime.now(), self.product_id, bid_depth, bid, ask_depth, ask))
order_book = OrderBookConsole()
order_book.start()
try:
while True:
time.sleep(10)
except KeyboardInterrupt:
order_book.close()
if order_book.error:
sys.exit(1)
else:
sys.exit(0)
| true
| true
|
790b81de9070d0250802693998ac3959dc4b0065
| 4,576
|
py
|
Python
|
scripts/train_model.py
|
Voda88/mlops
|
412e95b6580e9820d4e57f93bd4c52ec877162eb
|
[
"MIT"
] | null | null | null |
scripts/train_model.py
|
Voda88/mlops
|
412e95b6580e9820d4e57f93bd4c52ec877162eb
|
[
"MIT"
] | null | null | null |
scripts/train_model.py
|
Voda88/mlops
|
412e95b6580e9820d4e57f93bd4c52ec877162eb
|
[
"MIT"
] | null | null | null |
"""
Copyright (C) Microsoft Corporation. All rights reserved.
Microsoft Corporation (“Microsoft”) grants you a nonexclusive, perpetual,
royalty-free right to use, copy, and modify the software code provided by us
("Software Code"). You may not sublicense the Software Code or any use of it
(except to your affiliates and to vendors to perform work on your behalf)
through distribution, network access, service agreement, lease, rental, or
otherwise. This license does not purport to express any claim of ownership over
data you may have shared with Microsoft in the creation of the Software Code.
Unless applicable law gives you more rights, Microsoft reserves all other
rights not expressly granted herein, whether by implication, estoppel or
otherwise.
THE SOFTWARE CODE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
MICROSOFT OR ITS LICENSORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THE SOFTWARE CODE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
from azureml.core.run import Run
import os
import argparse
from sklearn.linear_model import Ridge
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
import joblib
import json
def train_model(run, data, alpha):
run.log("alpha", alpha)
run.parent.log("alpha", alpha)
reg = Ridge(alpha=alpha)
reg.fit(data["train"]["X"], data["train"]["y"])
preds = reg.predict(data["test"]["X"])
run.log("mse", mean_squared_error(
preds, data["test"]["y"]), description="Mean squared error metric")
run.parent.log("mse", mean_squared_error(
preds, data["test"]["y"]), description="Mean squared error metric")
return reg
def main():
print("Running train.py")
parser = argparse.ArgumentParser("train")
parser.add_argument(
"--build_id",
type=str,
help="The build ID of the build triggering this pipeline run",
)
parser.add_argument(
"--model_name",
type=str,
help="Name of the Model",
default="sklearn_regression_model.pkl",
)
parser.add_argument(
"--step_output",
type=str,
help=("output for passing data to next step")
)
args = parser.parse_args()
print("Argument [build_id]: %s" % args.build_id)
print("Argument [model_name]: %s" % args.model_name)
print("Argument [step_output]: %s" % args.step_output)
model_name = args.model_name
build_id = args.build_id
step_output_path = args.step_output
print("Getting training parameters")
alpha = 0.5
print("Parameter alpha: %s" % alpha)
run = Run.get_context()
# Get the dataset
dataset = run.input_datasets['training_data']
if (dataset):
df = dataset.to_pandas_dataframe()
# Drop the target column so it does not leak into the feature matrix
X = df.drop(columns=['Y']).values
y = df['Y'].values
else:
e = ("No dataset provided")
print(e)
raise Exception(e)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=0)
data = {"train": {"X": X_train, "y": y_train},
"test": {"X": X_test, "y": y_test}}
reg = train_model(run, data, alpha)
# Pass model file to next step
os.makedirs(step_output_path, exist_ok=True)
model_output_path = os.path.join(step_output_path, model_name)
joblib.dump(value=reg, filename=model_output_path)
# Also upload model file to run outputs for history
os.makedirs('outputs', exist_ok=True)
output_path = os.path.join('outputs', model_name)
joblib.dump(value=reg, filename=output_path)
# Add properties to identify this specific training run
run.parent.tag("BuildId", value=build_id)
run.tag("BuildId", value=build_id)
run.tag("run_type", value="train")
builduri_base = os.environ.get("BUILDURI_BASE")
if (builduri_base is not None):
build_uri = builduri_base + build_id
run.tag("BuildUri", value=build_uri)
run.parent.tag("BuildUri", value=build_uri)
print(f"tags now present for run: {run.tags}")
run.complete()
if __name__ == '__main__':
main()
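# Example invocation (arguments illustrative; in practice this script runs as
# an Azure ML pipeline step that also binds the 'training_data' input dataset):
#   python train_model.py --build_id 20200107.1 \
#       --model_name sklearn_regression_model.pkl --step_output ./step_output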
| 34.666667
| 79
| 0.696897
|
from azureml.core.run import Run
import os
import argparse
from sklearn.linear_model import Ridge
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
import joblib
import json
def train_model(run, data, alpha):
run.log("alpha", alpha)
run.parent.log("alpha", alpha)
reg = Ridge(alpha=alpha)
reg.fit(data["train"]["X"], data["train"]["y"])
preds = reg.predict(data["test"]["X"])
run.log("mse", mean_squared_error(
preds, data["test"]["y"]), description="Mean squared error metric")
run.parent.log("mse", mean_squared_error(
preds, data["test"]["y"]), description="Mean squared error metric")
return reg
def main():
print("Running train.py")
parser = argparse.ArgumentParser("train")
parser.add_argument(
"--build_id",
type=str,
help="The build ID of the build triggering this pipeline run",
)
parser.add_argument(
"--model_name",
type=str,
help="Name of the Model",
default="sklearn_regression_model.pkl",
)
parser.add_argument(
"--step_output",
type=str,
help=("output for passing data to next step")
)
args = parser.parse_args()
print("Argument [build_id]: %s" % args.build_id)
print("Argument [model_name]: %s" % args.model_name)
print("Argument [step_output]: %s" % args.step_output)
model_name = args.model_name
build_id = args.build_id
step_output_path = args.step_output
print("Getting training parameters")
alpha = 0.5
print("Parameter alpha: %s" % alpha)
run = Run.get_context()
dataset = run.input_datasets['training_data']
if (dataset):
df = dataset.to_pandas_dataframe()
X = df.drop(columns=['Y']).values
y = df['Y'].values
else:
e = ("No dataset provided")
print(e)
raise Exception(e)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=0)
data = {"train": {"X": X_train, "y": y_train},
"test": {"X": X_test, "y": y_test}}
reg = train_model(run, data, alpha)
os.makedirs(step_output_path, exist_ok=True)
model_output_path = os.path.join(step_output_path, model_name)
joblib.dump(value=reg, filename=model_output_path)
os.makedirs('outputs', exist_ok=True)
output_path = os.path.join('outputs', model_name)
joblib.dump(value=reg, filename=output_path)
run.parent.tag("BuildId", value=build_id)
run.tag("BuildId", value=build_id)
run.tag("run_type", value="train")
builduri_base = os.environ.get("BUILDURI_BASE")
if (builduri_base is not None):
build_uri = builduri_base + build_id
run.tag("BuildUri", value=build_uri)
run.parent.tag("BuildUri", value=build_uri)
print(f"tags now present for run: {run.tags}")
run.complete()
if __name__ == '__main__':
main()
| true
| true
|
790b820a6fc0f0a35e231d3e3a829a935b761f89
| 4,734
|
py
|
Python
|
mlrun/runtimes/function_reference.py
|
Hedingber/mlrun
|
e2269718fcc7caa7e1aa379ac28495830b45f9da
|
[
"Apache-2.0"
] | 1
|
2021-02-17T08:12:33.000Z
|
2021-02-17T08:12:33.000Z
|
mlrun/runtimes/function_reference.py
|
Hedingber/mlrun
|
e2269718fcc7caa7e1aa379ac28495830b45f9da
|
[
"Apache-2.0"
] | 1
|
2020-12-31T14:36:29.000Z
|
2020-12-31T14:36:29.000Z
|
mlrun/runtimes/function_reference.py
|
Hedingber/mlrun
|
e2269718fcc7caa7e1aa379ac28495830b45f9da
|
[
"Apache-2.0"
] | 1
|
2021-08-30T21:43:38.000Z
|
2021-08-30T21:43:38.000Z
|
# Copyright 2018 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from base64 import b64encode
from nuclio.build import mlrun_footer
import mlrun
from ..model import ModelObj
from ..utils import generate_object_uri
from .utils import enrich_function_from_dict
class FunctionReference(ModelObj):
"""function reference/template, point to function and add/override resources"""
def __init__(
self,
url=None,
image=None,
requirements=None,
code=None,
spec=None,
kind=None,
name=None,
):
self.url = url
self.kind = kind
self.image = image
self.requirements = requirements
self.name = name
if hasattr(spec, "to_dict"):
spec = spec.to_dict()
self.spec = spec
self.code = code
self._function = None
self._address = None
def is_empty(self):
if self.url or self.code or self.spec:
return False
return True
def fullname(self, parent):
return f"{parent.metadata.name}-{self.name}"
def uri(self, parent, tag=None, hash_key=None, fullname=True):
name = self.fullname(parent) if fullname else self.name
return generate_object_uri(
parent.metadata.project,
name,
tag=tag or parent.metadata.tag,
hash_key=hash_key,
)
@property
def function_object(self):
"""get the generated function object"""
return self._function
def to_function(self, default_kind=None):
"""generate a function object from the ref definitions"""
if self.url and "://" not in self.url:
if not os.path.isfile(self.url):
raise OSError(f"{self.url} not found")
kind = self.kind or default_kind
if self.url:
if (
self.url.endswith(".yaml")
or self.url.startswith("db://")
or self.url.startswith("hub://")
):
func = mlrun.import_function(self.url)
if self.image:
func.spec.image = self.image
elif self.url.endswith(".ipynb"):
func = mlrun.code_to_function(
self.name, filename=self.url, image=self.image, kind=kind
)
elif self.url.endswith(".py"):
# todo: support code text as input (for UI)
if not self.image:
raise ValueError(
"image must be provided with py code files, "
"use function object for more control/settings"
)
func = mlrun.code_to_function(
self.name, filename=self.url, image=self.image, kind=kind
)
else:
raise ValueError(f"unsupported function url {self.url} or no spec")
if self.spec:
func = enrich_function_from_dict(func, self.spec)
elif self.code is not None:
code = self.code
if kind == mlrun.runtimes.RuntimeKinds.serving:
code = code + mlrun_footer.format(
mlrun.runtimes.serving.serving_subkind
)
func = mlrun.new_function(self.name, kind=kind, image=self.image)
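            # embed the source base64-encoded in the function spec so the
            # runtime can rebuild the function without the original file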
data = b64encode(code.encode("utf-8")).decode("utf-8")
func.spec.build.functionSourceCode = data
if kind not in mlrun.runtimes.RuntimeKinds.nuclio_runtimes():
func.spec.default_handler = "handler"
if self.spec:
func = enrich_function_from_dict(func, self.spec)
elif self.spec:
func = mlrun.new_function(self.name, runtime=self.spec)
else:
raise ValueError("url or spec or code must be specified")
if self.requirements:
func.with_requirements(self.requirements)
self._function = func
return func
@property
def address(self):
return self._address
def deploy(self, **kwargs):
"""deploy the function"""
self._address = self._function.deploy(**kwargs)
return self._address
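
To make the three code paths of to_function() concrete, here is a minimal usage sketch; the names, image, and hub URL are hypothetical placeholders, and it assumes a working mlrun installation.

from mlrun.runtimes.function_reference import FunctionReference

# 1) inline source: new_function() plus base64-embedded code; the default
#    handler is set to "handler" for non-nuclio kinds
inline_ref = FunctionReference(
    name="echo",
    kind="job",
    image="mlrun/mlrun",
    code="def handler(context):\n    return 'ok'\n",
)
fn = inline_ref.to_function()

# 2) hub URL: resolved through mlrun.import_function() when called
hub_ref = FunctionReference(name="describe", url="hub://describe")

# 3) local .py file: image is mandatory, and the file must exist locally,
#    otherwise to_function() raises
file_ref = FunctionReference(
    name="trainer", url="trainer.py", image="mlrun/mlrun", kind="job"
)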
| 33.814286
| 83
| 0.586608
|
import os
from base64 import b64encode
from nuclio.build import mlrun_footer
import mlrun
from ..model import ModelObj
from ..utils import generate_object_uri
from .utils import enrich_function_from_dict
class FunctionReference(ModelObj):
def __init__(
self,
url=None,
image=None,
requirements=None,
code=None,
spec=None,
kind=None,
name=None,
):
self.url = url
self.kind = kind
self.image = image
self.requirements = requirements
self.name = name
if hasattr(spec, "to_dict"):
spec = spec.to_dict()
self.spec = spec
self.code = code
self._function = None
self._address = None
def is_empty(self):
if self.url or self.code or self.spec:
return False
return True
def fullname(self, parent):
return f"{parent.metadata.name}-{self.name}"
def uri(self, parent, tag=None, hash_key=None, fullname=True):
name = self.fullname(parent) if fullname else self.name
return generate_object_uri(
parent.metadata.project,
name,
tag=tag or parent.metadata.tag,
hash_key=hash_key,
)
@property
def function_object(self):
return self._function
def to_function(self, default_kind=None):
if self.url and "://" not in self.url:
if not os.path.isfile(self.url):
raise OSError(f"{self.url} not found")
kind = self.kind or default_kind
if self.url:
if (
self.url.endswith(".yaml")
or self.url.startswith("db://")
or self.url.startswith("hub://")
):
func = mlrun.import_function(self.url)
if self.image:
func.spec.image = self.image
elif self.url.endswith(".ipynb"):
func = mlrun.code_to_function(
self.name, filename=self.url, image=self.image, kind=kind
)
elif self.url.endswith(".py"):
if not self.image:
raise ValueError(
"image must be provided with py code files, "
"use function object for more control/settings"
)
func = mlrun.code_to_function(
self.name, filename=self.url, image=self.image, kind=kind
)
else:
raise ValueError(f"unsupported function url {self.url} or no spec")
if self.spec:
func = enrich_function_from_dict(func, self.spec)
elif self.code is not None:
code = self.code
if kind == mlrun.runtimes.RuntimeKinds.serving:
code = code + mlrun_footer.format(
mlrun.runtimes.serving.serving_subkind
)
func = mlrun.new_function(self.name, kind=kind, image=self.image)
data = b64encode(code.encode("utf-8")).decode("utf-8")
func.spec.build.functionSourceCode = data
if kind not in mlrun.runtimes.RuntimeKinds.nuclio_runtimes():
func.spec.default_handler = "handler"
if self.spec:
func = enrich_function_from_dict(func, self.spec)
elif self.spec:
func = mlrun.new_function(self.name, runtime=self.spec)
else:
raise ValueError("url or spec or code must be specified")
if self.requirements:
func.with_requirements(self.requirements)
self._function = func
return func
@property
def address(self):
return self._address
def deploy(self, **kwargs):
self._address = self._function.deploy(**kwargs)
return self._address
| true
| true
|