"""Application helper utilities."""
import importlib
import pkgutil
from flask import Blueprint
__all__ = ('check_required_settings', 'register_blueprints',)
DOES_NOT_EXIST = '!@DNE@!' # Placeholder value to use for missing settings.
REQUIRED_SETTINGS = 'SECRET_KEY', 'SECURITY_PASSWORD_SALT'
def check_required_settings(config, keys=REQUIRED_SETTINGS):
"""Validate the presence of required settings."""
for key in keys:
if config.get(key, DOES_NOT_EXIST) == DOES_NOT_EXIST:
            message = 'The {} configuration setting is required.'.format(key)
raise RuntimeError(message)
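# A quick sketch of the failure mode: with SECRET_KEY missing from the
# configuration, a RuntimeError is raised on the first absent key.
#
#     >>> check_required_settings({'SECURITY_PASSWORD_SALT': 's4lt'})
#     Traceback (most recent call last):
#       ...
#     RuntimeError: The SECRET_KEY configuration setting is required.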
def register_blueprints(app, package_name, package_path):
"""Register all :class:`~flask.Blueprint` instances on the app."""
for _, name, _ in pkgutil.iter_modules(package_path):
m = importlib.import_module('{}.{}'.format(package_name, name))
for x in dir(m):
item = getattr(m, x)
if isinstance(item, Blueprint):
app.register_blueprint(item)
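# Typical usage (an illustrative sketch) from a package's __init__.py,
# where ``__path__`` lists the package's module search locations:
#
#     register_blueprints(app, __name__, __path__)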
#
# tasks.py
# ========
#
# Copying
# -------
#
# Copyright (c) 2015 cf-propane authors and contributors.
#
# This file is part of the *cf-propane* project.
#
# cf-propane is a free software project. You can redistribute it and/or
# modify it under the terms of the MIT License.
#
# This software project is distributed *as is*, WITHOUT WARRANTY OF ANY
# KIND; including but not limited to the WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE and NONINFRINGEMENT.
#
# You should have received a copy of the MIT License along with
# cf-propane. If not, see <http://opensource.org/licenses/MIT>.
#
"""Management tasks definition to ease cf-propane development."""
import re
import os
import sys
import glob
import shutil
import yaml
import jinja2
from copy import deepcopy
from contextlib import suppress
from invoke import Collection, task, run
#
# Global definitions
# ------------------
ns = Collection()
#
# Utility functions
# -----------------
#
# Input / Output
# ^^^^^^^^^^^^^^
class msg(object):
"""Namespace for message printing operations.
:attribute REQUEST: Message is requesting an input from the user.
:attribute INFORMATION: Message is informative with no potential
impact.
:attribute WARNING: Message to attract user attention or to warn
about an unexpected behaviour but with little
impact.
    :attribute ERROR: An error occurred during task execution but did not
                      discontinue execution.
    :attribute FATAL: A non-recoverable error occurred and discontinued
                      task execution.
"""
_NOPREFIX = 1 << 0
_CONTINUE = 1 << 1
REQUEST = 1 << 2
INFORMATION = 1 << 3
WARNING = 1 << 4
ERROR = 1 << 5
FATAL = 1 << 6
_levels = {
_NOPREFIX: ('', sys.stdout),
_CONTINUE: ('..', sys.stdout),
REQUEST: ('>>', sys.stdout),
INFORMATION: ('ii', sys.stdout),
WARNING: ('!!', sys.stdout),
ERROR: ('EE', sys.stderr),
FATAL: ('XX', sys.stderr),
}
@classmethod
def write(cls, level, *lines):
"""Print *lines* to the standard output at given *level*.
:param int level: Level of the message to be printed. Allowed
values are:
- :attr:`msg.REQUEST`
- :attr:`msg.INFORMATION`
- :attr:`msg.WARNING`
- :attr:`msg.ERROR`
- :attr:`msg.FATAL`
:param str lines: Lines to be printed on screen.
"""
prefix, stream = cls._levels.get(
level, cls._levels[cls._NOPREFIX]
)
c_prefix, c_stream = prefix, stream
        # Messages at WARNING level or above should remain visible to
        # the user, so the prefix is kept on their continuation lines.
if level < cls.WARNING:
c_prefix, c_stream = cls._levels[cls._CONTINUE]
lines = list(lines)
print(prefix, lines.pop(0), sep=' ', file=stream)
for l in lines:
print(c_prefix, l, sep=' ', file=c_stream)
@classmethod
def ask(cls, *lines, **kwargs):
"""Request input from the user and return the result. *lines*
are printed using the :attr:`msg.REQUEST` level. The last
line should be the request to the user printed as such::
>> Enter your full name:_
In the previous example, *Enter your full name:* is the last
line provided and the character ``_`` represents a white space
which is added by this function.
        If *request_only* is given, it should be passed as a keyword
        argument only.
:param str lines: Lines to be printed on screen. If multiple
lines are given, the first lines are
information to the user while the last line is
the actual question.
        :param bool request_only: Should only the request be printed,
                                  skipping the message? Defaults to
                                  ``False``.
:returns: Input provided by the user.
:rtype: str
"""
lines = list(lines)
request = '{prefix} {message} '.format(
prefix=cls._levels[cls.REQUEST][0], message=lines.pop()
)
if lines and not kwargs.get('request_only', False):
cls.write(cls.REQUEST, *lines)
return input(request)
@classmethod
def ask_yn(cls, *lines, **kwargs):
"""Ask a question on which the user has to reply *yes* or *no*.
*lines* are printed using the :attr:`msg.REQUEST` level. The last
line should be the question asked to the user and is printed as
such::
>> Question? [y/n]_
        In the above example, ``Question?`` is the last line provided and
        the character ``_`` represents a white space which is added by
        this function.
:param str lines: Lines to be printed on screen. If multiple
lines are given, the first lines are
information to the user while the last line is
the actual question.
:param bool default: When ``True``, *yes* will be the default
value if the user provides no entry. When
``False``, the default value is *no*. If
not given, question will be asked to the
user a maximum of 3 times before returning.
:returns: ``True`` if user's reply is *yes*, ``False`` if user's
reply is *no*. Returns ``None`` when no valid answer
could be gotten from the user.
:rtype: bool or None
"""
valid_yes_re = re.compile(r'y|yes|t|true|1', re.IGNORECASE)
valid_no_re = re.compile(r'n|no|f|false|0', re.IGNORECASE)
max_try = 2
# Prepare available options based on expected default answer.
default = kwargs.get('default')
opts = '[y/n]'
if default is True:
opts = '[Y/n]'
elif default is False:
opts = '[y/N]'
# Add options to the request.
lines = list(lines)
lines.append(
'{message} {opts}'.format(message=lines.pop(), opts=opts)
)
answer = cls.ask(*lines)
while max_try != 0:
if valid_yes_re.match(answer):
return True
elif valid_no_re.match(answer):
return False
elif default is not None:
return default
answer = cls.ask(*lines, request_only=True)
max_try -= 1
return None
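# Illustrative usage of the msg namespace (values are hypothetical):
#
#     msg.write(msg.INFORMATION, 'Build started', 'reading env.conf')
#     # prints:
#     #   ii Build started
#     #   .. reading env.conf
#     if msg.ask_yn('Overwrite existing build?', default=False):
#         ...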
#
# File system
# ^^^^^^^^^^^
class fs(object):
"""Namespace for filesystem related operations."""
@staticmethod
def shexpand(pattern):
"""Return a possibly-empty list of path names that match
*pattern*.
:param pattern: A string or a list of strings containing
shell-style wildcards.
:type pattern: str or iterable
:returns: A list of path names matching given *pattern*.
:rtype: list
"""
if isinstance(pattern, (str, bytes)):
it = [pattern, ]
else:
it = pattern
def _expand(x):
return glob.glob(os.path.expanduser(os.path.expandvars(x)))
# Flatten list of paths from _expand.
return [
item
for paths_list in map(_expand, it)
for item in paths_list
]
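    # For example (hypothetical paths), a single pattern or an iterable
    # of patterns may be given; both variables and ``~`` are expanded:
    #
    #     fs.shexpand('~/projects/*.cf')
    #     fs.shexpand(['$HOME/src/*.cf', 'lib/*'])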
@staticmethod
def copytree(src, dst):
"""Copy the directory tree structure from *src* to recreate it
in the *dst* directory.
:param str src: Path to the source to replicate the directory
tree structure from.
:param str dst: Path to the destination directory to replicate
the directory tree structure.
"""
os.makedirs(dst, exist_ok=True)
for root, dirs, _ in os.walk(src):
for d in dirs:
os.makedirs(
os.path.join(root.replace(src, dst), d),
exist_ok=True
)
@classmethod
def lstree(cls, pattern, recursive=False, include_path=False):
"""List all files and directories found in given path.
:param iterable pattern: A string or list of strings containing
shell-style patterns of directories to
remove.
:param bool recursive: Should the content of any directory
found in ``path`` also be listed?
Default to **False**.
:param bool include_path: Should given ``path`` also be included
in returned list.
:returns: A list of items found in given path.
:rtype: list
"""
lst = []
for path in cls.shexpand(pattern):
for root, dirs, files in os.walk(path):
if root == path and include_path:
lst.append(root)
lst += map(lambda x: os.path.join(root, x), dirs)
lst += map(lambda x: os.path.join(root, x), files)
if not recursive:
break
return lst
@classmethod
def rmdir(cls, pattern, recursive=False):
"""Remove empty directory at *path*. When recursive is set to
``True``, run over the directory tree to remove any empty
directory found.
:param iterable pattern: A string or list of strings containing
shell-style patterns of directories to
remove.
:param bool recursive: Should *path* be walked through to remove
empty directories found in the tree?
"""
for path in sorted(cls.shexpand(pattern)):
if recursive:
# Remove empty directories in tree from deepest to
# shallowest.
for root, dirs, _ in os.walk(path, topdown=False):
for d in dirs:
with suppress(OSError):
os.rmdir(os.path.join(root, d))
with suppress(OSError):
os.rmdir(path)
@classmethod
def rmtree(cls, pattern):
"""Remove directory trees matching given Unix shell style
patterns.
:param iterable pattern: A string or list of strings containing
shell-style patterns of directories to
remove.
"""
for path in sorted(cls.shexpand(pattern)):
# Avoid two removal tries if path does not exist.
if not os.path.lexists(path):
continue
try:
# Try to remove path (and sub-paths) as a directory.
shutil.rmtree(path)
except OSError:
# Not a directory, try to remove path as a file.
with suppress(OSError):
os.remove(path)
@classmethod
def symlink(cls, source, link_name, force=False, target_is_directory=False):
"""Create a symbolic link pointing to *source* named *link_name*.
:param str source: Path of the target to link to.
:param str link_name: Path to the symbolic link to create.
        :param bool force: If set to ``True`` and *link_name* exists, it
                           will be removed before creating the link.
                           Defaults to ``False``.
:param bool target_is_directory:
On Windows, a symlink represents either a file or a directory
and does not morph to the target dynamically. Symlink will be
created as a directory if set to ``True``. Default to
``False``.
        :returns: ``True`` if symlink creation succeeded, ``False``
otherwise.
:rtype: bool
"""
with suppress(OSError):
if os.readlink(link_name) == source \
or os.path.abspath(os.readlink(link_name)) == source:
return True
if force:
if os.path.lexists(link_name):
cls.rmtree(link_name)
else:
try:
os.makedirs(link_name, exist_ok=True)
except OSError:
return False
cls.rmdir(link_name)
try:
os.symlink(
source, link_name, target_is_directory=target_is_directory
)
except (NotImplementedError, OSError):
return False
return True
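# A minimal usage sketch of fs.symlink (hypothetical paths), mirroring
# how the test task links the build directory into CFEngine's inputs:
#
#     fs.symlink('/path/to/build', '/home/user/.cfagent/inputs',
#                force=True, target_is_directory=True)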
#
# Docstring
# ^^^^^^^^^
class docstring(object):
"""Namespace for docstring operations.
:attribute COMMENT_START_WITH: Normal comment line identifier.
:attribute DOCSTRING_START_WITH: Docstring comment line identifier.
"""
COMMENT_START_WITH = '#'
DOCSTRING_START_WITH = '#:'
DOCSTRING_INDENT = 2
EXT_CF = '.cf'
EXT_RST = '.rst'
@classmethod
def extract(cls, path, dst, insert_code=False):
"""Extract specially formatted comment strings (a.k.a.
docstrings) from file and save the result in *dst*.
Docstring comments should start with ``#:``.
:param str path: Path of the file from which to extract the
docstrings.
:param str dst: Path to the file in which to write extracted
docstrings.
:param bool insert_code: Shall the documented code also be
inserted in the resulting document?
Defaults to ``False``.
:returns: ``True`` if result file has been written, ``False``
otherwise.
:rtype: bool
"""
docstring_start_re = re.compile(
r'{}\s?'.format(cls.DOCSTRING_START_WITH)
)
doclines = []
doc_app = doclines.append
with suppress(OSError), open(path, 'r') as fd:
code_block = False
for line in fd:
# Strip line to get the comment symbol on first position.
sline = line.strip()
# Start by looking if we have a docstring.
if sline.startswith(cls.DOCSTRING_START_WITH):
# Insert blank line between previous code block
# and next docstring line.
if code_block:
doc_app('\n')
code_block = False
ds_line = docstring_start_re.sub('', sline)
doc_app('{}\n'.format(ds_line))
# If this is a blank line and we are not writing code
# or if this is a comment line, skip.
elif (not sline and not code_block) \
or sline.startswith(cls.COMMENT_START_WITH):
continue
# Any other lines should be code to be inserted.
elif insert_code:
if not code_block:
doc_app('.. code-block:: cf3\n\n')
code_block = True
doc_app(
'{}{}\n'.format(
' ' * (cls.DOCSTRING_INDENT), line.rstrip()
)
)
if doclines:
with suppress(OSError), open(dst, 'w') as fd:
fd.writelines(doclines)
return True
return False
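    # For example (an illustrative sketch), given a ``.cf`` source file
    # containing:
    #
    #     #: The ``foo`` bundle
    #     #: =================
    #     bundle agent foo
    #     {
    #     }
    #
    # extract() writes the two ``#:`` lines (with the marker stripped)
    # to *dst*; with ``insert_code=True`` the bundle body is appended
    # inside a ``.. code-block:: cf3`` section, indented by
    # DOCSTRING_INDENT spaces.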
@classmethod
def to_dir(cls, src, dst, insert_code=False):
"""Given a cf-propane *src* directory, extract all the docstrings
from the source files and save the result in *dst*.
:param str src: Path to source code directory of a cf-propane
project.
:param str dst: Path to directory in which to save extracted
docstring files.
:param bool insert_code: Shall the documented code also be
included in the resulting document?
Defaults to ``False``.
"""
cf_files = [
(p, p.replace(src, dst).replace(cls.EXT_CF, cls.EXT_RST))
for p in sorted(fs.lstree(src, recursive=True))
if p.endswith(cls.EXT_CF) and not os.path.isdir(p)
]
if not cf_files:
return
fs.copytree(src, dst)
for source, dest in cf_files:
cls.extract(source, dest, insert_code)
fs.rmdir(dst, recursive=True)
#
# Working environment management
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
class env(object):
"""Namespace for project's working environment management.
:attribute ENVIRONMENT_DEFAULTS: Default environment values.
"""
ENVIRONMENT_DEFAULTS = {
'project': {
'default_env': 'dev',
'build_d': 'build',
'skel_d': 'skel.d',
'src_d': 'src',
'lib_d': 'lib',
},
'doc': {
'insert_code': True,
'src_d': 'doc',
'target': 'html',
},
'src': {
'namespace': 'cfpropane',
},
}
ENVIRONMENT_DEFAULTS['doc']['build_d'] = os.path.join(
ENVIRONMENT_DEFAULTS['project']['build_d'], 'doc'
)
@classmethod
def dmap(cls, callback, *mapping, recurse=False):
"""Apply *callack* to every item of *mapping*. If *recurse* is
``True``, *callback* will also be applied to any child mapping
found.
.. note:: This method will alter given *mapping* if called
*callback* does so.
:param func callback: Function to be applied to the elements in
*mapping*.
:param dict mapping: Dictionary to apply *callback* on. Multiple
may be given.
:param bool recurse: Should *callback* also be applied to child
dictionaries? Defaults to ``False``.
"""
for m in mapping:
with suppress(AttributeError):
for k, v in tuple(m.items()):
if recurse and isinstance(v, dict):
                        cls.dmap(callback, v, recurse=recurse)
else:
callback(m, k, v)
@classmethod
def dflatten(cls, mapping, parent_key='', sep='.'):
"""Flatten nested directories to one depth level and join the
key names of child directories with *sep*.
.. code-block:: py
>>> d = {
... 'foo': 'foo',
... 'bar': {
... 'baz': 'baz',
... },
... }
>>> env.dflatten(d)
{'foo': 'foo', 'bar.baz': 'baz'}
        :param dict mapping: Dictionary to be flattened.
:param string parent_key: Name of the upper level key. Defaults
to empty string.
:param string sep: Separator to be used when joining the keys.
Defaults to ``.``.
        .. note:: The default value for *sep* implies that any key from
                  *mapping* should be a string. If ``TypeError`` is
                  raised while concatenating the key with *sep*, this
                  key will be skipped.
"""
d = {}
for k, v in mapping.items():
try:
key = parent_key + sep + k if parent_key else k
except TypeError:
continue
if isinstance(v, dict):
cls.update(d, cls.dflatten(v, key, sep))
else:
d[key] = deepcopy(v)
return d
@classmethod
def load(cls, *pattern, use_defaults=False):
"""Load a environment file into a new namespace.
:param str pattern: Pattern of environment files to load. Give
multiple patterns to load all matching files
in the same name space.
:param bool use_defaults: Should returned environment be using
default values as a base?
:returns: Environment dictionary.
:rtype: dict
"""
environment = {}
if use_defaults:
environment = cls.ENVIRONMENT_DEFAULTS.copy()
def _load_include(mapping, key, val):
"""Update given dictionary with given ``include`` directive
item.
An ``include`` directive is any dictionary key named
``include`` with a path or a list of paths for value. The
file targeted by those paths should be a YAML environment
file to be added to given *mapping*.
            This function will not loop over the dictionary and assumes
            *key* and *val* form an item of it.
:param dict mapping: Dictionary to look in and to be updated
by the ``include`` directives.
:param key: A key from *mapping*.
:param value: A value from *mapping*.
"""
if key == 'include':
with suppress(KeyError):
del mapping[key]
cls.update(mapping, cls.load(val))
                # Try to look for include directives if *val* is a list of dicts.
with suppress(TypeError):
cls.dmap(_load_include, *val, recurse=True)
# end _load_include
# Defaults may have include directives.
cls.dmap(_load_include, environment, recurse=True)
for path in fs.shexpand(pattern):
loaded = {}
with open(path, 'r') as fp:
loaded = yaml.safe_load(fp)
cls.dmap(_load_include, loaded, recurse=True)
cls.update(environment, loaded)
return environment
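    # An illustrative ``env.conf`` sketch (hypothetical content) showing
    # the ``include`` directive handled by _load_include:
    #
    #     project:
    #       default_env: dev
    #     include:
    #       - conf.d/*.yml
    #
    # Every file matching the included patterns is loaded as YAML and
    # merged into the same namespace:
    #
    #     environment = env.load('env.conf', use_defaults=True)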
@classmethod
def update(cls, target, *mapping):
"""Merge given target dictionary with given *mapping* object.
Unlike Python's dict.update() method, if the same key is present
in both dictionaries and the value for this key is a dictionary,
it will be updated instead of being replaced by the dictionary
from the *mapping* object. All other data types will be replaced.
For example::
>>> d1 = {'baz': {'foo': 'foo'}, 'fizz': 'buzz'}
>>> d2 = {'baz': {'bar': 'bar'}, 'fizz': 'fizzbuzz'}
>>> env.update(d1, d2)
>>> d1
{'baz': {'foo': 'foo', 'bar': 'bar'}, 'fizz': 'fizzbuzz'}
:param dict target: Dictionary to be updated.
:param dict mapping: Dictionary to be merged. Multiple may be
given.
"""
for m in mapping:
with suppress(AttributeError):
for k, v in tuple(m.items()):
if k in target and isinstance(v, dict):
cls.update(target[k], v)
else:
target[k] = deepcopy(v)
@classmethod
def expand_context_string(cls, context, value, marker='++'):
"""Expand any marked variable found in given string by their
corresponding value from the *context* dictionary.
        The *marker* encloses a *context* variable in the dotted form. For
example, the following string ``This is a ++foo.bar++ example``
will be altered using the value from ``context['foo']['bar']``.
:param dict context: Templating context.
:param str value: String value to be altered.
:param str marker: Marker used to enclose context variables.
Defaults to ``++``.
"""
flat_context = cls.dflatten(context)
for sub in value.split(marker):
if sub in flat_context:
value = value.replace(sub, flat_context[sub])
return value.replace(marker, '')
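    # A quick worked example, following the docstring above:
    #
    #     >>> ctx = {'foo': {'bar': 'spam'}}
    #     >>> env.expand_context_string(ctx, 'This is a ++foo.bar++ example')
    #     'This is a spam example'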
@classmethod
def move_template(cls, context, *pattern, recursive=False, marker='++'):
"""In-place move any file where *marker* is part of the file
name and marked variable is found in given *context*.
:param dict context: Templating context.
:param str pattern: Location pattern to the file or directory to
move. Multiple may be given.
:param bool recursive: If path is a directory, recursively rename
any element found. Defaults to ``False``.
:param str marker: Marker used to enclose context variables.
Defaults to ``++``.
"""
lst = fs.lstree(pattern, recursive=recursive, include_path=True)
lst += [x for x in fs.shexpand(pattern) if os.path.isfile(x)]
if not lst:
return
move = []
for path in sorted(lst, reverse=True):
new_path = cls.expand_context_string(context, path)
if path != new_path:
move.append((path, new_path))
for src, dst in move:
with suppress(OSError):
shutil.move(src, dst)
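    # For example (hypothetical names), with a context of
    # ``{'agent': {'name': 'foo'}}`` a template file named
    # ``src/++agent.name++.cf`` is renamed in place to ``src/foo.cf``.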
@classmethod
def render_tree(cls, context, src, dst):
"""Run given *src* directories through the Jinja2 template engine
and render the result in the *dst* folder.
:param dict context: Context dictionary to pass to Jinja2.
:param dict src: Dictionary of source directories to be rendered.
Keys of this dictionary should be paths relative
to the destination and values the original path
for the template files::
>>> src = {
... '.': '/path/to/foo',
... 'bar': '/path/to/bar',
... }
Files from ``/path/to/foo`` will be rendered in
``dst/.`` while files from ``/path/to/bar``
will be in ``dst/bar``.
"""
loader = {k: jinja2.FileSystemLoader(v) for k, v in src.items()}
engine = jinja2.Environment(
            extensions=['jinja2.ext.loopcontrols'],
            loader=jinja2.PrefixLoader(loader),
            trim_blocks=True,
            lstrip_blocks=True
)
for name, path in src.items():
fs.copytree(path, os.path.join(dst, name))
for name in engine.list_templates():
with open(os.path.join(dst, name), 'w') as fp:
fp.write(engine.get_template(name).render(context))
@classmethod
def skel_create_agent(cls, environment, name, **kwargs):
"""Create a new agent called *name*.
:param dict environment: Current project environment.
:param str dst: Path to the alternative directory in which to
create the agent.
:param bool has_knowledge: Will the agent be gathering some
knowledge? Defaults to ``False``.
:param str namespace: Namespace in which the agent should be
defined. Defaults to ``cfpropane``.
"""
skel_d = environment['project']['skel_d']
dst = kwargs.get('dst', '') or os.path.join(
environment['project']['src_d'], 'agents', name
)
has_knowledge = kwargs.get('has_knowledge', False)
namespace = kwargs.get('namespace', '') \
or environment['src']['namespace']
tplsrc = {
'.': os.path.join(skel_d, 'agent'),
'.skel': os.path.join(skel_d, '.skel', 'templates'),
}
ctx = {
'agent': {
'has_knowledge': has_knowledge,
'name': name,
'namespace': namespace,
},
}
files = {
d: tuple([
cls.expand_context_string(
ctx, p.replace(os.path.join(tplsrc['.'], d, ''), '')
)
for p in fs.lstree(os.path.join(tplsrc['.'], d), True)
if os.path.isfile(p)
])
for d in [
p.replace(os.path.join(tplsrc['.'], ''), '')
for p in fs.lstree(tplsrc['.'])
if os.path.isdir(p)
]
}
if not has_knowledge:
with suppress(KeyError):
files.pop('knowledge')
template = {
f: os.path.join('.skel', f)
for f in [
p.replace(os.path.join(tplsrc['.skel'], ''), '')
for p in fs.lstree(tplsrc['.skel'], recursive=True)
if os.path.isfile(p)
]
}
cls.update(ctx, {
'agent': {
'dirnames': tuple(files.keys()),
'files': files,
},
'skel': {
'template': template,
},
})
cls.render_tree(ctx, tplsrc, dst)
if not has_knowledge:
fs.rmtree(os.path.join(dst, 'knowledge'))
cls.move_template(ctx, dst, recursive=True)
fs.rmtree(os.path.join(dst, '.skel'))
@classmethod
def update_context(cls, environment, *ctx):
"""Update Jinja2 context dictionary with useful elements from
the project.
:param dict environment: Current project environment.
:param dict ctx: Context dictionary to be updated.
"""
context_functions = (
cls.context_add_project,
cls.context_cf_input_files,
)
for c in ctx:
for fn in context_functions:
fn(environment, c)
@classmethod
def context_add_project(cls, environment, ctx):
"""Add project information from the environment into the context.
:param dict environment: Current project environment.
:param dict ctx: Context dictionary to be updated.
"""
cls.update(
ctx.setdefault('project', {}),
environment.get('project', {})
)
@staticmethod
def context_cf_input_files(environment, ctx):
"""List all the files to be used by CFEngine as input files.
:param dict environment: Current project environment.
:param dict ctx: Context dictionary to be updated.
"""
input_patterns = (
environment['project']['src_d'],
environment['project']['lib_d'],
)
exclude_files = (
'promises.cf',
'failsafe.cf',
)
src_sep = os.path.join(environment['project']['src_d'], '')
cf_files = (
f.replace(src_sep, '')
for f in fs.lstree(input_patterns, recursive=True)
if f.endswith('.cf') and os.path.isfile(f)
)
ctx_cf3 = ctx.setdefault('cf3', {})
ctx_cf3['input_files'] = tuple(
f
for f in cf_files
            if not any(x in f for x in exclude_files)
)
#
# Task definitions
# ----------------
ENVIRONMENT = env.load('env.conf', use_defaults=True)
#
# Project tasks
# ^^^^^^^^^^^^^
@task(name='clean')
def project_clean():
"""Clean project folder from built files."""
build_d = ENVIRONMENT['project']['build_d']
src_d = ENVIRONMENT['project']['src_d']
lib_d = ENVIRONMENT['project']['lib_d']
bld_log = os.path.join(build_d, '.build')
patterns = [build_d, ]
if os.path.exists(bld_log):
with open(bld_log, 'r') as fp:
patterns = [x for x in fp.read().splitlines() if x]
patterns.append(bld_log)
lines = [x for x in fs.shexpand(patterns)]
if lines:
msg.write(msg.INFORMATION,
'Cleaning project', *sorted(lines, reverse=True))
fs.rmtree(patterns)
_proj_build_help = {
'environment': "Project environment to be built. Defaults to {}.".format(
        ENVIRONMENT['project'].get('default_env', 'dev')
),
}
@task(project_clean, name='build', help=_proj_build_help)
def project_build(environment=ENVIRONMENT['project'].get('default_env', 'dev')):
"""Build the project."""
build_d = ENVIRONMENT['project']['build_d']
build_log = os.path.join(build_d, '.build')
proj_env = [
x
for x in ENVIRONMENT.get('environment', {})
if x.get('name', '') == environment
][0]
dirs = {
'.': ENVIRONMENT['project']['src_d'],
'lib': ENVIRONMENT['project']['lib_d'],
}
context = {}
with suppress(AttributeError):
context = {
k: v
for k, v in proj_env.get('variables', {}).items()
}
env.update_context(ENVIRONMENT, context)
rendered = [
os.path.join(build_d, origine.replace(path, name))
for name, path in dirs.items()
for origine in fs.lstree(path, recursive=True)
]
msg.write(msg.INFORMATION, 'Building project', *rendered)
env.render_tree(context, dirs, build_d)
with suppress(OSError), open(build_log, 'w') as fp:
fp.write('\n'.join(rendered))
#
# Project task namespace
# """"""""""""""""""""""
ns_proj = Collection('proj')
ns_proj.add_task(project_build)
ns_proj.add_task(project_clean)
ns.add_collection(ns_proj)
#
# Skeleton tasks
# ^^^^^^^^^^^^^^
_skel_agent_help = {
'name': "Name of the element to create.",
'destination': "Alternate destination for the created agent",
'knowledge': "Will the agent be gathering some knowledge. Default: False.",
}
@task(name='agent', help=_skel_agent_help)
def skel_agent(name, destination='', knowledge=False):
"""Create a new project element from skeleton."""
env.skel_create_agent(ENVIRONMENT, name,
dst=destination, has_knowledge=knowledge)
#
# Skeleton task namespace
# """""""""""""""""""""""
ns_skel = Collection('skel')
ns_skel.add_task(skel_agent)
ns.add_collection(ns_skel)
#
# Documentation tasks
# ^^^^^^^^^^^^^^^^^^^
@task(name='clean')
def doc_clean():
"""Clean project folder from built documentation files."""
patterns = [ENVIRONMENT['doc']['build_d'], ]
lines = [x for x in fs.shexpand(patterns)]
if lines:
msg.write(msg.INFORMATION,
'Cleaning documentation', *sorted(lines, reverse=True))
fs.rmtree(patterns)
_doc_build_help = {
'target': "Targeted documentation format. Default to {}.".format(
ENVIRONMENT['doc']['target']
),
'code': "Insert documented code into documentation. Default to {}.".format(
ENVIRONMENT['doc']['insert_code']
),
}
@task(doc_clean, name='build', help=_doc_build_help)
def doc_build(target=ENVIRONMENT['doc']['target'],
code=ENVIRONMENT['doc']['insert_code']):
"""Build documentation using Sphinx."""
build_d = ENVIRONMENT['doc']['build_d']
out_d = os.path.join(build_d, 'output', target)
src_d = os.path.join(build_d, ENVIRONMENT['project']['src_d'])
msg.write(msg.INFORMATION, 'Building documentation')
shutil.copytree(ENVIRONMENT['doc']['src_d'], build_d)
docstring.to_dir(
ENVIRONMENT['project']['src_d'],
src_d,
insert_code=code
)
run(
'sphinx-build -b {target} {build_d} {out_d}'.format(
**locals()
)
)
ns_doc = Collection('doc')
ns_doc.add_task(doc_build)
ns_doc.add_task(doc_clean)
ns.add_collection(ns_doc)
#
# Test tasks
# ^^^^^^^^^^
_test_promises_help = {
'file': "Input file to be tested.",
}
@task(project_build, name='promises', help=_test_promises_help)
def test_promises(file=''):
"""Test built promises."""
build_d = os.path.abspath(ENVIRONMENT['project']['build_d'])
inputs = os.path.abspath(fs.shexpand('~/.cfagent/inputs')[0])
opts = ['--full-check', '--warn', 'all']
if file:
opts.append('--file {}'.format(file))
if not fs.symlink(build_d, inputs, force=True, target_is_directory=True):
msg.write(msg.ERROR,
'Could not create symlink to build directory.')
sys.exit(1)
msg.write(msg.INFORMATION, 'Testing promises.')
result = run('cf-promises {}'.format(' '.join(opts)))
if result.ok:
msg.write(msg.INFORMATION, 'OK!')
ns_test = Collection('test')
ns_test.add_task(test_promises)
ns.add_collection(ns_test)
#
# Global tasks
# ^^^^^^^^^^^^
@task(project_build, doc_build, default=True)
def build():
"""Call all the build tasks to build the project."""
msg.write(msg.INFORMATION, 'Done!')
@task(doc_clean, project_clean)
def clean():
"""Clean the whole project tree from built files."""
patterns = [
ENVIRONMENT['project']['build_d'],
]
lines = [
x
for x in fs.lstree(patterns, recursive=True, include_path=True)
if os.path.isdir(x)
]
if lines:
msg.write(msg.INFORMATION,
'Cleaning environment', *sorted(lines, reverse=True))
msg.write(msg.INFORMATION, 'Done!')
fs.rmdir(patterns, recursive=True)
ns.add_task(build)
ns.add_task(clean)
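# With the namespaces registered above, tasks are run through the
# ``invoke`` CLI, e.g. (an illustrative session):
#
#     $ invoke proj.build --environment dev
#     $ invoke doc.build --target html
#     $ invoke test.promises
#     $ invoke clean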
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: bigip_provision
short_description: Manage BIG-IP module provisioning
description:
- Manage BIG-IP module provisioning. This module will only provision at the
standard levels of Dedicated, Nominal, and Minimum.
version_added: 2.4
options:
module:
description:
- The module to provision in BIG-IP.
required: true
choices:
- am
- afm
- apm
- asm
- avr
- cgnat
- fps
- gtm
- ilx
- lc
- ltm
- pem
- sam
- swg
- vcmp
aliases:
- name
level:
description:
- Sets the provisioning level for the requested modules. Changing the
level for one module may require modifying the level of another module.
For example, changing one module to C(dedicated) requires setting all
others to C(none). Setting the level of a module to C(none) means that
the module is not activated.
- This parameter is not relevant to C(cgnat) and will not be applied to the
C(cgnat) module.
default: nominal
choices:
- dedicated
- nominal
- minimum
state:
description:
- The state of the provisioned module on the system. When C(present),
guarantees that the specified module is provisioned at the requested
level provided that there are sufficient resources on the device (such
as physical RAM) to support the provisioned module. When C(absent),
de-provision the module.
default: present
choices:
- present
- absent
extends_documentation_fragment: f5
author:
- <NAME> (@caphrim007)
'''
EXAMPLES = r'''
- name: Provision PEM at "nominal" level
bigip_provision:
server: lb.mydomain.com
module: pem
level: nominal
password: <PASSWORD>
user: admin
validate_certs: no
delegate_to: localhost
- name: Provision a dedicated SWG. This will unprovision every other module
bigip_provision:
server: lb.mydomain.com
module: swg
password: <PASSWORD>
level: dedicated
user: admin
validate_certs: no
delegate_to: localhost
'''
RETURN = r'''
level:
description: The new provisioning level of the module.
returned: changed
type: string
sample: minimum
'''
import time
from ansible.module_utils.basic import AnsibleModule
try:
from library.module_utils.network.f5.bigip import HAS_F5SDK
from library.module_utils.network.f5.bigip import F5Client
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import f5_argument_spec
try:
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
from f5.bigip.contexts import TransactionContextManager
from f5.sdk_exception import LazyAttributesRequired
except ImportError:
HAS_F5SDK = False
except ImportError:
from ansible.module_utils.network.f5.bigip import HAS_F5SDK
from ansible.module_utils.network.f5.bigip import F5Client
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import f5_argument_spec
try:
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
from f5.bigip.contexts import TransactionContextManager
from f5.sdk_exception import LazyAttributesRequired
except ImportError:
HAS_F5SDK = False
class Parameters(AnsibleF5Parameters):
api_attributes = ['level']
returnables = ['level']
updatables = ['level', 'cgnat']
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
return result
except Exception:
return result
@property
def level(self):
if self._values['level'] is None:
return None
if self.state == 'absent':
return 'none'
return str(self._values['level'])
class ApiParameters(Parameters):
pass
class ModuleParameters(Parameters):
pass
class Changes(Parameters):
pass
class UsableChanges(Parameters):
pass
class ReportableChanges(Parameters):
pass
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
result = self.__default(param)
return result
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
@property
def cgnat(self):
if self.want.module == 'cgnat':
if self.want.state == 'absent' and self.have.enabled is True:
return True
if self.want.state == 'present' and self.have.disabled is True:
return True
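# A brief note on the comparison flow: Difference.compare() first looks
# for a property named after the parameter (e.g. ``cgnat`` above) and
# otherwise falls back to __default(), which reports want's value only
# when it differs from have's. The ``cgnat`` property instead reports a
# change whenever the desired state conflicts with the feature module's
# enabled/disabled flags.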
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.have = None
self.want = ModuleParameters(params=self.module.params)
self.changes = UsableChanges()
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def exec_module(self):
if not HAS_F5SDK:
raise F5ModuleError("The python f5-sdk module is required")
changed = False
result = dict()
state = self.want.state
try:
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
changes = self.changes.to_return()
result.update(**changes)
result.update(dict(changed=changed))
return result
def present(self):
if self.exists():
return False
return self.update()
def exists(self):
if self.want.module == 'cgnat':
resource = self.client.api.tm.sys.feature_module.cgnat.load()
if resource.disabled is True:
return False
elif resource.enabled is True:
return True
try:
for x in range(0, 5):
provision = self.client.api.tm.sys.provision
resource = getattr(provision, self.want.module)
resource = resource.load()
result = resource.attrs
if str(result['level']) != 'none' and self.want.level == 'none':
return True
if str(result['level']) == 'none' and self.want.level == 'none':
return False
if str(result['level']) == self.want.level:
return True
return False
except Exception as ex:
if 'not registered' in str(ex):
return False
time.sleep(1)
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
result = self.update_on_device()
if self.want.module == 'cgnat':
return result
self._wait_for_module_provisioning()
if self.want.module == 'vcmp':
self._wait_for_reboot()
self._wait_for_module_provisioning()
if self.want.module == 'asm':
self._wait_for_asm_ready()
if self.want.module == 'afm':
self._wait_for_afm_ready()
return True
def should_reboot(self):
for x in range(0, 24):
try:
resource = self.client.api.tm.sys.dbs.db.load(name='provision.action')
if resource.value == 'reboot':
return True
elif resource.value == 'none':
time.sleep(5)
except Exception:
time.sleep(5)
return False
def reboot_device(self):
nops = 0
last_reboot = self._get_last_reboot()
try:
output = self.client.api.tm.util.bash.exec_cmd(
'run',
utilCmdArgs='-c "/sbin/reboot"'
)
if hasattr(output, 'commandResult'):
return str(output.commandResult)
except Exception:
pass
# Sleep a little to let rebooting take effect
time.sleep(20)
while nops < 3:
try:
self.client.reconnect()
next_reboot = self._get_last_reboot()
if next_reboot is None:
nops = 0
if next_reboot == last_reboot:
nops = 0
else:
nops += 1
except Exception as ex:
# This can be caused by restjavad restarting.
pass
time.sleep(10)
return None
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update_on_device(self):
if self.want.module == 'cgnat':
if self.changes.cgnat:
return self.provision_cgnat_on_device()
return False
elif self.want.level == 'dedicated':
self.provision_dedicated_on_device()
else:
self.provision_non_dedicated_on_device()
def provision_cgnat_on_device(self):
resource = self.client.api.tm.sys.feature_module.cgnat.load()
resource.modify(
enabled=True
)
return True
def provision_dedicated_on_device(self):
params = self.want.api_params()
tx = self.client.api.tm.transactions.transaction
collection = self.client.api.tm.sys.provision.get_collection()
resources = [x['name'] for x in collection if x['name'] != self.want.module]
with TransactionContextManager(tx) as api:
provision = api.tm.sys.provision
for resource in resources:
resource = getattr(provision, resource)
resource = resource.load()
resource.update(level='none')
resource = getattr(provision, self.want.module)
resource = resource.load()
resource.update(**params)
def provision_non_dedicated_on_device(self):
params = self.want.api_params()
provision = self.client.api.tm.sys.provision
resource = getattr(provision, self.want.module)
resource = resource.load()
resource.update(**params)
def read_current_from_device(self):
if self.want.module == 'cgnat':
resource = self.client.api.tm.sys.feature_module.cgnat.load()
result = resource.attrs
else:
provision = self.client.api.tm.sys.provision
resource = getattr(provision, str(self.want.module))
resource = resource.load()
result = resource.attrs
return ApiParameters(params=result)
def absent(self):
if self.exists():
return self.remove()
return False
def remove(self):
if self.module.check_mode:
return True
result = self.remove_from_device()
if self.want.module == 'cgnat':
return result
self._wait_for_module_provisioning()
# For vCMP, because it has to reboot, we also wait for mcpd to become available
# before "moving on", or else the REST API would not be available and subsequent
# Tasks would fail.
if self.want.module == 'vcmp':
self._wait_for_reboot()
self._wait_for_module_provisioning()
if self.should_reboot():
self.save_on_device()
self.reboot_device()
self._wait_for_module_provisioning()
if self.exists():
raise F5ModuleError("Failed to de-provision the module")
return True
def save_on_device(self):
command = 'tmsh save sys config'
self.client.api.tm.util.bash.exec_cmd(
'run',
utilCmdArgs='-c "{0}"'.format(command)
)
def remove_from_device(self):
if self.want.module == 'cgnat':
if self.changes.cgnat:
return self.deprovision_cgnat_on_device()
return False
provision = self.client.api.tm.sys.provision
resource = getattr(provision, self.want.module)
resource = resource.load()
resource.update(level='none')
def deprovision_cgnat_on_device(self):
resource = self.client.api.tm.sys.feature_module.cgnat.load()
resource.modify(
disabled=True
)
return True
def _wait_for_module_provisioning(self):
        # To prevent things from running forever, the hack is to check
        # mprov's status several times in a row. If mprov is finished,
        # then in most cases (not ASM) the provisioning is probably ready.
nops = 0
# Sleep a little to let provisioning settle and begin properly
time.sleep(5)
while nops < 3:
try:
if not self._is_mprov_running_on_device():
nops += 1
else:
nops = 0
except Exception:
# This can be caused by restjavad restarting.
try:
self.client.reconnect()
except Exception:
pass
time.sleep(5)
def _is_mprov_running_on_device(self):
# /usr/libexec/qemu-kvm is added here to prevent vcmp provisioning
# from never allowing the mprov provisioning to succeed.
#
# It turns out that the 'mprov' string is found when enabling vcmp. The
# qemu-kvm command that is run includes it.
#
# For example,
# /usr/libexec/qemu-kvm -rt-usecs 880 ... -mem-path /dev/mprov/vcmp -f5-tracing ...
#
try:
output = self.client.api.tm.util.bash.exec_cmd(
'run',
utilCmdArgs='-c "ps aux | grep \'[m]prov\' | grep -v /usr/libexec/qemu-kvm"'
)
if hasattr(output, 'commandResult'):
return True
except Exception:
pass
return False
def _wait_for_asm_ready(self):
"""Waits specifically for ASM
On older versions, ASM can take longer to actually start up than
all the previous checks take. This check here is specifically waiting for
the Policies API to stop raising errors
:return:
"""
nops = 0
restarted_asm = False
while nops < 3:
try:
policies = self.client.api.tm.asm.policies_s.get_collection()
if len(policies) >= 0:
nops += 1
else:
nops = 0
except Exception as ex:
if not restarted_asm:
self._restart_asm()
restarted_asm = True
time.sleep(5)
def _wait_for_afm_ready(self):
"""Waits specifically for AFM
AFM can take longer to actually start up than all the previous checks take.
This check here is specifically waiting for the Security API to stop raising
errors.
:return:
"""
nops = 0
while nops < 3:
try:
security = self.client.api.tm.security.get_collection()
if len(security) >= 0:
nops += 1
else:
nops = 0
except Exception as ex:
pass
time.sleep(5)
def _restart_asm(self):
try:
self.client.api.tm.util.bash.exec_cmd(
'run',
utilCmdArgs='-c "bigstart restart asm"'
)
time.sleep(60)
return True
except Exception:
pass
return None
def _get_last_reboot(self):
try:
output = self.client.api.tm.util.bash.exec_cmd(
'run',
utilCmdArgs='-c "/usr/bin/last reboot | head -1"'
)
if hasattr(output, 'commandResult'):
return str(output.commandResult)
except Exception:
pass
return None
def _wait_for_reboot(self):
nops = 0
last_reboot = self._get_last_reboot()
# Sleep a little to let provisioning settle and begin properly
time.sleep(5)
while nops < 6:
try:
self.client.reconnect()
next_reboot = self._get_last_reboot()
if next_reboot is None:
nops = 0
if next_reboot == last_reboot:
nops = 0
else:
nops += 1
except Exception as ex:
# This can be caused by restjavad restarting.
pass
time.sleep(10)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
module=dict(
required=True,
choices=[
'afm', 'am', 'sam', 'asm', 'avr', 'fps',
'gtm', 'lc', 'ltm', 'pem', 'swg', 'ilx',
'apm', 'vcmp', 'cgnat'
],
aliases=['name']
),
level=dict(
default='nominal',
choices=['nominal', 'dedicated', 'minimum']
),
state=dict(
default='present',
choices=['present', 'absent']
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
self.mutually_exclusive = [
['parameters', 'parameters_src']
]
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
mutually_exclusive=spec.mutually_exclusive
)
if not HAS_F5SDK:
module.fail_json(msg="The python f5-sdk module is required")
try:
client = F5Client(**module.params)
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
cleanup_tokens(client)
module.exit_json(**results)
except F5ModuleError as ex:
cleanup_tokens(client)
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
from datetime import datetime
from api import db, ma
class User(db.Model):
    id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(20), unique=True)
email = db.Column(db.String(50), unique=True)
created = db.Column(db.DateTime)
def __init__(self, username, email):
self.username = username
self.email = email
self.created = datetime.now()
class UserSchema(ma.Schema):
    class Meta:
        # 'model' is only honored by SQLAlchemy-aware schemas; with a plain
        # ma.Schema the explicit 'fields' tuple does the work.
        model = User
        fields = ('id', 'username', 'email')
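# Minimal usage sketch (illustrative; assumes an active Flask app context
# and an initialized database):
#   user = User('alice', 'alice@example.com')
#   db.session.add(user)
#   db.session.commit()
#   print(UserSchema().dump(user))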
|
[
"datetime.datetime.now",
"api.db.Column",
"api.db.String"
] |
[((125, 164), 'api.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (134, 164), False, 'from api import db, ma\n'), ((284, 306), 'api.db.Column', 'db.Column', (['db.DateTime'], {}), '(db.DateTime)\n', (293, 306), False, 'from api import db, ma\n'), ((192, 205), 'api.db.String', 'db.String', (['(20)'], {}), '(20)\n', (201, 205), False, 'from api import db, ma\n'), ((242, 255), 'api.db.String', 'db.String', (['(50)'], {}), '(50)\n', (251, 255), False, 'from api import db, ma\n'), ((433, 447), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (445, 447), False, 'from datetime import datetime\n')]
|
import time
t_start = time.time()
from prettytable import PrettyTable
import datetime
file = open('testresult.txt', 'w')
file.write("Query: Enter Query Here\n\n")
file.write("Output Location: Enter location here\n\n")
file.write("Test start time: " + str(datetime.datetime.now()) + "\n\n")
t = PrettyTable(['Start Time', 'Run', 'No of Records', 'Time(s)', 'Errors'])
for i in range(10):
    t.add_row([datetime.datetime.now(), str(i + 1), 'hey', 'there', '0'])
t_end = time.time()
total_execution_time = t_end - t_start
file.write(str(t))
file.write("\nTotal Test Execution Time: " + str(total_execution_time) + " seconds")
file.close()
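# Note: time.perf_counter() provides a higher-resolution monotonic clock
# if finer-grained timing is ever needed.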
|
[
"datetime.datetime.now",
"prettytable.PrettyTable",
"time.time"
] |
[((21, 32), 'time.time', 'time.time', ([], {}), '()\n', (30, 32), False, 'import time\n'), ((298, 370), 'prettytable.PrettyTable', 'PrettyTable', (["['Start Time', 'Run', 'No of Records', 'Time(s)', 'Errors']"], {}), "(['Start Time', 'Run', 'No of Records', 'Time(s)', 'Errors'])\n", (309, 370), False, 'from prettytable import PrettyTable\n'), ((470, 481), 'time.time', 'time.time', ([], {}), '()\n', (479, 481), False, 'import time\n'), ((407, 430), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (428, 430), False, 'import datetime\n'), ((261, 284), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (282, 284), False, 'import datetime\n')]
|
import sys
sys.path.append("./")
from brainrender.Utils.data_io import connected_to_internet
import requests
import time
mouselight_base_url = "http://ml-neuronbrowser.janelia.org/"
def request(url):
"""
    Sends a GET request to a URL.
:param url:
"""
if not connected_to_internet():
raise ConnectionError("You need to have an internet connection to send requests.")
response = requests.get(url)
if response.ok:
return response
else:
exception_string = 'URL request failed: {}'.format(response.reason)
raise ValueError(exception_string)
def query_mouselight(query):
"""
Sends a GET request, not currently used for anything.
:param query:
"""
if not connected_to_internet():
raise ConnectionError("You need an internet connection for API queries, sorry.")
base_url = "http://ml-neuronbrowser.janelia.org/"
full_query = base_url + query
# send the query, package the return argument as a json tree
response = requests.get(full_query)
if response.ok:
json_tree = response.json()
if json_tree['success']:
return json_tree
else:
exception_string = 'did not complete api query successfully'
else:
        exception_string = 'API failure. Server says: {}'.format(
            response.reason)
# raise an exception if the API request failed
raise ValueError(exception_string)
def post_mouselight(url, query=None, clean=False, attempts=3):
"""
    Sends a POST request to a user-specified URL. Query can be either a string (in which case clean should be False) or a dictionary.
:param url:
:param query: string or dictionary with query (Default value = None)
:param clean: if not clean, the query is assumed to be in JSON format (Default value = False)
:param attempts: number of attempts (Default value = 3)
"""
if not connected_to_internet():
raise ConnectionError("You need an internet connection for API queries, sorry.")
request = None
if query is not None:
for i in range(attempts):
try:
if not clean:
time.sleep(0.01) # avoid getting an error from server
request = requests.post(url, json={'query': query})
else:
time.sleep(0.01) # avoid getting an error from server
request = requests.post(url, json=query)
except Exception as e:
exception = e
request = None
print('MouseLight API query failed. Attempt {} of {}'.format(i+1, attempts))
if request is not None: break
if request is None:
raise ConnectionError("\n\nMouseLight API query failed with error message:\n{}.\
\nPerhaps the server is down, visit 'http://ml-neuronbrowser.janelia.org' to find out.".format(exception))
else:
raise NotImplementedError
if request.status_code == 200:
jreq = request.json()
if 'data' in list(jreq.keys()):
return jreq['data']
else:
return jreq
else:
raise Exception("Query failed to run by returning code of {}. {} -- \n\n{}".format(request.status_code, query, request.text))
|
[
"sys.path.append",
"time.sleep",
"requests.get",
"requests.post",
"brainrender.Utils.data_io.connected_to_internet"
] |
[((11, 32), 'sys.path.append', 'sys.path.append', (['"""./"""'], {}), "('./')\n", (26, 32), False, 'import sys\n'), ((388, 405), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (400, 405), False, 'import requests\n'), ((943, 967), 'requests.get', 'requests.get', (['full_query'], {}), '(full_query)\n', (955, 967), False, 'import requests\n'), ((266, 289), 'brainrender.Utils.data_io.connected_to_internet', 'connected_to_internet', ([], {}), '()\n', (287, 289), False, 'from brainrender.Utils.data_io import connected_to_internet\n'), ((675, 698), 'brainrender.Utils.data_io.connected_to_internet', 'connected_to_internet', ([], {}), '()\n', (696, 698), False, 'from brainrender.Utils.data_io import connected_to_internet\n'), ((1749, 1772), 'brainrender.Utils.data_io.connected_to_internet', 'connected_to_internet', ([], {}), '()\n', (1770, 1772), False, 'from brainrender.Utils.data_io import connected_to_internet\n'), ((1956, 1972), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (1966, 1972), False, 'import time\n'), ((2025, 2066), 'requests.post', 'requests.post', (['url'], {'json': "{'query': query}"}), "(url, json={'query': query})\n", (2038, 2066), False, 'import requests\n'), ((2082, 2098), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (2092, 2098), False, 'import time\n'), ((2151, 2181), 'requests.post', 'requests.post', (['url'], {'json': 'query'}), '(url, json=query)\n', (2164, 2181), False, 'import requests\n')]
|
##############################################################################
# Copyright 2016-2017 Rigetti Computing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import numpy as np
from pyquil.paulis import PauliTerm, PauliSum
import pyquil.api as api
from scipy.optimize import minimize
from grove.pyqaoa.qaoa import QAOA
CXN = api.QVMConnection()
def numpart_qaoa(asset_list, A=1.0, minimizer_kwargs=None, steps=1):
"""
    Generate number-partition driver and cost functions.
    :param asset_list: list to binary partition
:param A: (float) optional constant for level separation. Default=1.
:param minimizer_kwargs: Arguments for the QAOA minimizer
:param steps: (int) number of steps approximating the solution.
"""
cost_operators = []
ref_operators = []
for ii in range(len(asset_list)):
for jj in range(ii + 1, len(asset_list)):
cost_operators.append(PauliSum([PauliTerm("Z", ii, 2*asset_list[ii]) *
PauliTerm("Z", jj, A*asset_list[jj])]))
ref_operators.append(PauliSum([PauliTerm("X", ii, -1.0)]))
cost_operators.append(PauliSum([PauliTerm("I", 0, len(asset_list))]))
if minimizer_kwargs is None:
minimizer_kwargs = {'method': 'Nelder-Mead',
'options': {'ftol': 1.0e-2,
'xtol': 1.0e-2,
'disp': True}}
n_qubits = len(asset_list)
qaoa_inst = QAOA(CXN, n_qubits, steps=steps, cost_ham=cost_operators,
ref_hamiltonian=ref_operators, store_basis=True,
minimizer=minimize, minimizer_kwargs=minimizer_kwargs,
vqe_options={'disp': True})
return qaoa_inst
if __name__ == "__main__":
# Sample Run.
# result should be an even partition of nodes
inst = numpart_qaoa([1, 1, 1, 1, 1, 1], A=1.0, steps=1)
betas, gammas = inst.get_angles()
print(betas)
print(gammas)
probs = inst.probabilities(np.hstack((betas, gammas)))
for state, prob in zip(inst.states, probs):
print(state, prob)
print("Most frequent bitstring from sampling")
most_freq_string, sampling_results = inst.get_string(
betas, gammas, samples=100)
print(most_freq_string)
|
[
"pyquil.api.QVMConnection",
"pyquil.paulis.PauliTerm",
"grove.pyqaoa.qaoa.QAOA",
"numpy.hstack"
] |
[((939, 958), 'pyquil.api.QVMConnection', 'api.QVMConnection', ([], {}), '()\n', (956, 958), True, 'import pyquil.api as api\n'), ((2098, 2297), 'grove.pyqaoa.qaoa.QAOA', 'QAOA', (['CXN', 'n_qubits'], {'steps': 'steps', 'cost_ham': 'cost_operators', 'ref_hamiltonian': 'ref_operators', 'store_basis': '(True)', 'minimizer': 'minimize', 'minimizer_kwargs': 'minimizer_kwargs', 'vqe_options': "{'disp': True}"}), "(CXN, n_qubits, steps=steps, cost_ham=cost_operators, ref_hamiltonian=\n ref_operators, store_basis=True, minimizer=minimize, minimizer_kwargs=\n minimizer_kwargs, vqe_options={'disp': True})\n", (2102, 2297), False, 'from grove.pyqaoa.qaoa import QAOA\n'), ((2634, 2660), 'numpy.hstack', 'np.hstack', (['(betas, gammas)'], {}), '((betas, gammas))\n', (2643, 2660), True, 'import numpy as np\n'), ((1694, 1718), 'pyquil.paulis.PauliTerm', 'PauliTerm', (['"""X"""', 'ii', '(-1.0)'], {}), "('X', ii, -1.0)\n", (1703, 1718), False, 'from pyquil.paulis import PauliTerm, PauliSum\n'), ((1532, 1570), 'pyquil.paulis.PauliTerm', 'PauliTerm', (['"""Z"""', 'ii', '(2 * asset_list[ii])'], {}), "('Z', ii, 2 * asset_list[ii])\n", (1541, 1570), False, 'from pyquil.paulis import PauliTerm, PauliSum\n'), ((1615, 1653), 'pyquil.paulis.PauliTerm', 'PauliTerm', (['"""Z"""', 'jj', '(A * asset_list[jj])'], {}), "('Z', jj, A * asset_list[jj])\n", (1624, 1653), False, 'from pyquil.paulis import PauliTerm, PauliSum\n')]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os.path
import sys
import shutil
import random
import matplotlib.pyplot as plt
import xml.etree.ElementTree as ET
import scipy.io as scio
import tensorflow_datasets as tfds
import tensorflow as tf
import numpy as np
from common.inputs import data_input
from common import utils
SRC = 'Pascal VOC 2010/VOCdevkit/VOC2010'
OUTPUT = '../../../data/Pascal VOC 2010'
CATEGORIES = ['bird', 'cat', 'cow', 'dog', 'horse', 'sheep']
class BoundingBox(object):
pass
def GetItem(name, root, index=0):
count = 0
for item in root.iter(name):
if count == index:
return item.text
count += 1
# Failed to find "index" occurrence of item.
return -1
def GetInt(name, root, index=0):
# In some XML annotation files, the point values are not integers, but floats.
# So we add a float function to avoid ValueError.
return int(float(GetItem(name, root, index)))
def FindNumberBoundingBoxes(root):
index = 0
while True:
if GetInt('xmin', root, index) == -1:
break
index += 1
return index
def ProcessXMLAnnotation(xml_file):
"""Process a single XML file containing a bounding box."""
# pylint: disable=broad-except
try:
tree = ET.parse(xml_file)
except Exception:
print('Failed to parse: ' + xml_file, file=sys.stderr)
return None
# pylint: enable=broad-except
root = tree.getroot()
num_boxes = FindNumberBoundingBoxes(root)
boxes = []
for index in range(num_boxes):
box = BoundingBox()
# Grab the 'index' annotation.
box.xmin = GetInt('xmin', root, index)
box.ymin = GetInt('ymin', root, index)
box.xmax = GetInt('xmax', root, index)
box.ymax = GetInt('ymax', root, index)
box.width = GetInt('width', root)
box.height = GetInt('height', root)
box.filename = GetItem('filename', root) + '.JPEG'
box.label = GetItem('name', root)
xmin = float(box.xmin) / float(box.width)
xmax = float(box.xmax) / float(box.width)
ymin = float(box.ymin) / float(box.height)
ymax = float(box.ymax) / float(box.height)
# Some images contain bounding box annotations that
# extend outside of the supplied image. See, e.g.
# n03127925/n03127925_147.xml
# Additionally, for some bounding boxes, the min > max
# or the box is entirely outside of the image.
min_x = min(xmin, xmax)
max_x = max(xmin, xmax)
box.xmin_scaled = min(max(min_x, 0.0), 1.0)
box.xmax_scaled = min(max(max_x, 0.0), 1.0)
min_y = min(ymin, ymax)
max_y = max(ymin, ymax)
box.ymin_scaled = min(max(min_y, 0.0), 1.0)
box.ymax_scaled = min(max(max_y, 0.0), 1.0)
boxes.append(box)
return boxes
def image_normalize(image, arch, inverse=False):
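    """Maps images into the range a given backbone expects: [-1, 1] for
    InceptionV3, per-channel ImageNet mean subtraction otherwise;
    inverse=True undoes the mapping and clips to [0, 1] for display."""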
if arch == 'InceptionV3':
if inverse:
image = (image + 1) * 127.5 / 255.
image = tf.clip_by_value(image, 0, 1)
else:
image = image / 127.5 - 1
else:
mean = [123.68, 116.779, 103.939]
if inverse:
image = (image + mean) / 255.
image = tf.clip_by_value(image, 0, 1)
else:
image -= mean
return image
def find_animals_file():
datasets = {'train': [], 'valid': []}
for category in CATEGORIES:
train_paths = category + '_train.txt'
with open(os.path.join(SRC, 'ImageSets/Main', train_paths)) as f:
for line in f.readlines():
line = line.strip().split()
if line[-1] == '1':
datasets['train'].append((line[0], category))
valid_paths = category + '_val.txt'
with open(os.path.join(SRC, 'ImageSets/Main', valid_paths)) as f:
for line in f.readlines():
line = line.strip().split()
if line[-1] == '1':
datasets['valid'].append((line[0], category))
with open(os.path.join(OUTPUT, 'animals_train.txt'), 'w') as f:
for sample in datasets['train']:
f.write(sample[0] + ' ' + sample[1])
f.write('\n')
with open(os.path.join(OUTPUT, 'animals_valid.txt'), 'w') as f:
for sample in datasets['valid']:
f.write(sample[0] + ' ' + sample[1])
f.write('\n')
for category in CATEGORIES:
os.makedirs(os.path.join(OUTPUT, 'animal_train', category))
os.makedirs(os.path.join(OUTPUT, 'animal_valid', category))
source = os.path.join(SRC, 'JPEGImages')
for sample in datasets['train']:
shutil.copy(os.path.join(source, sample[0] + '.jpg'), os.path.join(OUTPUT, 'animal_train', sample[1]))
for sample in datasets['valid']:
shutil.copy(os.path.join(source, sample[0] + '.jpg'), os.path.join(OUTPUT, 'animal_valid', sample[1]))
def test_find_multi_label():
datasets = {'train': {}, 'valid': {}}
with open(os.path.join(OUTPUT, 'animals_train.txt')) as f:
for line in f.readlines():
line = line.strip().split()
if line[0] in datasets['train']:
datasets['train'][line[0]].append(line[1])
else:
datasets['train'][line[0]] = [line[1]]
with open(os.path.join(OUTPUT, 'animals_train_mul.txt'), 'w') as f:
for sample in datasets['train']:
label = ''
for item in datasets['train'][sample]:
label += item
label += ' '
f.write(sample + ' ' + label)
f.write('\n')
with open(os.path.join(OUTPUT, 'animals_valid.txt')) as f:
for line in f.readlines():
line = line.strip().split()
if line[0] in datasets['valid']:
datasets['valid'][line[0]].append(line[1])
else:
datasets['valid'][line[0]] = [line[1]]
with open(os.path.join(OUTPUT, 'animals_valid_mul.txt'), 'w') as f:
for sample in datasets['valid']:
label = ''
for item in datasets['valid'][sample]:
label += item
label += ' '
f.write(sample + ' ' + label)
f.write('\n')
def test_generate_masks():
mask_dir = os.path.join(OUTPUT, 'animal_obj_mask')
if not os.path.exists(mask_dir):
os.makedirs(mask_dir)
write_obj_masks('animals_train_mul.txt', mask_dir)
write_obj_masks('animals_valid_mul.txt', mask_dir)
def write_obj_masks(source, mask_dir):
from scipy import misc
with open(os.path.join(OUTPUT, source)) as f:
for line in f.readlines():
line = line.strip().split()
file = os.path.join(OUTPUT, 'Annotations_Part', line[0] + '.mat')
labels = line[1:]
objects = scio.loadmat(file)['anno'][0][0][1][0]
valid_obj = []
for obj in objects:
if obj[0] in labels:
valid_obj.append(obj)
masks = []
for item in valid_obj:
masks.append(np.expand_dims(item[2], -1))
masks = np.concatenate(masks, -1)
masks = np.sum(masks, -1, keepdims=False)
masks[masks > 1] = 1
misc.imsave(os.path.join(mask_dir, line[0] + '.jpg'), masks)
def build_dataset(data_dir='data', batch_size=128, shape=(224, 224, 3), flip=True, crop=True):
data_path = os.path.join(data_dir, 'Pascal VOC 2010')
labels = ['bird', 'cat', 'cow', 'dog', 'horse', 'sheep']
train_path = os.path.join(data_path, 'animal_train_crop')
valid_path = os.path.join(data_path, 'animal_valid_crop')
train_images = []
train_labels = []
valid_images = []
valid_labels = []
for i, l in enumerate(labels):
train_files = os.listdir(os.path.join(train_path, l))
for file in train_files:
file = os.path.join(train_path, l, file)
train_images.append(file)
train_labels.append(i)
valid_files = os.listdir(os.path.join(valid_path, l))
for file in valid_files:
file = os.path.join(valid_path, l, file)
valid_images.append(file)
valid_labels.append(i)
train_images = tf.constant(train_images)
train_labels = tf.constant(train_labels)
valid_images = tf.constant(valid_images)
valid_labels = tf.constant(valid_labels)
train = tf.data.Dataset.from_tensor_slices((train_images, train_labels)).shuffle(2816).\
map(build_parse((shape[0], shape[1]), flip), num_parallel_calls=tf.data.experimental.AUTOTUNE).\
batch(batch_size).prefetch(tf.data.experimental.AUTOTUNE)
test = tf.data.Dataset.from_tensor_slices((valid_images, valid_labels)).\
map(build_parse((shape[0], shape[1])), num_parallel_calls=tf.data.experimental.AUTOTUNE).\
batch(batch_size).prefetch(tf.data.experimental.AUTOTUNE)
info = data_input.DataInfo(tfds.features.FeaturesDict({'image': tfds.features.Image(shape=shape),
'label': tfds.features.ClassLabel(num_classes=len(labels))}),
{'train_examples': 2816,
'test_examples': 2839})
return train, test, info
def build_dataset2(data_dir='data', batch_size=128, shape=(224, 224, 3), target=None):
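    """Builds train/test pipelines cropped to the largest annotated box per
    image; with 'target' set, labels collapse to a binary indicator for
    that one category."""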
data_path = os.path.join(data_dir, 'Pascal VOC 2010')
SRC_path = os.path.join(data_dir, SRC)
train_path = os.path.join(data_path, 'animal_train')
valid_path = os.path.join(data_path, 'animal_valid')
train_images = []
train_boxes = []
train_labels = []
valid_images = []
valid_boxes = []
valid_labels = []
for i, l in enumerate(CATEGORIES):
train_files = os.listdir(os.path.join(train_path, l))
for file in train_files:
obj_anno = file.split('.')[0] + '.xml'
obj_anno = os.path.join(SRC_path, 'Annotations', obj_anno)
tree = ET.parse(obj_anno)
file = os.path.join(train_path, l, file)
train_images.append(file)
train_labels.append(i)
# img = Image.open(file).size
area = 0
box = []
for obj in tree.getroot().iter('object'):
if obj.find('name').text == l:
bndbox = obj.find('bndbox')
xmin = int(bndbox.find('xmin').text)
ymin = int(bndbox.find('ymin').text)
xmax = int(bndbox.find('xmax').text)
ymax = int(bndbox.find('ymax').text)
new_area = (xmax-xmin)*(ymax-ymin)
if new_area > area:
area = new_area
box = [ymin, xmin, ymax, xmax]
train_boxes.append(box)
valid_files = os.listdir(os.path.join(valid_path, l))
for file in valid_files:
obj_anno = file.split('.')[0] + '.xml'
obj_anno = os.path.join(SRC_path, 'Annotations', obj_anno)
tree = ET.parse(obj_anno)
area = 0
box = []
for obj in tree.getroot().iter('object'):
if obj.find('name').text == l:
bndbox = obj.find('bndbox')
xmin = int(bndbox.find('xmin').text)
ymin = int(bndbox.find('ymin').text)
xmax = int(bndbox.find('xmax').text)
ymax = int(bndbox.find('ymax').text)
new_area = (xmax - xmin) * (ymax - ymin)
if new_area > area:
area = new_area
box = [ymin, xmin, ymax, xmax]
valid_boxes.append(box)
file = os.path.join(valid_path, l, file)
valid_images.append(file)
valid_labels.append(i)
train_images = tf.constant(train_images)
train_boxes = tf.constant(train_boxes, dtype=tf.float32)
train_labels = tf.constant(train_labels)
valid_images = tf.constant(valid_images)
valid_boxes = tf.constant(valid_boxes, dtype=tf.float32)
valid_labels = tf.constant(valid_labels)
train = tf.data.Dataset.from_tensor_slices((train_images, train_boxes, train_labels)).shuffle(1919).\
map(build_parse2((shape[0], shape[1]), train=True), num_parallel_calls=tf.data.experimental.AUTOTUNE).\
batch(batch_size).prefetch(tf.data.experimental.AUTOTUNE)
test = tf.data.Dataset.from_tensor_slices((valid_images, valid_boxes, valid_labels)).\
map(build_parse2((shape[0], shape[1])), num_parallel_calls=tf.data.experimental.AUTOTUNE).\
batch(batch_size).prefetch(tf.data.experimental.AUTOTUNE)
info = data_input.DataInfo(tfds.features.FeaturesDict({'image': tfds.features.Image(shape=shape),
'label': tfds.features.ClassLabel(num_classes=len(CATEGORIES))}),
{'train_examples': 1919,
'test_examples': 1914})
if target:
target = CATEGORIES.index(target)
def single_parse(image, label):
label = tf.equal(label, target)
label = tf.cast(label, tf.int32)
return image, label
train = train.map(single_parse)
test = test.map(single_parse)
return train, test, info
def get_test_set_with_landmark3(data_dir='data', category=None, batch_size=128, shape=(224, 224, 3), arch='InceptionV3'): # full image
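    """Test set over full (uncropped) images with part-annotation paths
    attached; 'category' restricts the set to images containing that
    animal."""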
data_path = os.path.join(data_dir, 'Pascal VOC 2010')
SRC_path = os.path.join(data_dir, SRC)
valid_path = os.path.join(data_path, 'animals_valid_mul.txt')
valid_images = []
valid_labels = []
valid_masks = []
def get_parse(size):
def parse(path, label, mask):
image_str = tf.io.read_file(path)
image = tf.image.decode_jpeg(image_str)
image = tf.image.resize(image, size=size)
image = image_normalize(image, arch)
return image, label, mask
return parse
with open(valid_path) as f:
for line in f.readlines():
line = line.strip().split()
file = os.path.join(SRC_path, 'JPEGImages', line[0] + '.jpg')
labels = line[1:]
labels_one_hot = np.zeros([6,])
if category is not None and category not in labels:
continue
for label in labels:
idx = CATEGORIES.index(label)
labels_one_hot[idx] = 1
valid_images.append(file)
valid_labels.append(labels_one_hot)
part_mask = line[0] + '.mat'
part_mask = os.path.join(data_path, 'Annotations_Part', part_mask)
valid_masks.append(part_mask)
valid_images = tf.constant(valid_images)
valid_labels = tf.constant(valid_labels)
valid_landmarks = tf.constant(valid_masks)
num = len(valid_images)
test = tf.data.Dataset.from_tensor_slices((valid_images, valid_labels, valid_landmarks)).\
map(get_parse((shape[0], shape[1])), num_parallel_calls=tf.data.experimental.AUTOTUNE).\
batch(batch_size).prefetch(tf.data.experimental.AUTOTUNE)
info = data_input.DataInfo(tfds.features.FeaturesDict({'image': tfds.features.Image(shape=shape),
'label': tfds.features.ClassLabel(num_classes=6)}),
{'test_examples': num})
return test, info
def build_dataset3(data_dir='data', batch_size=128, shape=(224, 224, 3), target=None, with_mask=False, arch='InceptionV3', multi=False, shuffle_test=False):
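    """Builds train/test pipelines from the multi-label split files; with
    multi=True only images carrying two or more category labels are kept,
    and shuffle_test permutes the validation order."""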
data_path = os.path.join(data_dir, 'Pascal VOC 2010')
SRC_path = os.path.join(data_dir, SRC, 'JPEGImages')
mask_path = os.path.join(data_path, 'animal_obj_mask')
train_path = os.path.join(data_path, 'animals_train_mul.txt')
valid_path = os.path.join(data_path, 'animals_valid_mul.txt')
train_images = []
train_masks = []
train_labels = []
valid_images = []
valid_masks = []
valid_labels = []
with open(train_path) as f:
for line in f.readlines():
line = line.strip().split()
if multi and len(line) <= 2:
continue
file = os.path.join(SRC_path, line[0] + '.jpg')
mask_file = os.path.join(mask_path, line[0] + '.jpg')
train_images.append(file)
train_masks.append(mask_file)
labels = line[1:]
labels_one_hot = np.zeros([6,])
for label in labels:
idx = CATEGORIES.index(label)
labels_one_hot[idx] = 1
train_labels.append(labels_one_hot)
with open(valid_path) as f:
for line in f.readlines():
line = line.strip().split()
if multi and len(line) <= 2:
continue
file = os.path.join(SRC_path, line[0] + '.jpg')
mask_file = os.path.join(mask_path, line[0] + '.jpg')
valid_images.append(file)
valid_masks.append(mask_file)
labels = line[1:]
labels_one_hot = np.zeros([6,])
for label in labels:
idx = CATEGORIES.index(label)
labels_one_hot[idx] = 1
valid_labels.append(labels_one_hot)
train_num = len(train_images)
valid_num = len(valid_images)
    if shuffle_test:
        # Use one permutation for images, labels, and masks so the three
        # lists stay aligned.
        idx = [i for i in range(valid_num)]
        np.random.shuffle(idx)
        valid_images = np.array(valid_images)[idx]
        valid_labels = np.array(valid_labels)[idx]
        valid_masks = np.array(valid_masks)[idx]
train_images = tf.constant(train_images)
train_masks = tf.constant(train_masks)
train_labels = tf.constant(train_labels)
valid_images = tf.constant(valid_images)
valid_masks = tf.constant(valid_masks)
valid_labels = tf.constant(valid_labels)
train = tf.data.Dataset.from_tensor_slices(((train_images, train_masks), train_labels)).shuffle(train_num).\
map(build_parse4((shape[0], shape[1]), train=True, with_mask=with_mask, arch=arch), num_parallel_calls=tf.data.experimental.AUTOTUNE).\
batch(batch_size).prefetch(tf.data.experimental.AUTOTUNE)
test = tf.data.Dataset.from_tensor_slices(((valid_images, valid_masks), valid_labels)).\
map(build_parse4((shape[0], shape[1]), with_mask=with_mask, arch=arch), num_parallel_calls=tf.data.experimental.AUTOTUNE).\
batch(batch_size).prefetch(tf.data.experimental.AUTOTUNE)
info = data_input.DataInfo(tfds.features.FeaturesDict({'image': tfds.features.Image(shape=shape),
'label': tfds.features.ClassLabel(num_classes=6)}),
{'train_examples': train_num,
'test_examples': valid_num})
if target:
target = CATEGORIES.index(target)
def single_parse(image, label):
label = tf.equal(label, target)
label = tf.cast(label, tf.int32)
label = tf.reduce_sum(label, -1)
return image, label
train = train.map(single_parse)
test = test.map(single_parse)
return train, test, info
def multi2single(target, label):
target = CATEGORIES.index(target)
if isinstance(label, tf.Tensor):
label = label.numpy()
label = label == target
label = label.astype(int)
return label
def build_parse2(size, train=False, brightness=False, contrast=False, arch='InceptionV3'):
def parse(path, bbox, label):
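        # Training: sample an Inception-style distorted crop that still
        # covers at least 10% of the annotated box; eval: crop exactly to
        # the annotated box.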
image_str = tf.io.read_file(path)
image = tf.image.decode_jpeg(image_str)
if train:
float_shape = tf.cast(tf.shape(image), tf.float32)
ymin = bbox[0] / float_shape[0]
xmin = bbox[1] / float_shape[1]
ymax = bbox[2] / float_shape[0]
xmax = bbox[3] / float_shape[1]
sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
# tf.image.extract_jpeg_shape(image_str),
tf.shape(image),
bounding_boxes=[[[ymin, xmin, ymax, xmax]]],
min_object_covered=0.1,
aspect_ratio_range=[0.75, 1.33],
area_range=[0.05, 1.0],
max_attempts=100,
use_image_if_no_bounding_boxes=True)
bbox_begin, bbox_size, _ = sample_distorted_bounding_box
# Reassemble the bounding box in the format the crop op requires.
offset_y, offset_x, _ = tf.unstack(bbox_begin)
target_height, target_width, _ = tf.unstack(bbox_size)
# Use the fused decode and crop op here, which is faster than each in series.
image = tf.image.crop_to_bounding_box(
image, offset_y, offset_x, target_height, target_width)
else:
bbox = tf.cast(bbox, tf.int32)
image = tf.image.crop_to_bounding_box(
image, bbox[0], bbox[1], bbox[2]-bbox[0], bbox[3]-bbox[1])
if train:
image = tf.image.random_flip_left_right(image)
# if brightness:
# image = tf.image.random_brightness(image, max_delta=63)
# if contrast:
# image = tf.image.random_contrast(image, lower=0.2, upper=1.8)
image = tf.image.resize(image, size=size)
image = image_normalize(image, arch)
return image, label
return parse
def build_parse3(size, train=False, brightness=False, contrast=False, with_mask=False, arch='InceptionV3'):
def parse(path, label):
image_path, mask_path = path
label = tf.cast(label, tf.float32)
image_str = tf.io.read_file(image_path)
image = tf.image.decode_jpeg(image_str)
mask_str = tf.io.read_file(mask_path)
mask = tf.image.decode_jpeg(mask_str)
if train:
if with_mask:
mask = tf.tile(mask, [1, 1, 3])
image = tf.concat([image, mask], 0)
image = tf.image.random_flip_left_right(image)
if with_mask:
image, mask = tf.split(image, 2, axis=0)
mask = tf.split(mask, 3, axis=2)[0]
if brightness:
image = tf.image.random_brightness(image, max_delta=63)
if contrast:
image = tf.image.random_contrast(image, lower=0.2, upper=1.8)
image = tf.image.resize(image, size=size)
image = image_normalize(image, arch)
if with_mask:
mask = tf.image.resize(mask, size=size)
mask /= 255.
return (image, mask), label
else:
return image, label
return parse
def build_parse4(size, train=False, with_mask=None, arch='InceptionV3'):
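    # Training path: random flip plus a random square crop 80 px smaller
    # than the short side; eval path: resize the short side, then take a
    # central crop.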
def parse(path, label):
image_path, _ = path
label = tf.cast(label, tf.float32)
image_str = tf.io.read_file(image_path)
image = tf.image.decode_jpeg(image_str)
shape = tf.shape(input=image)
height, width = shape[0], shape[1]
if train:
image = tf.image.random_flip_left_right(image)
smaller_dim = tf.minimum(height, width)
image = tf.image.random_crop(image, [smaller_dim-80, smaller_dim-80,3])
image = tf.image.resize(image, size=size)
else:
new_height, new_width = utils.smallest_size_at_least(height, width, size[0])
image = tf.image.resize(image, size=(new_height+1, new_width+1))
image = utils.central_crop(image, size[0], size[1])
image = image_normalize(image, arch)
return image, label
return parse
def build_parse(size, flip=False, arch='InceptionV3'):
def parse(path, label):
image = tf.io.read_file(path)
image = tf.image.decode_jpeg(image)
image = tf.image.resize(image, size=size)
if flip:
image = tf.image.random_flip_left_right(image)
image = image_normalize(image, arch)
return image, label
return parse
def get_test_set_with_landmark(data_dir='data', category=None, batch_size=128, shape=(224, 224, 3), arch='InceptionV3'):
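    """Test set of single-object images cropped to their annotated box,
    paired with the Pascal-Part .mat annotation paths."""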
data_path = os.path.join(data_dir, 'Pascal VOC 2010')
SRC_path = os.path.join(data_dir, SRC)
labels = ['bird', 'cat', 'cow', 'dog', 'horse', 'sheep']
valid_path = os.path.join(data_path, 'animal_valid')
valid_images = []
valid_boxes = []
valid_labels = []
valid_masks = []
def get_parse(size):
def parse(path, bbox, label, mask):
image_str = tf.io.read_file(path)
image = tf.image.decode_jpeg(image_str)
bbox = tf.cast(bbox, tf.int32)
image = tf.image.crop_to_bounding_box(
image, bbox[0], bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1])
image = tf.image.resize(image, size=size)
image = image_normalize(image, arch)
return image, bbox, label, mask
return parse
for i, l in enumerate(labels):
if category is not None and category != l:
continue
valid_files = os.listdir(os.path.join(valid_path, l))
for file in valid_files:
obj_anno = file.split('.')[0] + '.xml'
obj_anno = os.path.join(SRC_path, 'Annotations', obj_anno)
tree = ET.parse(obj_anno)
boxes = get_boxes(tree, l)
if len(boxes) != 1:
continue
part_mask = file.split('.')[0] + '.mat'
part_mask = os.path.join(data_path, 'Annotations_Part', part_mask)
file = os.path.join(valid_path, l, file)
objects = scio.loadmat(part_mask)['anno'][0][0][1][0]
valid_obj = []
for obj in objects:
if obj[0] == l:
valid_obj.append(obj)
if len(valid_obj) != 1:
raise Exception('more than 1 obj!')
if len(valid_obj[0][3]) == 0:
continue
valid_boxes += boxes
valid_images.append(file)
valid_labels.append(i)
valid_masks.append(part_mask)
valid_images = tf.constant(valid_images)
valid_boxes = tf.constant(valid_boxes, dtype=tf.float32)
valid_labels = tf.constant(valid_labels)
valid_landmarks = tf.constant(valid_masks)
test = tf.data.Dataset.from_tensor_slices((valid_images, valid_boxes, valid_labels, valid_landmarks)).\
map(get_parse((shape[0], shape[1])), num_parallel_calls=tf.data.experimental.AUTOTUNE).\
batch(batch_size).prefetch(tf.data.experimental.AUTOTUNE)
info = data_input.DataInfo(tfds.features.FeaturesDict({'image': tfds.features.Image(shape=shape),
'label': tfds.features.ClassLabel(num_classes=len(labels))}),
{'test_examples': 1460})
return test, info
def parse_masks(images, files, labels, boxes):
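    """Loads each image's .mat part annotation, keeps only the object whose
    class matches the label, and crops/resizes the object and part masks to
    the image size; images without part annotations are dropped."""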
filtered_images = []
filtered_labels = []
filtered_masks = []
shape = images.get_shape().as_list()
h, w = shape[1], shape[2]
for image, file, label, box in zip(images, files, labels, boxes):
# file = tf.constant('../../../data\\Pascal VOC 2010\\Annotations_Part\\2009_002002.mat')
mask = {}
objects = scio.loadmat(file.numpy())['anno'][0][0][1][0]
valid_obj = []
for obj in objects:
if obj[0] == label:
valid_obj.append(obj)
if len(valid_obj) != 1:
raise Exception('more than 1 obj!')
for i, item in enumerate(valid_obj[0]):
if i == 2:
item = crop_resize_mask(item, box, h, w)
mask['obj'] = item
if i == 3:
parts = {}
                if len(item) == 0:
                    # No part annotations for this object; skip it.
                    continue
for part in item[0]:
name = part[0][0]
value = part[1]
parts[name] = crop_resize_mask(value, box, h, w)
parts = merge_parts(parts, label, h, w)
mask['parts'] = parts
if 'parts' in mask:
filtered_images.append(image)
filtered_labels.append(label)
filtered_masks.append(mask)
return filtered_images, filtered_labels, filtered_masks
def get_boxes(tree, label):
boxes = []
for obj in tree.getroot().iter('object'):
if obj.find('name').text == label:
bndbox = obj.find('bndbox')
xmin = int(bndbox.find('xmin').text)
ymin = int(bndbox.find('ymin').text)
xmax = int(bndbox.find('xmax').text)
ymax = int(bndbox.find('ymax').text)
box = [ymin, xmin, ymax, xmax]
boxes.append(box)
return boxes
def test_test_landmark():
batch = 16
test, info = get_test_set_with_landmark('../../../data', batch_size=batch)
# test = test.shuffle(1000)
count1 = 0
count2 = 0
for images, boxes, labels, masks in test:
count1 += images.get_shape().as_list()[0]
txt_label = [CATEGORIES[l] for l in labels]
images, labels, masks = parse_masks(images, masks, txt_label, boxes)
count2 += len(images)
parts = get_parts(masks)
viz_image_mask(images, masks)
print('count1:{}'.format(count1))
print('count2:{}'.format(count2))
def get_parts(masks):
result = []
for mask in masks:
result.append(mask['parts'])
return result
def viz_image_mask(images, masks):
for image, mask in zip(images, masks):
mask_img = [np.tile(mask['obj'], [1, 1, 3])]
for part in mask['parts']:
part_img = mask['parts'][part]
part_img = np.tile(part_img, [1, 1, 3])
mask_img.append(part_img)
show = np.concatenate([image] + mask_img, 1)
plt.imshow(show)
plt.show()
def crop_resize_mask(mask, box, h, w):
mask = mask[box[0]:box[2], box[1]:box[3], np.newaxis]
mask = tf.image.resize(mask, [h, w]).numpy()
mask[mask > 0] = 1
return mask
def merge_parts(parts, label, h, w):
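    """Merges the fine-grained Pascal-Part masks into coarse groups (head,
    torso, legs, tail); groups with no parts become all-zero masks."""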
results = {}
if label == 'bird':
head = []
torso = []
leg = []
tail = []
results = {'head': head, 'torso': torso, 'leg': leg, 'tail': tail}
for part in parts:
if part in ['head', 'leye', 'reye', 'beak']:
head.append(parts[part])
if part in ['torso', 'neck', 'lwing', 'rwing']:
torso.append(parts[part])
if part in ['lleg', 'rleg', 'lfoot', 'rfoot']:
leg.append(parts[part])
if part in ['tail']:
tail.append(parts[part])
if label in ['cat', 'dog', 'cow', 'sheep', 'horse']:
head = []
torso = []
bleg = []
fleg = []
tail = []
results = {'head': head, 'torso': torso, 'bleg': bleg, 'fleg': fleg}
if label in ['cat', 'dog']:
results['tail'] = tail
for part in parts:
if part in ['head', 'leye', 'reye', 'lear', 'rear', 'nose', 'muzzle', 'rhorn', 'lhorn']:
head.append(parts[part])
if part in ['torso', 'neck']:
torso.append(parts[part])
if part in ['lbleg', 'rbleg', 'lbpa', 'rbpa', 'lblleg', 'lbuleg', 'rblleg', 'rbuleg', 'rbho', 'lbho']:
bleg.append(parts[part])
if part in ['lfleg', 'rfleg', 'lfpa', 'rfpa', 'lflleg', 'lfuleg', 'rflleg', 'rfuleg', 'rfho', 'lfho']:
fleg.append(parts[part])
if part in ['tail']:
tail.append(parts[part])
final = {}
for merged in results:
if len(results[merged]) > 1:
summed = np.sum(results[merged], 0)
summed[summed > 0] = 1
final[merged] = summed
elif len(results[merged]) == 1:
summed = results[merged][0]
summed[summed > 0] = 1
final[merged] = summed
elif len(results[merged]) == 0:
final[merged] = np.zeros(shape=(h, w, 1))
return final
def test_find_shapes():
from scipy import misc
data_dir = '../../../data'
data_path = os.path.join(data_dir, 'Pascal VOC 2010')
SRC_path = os.path.join(data_dir, SRC, 'JPEGImages')
train_path = os.path.join(data_path, 'animals_train_mul.txt')
valid_path = os.path.join(data_path, 'animals_valid_mul.txt')
hs = []
ws = []
with open(train_path) as f:
for line in f.readlines():
line = line.strip().split()
file = os.path.join(SRC_path, line[0] + '.jpg')
image = misc.imread(file)
h, w = image.shape[0], image.shape[1]
hs.append(h)
ws.append(w)
print('h min:{},h max:{}'.format(min(hs), max(hs)))
print('w min:{},w max:{}'.format(min(ws), max(ws)))
def test_read():
train, test, info = build_dataset3('../../../data', multi=True)
count = 0
for image, label in train:
count += image.shape[0]
print('train num', count)
count = 0
for image, label in test:
count += image.shape[0]
print('test num', count)
def test_view_data():
train, test, info = build_dataset3('../../../data', with_mask=False, multi=True)
for image, label in train:
# image, mask = image
# h,w =image
        image = (image + 1) * 127.5 / 255
# mask = np.tile(mask, [1, 1, 1, 3])
# image = np.concatenate([image, mask], 2)
out_image(image, label)
break
for image, label in test:
# image, mask = image
        image = (image + 1) * 127.5 / 255
out_image(image, label)
break
def out_image(images, labels, preds=None, photos=16):
fig = plt.figure()
fig.tight_layout()
plt.subplots_adjust(wspace=0.05, hspace=0.05, top=0.95, bottom=0.05, right=0.95, left=0.05)
for i in range(photos):
        plt.subplot(photos // 2, 2, i + 1)
plt.axis('off')
if preds is None:
title = str(labels[i])
else:
title = str(labels[i]) + '_' + str(preds[i])
plt.title(title)
image = images[i, :, :, :]
if image.shape[-1] == 1:
image = np.squeeze(image, -1)
plt.imshow(image, cmap='gray')
else:
plt.imshow(image)
plt.subplots_adjust(hspace=0.5)
plt.show()
|
[
"matplotlib.pyplot.title",
"tensorflow.reduce_sum",
"numpy.sum",
"tensorflow.clip_by_value",
"tensorflow_datasets.features.Image",
"scipy.io.loadmat",
"matplotlib.pyplot.figure",
"numpy.tile",
"tensorflow.split",
"tensorflow.image.crop_to_bounding_box",
"tensorflow.image.random_crop",
"matplotlib.pyplot.imshow",
"tensorflow.image.random_contrast",
"tensorflow.concat",
"tensorflow.minimum",
"tensorflow.cast",
"tensorflow.io.read_file",
"common.utils.central_crop",
"tensorflow.equal",
"numpy.random.shuffle",
"xml.etree.ElementTree.parse",
"matplotlib.pyplot.show",
"tensorflow.constant",
"tensorflow.image.random_flip_left_right",
"tensorflow.tile",
"matplotlib.pyplot.subplots_adjust",
"numpy.squeeze",
"tensorflow.image.random_brightness",
"numpy.concatenate",
"scipy.misc.imread",
"matplotlib.pyplot.subplot",
"tensorflow_datasets.features.ClassLabel",
"common.utils.smallest_size_at_least",
"numpy.zeros",
"matplotlib.pyplot.axis",
"numpy.expand_dims",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.shape",
"numpy.array",
"tensorflow.image.decode_jpeg",
"tensorflow.image.resize",
"tensorflow.unstack"
] |
[((8348, 8373), 'tensorflow.constant', 'tf.constant', (['train_images'], {}), '(train_images)\n', (8359, 8373), True, 'import tensorflow as tf\n'), ((8393, 8418), 'tensorflow.constant', 'tf.constant', (['train_labels'], {}), '(train_labels)\n', (8404, 8418), True, 'import tensorflow as tf\n'), ((8438, 8463), 'tensorflow.constant', 'tf.constant', (['valid_images'], {}), '(valid_images)\n', (8449, 8463), True, 'import tensorflow as tf\n'), ((8483, 8508), 'tensorflow.constant', 'tf.constant', (['valid_labels'], {}), '(valid_labels)\n', (8494, 8508), True, 'import tensorflow as tf\n'), ((12032, 12057), 'tensorflow.constant', 'tf.constant', (['train_images'], {}), '(train_images)\n', (12043, 12057), True, 'import tensorflow as tf\n'), ((12076, 12118), 'tensorflow.constant', 'tf.constant', (['train_boxes'], {'dtype': 'tf.float32'}), '(train_boxes, dtype=tf.float32)\n', (12087, 12118), True, 'import tensorflow as tf\n'), ((12138, 12163), 'tensorflow.constant', 'tf.constant', (['train_labels'], {}), '(train_labels)\n', (12149, 12163), True, 'import tensorflow as tf\n'), ((12183, 12208), 'tensorflow.constant', 'tf.constant', (['valid_images'], {}), '(valid_images)\n', (12194, 12208), True, 'import tensorflow as tf\n'), ((12227, 12269), 'tensorflow.constant', 'tf.constant', (['valid_boxes'], {'dtype': 'tf.float32'}), '(valid_boxes, dtype=tf.float32)\n', (12238, 12269), True, 'import tensorflow as tf\n'), ((12289, 12314), 'tensorflow.constant', 'tf.constant', (['valid_labels'], {}), '(valid_labels)\n', (12300, 12314), True, 'import tensorflow as tf\n'), ((15003, 15028), 'tensorflow.constant', 'tf.constant', (['valid_images'], {}), '(valid_images)\n', (15014, 15028), True, 'import tensorflow as tf\n'), ((15048, 15073), 'tensorflow.constant', 'tf.constant', (['valid_labels'], {}), '(valid_labels)\n', (15059, 15073), True, 'import tensorflow as tf\n'), ((15096, 15120), 'tensorflow.constant', 'tf.constant', (['valid_masks'], {}), '(valid_masks)\n', (15107, 15120), True, 'import tensorflow as tf\n'), ((17822, 17847), 'tensorflow.constant', 'tf.constant', (['train_images'], {}), '(train_images)\n', (17833, 17847), True, 'import tensorflow as tf\n'), ((17866, 17890), 'tensorflow.constant', 'tf.constant', (['train_masks'], {}), '(train_masks)\n', (17877, 17890), True, 'import tensorflow as tf\n'), ((17910, 17935), 'tensorflow.constant', 'tf.constant', (['train_labels'], {}), '(train_labels)\n', (17921, 17935), True, 'import tensorflow as tf\n'), ((17955, 17980), 'tensorflow.constant', 'tf.constant', (['valid_images'], {}), '(valid_images)\n', (17966, 17980), True, 'import tensorflow as tf\n'), ((17999, 18023), 'tensorflow.constant', 'tf.constant', (['valid_masks'], {}), '(valid_masks)\n', (18010, 18023), True, 'import tensorflow as tf\n'), ((18043, 18068), 'tensorflow.constant', 'tf.constant', (['valid_labels'], {}), '(valid_labels)\n', (18054, 18068), True, 'import tensorflow as tf\n'), ((26364, 26389), 'tensorflow.constant', 'tf.constant', (['valid_images'], {}), '(valid_images)\n', (26375, 26389), True, 'import tensorflow as tf\n'), ((26408, 26450), 'tensorflow.constant', 'tf.constant', (['valid_boxes'], {'dtype': 'tf.float32'}), '(valid_boxes, dtype=tf.float32)\n', (26419, 26450), True, 'import tensorflow as tf\n'), ((26470, 26495), 'tensorflow.constant', 'tf.constant', (['valid_labels'], {}), '(valid_labels)\n', (26481, 26495), True, 'import tensorflow as tf\n'), ((26518, 26542), 'tensorflow.constant', 'tf.constant', (['valid_masks'], {}), '(valid_masks)\n', (26529, 26542), True, 'import tensorflow as 
tf\n'), ((33982, 33994), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (33992, 33994), True, 'import matplotlib.pyplot as plt\n'), ((34022, 34118), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0.05)', 'hspace': '(0.05)', 'top': '(0.95)', 'bottom': '(0.05)', 'right': '(0.95)', 'left': '(0.05)'}), '(wspace=0.05, hspace=0.05, top=0.95, bottom=0.05, right=\n 0.95, left=0.05)\n', (34041, 34118), True, 'import matplotlib.pyplot as plt\n'), ((34562, 34593), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.5)'}), '(hspace=0.5)\n', (34581, 34593), True, 'import matplotlib.pyplot as plt\n'), ((34598, 34608), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (34606, 34608), True, 'import matplotlib.pyplot as plt\n'), ((1363, 1381), 'xml.etree.ElementTree.parse', 'ET.parse', (['xml_file'], {}), '(xml_file)\n', (1371, 1381), True, 'import xml.etree.ElementTree as ET\n'), ((17677, 17699), 'numpy.random.shuffle', 'np.random.shuffle', (['idx'], {}), '(idx)\n', (17694, 17699), True, 'import numpy as np\n'), ((19807, 19828), 'tensorflow.io.read_file', 'tf.io.read_file', (['path'], {}), '(path)\n', (19822, 19828), True, 'import tensorflow as tf\n'), ((19845, 19876), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['image_str'], {}), '(image_str)\n', (19865, 19876), True, 'import tensorflow as tf\n'), ((21550, 21583), 'tensorflow.image.resize', 'tf.image.resize', (['image'], {'size': 'size'}), '(image, size=size)\n', (21565, 21583), True, 'import tensorflow as tf\n'), ((21865, 21891), 'tensorflow.cast', 'tf.cast', (['label', 'tf.float32'], {}), '(label, tf.float32)\n', (21872, 21891), True, 'import tensorflow as tf\n'), ((21912, 21939), 'tensorflow.io.read_file', 'tf.io.read_file', (['image_path'], {}), '(image_path)\n', (21927, 21939), True, 'import tensorflow as tf\n'), ((21956, 21987), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['image_str'], {}), '(image_str)\n', (21976, 21987), True, 'import tensorflow as tf\n'), ((22007, 22033), 'tensorflow.io.read_file', 'tf.io.read_file', (['mask_path'], {}), '(mask_path)\n', (22022, 22033), True, 'import tensorflow as tf\n'), ((22049, 22079), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['mask_str'], {}), '(mask_str)\n', (22069, 22079), True, 'import tensorflow as tf\n'), ((22636, 22669), 'tensorflow.image.resize', 'tf.image.resize', (['image'], {'size': 'size'}), '(image, size=size)\n', (22651, 22669), True, 'import tensorflow as tf\n'), ((23066, 23092), 'tensorflow.cast', 'tf.cast', (['label', 'tf.float32'], {}), '(label, tf.float32)\n', (23073, 23092), True, 'import tensorflow as tf\n'), ((23113, 23140), 'tensorflow.io.read_file', 'tf.io.read_file', (['image_path'], {}), '(image_path)\n', (23128, 23140), True, 'import tensorflow as tf\n'), ((23157, 23188), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['image_str'], {}), '(image_str)\n', (23177, 23188), True, 'import tensorflow as tf\n'), ((23205, 23226), 'tensorflow.shape', 'tf.shape', ([], {'input': 'image'}), '(input=image)\n', (23213, 23226), True, 'import tensorflow as tf\n'), ((23972, 23993), 'tensorflow.io.read_file', 'tf.io.read_file', (['path'], {}), '(path)\n', (23987, 23993), True, 'import tensorflow as tf\n'), ((24010, 24037), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['image'], {}), '(image)\n', (24030, 24037), True, 'import tensorflow as tf\n'), ((24054, 24087), 'tensorflow.image.resize', 'tf.image.resize', (['image'], {'size': 'size'}), '(image, 
size=size)\n', (24069, 24087), True, 'import tensorflow as tf\n'), ((30039, 30076), 'numpy.concatenate', 'np.concatenate', (['([image] + mask_img)', '(1)'], {}), '([image] + mask_img, 1)\n', (30053, 30076), True, 'import numpy as np\n'), ((30085, 30101), 'matplotlib.pyplot.imshow', 'plt.imshow', (['show'], {}), '(show)\n', (30095, 30101), True, 'import matplotlib.pyplot as plt\n'), ((30110, 30120), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (30118, 30120), True, 'import matplotlib.pyplot as plt\n'), ((34150, 34183), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(photos / 2)', '(2)', '(i + 1)'], {}), '(photos / 2, 2, i + 1)\n', (34161, 34183), True, 'import matplotlib.pyplot as plt\n'), ((34188, 34203), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (34196, 34203), True, 'import matplotlib.pyplot as plt\n'), ((34344, 34360), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (34353, 34360), True, 'import matplotlib.pyplot as plt\n'), ((3116, 3145), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['image', '(0)', '(1)'], {}), '(image, 0, 1)\n', (3132, 3145), True, 'import tensorflow as tf\n'), ((3332, 3361), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['image', '(0)', '(1)'], {}), '(image, 0, 1)\n', (3348, 3361), True, 'import tensorflow as tf\n'), ((7237, 7262), 'numpy.concatenate', 'np.concatenate', (['masks', '(-1)'], {}), '(masks, -1)\n', (7251, 7262), True, 'import numpy as np\n'), ((7283, 7316), 'numpy.sum', 'np.sum', (['masks', '(-1)'], {'keepdims': '(False)'}), '(masks, -1, keepdims=False)\n', (7289, 7316), True, 'import numpy as np\n'), ((10146, 10164), 'xml.etree.ElementTree.parse', 'ET.parse', (['obj_anno'], {}), '(obj_anno)\n', (10154, 10164), True, 'import xml.etree.ElementTree as ET\n'), ((11215, 11233), 'xml.etree.ElementTree.parse', 'ET.parse', (['obj_anno'], {}), '(obj_anno)\n', (11223, 11233), True, 'import xml.etree.ElementTree as ET\n'), ((13369, 13392), 'tensorflow.equal', 'tf.equal', (['label', 'target'], {}), '(label, target)\n', (13377, 13392), True, 'import tensorflow as tf\n'), ((13413, 13437), 'tensorflow.cast', 'tf.cast', (['label', 'tf.int32'], {}), '(label, tf.int32)\n', (13420, 13437), True, 'import tensorflow as tf\n'), ((14034, 14055), 'tensorflow.io.read_file', 'tf.io.read_file', (['path'], {}), '(path)\n', (14049, 14055), True, 'import tensorflow as tf\n'), ((14076, 14107), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['image_str'], {}), '(image_str)\n', (14096, 14107), True, 'import tensorflow as tf\n'), ((14128, 14161), 'tensorflow.image.resize', 'tf.image.resize', (['image'], {'size': 'size'}), '(image, size=size)\n', (14143, 14161), True, 'import tensorflow as tf\n'), ((14511, 14524), 'numpy.zeros', 'np.zeros', (['[6]'], {}), '([6])\n', (14519, 14524), True, 'import numpy as np\n'), ((16733, 16746), 'numpy.zeros', 'np.zeros', (['[6]'], {}), '([6])\n', (16741, 16746), True, 'import numpy as np\n'), ((17353, 17366), 'numpy.zeros', 'np.zeros', (['[6]'], {}), '([6])\n', (17361, 17366), True, 'import numpy as np\n'), ((17723, 17745), 'numpy.array', 'np.array', (['valid_images'], {}), '(valid_images)\n', (17731, 17745), True, 'import numpy as np\n'), ((17774, 17796), 'numpy.array', 'np.array', (['valid_labels'], {}), '(valid_labels)\n', (17782, 17796), True, 'import numpy as np\n'), ((19192, 19215), 'tensorflow.equal', 'tf.equal', (['label', 'target'], {}), '(label, target)\n', (19200, 19215), True, 'import tensorflow as tf\n'), ((19236, 19260), 'tensorflow.cast', 'tf.cast', 
(['label', 'tf.int32'], {}), '(label, tf.int32)\n', (19243, 19260), True, 'import tensorflow as tf\n'), ((19281, 19305), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['label', '(-1)'], {}), '(label, -1)\n', (19294, 19305), True, 'import tensorflow as tf\n'), ((20771, 20793), 'tensorflow.unstack', 'tf.unstack', (['bbox_begin'], {}), '(bbox_begin)\n', (20781, 20793), True, 'import tensorflow as tf\n'), ((20839, 20860), 'tensorflow.unstack', 'tf.unstack', (['bbox_size'], {}), '(bbox_size)\n', (20849, 20860), True, 'import tensorflow as tf\n'), ((20972, 21061), 'tensorflow.image.crop_to_bounding_box', 'tf.image.crop_to_bounding_box', (['image', 'offset_y', 'offset_x', 'target_height', 'target_width'], {}), '(image, offset_y, offset_x, target_height,\n target_width)\n', (21001, 21061), True, 'import tensorflow as tf\n'), ((21112, 21135), 'tensorflow.cast', 'tf.cast', (['bbox', 'tf.int32'], {}), '(bbox, tf.int32)\n', (21119, 21135), True, 'import tensorflow as tf\n'), ((21156, 21253), 'tensorflow.image.crop_to_bounding_box', 'tf.image.crop_to_bounding_box', (['image', 'bbox[0]', 'bbox[1]', '(bbox[2] - bbox[0])', '(bbox[3] - bbox[1])'], {}), '(image, bbox[0], bbox[1], bbox[2] - bbox[0], \n bbox[3] - bbox[1])\n', (21185, 21253), True, 'import tensorflow as tf\n'), ((21301, 21339), 'tensorflow.image.random_flip_left_right', 'tf.image.random_flip_left_right', (['image'], {}), '(image)\n', (21332, 21339), True, 'import tensorflow as tf\n'), ((22244, 22282), 'tensorflow.image.random_flip_left_right', 'tf.image.random_flip_left_right', (['image'], {}), '(image)\n', (22275, 22282), True, 'import tensorflow as tf\n'), ((22756, 22788), 'tensorflow.image.resize', 'tf.image.resize', (['mask'], {'size': 'size'}), '(mask, size=size)\n', (22771, 22788), True, 'import tensorflow as tf\n'), ((23308, 23346), 'tensorflow.image.random_flip_left_right', 'tf.image.random_flip_left_right', (['image'], {}), '(image)\n', (23339, 23346), True, 'import tensorflow as tf\n'), ((23373, 23398), 'tensorflow.minimum', 'tf.minimum', (['height', 'width'], {}), '(height, width)\n', (23383, 23398), True, 'import tensorflow as tf\n'), ((23419, 23487), 'tensorflow.image.random_crop', 'tf.image.random_crop', (['image', '[smaller_dim - 80, smaller_dim - 80, 3]'], {}), '(image, [smaller_dim - 80, smaller_dim - 80, 3])\n', (23439, 23487), True, 'import tensorflow as tf\n'), ((23503, 23536), 'tensorflow.image.resize', 'tf.image.resize', (['image'], {'size': 'size'}), '(image, size=size)\n', (23518, 23536), True, 'import tensorflow as tf\n'), ((23587, 23639), 'common.utils.smallest_size_at_least', 'utils.smallest_size_at_least', (['height', 'width', 'size[0]'], {}), '(height, width, size[0])\n', (23615, 23639), False, 'from common import utils\n'), ((23660, 23720), 'tensorflow.image.resize', 'tf.image.resize', (['image'], {'size': '(new_height + 1, new_width + 1)'}), '(image, size=(new_height + 1, new_width + 1))\n', (23675, 23720), True, 'import tensorflow as tf\n'), ((23737, 23780), 'common.utils.central_crop', 'utils.central_crop', (['image', 'size[0]', 'size[1]'], {}), '(image, size[0], size[1])\n', (23755, 23780), False, 'from common import utils\n'), ((24125, 24163), 'tensorflow.image.random_flip_left_right', 'tf.image.random_flip_left_right', (['image'], {}), '(image)\n', (24156, 24163), True, 'import tensorflow as tf\n'), ((24776, 24797), 'tensorflow.io.read_file', 'tf.io.read_file', (['path'], {}), '(path)\n', (24791, 24797), True, 'import tensorflow as tf\n'), ((24818, 24849), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', 
(['image_str'], {}), '(image_str)\n', (24838, 24849), True, 'import tensorflow as tf\n'), ((24869, 24892), 'tensorflow.cast', 'tf.cast', (['bbox', 'tf.int32'], {}), '(bbox, tf.int32)\n', (24876, 24892), True, 'import tensorflow as tf\n'), ((24913, 25010), 'tensorflow.image.crop_to_bounding_box', 'tf.image.crop_to_bounding_box', (['image', 'bbox[0]', 'bbox[1]', '(bbox[2] - bbox[0])', '(bbox[3] - bbox[1])'], {}), '(image, bbox[0], bbox[1], bbox[2] - bbox[0], \n bbox[3] - bbox[1])\n', (24942, 25010), True, 'import tensorflow as tf\n'), ((25047, 25080), 'tensorflow.image.resize', 'tf.image.resize', (['image'], {'size': 'size'}), '(image, size=size)\n', (25062, 25080), True, 'import tensorflow as tf\n'), ((25540, 25558), 'xml.etree.ElementTree.parse', 'ET.parse', (['obj_anno'], {}), '(obj_anno)\n', (25548, 25558), True, 'import xml.etree.ElementTree as ET\n'), ((29823, 29854), 'numpy.tile', 'np.tile', (["mask['obj']", '[1, 1, 3]'], {}), "(mask['obj'], [1, 1, 3])\n", (29830, 29854), True, 'import numpy as np\n'), ((29957, 29985), 'numpy.tile', 'np.tile', (['part_img', '[1, 1, 3]'], {}), '(part_img, [1, 1, 3])\n', (29964, 29985), True, 'import numpy as np\n'), ((30231, 30260), 'tensorflow.image.resize', 'tf.image.resize', (['mask', '[h, w]'], {}), '(mask, [h, w])\n', (30246, 30260), True, 'import tensorflow as tf\n'), ((31971, 31997), 'numpy.sum', 'np.sum', (['results[merged]', '(0)'], {}), '(results[merged], 0)\n', (31977, 31997), True, 'import numpy as np\n'), ((32871, 32888), 'scipy.misc.imread', 'misc.imread', (['file'], {}), '(file)\n', (32882, 32888), False, 'from scipy import misc\n'), ((34449, 34470), 'numpy.squeeze', 'np.squeeze', (['image', '(-1)'], {}), '(image, -1)\n', (34459, 34470), True, 'import numpy as np\n'), ((34483, 34513), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {'cmap': '"""gray"""'}), "(image, cmap='gray')\n", (34493, 34513), True, 'import matplotlib.pyplot as plt\n'), ((34540, 34557), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (34550, 34557), True, 'import matplotlib.pyplot as plt\n'), ((9086, 9118), 'tensorflow_datasets.features.Image', 'tfds.features.Image', ([], {'shape': 'shape'}), '(shape=shape)\n', (9105, 9118), True, 'import tensorflow_datasets as tfds\n'), ((12926, 12958), 'tensorflow_datasets.features.Image', 'tfds.features.Image', ([], {'shape': 'shape'}), '(shape=shape)\n', (12945, 12958), True, 'import tensorflow_datasets as tfds\n'), ((15477, 15509), 'tensorflow_datasets.features.Image', 'tfds.features.Image', ([], {'shape': 'shape'}), '(shape=shape)\n', (15496, 15509), True, 'import tensorflow_datasets as tfds\n'), ((15579, 15618), 'tensorflow_datasets.features.ClassLabel', 'tfds.features.ClassLabel', ([], {'num_classes': '(6)'}), '(num_classes=6)\n', (15603, 15618), True, 'import tensorflow_datasets as tfds\n'), ((18753, 18785), 'tensorflow_datasets.features.Image', 'tfds.features.Image', ([], {'shape': 'shape'}), '(shape=shape)\n', (18772, 18785), True, 'import tensorflow_datasets as tfds\n'), ((18855, 18894), 'tensorflow_datasets.features.ClassLabel', 'tfds.features.ClassLabel', ([], {'num_classes': '(6)'}), '(num_classes=6)\n', (18879, 18894), True, 'import tensorflow_datasets as tfds\n'), ((19930, 19945), 'tensorflow.shape', 'tf.shape', (['image'], {}), '(image)\n', (19938, 19945), True, 'import tensorflow as tf\n'), ((20293, 20308), 'tensorflow.shape', 'tf.shape', (['image'], {}), '(image)\n', (20301, 20308), True, 'import tensorflow as tf\n'), ((22147, 22171), 'tensorflow.tile', 'tf.tile', (['mask', '[1, 1, 
3]'], {}), '(mask, [1, 1, 3])\n', (22154, 22171), True, 'import tensorflow as tf\n'), ((22196, 22223), 'tensorflow.concat', 'tf.concat', (['[image, mask]', '(0)'], {}), '([image, mask], 0)\n', (22205, 22223), True, 'import tensorflow as tf\n'), ((22339, 22365), 'tensorflow.split', 'tf.split', (['image', '(2)'], {'axis': '(0)'}), '(image, 2, axis=0)\n', (22347, 22365), True, 'import tensorflow as tf\n'), ((22469, 22516), 'tensorflow.image.random_brightness', 'tf.image.random_brightness', (['image'], {'max_delta': '(63)'}), '(image, max_delta=63)\n', (22495, 22516), True, 'import tensorflow as tf\n'), ((22566, 22619), 'tensorflow.image.random_contrast', 'tf.image.random_contrast', (['image'], {'lower': '(0.2)', 'upper': '(1.8)'}), '(image, lower=0.2, upper=1.8)\n', (22590, 22619), True, 'import tensorflow as tf\n'), ((26884, 26916), 'tensorflow_datasets.features.Image', 'tfds.features.Image', ([], {'shape': 'shape'}), '(shape=shape)\n', (26903, 26916), True, 'import tensorflow_datasets as tfds\n'), ((7188, 7215), 'numpy.expand_dims', 'np.expand_dims', (['item[2]', '(-1)'], {}), '(item[2], -1)\n', (7202, 7215), True, 'import numpy as np\n'), ((22389, 22414), 'tensorflow.split', 'tf.split', (['mask', '(3)'], {'axis': '(2)'}), '(mask, 3, axis=2)\n', (22397, 22414), True, 'import tensorflow as tf\n'), ((32286, 32311), 'numpy.zeros', 'np.zeros', ([], {'shape': '(h, w, 1)'}), '(shape=(h, w, 1))\n', (32294, 32311), True, 'import numpy as np\n'), ((8785, 8849), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(valid_images, valid_labels)'], {}), '((valid_images, valid_labels))\n', (8819, 8849), True, 'import tensorflow as tf\n'), ((12611, 12688), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(valid_images, valid_boxes, valid_labels)'], {}), '((valid_images, valid_boxes, valid_labels))\n', (12645, 12688), True, 'import tensorflow as tf\n'), ((15161, 15246), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(valid_images, valid_labels, valid_landmarks)'], {}), '((valid_images, valid_labels,\n valid_landmarks))\n', (15195, 15246), True, 'import tensorflow as tf\n'), ((18404, 18483), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['((valid_images, valid_masks), valid_labels)'], {}), '(((valid_images, valid_masks), valid_labels))\n', (18438, 18483), True, 'import tensorflow as tf\n'), ((26555, 26653), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(valid_images, valid_boxes, valid_labels, valid_landmarks)'], {}), '((valid_images, valid_boxes, valid_labels,\n valid_landmarks))\n', (26589, 26653), True, 'import tensorflow as tf\n'), ((6924, 6942), 'scipy.io.loadmat', 'scio.loadmat', (['file'], {}), '(file)\n', (6936, 6942), True, 'import scipy.io as scio\n'), ((25863, 25886), 'scipy.io.loadmat', 'scio.loadmat', (['part_mask'], {}), '(part_mask)\n', (25875, 25886), True, 'import scipy.io as scio\n'), ((8522, 8586), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(train_images, train_labels)'], {}), '((train_images, train_labels))\n', (8556, 8586), True, 'import tensorflow as tf\n'), ((12328, 12405), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(train_images, train_boxes, train_labels)'], {}), '((train_images, train_boxes, train_labels))\n', (12362, 12405), True, 'import tensorflow as tf\n'), ((18082, 18161), 
'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['((train_images, train_masks), train_labels)'], {}), '(((train_images, train_masks), train_labels))\n', (18116, 18161), True, 'import tensorflow as tf\n')]
|
import numpy as np
from ..utils import GeneticAlgorithm as GA
from ..utils import round_vars
from .lcb_merit import lcb_merit
def lcb_ga(num_pts, opt_prob, surrogate, X, fX, Xpend=None, kappa=2.0, dtol=1e-3, lcb_target=None):
"""Minimize the LCB using a genetic algorithm.
:param num_pts: Number of points to generate
:type num_pts: int
:param opt_prob: Optimization problem
:type opt_prob: object
:param surrogate: Surrogate model object
:type surrogate: object
:param X: Previously evaluated points, of size n x dim
:type X: numpy.array
:param fX: Values at previously evaluated points, of size n x 1
:type fX: numpy.array
:param Xpend: Pending evaluations
:type Xpend: numpy.array
    :param kappa: Exploration weight in the LCB merit
    :type kappa: float
    :param dtol: Minimum distance between evaluated and pending points
    :type dtol: float
:param lcb_target: Return None if we don't find an LCB value <= lcb_target
:type lcb_target: float
:return: num_pts new points to evaluate
:rtype: numpy.array of size num_pts x dim
"""
if Xpend is None: # cdist can't handle None arguments
Xpend = np.empty([0, opt_prob.dim])
XX = np.vstack((X, Xpend))
new_points = np.zeros((num_pts, opt_prob.dim))
for i in range(num_pts):
def obj(Y):
"""Round integer variables and compute LCB."""
Y = round_vars(Y.copy(), opt_prob.int_var, opt_prob.lb, opt_prob.ub)
return lcb_merit(X=Y, surrogate=surrogate, fX=fX, XX=XX, dtol=dtol, kappa=kappa)
ga = GA(
function=obj,
dim=opt_prob.dim,
lb=opt_prob.lb,
ub=opt_prob.ub,
int_var=opt_prob.int_var,
pop_size=max([2 * opt_prob.dim, 100]),
num_gen=100,
)
x_best, f_min = ga.optimize()
        if lcb_target is not None and f_min > lcb_target:
            return None  # Give up (guard against comparing a float to None)
new_points[i, :] = x_best
XX = np.vstack((XX, x_best))
return new_points
|
[
"numpy.empty",
"numpy.zeros",
"numpy.vstack"
] |
[((1152, 1173), 'numpy.vstack', 'np.vstack', (['(X, Xpend)'], {}), '((X, Xpend))\n', (1161, 1173), True, 'import numpy as np\n'), ((1192, 1225), 'numpy.zeros', 'np.zeros', (['(num_pts, opt_prob.dim)'], {}), '((num_pts, opt_prob.dim))\n', (1200, 1225), True, 'import numpy as np\n'), ((1115, 1142), 'numpy.empty', 'np.empty', (['[0, opt_prob.dim]'], {}), '([0, opt_prob.dim])\n', (1123, 1142), True, 'import numpy as np\n'), ((1916, 1939), 'numpy.vstack', 'np.vstack', (['(XX, x_best)'], {}), '((XX, x_best))\n', (1925, 1939), True, 'import numpy as np\n')]
|
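The lcb_merit imported above is the objective the GA minimizes per point. A minimal sketch of what an LCB merit of this shape typically computes: the surrogate's predicted mean minus kappa times its predicted standard deviation, with candidates closer than dtol to any evaluated or pending point pushed to +inf. The predict/predict_std surrogate interface here is an assumption for illustration, not the package's actual implementation:

import numpy as np
from scipy.spatial.distance import cdist

def lcb_merit_sketch(X, surrogate, fX, XX, dtol=1e-3, kappa=2.0):
    # Hypothetical surrogate interface: predict() -> means, predict_std() -> stds
    mu = surrogate.predict(X).ravel()
    sigma = surrogate.predict_std(X).ravel()
    merit = mu - kappa * sigma
    # Rule out candidates within dtol of any evaluated or pending point
    merit[cdist(X, XX).min(axis=1) < dtol] = np.inf
    return merit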
# coding: utf8
import requests
import json
if __name__ == "__main__":
    # Build the text pairs to match as a dict: {"text_1": [text_a1, text_a2, ...],
    #                                           "text_2": [text_b1, text_b2, ...]}
text = {
"text_1": ["这道题太难了", "这道题太难了", "这道题太难了"],
"text_2": ["这道题是上一年的考题", "这道题不简单", "这道题很有意思"]
}
    # Select the simnet_bow matching model and send a POST request
url = "http://1172.16.58.3:8866/predict/text/simnet_bow"
r = requests.post(url=url, data=text)
    # Print the match results
print(json.dumps(r.json(), indent=4, ensure_ascii=False))
|
[
"requests.post"
] |
[((423, 456), 'requests.post', 'requests.post', ([], {'url': 'url', 'data': 'text'}), '(url=url, data=text)\n', (436, 456), False, 'import requests\n')]
|
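For reuse, the one-off request above can be wrapped with a timeout and explicit error handling. The text_1/text_2 payload shape is taken directly from the snippet, while the URL stays whatever the serving host exposes:

import requests

def match_texts(url, texts_a, texts_b, timeout=10):
    # POST two parallel sentence lists and return the parsed JSON response
    payload = {"text_1": texts_a, "text_2": texts_b}
    r = requests.post(url=url, data=payload, timeout=timeout)
    r.raise_for_status()  # fail loudly on HTTP errors instead of silently
    return r.json()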
from django.db import models
from django.utils.translation import gettext_lazy as _
from common.models import ProductBaseModel
from .product_brand import ProductBrand
from .product_group import ProductGroup
from .product_type import ProductType
class Product(ProductBaseModel):
name = models.CharField(
max_length=256,
help_text=_("Name of product.")
)
product_group = models.ForeignKey(
ProductGroup,
on_delete=models.SET_NULL,
blank=True,
null=True,
help_text=_("related product group."),
related_name="products",
)
product_type = models.ForeignKey(
ProductType,
on_delete=models.SET_NULL,
blank=True,
null=True,
help_text=_("related product type"),
related_name="products",
)
brand = models.ForeignKey(
ProductBrand,
on_delete=models.SET_NULL,
blank=True,
null=True,
help_text=_("related product brand"),
related_name="products",
)
in_stock = models.PositiveIntegerField(
default=0,
help_text=_("Amount of available products.")
)
short_description = models.TextField(
help_text=_("Short summary, can be used in search results."),
blank=True,
default="",
)
long_description = models.TextField(
help_text=_("Long Description"),
blank=True,
default=""
)
weight = models.DecimalField(
max_digits=13,
decimal_places=3,
blank=True,
null=True,
help_text=_(
"Default weight of product in grams. "
),
)
release_date = models.DateField(
blank=True,
null=True,
help_text=_("Release date. Product release on date, can be used for taking pre-orders."),
)
pre_order = models.BooleanField(
default=False,
verbose_name=_("Is pre-order product"),
help_text=_('Can be pre ordered')
)
is_serviceable = models.BooleanField(
default=False,
help_text=_("Is the product serviceable")
)
service_period = models.IntegerField(
default=0,
help_text=_("Service Period in Months. How long user get free service."),
verbose_name=_("Service Period in Months."),
)
valid_from = models.DateTimeField(
null=True,
blank=True,
verbose_name=_("Valid from"),
help_text=_("Enter the datetime from which the product is valid"),
)
valid_until = models.DateTimeField(
null=True,
blank=True,
verbose_name=_("Valid until"),
help_text=_("Enter the datetime on which the product's validity expires"),
)
# image_thumbnail = models.ImageField(
# upload_to="product_images/%Y/%m/%d/",
# max_length=500,
# verbose_name=_("Thumbnail image"),
# help_text=_("Use this for the thumbnail"),
# blank=True,
# null=True
# )
# image_alternative_1 = models.ImageField(
# upload_to="product_images/%Y/%m/%d/",
# max_length=500,
# verbose_name=_("Alternative image 1"),
# help_text=_("Additional image"),
# blank=True,
# null=True
# )
# image_alternative_2 = models.ImageField(
# upload_to="product_images/%Y/%m/%d/",
# max_length=500,
# verbose_name=_("Alternative image 2"),
# help_text=_("Additional image"),
# blank=True,
# null=True
# )
# image_alternative_3 = models.ImageField(
# upload_to="product_images/%Y/%m/%d/",
# max_length=500,
# verbose_name=_("Alternative image 3"),
# help_text=_("Additional image"),
# blank=True,
# null=True
# )
# image_alternative_4 = models.ImageField(
# upload_to="product_images/%Y/%m/%d/",
# max_length=500,
# verbose_name=_("Alternative image 4"),
# help_text=_("Additional image"),
# blank=True,
# null=True
# )
class Meta:
ordering = ['-id', 'name']
indexes = [
models.Index(
fields=['code', 'name', 'product_group', 'product_type', 'brand', 'in_stock']
),
]
def __str__(self):
return self.name
|
[
"django.db.models.Index",
"django.utils.translation.gettext_lazy"
] |
[((352, 373), 'django.utils.translation.gettext_lazy', '_', (['"""Name of product."""'], {}), "('Name of product.')\n", (353, 373), True, 'from django.utils.translation import gettext_lazy as _\n'), ((533, 560), 'django.utils.translation.gettext_lazy', '_', (['"""related product group."""'], {}), "('related product group.')\n", (534, 560), True, 'from django.utils.translation import gettext_lazy as _\n'), ((752, 777), 'django.utils.translation.gettext_lazy', '_', (['"""related product type"""'], {}), "('related product type')\n", (753, 777), True, 'from django.utils.translation import gettext_lazy as _\n'), ((963, 989), 'django.utils.translation.gettext_lazy', '_', (['"""related product brand"""'], {}), "('related product brand')\n", (964, 989), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1111, 1145), 'django.utils.translation.gettext_lazy', '_', (['"""Amount of available products."""'], {}), "('Amount of available products.')\n", (1112, 1145), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1212, 1262), 'django.utils.translation.gettext_lazy', '_', (['"""Short summary, can be used in search results."""'], {}), "('Short summary, can be used in search results.')\n", (1213, 1262), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1369, 1390), 'django.utils.translation.gettext_lazy', '_', (['"""Long Description"""'], {}), "('Long Description')\n", (1370, 1390), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1577, 1618), 'django.utils.translation.gettext_lazy', '_', (['"""Default weight of product in grams. """'], {}), "('Default weight of product in grams. ')\n", (1578, 1618), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1742, 1820), 'django.utils.translation.gettext_lazy', '_', (['"""Release date. Product release on date, can be used for taking pre-orders."""'], {}), "('Release date. Product release on date, can be used for taking pre-orders.')\n", (1743, 1820), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1909, 1934), 'django.utils.translation.gettext_lazy', '_', (['"""Is pre-order product"""'], {}), "('Is pre-order product')\n", (1910, 1934), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1954, 1977), 'django.utils.translation.gettext_lazy', '_', (['"""Can be pre ordered"""'], {}), "('Can be pre ordered')\n", (1955, 1977), True, 'from django.utils.translation import gettext_lazy as _\n'), ((2067, 2098), 'django.utils.translation.gettext_lazy', '_', (['"""Is the product serviceable"""'], {}), "('Is the product serviceable')\n", (2068, 2098), True, 'from django.utils.translation import gettext_lazy as _\n'), ((2184, 2246), 'django.utils.translation.gettext_lazy', '_', (['"""Service Period in Months. How long user get free service."""'], {}), "('Service Period in Months. 
How long user get free service.')\n", (2185, 2246), True, 'from django.utils.translation import gettext_lazy as _\n'), ((2269, 2299), 'django.utils.translation.gettext_lazy', '_', (['"""Service Period in Months."""'], {}), "('Service Period in Months.')\n", (2270, 2299), True, 'from django.utils.translation import gettext_lazy as _\n'), ((2406, 2421), 'django.utils.translation.gettext_lazy', '_', (['"""Valid from"""'], {}), "('Valid from')\n", (2407, 2421), True, 'from django.utils.translation import gettext_lazy as _\n'), ((2441, 2496), 'django.utils.translation.gettext_lazy', '_', (['"""Enter the datetime from which the product is valid"""'], {}), "('Enter the datetime from which the product is valid')\n", (2442, 2496), True, 'from django.utils.translation import gettext_lazy as _\n'), ((2604, 2620), 'django.utils.translation.gettext_lazy', '_', (['"""Valid until"""'], {}), "('Valid until')\n", (2605, 2620), True, 'from django.utils.translation import gettext_lazy as _\n'), ((2640, 2703), 'django.utils.translation.gettext_lazy', '_', (['"""Enter the datetime on which the product\'s validity expires"""'], {}), '("Enter the datetime on which the product\'s validity expires")\n', (2641, 2703), True, 'from django.utils.translation import gettext_lazy as _\n'), ((4112, 4207), 'django.db.models.Index', 'models.Index', ([], {'fields': "['code', 'name', 'product_group', 'product_type', 'brand', 'in_stock']"}), "(fields=['code', 'name', 'product_group', 'product_type',\n 'brand', 'in_stock'])\n", (4124, 4207), False, 'from django.db import models\n')]
|
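A hedged sketch of querying the model above: assuming Product is importable, this selects in-stock products whose validity window covers the current time, leaning on the fields covered by the Meta index:

from django.utils import timezone

now = timezone.now()
available = (
    Product.objects
    .filter(in_stock__gt=0)        # has inventory
    .filter(valid_from__lte=now)   # already valid
    .exclude(valid_until__lt=now)  # not yet expired (NULL means no expiry)
)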
# Generated by Django 3.1.2 on 2020-12-29 20:56
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('recipients', '0041_auto_20201220_2242'),
]
operations = [
migrations.AlterField(
model_name='groceryrequest',
name='availability',
field=models.TextField(help_text="Our deliveries will be done on Fridays, Saturdays and Sundays between 12 and 8 PM. Please list the days and times that you're available to receive a delivery", verbose_name='Availability'),
),
migrations.AlterField(
model_name='mealrequest',
name='availability',
field=models.TextField(help_text="Our deliveries will be done on Fridays, Saturdays and Sundays between 12 and 8 PM. Please list the days and times that you're available to receive a delivery", verbose_name='Availability'),
),
]
|
[
"django.db.models.TextField"
] |
[((353, 579), 'django.db.models.TextField', 'models.TextField', ([], {'help_text': '"""Our deliveries will be done on Fridays, Saturdays and Sundays between 12 and 8 PM. Please list the days and times that you\'re available to receive a delivery"""', 'verbose_name': '"""Availability"""'}), '(help_text=\n "Our deliveries will be done on Fridays, Saturdays and Sundays between 12 and 8 PM. Please list the days and times that you\'re available to receive a delivery"\n , verbose_name=\'Availability\')\n', (369, 579), False, 'from django.db import migrations, models\n'), ((702, 928), 'django.db.models.TextField', 'models.TextField', ([], {'help_text': '"""Our deliveries will be done on Fridays, Saturdays and Sundays between 12 and 8 PM. Please list the days and times that you\'re available to receive a delivery"""', 'verbose_name': '"""Availability"""'}), '(help_text=\n "Our deliveries will be done on Fridays, Saturdays and Sundays between 12 and 8 PM. Please list the days and times that you\'re available to receive a delivery"\n , verbose_name=\'Availability\')\n', (718, 928), False, 'from django.db import migrations, models\n')]
|
import csv
import os
import pickle
from os import listdir
from os.path import isfile, join
import shutil
# Verify the current working directory. Depending on your environment / editor, it may not be where you think it is
# since paths are relative, this will be problematic, so double check
cwd = os.getcwd()
print("\nCurrent working directory: {0}\n".format(cwd))
data_root_dir = '../original data/covid-chestxray-dataset-master/'
image_root_dir = '../original data/covid-chestxray-dataset-master/images'
image_tgt_dir_normal = '../../images/normal'
image_tgt_dir_covid = '../../images/covid-19'
image_tgt_dir_pneumonia = '../../images/pneumonia'
info_file_name = 'metadata.csv'
info_path = os.path.join(data_root_dir, info_file_name)
data_dict = {}
covid = ['COVID-19']
pneumonia_virus = ['SARS']
pneumonia_bacteria = ['Pneumocystis','Streptococcus']
normal = ['No Finding']
x0 = 0
x1 = 0
x2 = 0
x3 = 0
n_ct = 0
n_ards = 0
#print("\nPre CSV Loop: \n")
with open(info_path,'r') as f:
csv_reader = csv.reader(f)
i = 0
#print("\n\tPre Row Loop:")
for row in csv_reader:
#print("\n\t\tIn Row Loop:")
if i == 0:
i += 1
continue
patient_id = row[0]
subject_id = row[1]
view = row[18]
image_name = row[23]
disease = row[4]
modality = row[19]
#print("\n\t\tmodality is : {0}\n".format(modality))
if 'X-ray' not in modality:
n_ct += 1
continue
#print("\n\t\t\tPre JPEG Path:")
jpg_path = os.path.join(image_root_dir, image_name)
#print("\n\t\tImage Path is : {0}\n".format(jpg_path))
if os.path.exists(jpg_path) and ('AP' in view or 'PA' in view):
if data_dict.get(patient_id+'_'+subject_id) is None:
data_dict[patient_id+'_'+subject_id] = {'class':{
'COVID-19':0,
'pneumonia_virus':0,
'pneumonia_bacteria':0,
'normal':0
},
'image_dict':{}}
if disease == 'ARDS':
n_ards += 1
continue
#CCB 04/09/21 - Bad IF statements since disease can be a / delimited list
# which would never match an item in the source lists as a full string ....
# need to split disease by / and search each/every element against the source
# list
#if disease in covid:
disease = disease.split('/')[-1]
if disease in covid:
#print("\n\tFound COVID Image")
data_dict[patient_id+'_'+subject_id]['class']['COVID-19'] = 1
x0 += 1
#Copy the image to the covid folder
shutil.copyfile(jpg_path, os.path.join(image_tgt_dir_covid, image_name))
if disease in pneumonia_virus:
data_dict[patient_id+'_'+subject_id]['class']['pneumonia_virus'] = 1
x1 += 1
#Copy the image to the pneumonia folder
if ('AP' in view):
shutil.copyfile(jpg_path, os.path.join(image_tgt_dir_pneumonia, image_name))
if disease in pneumonia_bacteria:
data_dict[patient_id+'_'+subject_id]['class']['pneumonia_bacteria'] = 1
x2 += 1
#Copy the image to the pneumonia folder
if ('AP' in view):
shutil.copyfile(jpg_path, os.path.join(image_tgt_dir_pneumonia, image_name))
if disease in normal:
data_dict[patient_id+'_'+subject_id]['class']['normal'] = 1
x3 += 1
#Copy the image to the normal folder
if ('AP' in view):
shutil.copyfile(jpg_path, os.path.join(image_tgt_dir_normal, image_name))
print("\n\tPatient ID is : : {0}\n".format(patient_id))
print("\n\t\tView is : : {0}\n".format(view))
data_dict[patient_id+'_'+subject_id]['image_dict'][image_name] = {
'path':jpg_path,
'type':view
}
y0 = 0
y1 = 0
y2 = 0
y3 = 0
z0 = 0
z1 = 0
z2 = 0
z3 = 0
v0 = 0
v1 = 0
v2 = 0
v3 = 0
w0 = 0
w1 = 0
w2 = 0
w3 = 0
i = 0
j = 0
ap_list = []
pa_list = []
for key, value in data_dict.items():
for jpg_name, jpg_info in value['image_dict'].items():
print(jpg_info['type'])
y0 += value['class']['COVID-19']
y1 += value['class']['pneumonia_virus']
y2 += value['class']['pneumonia_bacteria']
y3 += value['class']['normal']
j += 1
if 'PA' in jpg_info['type'] or 'AP' in jpg_info['type']:
i += 1
z0 += value['class']['COVID-19']
z1 += value['class']['pneumonia_virus']
z2 += value['class']['pneumonia_bacteria']
z3 += value['class']['normal']
if 'PA' in jpg_info['type']:
print("\n\tIn PA loop")
pa_list.append(jpg_name)
v0 += value['class']['COVID-19']
v1 += value['class']['pneumonia_virus']
v2 += value['class']['pneumonia_bacteria']
v3 += value['class']['normal']
if 'AP' in jpg_info['type']:
#print("\n\tIn AP loop")
ap_list.append(jpg_name)
w0 += value['class']['COVID-19']
w1 += value['class']['pneumonia_virus']
w2 += value['class']['pneumonia_bacteria']
w3 += value['class']['normal']
print (x0, x1, x2, x3)
print (i, j)
print (y0, y1, y2, y3)
print (z0, z1, z2, z3)
print (v0, v1, v2, v3)
print (w0, w1, w2, w3)
pickle.dump(data_dict, open('../data_preprocess/formal_covid_dict_ap.pkl','wb'))
pickle.dump(ap_list, open('ap_list.pkl','wb'))
pickle.dump(pa_list, open('pa_list.pkl','wb'))
saved_path = '../data_preprocess/formal_covid_dict.pkl'
if os.path.exists(saved_path):
os.remove(saved_path)
pickle.dump(data_dict, open(saved_path,'wb'))
print ('finish')
##
### Xray
|
[
"os.remove",
"csv.reader",
"os.getcwd",
"os.path.exists",
"os.path.join"
] |
[((303, 314), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (312, 314), False, 'import os\n'), ((699, 742), 'os.path.join', 'os.path.join', (['data_root_dir', 'info_file_name'], {}), '(data_root_dir, info_file_name)\n', (711, 742), False, 'import os\n'), ((5681, 5707), 'os.path.exists', 'os.path.exists', (['saved_path'], {}), '(saved_path)\n', (5695, 5707), False, 'import os\n'), ((1011, 1024), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (1021, 1024), False, 'import csv\n'), ((5711, 5732), 'os.remove', 'os.remove', (['saved_path'], {}), '(saved_path)\n', (5720, 5732), False, 'import os\n'), ((1492, 1532), 'os.path.join', 'os.path.join', (['image_root_dir', 'image_name'], {}), '(image_root_dir, image_name)\n', (1504, 1532), False, 'import os\n'), ((1601, 1625), 'os.path.exists', 'os.path.exists', (['jpg_path'], {}), '(jpg_path)\n', (1615, 1625), False, 'import os\n'), ((2791, 2836), 'os.path.join', 'os.path.join', (['image_tgt_dir_covid', 'image_name'], {}), '(image_tgt_dir_covid, image_name)\n', (2803, 2836), False, 'import os\n'), ((3090, 3139), 'os.path.join', 'os.path.join', (['image_tgt_dir_pneumonia', 'image_name'], {}), '(image_tgt_dir_pneumonia, image_name)\n', (3102, 3139), False, 'import os\n'), ((3407, 3456), 'os.path.join', 'os.path.join', (['image_tgt_dir_pneumonia', 'image_name'], {}), '(image_tgt_dir_pneumonia, image_name)\n', (3419, 3456), False, 'import os\n'), ((3696, 3742), 'os.path.join', 'os.path.join', (['image_tgt_dir_normal', 'image_name'], {}), '(image_tgt_dir_normal, image_name)\n', (3708, 3742), False, 'import os\n')]
|
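Once the script finishes, the pickle can be sanity-checked by re-tallying the one-hot class flags built above (a minimal sketch, assuming the relative path still resolves):

import pickle

with open('../data_preprocess/formal_covid_dict.pkl', 'rb') as f:
    data_dict = pickle.load(f)

totals = {'COVID-19': 0, 'pneumonia_virus': 0, 'pneumonia_bacteria': 0, 'normal': 0}
for info in data_dict.values():
    for cls, flag in info['class'].items():
        totals[cls] += flag
print(totals)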
from pettingzoo.butterfly import pistonball_v4
env = pistonball_v4.env()
print(env.observation_spaces)
print(env.action_spaces)
obs = env.reset()
print(env.num_agents)
print(env.agents)
|
[
"pettingzoo.butterfly.pistonball_v4.env"
] |
[((53, 72), 'pettingzoo.butterfly.pistonball_v4.env', 'pistonball_v4.env', ([], {}), '()\n', (70, 72), False, 'from pettingzoo.butterfly import pistonball_v4\n')]
|
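The env above follows PettingZoo's agent-environment-cycle API of that era. A minimal interaction loop looks like the sketch below; agent_iter/last/step are the standard calls, and finished agents must be stepped with None:

from pettingzoo.butterfly import pistonball_v4

env = pistonball_v4.env()
env.reset()
for agent in env.agent_iter():
    observation, reward, done, info = env.last()
    action = None if done else env.action_spaces[agent].sample()
    env.step(action)
env.close()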
"""add delivered_on to order
Revision ID: 080534ba8038
Revises: 4e8c2eb9ae96
Create Date: 2019-01-14 23:07:47.734911
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '080534ba8038'
down_revision = '4e8c2eb9ae96'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('order', sa.Column('delivered_on', sa.DateTime(), nullable=True))
op.create_index(op.f('ix_order_delivered_on'), 'order', ['delivered_on'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_order_delivered_on'), table_name='order')
op.drop_column('order', 'delivered_on')
# ### end Alembic commands ###
|
[
"alembic.op.drop_column",
"sqlalchemy.DateTime",
"alembic.op.f"
] |
[((761, 800), 'alembic.op.drop_column', 'op.drop_column', (['"""order"""', '"""delivered_on"""'], {}), "('order', 'delivered_on')\n", (775, 800), False, 'from alembic import op\n'), ((496, 525), 'alembic.op.f', 'op.f', (['"""ix_order_delivered_on"""'], {}), "('ix_order_delivered_on')\n", (500, 525), False, 'from alembic import op\n'), ((706, 735), 'alembic.op.f', 'op.f', (['"""ix_order_delivered_on"""'], {}), "('ix_order_delivered_on')\n", (710, 735), False, 'from alembic import op\n'), ((445, 458), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (456, 458), True, 'import sqlalchemy as sa\n')]
|
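Besides the alembic CLI, the revision can be applied programmatically through Alembic's command API (a minimal sketch, assuming an alembic.ini sits at the project root):

from alembic import command
from alembic.config import Config

cfg = Config('alembic.ini')          # path to the project's Alembic config (assumed)
command.upgrade(cfg, '080534ba8038')  # or 'head' for the latest revision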
from flask import Flask
application = Flask(__name__)
@application.route("/")
def hello():
return "<h1>Demo via Nginx with uWSGI!</h1>"
if __name__ == "__main__":
application.run(host='127.0.0.1', port=9001)
|
[
"flask.Flask"
] |
[((38, 53), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (43, 53), False, 'from flask import Flask\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2021-06-12 23:33
# @Author : Nuonuo
# @Site :
# @File : ffmpeg.py
# @Software: PyCharm
'''
ffmpeg download page:
http://www.ffmpeg.org/download.html
'''
import ffmpy3
import datetime
from moviepy.editor import *
import os
def 获取_视频_信息(path):
data = 运行2(r'ffprobe -i '+path)
    # Stream #0:0(und): Video: h264 (Main) (avc1 / 0x31637661), yuv420p, 960x540, 1008 kb/s, 25 fps, 25 tbr, 25k tbn, 50 tbc (default)
    # The first stream is the video stream: H.264 (Main profile, AVC1 container tag),
    # frames stored as yuv420p, resolution 960x540, bitrate 1008 kb/s, 25 frames per second.
    # Stream #0:1(und): Audio: aac (LC) (mp4a / 0x6134706D), 44100 Hz, stereo, fltp, 92 kb/s (default)
    # The second stream is the audio stream: AAC (LC profile, MP4A container tag),
    # 44.1 kHz sample rate, stereo, bitrate 92 kb/s.
    print(data)
    print('*'*20)
    print('Duration: duration, start time, overall bitrate')
    print('Stream: codec, per-frame pixel format, resolution, stream bitrate, frames per second')
    print('Video streams are tagged "Video", audio streams "Audio"')
def 视频_压缩(path,音频比特率,视频比特率,保存地址):
    '''
    Change the bitrate:
    ffmpeg -i 01.mp4 -b:a 100k -b:v 3000k nnn.mp4
    ffmpeg -i Desktop/1.mov -b:v 1.5M Desktop/1.mp4
    Change the resolution:
    ffmpeg -i Desktop/1.mov -s vga Desktop/1.mp4
    -s vga : set the resolution; vga stands for 640*480, other presets work too
    Cap the file size (not recommended):
    ffmpeg -i Desktop/吉他.mp4 -fs 15MB Desktop/output1.mp4
    -fs 15MB : caps the output file size at 15MB
    Change the frame rate:
    ffmpeg -i Desktop/吉他.mp4 -r 20 Desktop/output1.mp4
    -r 20 : sets the frame rate to 20 fps
    ----------------------------
    As a rule, choose the target bitrates relative to the source video
    '''
ff = ffmpy3.FFmpeg(
inputs={path: None},
outputs={保存地址: f'-b:a {音频比特率}k -b:v {视频比特率}k'}
)
# print(ff.cmd)
ff.run()
def 视频_切片_ts(path,保存m3u8地址):
#ffmpeg -i XXX.mp4 -c:v libx264 -c:a copy -f hls XXX.m3u8
ff = ffmpy3.FFmpeg(
inputs={path: None},
outputs={保存m3u8地址: f' -c:v libx264 -c:a copy -f hls'}
)
# print(ff.cmd)
ff.run()
def 视频_截取(path,截取时间,结束时间,保存地址):
    '''
    path: the video file to cut from
    截取时间: where to start cutting, e.g. 1:00 for one minute in
    结束时间: where to stop cutting (the duration is computed via minNums)
    保存地址: output path
    ---
    ffmpeg -ss 00:03:00 -i video.mp4 -to 00:02:00 -c copy cut.mp4
    strip an intro: ffmpeg -ss 00:03:00 -i video.mp4 -c copy cut.mp4
    '''
截取时长=minNums(截取时间,结束时间)
ff = ffmpy3.FFmpeg(
inputs={path: None},
outputs={保存地址: f' -ss {截取时间} -t {截取时长} '}
)
#print(ff.cmd)
ff.run()
def 视频_截取_快速(path,截取时间,截取时长,保存地址):
    # Not recommended: the file is large and the video can glitch, but it is fast
    ff = ffmpy3.FFmpeg(
        inputs={path: f'-ss {截取时间}'},
        outputs={保存地址: f'-t {截取时长} -c:v copy -c:a copy'}
)
#print(ff.cmd)
ff.run()
def 视频_提取音频(path,保存地址):
    'The audio track is extracted as m4a by default'
#ffmpeg -i 16.mp4 -vn -codec copy out.m4a
ff = ffmpy3.FFmpeg(
inputs={path: None},
outputs={保存地址: ' -vn -codec copy '}
)
# print(ff.cmd)
ff.run()
def 视频_删除音频():
pass
    '''
    Strip the original audio track:
    E:\anzhuangbao\ffmpeg\bin\ffmpeg -i G:\hi.mp4 -c:v copy -an G:\nosound.mp4
    Add background music:
    E:\anzhuangbao\ffmpeg\bin\ffmpeg -i G:\nosound.mp4 -i G:\songs.mp3 -t 7.1 -c y copy G:\output.mp4
    Method 2:
    merge audio and video, keeping the original sound (the mp3 file must come
    first and the mp4 second here, otherwise the background music is lost)
    E:\anzhuangbao\ffmpeg\bin\ffmpeg.exe -i G:\songs.mp3 -i G:\hi.mp4 -t 7.1 -y G:\new1.mp4
    -t takes a duration, -y overwrites the output
    :return:
    '''
def minNums(startTime, endTime):
    '''Compute the "MM:SS" difference between two timestamps.'''
    # Normalize the input format
    startTime1 = startTime
    endTime1 = endTime
    # Parse both timestamps
    startTime2 = datetime.datetime.strptime(startTime1, "%M:%S")
    endTime2 = datetime.datetime.strptime(endTime1, "%M:%S")
    seconds = (endTime2 - startTime2).seconds
    # .seconds only covers the hour/minute/second part of the delta, not the days
    # (it would be wrong if the two timestamps were not on the same day)
    total_seconds = (endTime2 - startTime2).total_seconds()
    # .total_seconds() gives the exact difference, converted to seconds
mins = total_seconds / 60
s = total_seconds % 60
if mins <10:
resu = '0'+str(int(mins))
else:resu=''+str(int(mins))
if s<10:
resu+=':0'+str(int(s))
else:resu+=':'+str(int(s))
return resu
def 音频_MP3转WAV(path,保存地址):
#ffmpeg -i music.mp3 music.wav
ff = ffmpy3.FFmpeg(
inputs={path: None},
outputs={保存地址: None}
)
#print(ff.cmd)
ff.run()
def 音频_截取(path,开始时间,截取时长,保存地址):
#ffmpeg -i music.wav -ss 0 -t 37 musicshort.wav
ff = ffmpy3.FFmpeg(
inputs={path: None},
outputs={保存地址: f' -ss {开始时间} -t {截取时长} '}
)
ff.run()
def 视频_音频_混合(音频地址,视频地址,保存地址):
#ffmpeg -i musicshort.wav -i movie.avi final_video.avi
ff = ffmpy3.FFmpeg(
        inputs={音频地址: None, 视频地址: None},  # both files are inputs; '-i' is not a valid output option
        outputs={保存地址: None}
)
ff.run()
def 视频_合成(filelist,保存文件名):
    # Generate the txt file first, one entry per clip: file '10.mp4'
    # The working directory defaults to the desktop
    # (the original called an unimported helper module; os.chdir is assumed equivalent)
    os.chdir(os.path.join(os.path.expanduser('~'), 'Desktop'))
    #ffmpeg -f concat -i filelist.txt -c copy out.mp4
    ff = ffmpy3.FFmpeg(
        inputs={filelist: '-f concat'},  # may also need '-safe 0' for absolute paths
        outputs={保存文件名: '-c copy'}
)
ff.run()
def 视频_合成多个(合成文件夹,保存地址):
    # Note: clips with mismatched resolutions can break the concatenation
    # Relies mainly on the moviepy library
    # Collect the clips in a list
    L = []
    # Walk the folder that holds the clips
    for root, dirs, files in os.walk(合成文件夹):
        # Sort the files numerically by name (assumes names like "1.mp4")
        rq=[]
        for i in files:
            rq.append(int(i[:-4]))
        rq.sort()
        for file in rq:
            # Only .mp4 files are expected
            #if os.path.splitext(file)[1] == '.mp4':
            # Build the full path
            filePath = os.path.join(root, str(file)+'.mp4')
            # Load the clip
            video = VideoFileClip(filePath)
            # Append it to the list
            L.append(video)
    # Concatenate the clips
    final_clip = concatenate_videoclips(L)
    # Write the target video file
final_clip.to_videofile(r"C:\Users\Erin\Desktop\\"+保存地址, fps=24, remove_temp=False)
def 视频_合成单个(视频1,视频2,保存地址):
    # (the original called an unimported helper module; os.chdir is assumed equivalent)
    os.chdir(os.path.join(os.path.expanduser('~'), 'Desktop'))
    # Resize the second clip to the first one's dimensions, then concatenate
video=VideoFileClip(视频1)
video2 = VideoFileClip(视频2).resize(video.size)
video_new=concatenate_videoclips([video2,video])
video_new.write_videofile(r"C:\Users\Erin\Desktop\\"+保存地址)
def 运行2(cmd):
    'Run a shell command and return its output'
return os.popen(cmd).read()
|
[
"datetime.datetime.strptime",
"ffmpy3.FFmpeg",
"os.walk",
"os.popen"
] |
[((1449, 1535), 'ffmpy3.FFmpeg', 'ffmpy3.FFmpeg', ([], {'inputs': '{path: None}', 'outputs': "{保存地址: f'-b:a {音频比特率}k -b:v {视频比特率}k'}"}), "(inputs={path: None}, outputs={保存地址:\n f'-b:a {音频比特率}k -b:v {视频比特率}k'})\n", (1462, 1535), False, 'import ffmpy3\n'), ((1689, 1782), 'ffmpy3.FFmpeg', 'ffmpy3.FFmpeg', ([], {'inputs': '{path: None}', 'outputs': "{保存m3u8地址: f' -c:v libx264 -c:a copy -f hls'}"}), "(inputs={path: None}, outputs={保存m3u8地址:\n f' -c:v libx264 -c:a copy -f hls'})\n", (1702, 1782), False, 'import ffmpy3\n'), ((2128, 2205), 'ffmpy3.FFmpeg', 'ffmpy3.FFmpeg', ([], {'inputs': '{path: None}', 'outputs': "{保存地址: f' -ss {截取时间} -t {截取时长} '}"}), "(inputs={path: None}, outputs={保存地址: f' -ss {截取时间} -t {截取时长} '})\n", (2141, 2205), False, 'import ffmpy3\n'), ((2335, 2426), 'ffmpy3.FFmpeg', 'ffmpy3.FFmpeg', ([], {'inputs': "{path: '-ss 截取时间'}", 'outputs': "{保存地址: '-t 截取时长 -c:v copy -c:a copy'}"}), "(inputs={path: '-ss 截取时间'}, outputs={保存地址:\n '-t 截取时长 -c:v copy -c:a copy'})\n", (2348, 2426), False, 'import ffmpy3\n'), ((2572, 2643), 'ffmpy3.FFmpeg', 'ffmpy3.FFmpeg', ([], {'inputs': '{path: None}', 'outputs': "{保存地址: ' -vn -codec copy '}"}), "(inputs={path: None}, outputs={保存地址: ' -vn -codec copy '})\n", (2585, 2643), False, 'import ffmpy3\n'), ((3282, 3329), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['startTime1', '"""%M:%S"""'], {}), "(startTime1, '%M:%S')\n", (3308, 3329), False, 'import datetime\n'), ((3345, 3390), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['endTime1', '"""%M:%S"""'], {}), "(endTime1, '%M:%S')\n", (3371, 3390), False, 'import datetime\n'), ((3905, 3961), 'ffmpy3.FFmpeg', 'ffmpy3.FFmpeg', ([], {'inputs': '{path: None}', 'outputs': '{保存地址: None}'}), '(inputs={path: None}, outputs={保存地址: None})\n', (3918, 3961), False, 'import ffmpy3\n'), ((4109, 4186), 'ffmpy3.FFmpeg', 'ffmpy3.FFmpeg', ([], {'inputs': '{path: None}', 'outputs': "{保存地址: f' -ss {开始时间} -t {截取时长} '}"}), "(inputs={path: None}, outputs={保存地址: f' -ss {开始时间} -t {截取时长} '})\n", (4122, 4186), False, 'import ffmpy3\n'), ((4320, 4385), 'ffmpy3.FFmpeg', 'ffmpy3.FFmpeg', ([], {'inputs': '{音频地址: None}', 'outputs': "{保存地址: f'-i {视频地址} '}"}), "(inputs={音频地址: None}, outputs={保存地址: f'-i {视频地址} '})\n", (4333, 4385), False, 'import ffmpy3\n'), ((4582, 4672), 'ffmpy3.FFmpeg', 'ffmpy3.FFmpeg', ([], {'inputs': "{None: '-f concat -i'}", 'outputs': "{保存文件名: f' {filelist} -c copy '}"}), "(inputs={None: '-f concat -i'}, outputs={保存文件名:\n f' {filelist} -c copy '})\n", (4595, 4672), False, 'import ffmpy3\n'), ((4865, 4879), 'os.walk', 'os.walk', (['合成文件夹'], {}), '(合成文件夹)\n', (4872, 4879), False, 'import os\n'), ((5800, 5813), 'os.popen', 'os.popen', (['cmd'], {}), '(cmd)\n', (5808, 5813), False, 'import os\n')]
|
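A hypothetical usage of the wrappers above. The file names are placeholders, and minNums expects 'MM:SS' timestamps, so this keeps 00:30 through 02:00 and then re-encodes the result:

视频_截取('input.mp4', '00:30', '02:00', 'cut.mp4')  # cut the 00:30 to 02:00 window
视频_压缩('cut.mp4', 128, 1000, 'cut_small.mp4')     # ~128 kb/s audio, ~1000 kb/s video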
"""OneSignal Base Client class."""
import requests
import json
class OneSignalBaseClient(object):
"""OneSignal Base Client."""
MODE_APP = "app"
MODE_USER = "user"
def _url(self, endpoint):
"""
Build the full OneSignal API URL.
:return: Returns the complete url string
:rtype: str
"""
return "https://onesignal.com/api/v1/%s" % endpoint
def _get_headers(self, custom_headers={}):
"""
Build default headers for requests. Fallback to "app" mode
:return: Returns dict which contains the headers
:rtype: dict
"""
auth = "Basic %s" % (
self.auth_key if self.mode == self.MODE_USER else self.app_api_key
)
headers = {
"Content-Type": "application/json; charset=utf-8",
"Authorization": auth,
}
headers.update(custom_headers)
return headers
def get(self, url):
"""
Perform a GET request.
:param url: URL to send the request.
:return: Returns json response
:rtype: dict or list
:raises requests.exceptions.HTTPError: if status code is not 2xx
"""
request = requests.get(url, headers=self._get_headers())
request.raise_for_status()
return request.json()
def post(self, url, payload={}, headers={}):
"""
Perform a POST request.
:param url: URL to send the request.
:param payload: dict to be sent as request body/data.
:param headers: dict with headers to be used in the request.
:return: Returns json response
:rtype: dict or list
:raises requests.exceptions.HTTPError: if status code is not 2xx
"""
json_payload = json.dumps(payload)
final_headers = self._get_headers(custom_headers=headers)
request = requests.post(url, data=json_payload, headers=final_headers)
request.raise_for_status()
return request.json()
def put(self, url, payload={}, headers={}):
"""
Perform a PUT request.
:param url: URL to send the request.
:param payload: dict to be sent as request body/data.
:param headers: dict with headers to be used in the request.
:return: Returns json response
:rtype: dict or list
:raises requests.exceptions.HTTPError: if status code is not 2xx
"""
json_payload = json.dumps(payload)
final_headers = self._get_headers(custom_headers=headers)
request = requests.put(url, data=json_payload, headers=final_headers)
request.raise_for_status()
return request.json()
def delete(self, url, headers={}):
"""
Perform a DELETE request.
:param url: URL to send the request.
:param headers: dict with headers to be used in the request.
:return: Returns json response
:rtype: dict or list
:raises requests.exceptions.HTTPError: if status code is not 2xx
"""
final_headers = self._get_headers(custom_headers=headers)
request = requests.delete(url, headers=final_headers)
request.raise_for_status()
return request.json()
|
[
"requests.put",
"requests.post",
"requests.delete",
"json.dumps"
] |
[((1777, 1796), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (1787, 1796), False, 'import json\n'), ((1881, 1941), 'requests.post', 'requests.post', (['url'], {'data': 'json_payload', 'headers': 'final_headers'}), '(url, data=json_payload, headers=final_headers)\n', (1894, 1941), False, 'import requests\n'), ((2452, 2471), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (2462, 2471), False, 'import json\n'), ((2556, 2615), 'requests.put', 'requests.put', (['url'], {'data': 'json_payload', 'headers': 'final_headers'}), '(url, data=json_payload, headers=final_headers)\n', (2568, 2615), False, 'import requests\n'), ((3119, 3162), 'requests.delete', 'requests.delete', (['url'], {'headers': 'final_headers'}), '(url, headers=final_headers)\n', (3134, 3162), False, 'import requests\n')]
|
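A concrete client is expected to set mode plus the matching key before the HTTP helpers run. The subclass below is a hypothetical sketch: the 'notifications' endpoint and the app_id/contents/included_segments payload follow OneSignal's documented create-notification call rather than anything defined in this file:

class AppNotificationClient(OneSignalBaseClient):
    """Hypothetical concrete client running in app mode."""

    def __init__(self, app_id, app_api_key):
        self.mode = self.MODE_APP
        self.app_id = app_id
        self.app_api_key = app_api_key

    def create_notification(self, contents, segments=('All',)):
        payload = {
            'app_id': self.app_id,
            'contents': contents,               # e.g. {'en': 'Hello'}
            'included_segments': list(segments),
        }
        return self.post(self._url('notifications'), payload=payload)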
# encoding: utf-8
from __future__ import annotations
from typing import Any
from urllib.parse import urlencode
from flask import Blueprint
from ckan.common import json
from ckan.plugins.toolkit import get_action, request, h
import re
datatablesview = Blueprint(u'datatablesview', __name__)
def merge_filters(view_filters: dict[str, Any],
user_filters_str: str) -> dict[str, Any]:
u'''
view filters are built as part of the view, user filters
are selected by the user interacting with the view. Any filters
selected by user may only tighten filters set in the view,
others are ignored.
>>> merge_filters({
... u'Department': [u'BTDT'], u'OnTime_Status': [u'ONTIME']},
... u'CASE_STATUS:Open|CASE_STATUS:Closed|Department:INFO')
{u'Department': [u'BTDT'],
u'OnTime_Status': [u'ONTIME'],
u'CASE_STATUS': [u'Open', u'Closed']}
'''
filters = dict(view_filters)
if not user_filters_str:
return filters
user_filters = {}
for k_v in user_filters_str.split(u'|'):
k, _sep, v = k_v.partition(u':')
if k not in view_filters or v in view_filters[k]:
user_filters.setdefault(k, []).append(v)
for k in user_filters:
filters[k] = user_filters[k]
return filters
def ajax(resource_view_id: str):
resource_view = get_action(u'resource_view_show'
)({}, {
u'id': resource_view_id
})
draw = int(request.form[u'draw'])
search_text = str(request.form[u'search[value]'])
offset = int(request.form[u'start'])
limit = int(request.form[u'length'])
view_filters = resource_view.get(u'filters', {})
user_filters = str(request.form[u'filters'])
filters = merge_filters(view_filters, user_filters)
datastore_search = get_action(u'datastore_search')
unfiltered_response = datastore_search(
{}, {
u"resource_id": resource_view[u'resource_id'],
u"limit": 0,
u"filters": view_filters,
}
)
cols = [f[u'id'] for f in unfiltered_response[u'fields']]
if u'show_fields' in resource_view:
cols = [c for c in cols if c in resource_view[u'show_fields']]
sort_list = []
i = 0
while True:
if u'order[%d][column]' % i not in request.form:
break
sort_by_num = int(request.form[u'order[%d][column]' % i])
sort_order = (
u'desc' if request.form[u'order[%d][dir]' %
i] == u'desc' else u'asc'
)
sort_list.append(cols[sort_by_num] + u' ' + sort_order)
i += 1
colsearch_dict = {}
i = 0
while True:
if u'columns[%d][search][value]' % i not in request.form:
break
v = str(request.form[u'columns[%d][search][value]' % i])
if v:
k = str(request.form[u'columns[%d][name]' % i])
# replace non-alphanumeric characters with FTS wildcard (_)
v = re.sub(r'[^0-9a-zA-Z\-]+', '_', v)
# append ':*' so we can do partial FTS searches
colsearch_dict[k] = v + u':*'
i += 1
if colsearch_dict:
search_text = json.dumps(colsearch_dict)
else:
search_text = re.sub(r'[^0-9a-zA-Z\-]+', '_',
search_text) + u':*' if search_text else u''
try:
response = datastore_search(
{}, {
u"q": search_text,
u"resource_id": resource_view[u'resource_id'],
u'plain': False,
u'language': u'simple',
u"offset": offset,
u"limit": limit,
u"sort": u', '.join(sort_list),
u"filters": filters,
}
)
except Exception:
query_error = u'Invalid search query... ' + search_text
dtdata = {u'error': query_error}
else:
data = []
for row in response[u'records']:
record = {colname: str(row.get(colname, u''))
for colname in cols}
# the DT_RowId is used in DT to set an element id for each record
record['DT_RowId'] = 'row' + str(row.get(u'_id', u''))
data.append(record)
dtdata = {
u'draw': draw,
u'recordsTotal': unfiltered_response.get(u'total', 0),
u'recordsFiltered': response.get(u'total', 0),
u'data': data
}
return json.dumps(dtdata)
def filtered_download(resource_view_id: str):
params = json.loads(request.form[u'params'])
resource_view = get_action(u'resource_view_show'
)({}, {
u'id': resource_view_id
})
search_text = str(params[u'search'][u'value'])
view_filters = resource_view.get(u'filters', {})
user_filters = str(params[u'filters'])
filters = merge_filters(view_filters, user_filters)
datastore_search = get_action(u'datastore_search')
unfiltered_response = datastore_search(
{}, {
u"resource_id": resource_view[u'resource_id'],
u"limit": 0,
u"filters": view_filters,
}
)
cols = [f[u'id'] for f in unfiltered_response[u'fields']]
if u'show_fields' in resource_view:
cols = [c for c in cols if c in resource_view[u'show_fields']]
sort_list = []
for order in params[u'order']:
sort_by_num = int(order[u'column'])
sort_order = (u'desc' if order[u'dir'] == u'desc' else u'asc')
sort_list.append(cols[sort_by_num] + u' ' + sort_order)
cols = [c for (c, v) in zip(cols, params[u'visible']) if v]
colsearch_dict = {}
columns = params[u'columns']
for column in columns:
if column[u'search'][u'value']:
v = column[u'search'][u'value']
if v:
k = column[u'name']
# replace non-alphanumeric characters with FTS wildcard (_)
v = re.sub(r'[^0-9a-zA-Z\-]+', '_', v)
# append ':*' so we can do partial FTS searches
colsearch_dict[k] = v + u':*'
if colsearch_dict:
search_text = json.dumps(colsearch_dict)
else:
search_text = re.sub(r'[^0-9a-zA-Z\-]+', '_',
search_text) + u':*' if search_text else ''
return h.redirect_to(
h.url_for(
u'datastore.dump',
resource_id=resource_view[u'resource_id']) + u'?' + urlencode(
{
u'q': search_text,
u'plain': False,
u'language': u'simple',
u'sort': u','.join(sort_list),
u'filters': json.dumps(filters),
u'format': request.form[u'format'],
u'fields': u','.join(cols),
}))
datatablesview.add_url_rule(
u'/datatables/ajax/<resource_view_id>', view_func=ajax, methods=[u'POST']
)
datatablesview.add_url_rule(
u'/datatables/filtered-download/<resource_view_id>',
view_func=filtered_download, methods=[u'POST']
)
|
[
"flask.Blueprint",
"ckan.plugins.toolkit.get_action",
"ckan.common.json.loads",
"re.sub",
"ckan.common.json.dumps",
"ckan.plugins.toolkit.h.url_for"
] |
[((256, 294), 'flask.Blueprint', 'Blueprint', (['u"""datatablesview"""', '__name__'], {}), "(u'datatablesview', __name__)\n", (265, 294), False, 'from flask import Blueprint\n'), ((1873, 1904), 'ckan.plugins.toolkit.get_action', 'get_action', (['u"""datastore_search"""'], {}), "(u'datastore_search')\n", (1883, 1904), False, 'from ckan.plugins.toolkit import get_action, request, h\n'), ((4525, 4543), 'ckan.common.json.dumps', 'json.dumps', (['dtdata'], {}), '(dtdata)\n', (4535, 4543), False, 'from ckan.common import json\n'), ((4605, 4640), 'ckan.common.json.loads', 'json.loads', (["request.form[u'params']"], {}), "(request.form[u'params'])\n", (4615, 4640), False, 'from ckan.common import json\n'), ((5054, 5085), 'ckan.plugins.toolkit.get_action', 'get_action', (['u"""datastore_search"""'], {}), "(u'datastore_search')\n", (5064, 5085), False, 'from ckan.plugins.toolkit import get_action, request, h\n'), ((1351, 1384), 'ckan.plugins.toolkit.get_action', 'get_action', (['u"""resource_view_show"""'], {}), "(u'resource_view_show')\n", (1361, 1384), False, 'from ckan.plugins.toolkit import get_action, request, h\n'), ((3252, 3278), 'ckan.common.json.dumps', 'json.dumps', (['colsearch_dict'], {}), '(colsearch_dict)\n', (3262, 3278), False, 'from ckan.common import json\n'), ((4661, 4694), 'ckan.plugins.toolkit.get_action', 'get_action', (['u"""resource_view_show"""'], {}), "(u'resource_view_show')\n", (4671, 4694), False, 'from ckan.plugins.toolkit import get_action, request, h\n'), ((6265, 6291), 'ckan.common.json.dumps', 'json.dumps', (['colsearch_dict'], {}), '(colsearch_dict)\n', (6275, 6291), False, 'from ckan.common import json\n'), ((3054, 3088), 're.sub', 're.sub', (['"""[^0-9a-zA-Z\\\\-]+"""', '"""_"""', 'v'], {}), "('[^0-9a-zA-Z\\\\-]+', '_', v)\n", (3060, 3088), False, 'import re\n'), ((3311, 3355), 're.sub', 're.sub', (['"""[^0-9a-zA-Z\\\\-]+"""', '"""_"""', 'search_text'], {}), "('[^0-9a-zA-Z\\\\-]+', '_', search_text)\n", (3317, 3355), False, 'import re\n'), ((6074, 6108), 're.sub', 're.sub', (['"""[^0-9a-zA-Z\\\\-]+"""', '"""_"""', 'v'], {}), "('[^0-9a-zA-Z\\\\-]+', '_', v)\n", (6080, 6108), False, 'import re\n'), ((6324, 6368), 're.sub', 're.sub', (['"""[^0-9a-zA-Z\\\\-]+"""', '"""_"""', 'search_text'], {}), "('[^0-9a-zA-Z\\\\-]+', '_', search_text)\n", (6330, 6368), False, 'import re\n'), ((6464, 6535), 'ckan.plugins.toolkit.h.url_for', 'h.url_for', (['u"""datastore.dump"""'], {'resource_id': "resource_view[u'resource_id']"}), "(u'datastore.dump', resource_id=resource_view[u'resource_id'])\n", (6473, 6535), False, 'from ckan.plugins.toolkit import get_action, request, h\n'), ((6778, 6797), 'ckan.common.json.dumps', 'json.dumps', (['filters'], {}), '(filters)\n', (6788, 6797), False, 'from ckan.common import json\n')]
|
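The per-column search normalization (collapse non-alphanumeric runs to '_', append ':*' for a prefix full-text query) appears twice above; here is a standalone restatement for illustration:

import json
import re

def fts_prefix_query(colsearch):
    # Mirror of the sanitization used in ajax() and filtered_download()
    clean = {k: re.sub(r'[^0-9a-zA-Z\-]+', '_', v) + u':*'
             for k, v in colsearch.items()}
    return json.dumps(clean)

fts_prefix_query({u'Department': u'parks & rec'})  # -> '{"Department": "parks_rec:*"}'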
from DarkForestCreature import DarkForestCreature
# Dragon(300, 1.0, 1000, 1.0, 2000, 3600), # dpm of 200. Reward increases over time, difficult to kill.
class Dragon(DarkForestCreature):
def __init__(self, delay=300, delayMulti=1.0, attack=800, attackMulti=1.0, health=900, reward=3600,
incineration_resist=2):
DarkForestCreature.__init__(self, delay, delayMulti, attack, attackMulti, health, reward, incineration_resist)
def getAttack(self):
self.setReward(self.getReward() + 300)
retval = 'Coiling around the shield, the dragon vomits green wildfire onto the wooden barrier.'
return retval
def getCampfireAttack(self):
self.setReward(self.getReward() + 600)
retval = 'The dragon\'s spiked tail sweeps across the fire, battering the Salamander into the ash. The massive attack knocks ' + str(int(self.baseAttackStrength * self.attackStrengthMulti)) + ' logs out of the fire.'
return retval
def getSpawnMessage(self):
retval = 'A hissing emanates from deep in the forest. Something big is coming.'
return retval
|
[
"DarkForestCreature.DarkForestCreature.__init__"
] |
[((345, 459), 'DarkForestCreature.DarkForestCreature.__init__', 'DarkForestCreature.__init__', (['self', 'delay', 'delayMulti', 'attack', 'attackMulti', 'health', 'reward', 'incineration_resist'], {}), '(self, delay, delayMulti, attack, attackMulti,\n health, reward, incineration_resist)\n', (372, 459), False, 'from DarkForestCreature import DarkForestCreature\n')]
|
import pickle
import re
import string
import docx2txt
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
def save_doc_as_txt(doc_loc, new_doc_loc):
text = docx2txt.process(doc_loc)
    with open(new_doc_loc, 'w') as text_file:  # text mode: print() writes str, not bytes
print(text, file=text_file)
def read_doc(doc_loc):
with open(doc_loc, 'rb') as text_file:
return text_file.read()
def save_pkl_etownqa():
    doc_list = [line for line in str(read_doc('data/EtownDocData.txt'), 'ISO-8859-1').split('\n') if line]
combined_doc_list = [doc_list[i:i + 14] for i in range(0, len(doc_list), 14)]
final_doc_list = []
for group in range(len(combined_doc_list)):
final_doc_list.append('')
for element in combined_doc_list[group]:
final_doc_list[group] += element
with open('data/EtownQAData.pkl', 'wb') as f:
pickle.dump(final_doc_list, f)
def clean_text(text):
'''
Make text lowercase,
remove text in square brackets,
remove punctuation and
remove words containing numbers.
'''
text = text.lower()
    text = re.sub(r'\[.*?\]', '', text)
    text = re.sub('[%s]' % re.escape(string.punctuation), '', text)
    text = re.sub(r'\w*\d\w*', '', text)
    text = re.sub('[’‘’“”…]', '', text)
    text = re.sub('\n', '', text)
return text
def remove_stop_words(text):
nltk.download('stopwords')
nltk.download('punkt')
stop_words = list(stopwords.words('english'))
# Add custom stop words to be eliminated
stop_words.append('elizabethtown')
stop_words.append('college')
stop_words.append('etown')
words = word_tokenize(text)
text = ' '.join(str(j) for j in words if j not in stop_words and (len(j) != 1))
return text
def encode_response(category):
if isinstance(category, str) or category == -1:
return 'Sorry, I did not understand that. Could you rephrase your question?'
responses = ['Learn more about the history of Elizabethtown College at https://en.wikipedia.org/wiki/Elizabethtown_College',
'Check out student life at https://www.etown.edu/about',
'Find the location of Elizabethtown College at https://en.wikipedia.org/wiki/Elizabethtown_College',
'Check out the sports page at https://etownbluejays.com/',
'Tuition costs can be found at https://www.etown.edu/admissions/tuition-cost.aspx',
'Undergrad enrollment of 1,688 and the school lies on 203 acres',
'There is no greek life at Etown',
'A list of majors and minors can be found at https://www.etown.edu/academics/majors-minors.aspx',
'Classroom stats and info can be found at https://www.usnews.com/best-colleges/elizabethtown-college-3262',
'Professors and their information is located at https://www.etown.edu/directory',
'Important dates for Elizabethtown College can be found at https://www.etown.edu/offices/registration-records/academic-calendar-2022-23.aspx',
'Unique things about Elizabethtown is located at https://www.etown.edu/#:~:text=Why%20Etown%3F,the%20world%20needs%20more%20of.',
'Fun things to around Elizabethtown College can be found at https://www.tripadvisor.com/AttractionsNear-g52581-d5789493-Elizabethtown_College-Elizabethtown_Lancaster_County_Pennsylvania.html',
'More information on this can be found at https://www.usnews.com/best-colleges/elizabethtown-college-3262',
'Group work can be found at https://www.etown.edu/campus-life/student-clubs.aspx',
'Requirements by admissions can be looked at here: https://www.prepscholar.com/sat/s/colleges/Elizabethtown-College-admission-requirements#:~:text=Average%20GPA%3A%203.5&text=With%20a%20GPA%20of%203.5,like%20AP%20or%20IB%20classes.',
'Studying abroad information can be located at https://www.etown.edu/offices/study-abroad',
'Student life information can be found at https://www.etown.edu/campus-life/student-clubs.aspx',
'Dining services can be found at https://www.etown.edu/offices/dining/index.aspx',
'Information on living at Elizabethtown College can be found at https://www.etown.edu/offices/community-living/halls-apts/index.aspx',
'Wireless access at Elizabethtown College can be found at https://www.etown.edu/offices/its/Wireless_Access.aspx',
'Job rates after graduation can be found at https://www.etown.edu/admissions/outcomes.aspx#:~:text=96%25,within%20one%20year%20of%20graduation.',
'Commencement at Etown can be found at https://www.etown.edu/commencement',
'Accredidations at Elizabethtown College can be found at https://www.etown.edu/offices/institutional-research/accreditations.aspx',
'Etown has strong campus security, more info can be found at https://www.etown.edu/offices/security/index.aspx',
'Elizabethtown College statistics can be found at https://www.usnews.com/best-colleges/elizabethtown-college-3262#:~:text=Elizabethtown%20College%20has%20a%20total,of%20students%20live%20off%20campus.',
'Elizabethtown College clubs can be found at https://www.etown.edu/campus-life/student-clubs.aspx',
'The alumni association can be found at https://www.etownalumni.com/s/154/bp/home.aspx ']
return responses[category]
def clean_speech(speech):
to_replace = ['eternal', 'attendee down', 'always have a town college',
'he town', 'a town', 'each town', 'eat out',
'always a bit on college', 'eat em',
'always return college', 'eaten', 'eat home', 'town']
for phrase in to_replace:
speech = speech.replace(phrase, 'Etown')
return speech
def is_junk(speech):
junk_list = ['huh']
for phrase in junk_list:
if speech == phrase:
return True
return False
|
[
"pickle.dump",
"nltk.download",
"re.escape",
"nltk.corpus.stopwords.words",
"docx2txt.process",
"re.sub",
"nltk.tokenize.word_tokenize"
] |
[((197, 222), 'docx2txt.process', 'docx2txt.process', (['doc_loc'], {}), '(doc_loc)\n', (213, 222), False, 'import docx2txt\n'), ((1135, 1164), 're.sub', 're.sub', (['"""\\\\[.*?\\\\]"""', '""""""', 'text'], {}), "('\\\\[.*?\\\\]', '', text)\n", (1141, 1164), False, 'import re\n'), ((1242, 1273), 're.sub', 're.sub', (['"""\\\\w*\\\\d\\\\w*"""', '""""""', 'text'], {}), "('\\\\w*\\\\d\\\\w*', '', text)\n", (1248, 1273), False, 'import re\n'), ((1282, 1310), 're.sub', 're.sub', (['"""[’‘’“”…]"""', '""""""', 'text'], {}), "('[’‘’“”…]', '', text)\n", (1288, 1310), False, 'import re\n'), ((1322, 1344), 're.sub', 're.sub', (['"""\n"""', '""""""', 'text'], {}), "('\\n', '', text)\n", (1328, 1344), False, 'import re\n'), ((1396, 1422), 'nltk.download', 'nltk.download', (['"""stopwords"""'], {}), "('stopwords')\n", (1409, 1422), False, 'import nltk\n'), ((1427, 1449), 'nltk.download', 'nltk.download', (['"""punkt"""'], {}), "('punkt')\n", (1440, 1449), False, 'import nltk\n'), ((1663, 1682), 'nltk.tokenize.word_tokenize', 'word_tokenize', (['text'], {}), '(text)\n', (1676, 1682), False, 'from nltk.tokenize import word_tokenize\n'), ((904, 934), 'pickle.dump', 'pickle.dump', (['final_doc_list', 'f'], {}), '(final_doc_list, f)\n', (915, 934), False, 'import pickle\n'), ((1473, 1499), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (1488, 1499), False, 'from nltk.corpus import stopwords\n'), ((1190, 1219), 're.escape', 're.escape', (['string.punctuation'], {}), '(string.punctuation)\n', (1199, 1219), False, 'import re\n')]
|
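A hedged end-to-end run of the cleaning pipeline above: clean_text lowercases and strips punctuation and digit-bearing tokens, then remove_stop_words drops NLTK stop words plus the custom campus terms:

raw = 'What clubs does Elizabethtown College have in 2023?'
cleaned = remove_stop_words(clean_text(raw))
print(cleaned)  # expected to leave little beyond 'clubs'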
#!/usr/bin/env python
from motor.motor_asyncio import AsyncIOMotorClient
from src.config import CONFIG
from src.utils.tools import singleton
@singleton
class MotorBase:
"""
    Reworked MongoDB connector: a singleton that supports multiple databases
About motor's doc: https://github.com/mongodb/motor
"""
_db = {}
_collection = {}
MONGODB = CONFIG.MONGODB
def __init__(self):
self.motor_uri = ''
def client(self, db):
# motor
self.motor_uri = 'mongodb://{account}{host}:{port}/{database}'.format(
account='{username}:{password}@'.format(
username=self.MONGODB['MONGO_USERNAME'],
password=self.MONGODB['MONGO_PASSWORD']) if self.MONGODB['MONGO_USERNAME'] else '',
host=self.MONGODB['MONGO_HOST'] if self.MONGODB['MONGO_HOST'] else 'localhost',
port=self.MONGODB['MONGO_PORT'] if self.MONGODB['MONGO_PORT'] else 27017,
database=db)
return AsyncIOMotorClient(self.motor_uri)
def get_db(self, db=MONGODB['DATABASE']):
"""
Get a db instance
:param db: database name
:return: the motor db instance
"""
if db not in self._db:
self._db[db] = self.client(db)[db]
return self._db[db]
def get_collection(self, db_name, collection):
"""
Get a collection instance
:param db_name: database name
:param collection: collection name
:return: the motor collection instance
"""
collection_key = db_name + collection
if collection_key not in self._collection:
self._collection[collection_key] = self.get_db(db_name)[collection]
return self._collection[collection_key]
class MotorBaseOld:
"""
use motor to connect mongodb
2017-09-21 deleted
"""
_db = None
MONGODB = CONFIG.MONGODB
def client(self, db):
# motor
self.motor_uri = 'mongodb://{account}{host}:{port}/{database}'.format(
account='{username}:{password}@'.format(
username=self.MONGODB['MONGO_USERNAME'],
password=self.MONGODB['MONGO_PASSWORD']) if self.MONGODB['MONGO_USERNAME'] else '',
host=self.MONGODB['MONGO_HOST'] if self.MONGODB['MONGO_HOST'] else 'localhost',
port=self.MONGODB['MONGO_PORT'] if self.MONGODB['MONGO_PORT'] else 27017,
database=db)
return AsyncIOMotorClient(self.motor_uri)
@property
def db(self):
if self._db is None:
self._db = self.client(self.MONGODB['DATABASE'])[self.MONGODB['DATABASE']]
return self._db
|
[
"motor.motor_asyncio.AsyncIOMotorClient"
] |
[((940, 974), 'motor.motor_asyncio.AsyncIOMotorClient', 'AsyncIOMotorClient', (['self.motor_uri'], {}), '(self.motor_uri)\n', (958, 974), False, 'from motor.motor_asyncio import AsyncIOMotorClient\n'), ((2403, 2437), 'motor.motor_asyncio.AsyncIOMotorClient', 'AsyncIOMotorClient', (['self.motor_uri'], {}), '(self.motor_uri)\n', (2421, 2437), False, 'from motor.motor_asyncio import AsyncIOMotorClient\n')]
|
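A minimal async usage sketch for the singleton above. insert_one and find_one are standard Motor coroutines; the database and collection names are placeholders:

import asyncio

async def main():
    # Repeated MotorBase() calls return the same instance, reusing cached clients
    collection = MotorBase().get_collection('test_db', 'items')
    await collection.insert_one({'name': 'demo'})
    print(await collection.find_one({'name': 'demo'}))

asyncio.get_event_loop().run_until_complete(main())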
import tulip
import viol
from demolib import set_title, make_widget, make_widget_synchronous
set_title('Bar runs code in another thread')
@viol.connect('#go click')
def start(client, data):
coroutines = [foo(client), bar(client)]
for future in tulip.as_completed(coroutines):
result = yield from future
client.log(result)
def foo(client):
client.log('Starting foo.')
for i in range(8):
widget = yield from make_widget()
client.append_widget('foo', widget)
return 'foo is done!'
def bar(client):
client.log('Starting bar work in another thread.')
loop = tulip.get_event_loop()
widgets = yield from loop.run_in_executor(None, bar_synchronous)
for widget in widgets:
client.append_widget('bar', widget)
return 'bar is done!'
def bar_synchronous():
widgets = []
for i in range(12):
widget = make_widget_synchronous()
widgets.append(widget)
return widgets
if __name__ == '__main__':
viol.run()
|
[
"demolib.make_widget_synchronous",
"demolib.set_title",
"viol.run",
"demolib.make_widget",
"tulip.as_completed",
"viol.connect",
"tulip.get_event_loop"
] |
[((94, 138), 'demolib.set_title', 'set_title', (['"""Bar runs code in another thread"""'], {}), "('Bar runs code in another thread')\n", (103, 138), False, 'from demolib import set_title, make_widget, make_widget_synchronous\n'), ((141, 166), 'viol.connect', 'viol.connect', (['"""#go click"""'], {}), "('#go click')\n", (153, 166), False, 'import viol\n'), ((254, 284), 'tulip.as_completed', 'tulip.as_completed', (['coroutines'], {}), '(coroutines)\n', (272, 284), False, 'import tulip\n'), ((617, 639), 'tulip.get_event_loop', 'tulip.get_event_loop', ([], {}), '()\n', (637, 639), False, 'import tulip\n'), ((996, 1006), 'viol.run', 'viol.run', ([], {}), '()\n', (1004, 1006), False, 'import viol\n'), ((888, 913), 'demolib.make_widget_synchronous', 'make_widget_synchronous', ([], {}), '()\n', (911, 913), False, 'from demolib import set_title, make_widget, make_widget_synchronous\n'), ((449, 462), 'demolib.make_widget', 'make_widget', ([], {}), '()\n', (460, 462), False, 'from demolib import set_title, make_widget, make_widget_synchronous\n')]
|
import os
from random import random, sample
import numpy as np
from PIL import Image, ImageDraw
from skimage.segmentation import felzenszwalb
from skimage.morphology import skeletonize, remove_small_objects
from skimage.util import invert
from tqdm import tqdm
import cv2
def cv2pil(cv2_img):
if len(cv2_img.shape) == 2 or cv2_img.shape[2]==1:
cv2_img = cv2.cvtColor(cv2_img, cv2.COLOR_GRAY2RGB)
else:
cv2_img = cv2.cvtColor(cv2_img, cv2.COLOR_BGR2RGB)
pil_img = Image.fromarray(cv2_img.astype('uint8'))
return pil_img
def pil2cv(pil_img):
pil_img = pil_img.convert('RGB')
cv2_img = np.array(pil_img)
cv2_img = cv2.cvtColor(cv2_img, cv2.COLOR_RGB2BGR)
cv2_img = cv2_img[:, :, ::-1].copy()
return cv2_img
def posterize(im, n):
indices = np.arange(0,256) # List of all colors
divider = np.linspace(0,255,n+1)[1] # we get a divider
quantiz = np.int0(np.linspace(0,255,n)) # we get quantization colors
color_levels = np.clip(np.int0(indices/divider),0,n-1) # color levels 0,1,2..
palette = quantiz[color_levels] # Creating the palette
im2 = palette[im] # Applying palette on image
im2 = cv2.convertScaleAbs(im2) # Converting image back to uint8
return im2
def canny(im1):
im1 = pil2cv(im1)
im2 = cv2.GaussianBlur(im1, (5, 5), 0)
im2 = cv2.Canny(im2, 100, 150)
im2 = cv2.cvtColor(im2, cv2.COLOR_GRAY2RGB)
im2 = cv2pil(im2)
return im2
def image2colorlabels(img, colors):
h, w = img.height, img.width
pixels = np.array(list(img.getdata()))
dists = np.array([np.sum(np.abs(pixels-c), axis=1) for c in colors])
    classes = np.argmin(dists, axis=0)
    return classes
def colorize_labels(img, colors):
h, w = img.height, img.width
classes = image2colorlabels(img, colors)
    # render the per-pixel class indices as a single-channel label image
    img = Image.fromarray(np.uint8(classes.reshape((h, w))))
return img
def quantize_colors(img, colors):
h, w = img.height, img.width
classes = image2colorlabels(img, colors)
pixels_clr = np.array([colors[p] for p in classes]).reshape((h, w, 3))
img = Image.fromarray(np.uint8(pixels_clr))
return img
def segment(img):
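    # Felzenszwalb superpixel segmentation; each segment is then flattened
    # to its per-channel median color.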
img = pil2cv(img)
h, w = img.shape[0:2]
img = cv2.bilateralFilter(img, 9, 100, 100)
scale = int(h * w / 1000)
segments = felzenszwalb(img, scale=scale, sigma=0.5, min_size=150)
out_image = np.zeros((h, w, 3))
num_segments = len(np.unique(segments))
for s in tqdm(range(num_segments)):
label_map = segments==s
label_map3 = np.dstack([label_map] * 3)
masked_img = np.multiply(label_map3, img)
#avg_color = np.sum(np.sum(masked_img, axis=0), axis=0) / np.count_nonzero(label_map) # maybe median is better
nonzeros = [ masked_img[:, :, c].reshape((h * w)) for c in range(3) ]
median_color = [ np.median(np.take(nonzeros[c], nonzeros[c].nonzero())) for c in range(3) ]
smooth_segment = (label_map3 * median_color).astype('uint8')
out_image += smooth_segment
out_image = Image.fromarray(out_image.astype('uint8'))
return out_image
def trace(img):
img = pil2cv(img)
im2 = cv2.GaussianBlur(img, (5, 5), 0)
im3 = cv2.cvtColor(im2, cv2.COLOR_RGB2GRAY)
ret, im4 = cv2.threshold(im3, 127, 255, 0)
ret, img = cv2.threshold(im3, 255, 255, 0)
im5, contours, hierarchy = cv2.findContours(im4, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours = [ c for c in contours if cv2.arcLength(c, True) > 8 ] #and cv2.contourArea(c) > 10]
for contour in contours:
cv2.drawContours(img, [contour], 0, (255), 2)
img = cv2pil(img)
return img
def simplify(img, hed_model_path):
import hed_processing
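    # HED edge detection, then threshold, drop blobs below ~0.1% of the
    # image area, and skeletonize to one-pixel-wide strokes.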
w, h = img.width, img.height
size_thresh = 0.001 * w * h
img = pil2cv(img)
img = cv2.GaussianBlur(img, (3, 3), 0)
img = cv2.GaussianBlur(img, (3, 3), 0)
img = hed_processing.run_hed(cv2pil(img), hed_model_path)
ret, img = cv2.threshold(pil2cv(img), 50, 255, 0)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = remove_small_objects(img.astype('bool'), size_thresh)
img = 255 * skeletonize(img).astype('uint8')
img = cv2pil(img)
return img
def upsample(img, w2, h2):
h1, w1 = img.height, img.width
r = max(float(w2)/w1, float(h2)/h1)
img = img.resize((int(r*w1), int(r*h1)), resample=Image.BICUBIC)
return img
def crop_rot_resize(img, frac, w2, h2, ang, stretch, centered):
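    # Augmentation helper: optionally stretch and rotate, then take a crop of
    # relative size frac (random position unless centered) and resize it to
    # (w2, h2), upsampling first so the crop always fits.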
if w2 is None:
w2 = img.width
if h2 is None:
h2 = img.height
if img.height < h2 or img.width < w2:
img = upsample(img, w2, h2)
if stretch != 0:
v = random() < 0.5
h = 1.0 if not v else (1.0 + stretch)
w = 1.0 if v else (1.0 + stretch)
img = img.resize((int(img.width * w), int(img.height * h)), resample=Image.BICUBIC)
if ang > 0:
img = img.rotate(ang, resample=Image.BICUBIC, expand=False)
ar = float(w2 / h2)
h1, w1 = img.height, img.width
if float(w1) / h1 > ar:
h1_crop = max(h2, h1 * frac)
w1_crop = h1_crop * ar
else:
w1_crop = max(w2, w1 * frac)
h1_crop = w1_crop / ar
xr, yr = (0.5, 0.5) if centered else (random(), random())
x_crop, y_crop = (w1 - w1_crop - 1) * xr, (h1 - h1_crop - 1) * yr
h1_crop, w1_crop, y_crop, x_crop = int(h1_crop), int(w1_crop), int(y_crop), int(x_crop)
img_crop = img.crop((x_crop, y_crop, x_crop+w1_crop, y_crop+h1_crop))
img_resize = img_crop.resize((w2, h2), resample=Image.BICUBIC)
return img_resize
|
[
"cv2.GaussianBlur",
"numpy.abs",
"cv2.arcLength",
"numpy.argmin",
"cv2.bilateralFilter",
"numpy.arange",
"numpy.unique",
"numpy.multiply",
"cv2.cvtColor",
"skimage.morphology.skeletonize",
"cv2.convertScaleAbs",
"numpy.linspace",
"cv2.drawContours",
"numpy.dstack",
"cv2.Canny",
"numpy.uint8",
"numpy.int0",
"random.random",
"cv2.threshold",
"numpy.zeros",
"numpy.array",
"skimage.segmentation.felzenszwalb",
"cv2.findContours"
] |
[((628, 645), 'numpy.array', 'np.array', (['pil_img'], {}), '(pil_img)\n', (636, 645), True, 'import numpy as np\n'), ((661, 701), 'cv2.cvtColor', 'cv2.cvtColor', (['cv2_img', 'cv2.COLOR_RGB2BGR'], {}), '(cv2_img, cv2.COLOR_RGB2BGR)\n', (673, 701), False, 'import cv2\n'), ((808, 825), 'numpy.arange', 'np.arange', (['(0)', '(256)'], {}), '(0, 256)\n', (817, 825), True, 'import numpy as np\n'), ((1183, 1207), 'cv2.convertScaleAbs', 'cv2.convertScaleAbs', (['im2'], {}), '(im2)\n', (1202, 1207), False, 'import cv2\n'), ((1306, 1338), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['im1', '(5, 5)', '(0)'], {}), '(im1, (5, 5), 0)\n', (1322, 1338), False, 'import cv2\n'), ((1349, 1373), 'cv2.Canny', 'cv2.Canny', (['im2', '(100)', '(150)'], {}), '(im2, 100, 150)\n', (1358, 1373), False, 'import cv2\n'), ((1384, 1421), 'cv2.cvtColor', 'cv2.cvtColor', (['im2', 'cv2.COLOR_GRAY2RGB'], {}), '(im2, cv2.COLOR_GRAY2RGB)\n', (1396, 1421), False, 'import cv2\n'), ((1660, 1684), 'numpy.argmin', 'np.argmin', (['dists'], {'axis': '(0)'}), '(dists, axis=0)\n', (1669, 1684), True, 'import numpy as np\n'), ((2216, 2253), 'cv2.bilateralFilter', 'cv2.bilateralFilter', (['img', '(9)', '(100)', '(100)'], {}), '(img, 9, 100, 100)\n', (2235, 2253), False, 'import cv2\n'), ((2299, 2354), 'skimage.segmentation.felzenszwalb', 'felzenszwalb', (['img'], {'scale': 'scale', 'sigma': '(0.5)', 'min_size': '(150)'}), '(img, scale=scale, sigma=0.5, min_size=150)\n', (2311, 2354), False, 'from skimage.segmentation import felzenszwalb\n'), ((2371, 2390), 'numpy.zeros', 'np.zeros', (['(h, w, 3)'], {}), '((h, w, 3))\n', (2379, 2390), True, 'import numpy as np\n'), ((3138, 3170), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['img', '(5, 5)', '(0)'], {}), '(img, (5, 5), 0)\n', (3154, 3170), False, 'import cv2\n'), ((3181, 3218), 'cv2.cvtColor', 'cv2.cvtColor', (['im2', 'cv2.COLOR_RGB2GRAY'], {}), '(im2, cv2.COLOR_RGB2GRAY)\n', (3193, 3218), False, 'import cv2\n'), ((3234, 3265), 'cv2.threshold', 'cv2.threshold', (['im3', '(127)', '(255)', '(0)'], {}), '(im3, 127, 255, 0)\n', (3247, 3265), False, 'import cv2\n'), ((3281, 3312), 'cv2.threshold', 'cv2.threshold', (['im3', '(255)', '(255)', '(0)'], {}), '(im3, 255, 255, 0)\n', (3294, 3312), False, 'import cv2\n'), ((3344, 3405), 'cv2.findContours', 'cv2.findContours', (['im4', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(im4, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (3360, 3405), False, 'import cv2\n'), ((3785, 3817), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['img', '(3, 3)', '(0)'], {}), '(img, (3, 3), 0)\n', (3801, 3817), False, 'import cv2\n'), ((3828, 3860), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['img', '(3, 3)', '(0)'], {}), '(img, (3, 3), 0)\n', (3844, 3860), False, 'import cv2\n'), ((3987, 4024), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (3999, 4024), False, 'import cv2\n'), ((368, 409), 'cv2.cvtColor', 'cv2.cvtColor', (['cv2_img', 'cv2.COLOR_GRAY2RGB'], {}), '(cv2_img, cv2.COLOR_GRAY2RGB)\n', (380, 409), False, 'import cv2\n'), ((438, 478), 'cv2.cvtColor', 'cv2.cvtColor', (['cv2_img', 'cv2.COLOR_BGR2RGB'], {}), '(cv2_img, cv2.COLOR_BGR2RGB)\n', (450, 478), False, 'import cv2\n'), ((863, 889), 'numpy.linspace', 'np.linspace', (['(0)', '(255)', '(n + 1)'], {}), '(0, 255, n + 1)\n', (874, 889), True, 'import numpy as np\n'), ((930, 952), 'numpy.linspace', 'np.linspace', (['(0)', '(255)', 'n'], {}), '(0, 255, n)\n', (941, 952), True, 'import numpy as np\n'), ((1008, 1034), 'numpy.int0', 'np.int0', (['(indices / divider)'], {}), '(indices / divider)\n', (1015, 1034), True, 'import numpy as np\n'), ((2101, 2121), 'numpy.uint8', 'np.uint8', (['pixels_clr'], {}), '(pixels_clr)\n', (2109, 2121), True, 'import numpy as np\n'), ((2414, 2433), 'numpy.unique', 'np.unique', (['segments'], {}), '(segments)\n', (2423, 2433), True, 'import numpy as np\n'), ((2528, 2554), 'numpy.dstack', 'np.dstack', (['([label_map] * 3)'], {}), '([label_map] * 3)\n', (2537, 2554), True, 'import numpy as np\n'), ((2576, 2604), 'numpy.multiply', 'np.multiply', (['label_map3', 'img'], {}), '(label_map3, img)\n', (2587, 2604), True, 'import numpy as np\n'), ((3542, 3585), 'cv2.drawContours', 'cv2.drawContours', (['img', '[contour]', '(0)', '(255)', '(2)'], {}), '(img, [contour], 0, 255, 2)\n', (3558, 3585), False, 'import cv2\n'), ((2017, 2055), 'numpy.array', 'np.array', (['[colors[p] for p in classes]'], {}), '([colors[p] for p in classes])\n', (2025, 2055), True, 'import numpy as np\n'), ((4648, 4656), 'random.random', 'random', ([], {}), '()\n', (4654, 4656), False, 'from random import random, sample\n'), ((5217, 5225), 'random.random', 'random', ([], {}), '()\n', (5223, 5225), False, 'from random import random, sample\n'), ((5227, 5235), 'random.random', 'random', ([], {}), '()\n', (5233, 5235), False, 'from random import random, sample\n'), ((1602, 1620), 'numpy.abs', 'np.abs', (['(pixels - c)'], {}), '(pixels - c)\n', (1608, 1620), True, 'import numpy as np\n'), ((3446, 3468), 'cv2.arcLength', 'cv2.arcLength', (['c', '(True)'], {}), '(c, True)\n', (3459, 3468), False, 'import cv2\n'), ((4105, 4121), 'skimage.morphology.skeletonize', 'skeletonize', (['img'], {}), '(img)\n', (4116, 4121), False, 'from skimage.morphology import skeletonize, remove_small_objects\n')]
|
import logging
import os
from posixpath import join as posix_join
from urllib.parse import urljoin
from feedgenerator import Atom1Feed, Rss201rev2Feed, get_tag_uri
from markupsafe import Markup
from pelican.paginator import Paginator
from pelican.plugins import signals
from pelican.utils import (get_relative_path, is_selected_for_writing,
path_to_url, sanitised_join, set_date_tzinfo)
logger = logging.getLogger(__name__)
class Writer:
def __init__(self, output_path, settings=None):
self.output_path = output_path
self.reminder = dict()
self.settings = settings or {}
self._written_files = set()
self._overridden_files = set()
# See Content._link_replacer for details
if "RELATIVE_URLS" in self.settings and self.settings['RELATIVE_URLS']:
self.urljoiner = posix_join
else:
self.urljoiner = lambda base, url: urljoin(
base if base.endswith('/') else base + '/', url)
def _create_new_feed(self, feed_type, feed_title, context):
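        """Instantiate an Atom or RSS feed object with site-level metadata."""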
feed_class = Rss201rev2Feed if feed_type == 'rss' else Atom1Feed
if feed_title:
feed_title = context['SITENAME'] + ' - ' + feed_title
else:
feed_title = context['SITENAME']
feed = feed_class(
title=Markup(feed_title).striptags(),
link=(self.site_url + '/'),
feed_url=self.feed_url,
description=context.get('SITESUBTITLE', ''),
subtitle=context.get('SITESUBTITLE', None))
return feed
def _add_item_to_the_feed(self, feed, item):
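        """Convert a content item into a feed entry and add it to the feed."""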
title = Markup(item.title).striptags()
link = self.urljoiner(self.site_url, item.url)
if isinstance(feed, Rss201rev2Feed):
# RSS feeds use a single tag called 'description' for both the full
# content and the summary
content = None
if self.settings.get('RSS_FEED_SUMMARY_ONLY'):
description = item.summary
else:
description = item.get_content(self.site_url)
else:
# Atom feeds have two different tags for full content (called
# 'content' by feedgenerator) and summary (called 'description' by
# feedgenerator).
#
# It does not make sense to have the summary be the
# exact same thing as the full content. If we detect that
# they are we just remove the summary.
content = item.get_content(self.site_url)
description = item.summary
if description == content:
description = None
categories = list()
if hasattr(item, 'category'):
categories.append(item.category)
if hasattr(item, 'tags'):
categories.extend(item.tags)
feed.add_item(
title=title,
link=link,
unique_id=get_tag_uri(link, item.date),
description=description,
content=content,
categories=categories if categories else None,
author_name=getattr(item, 'author', ''),
pubdate=set_date_tzinfo(
item.date, self.settings.get('TIMEZONE', None)),
updateddate=set_date_tzinfo(
item.modified, self.settings.get('TIMEZONE', None)
) if hasattr(item, 'modified') else None)
def _open_w(self, filename, encoding, override=False):
"""Open a file to write some content to it.
Exit if we have already written to that file, unless one (and no more
than one) of the writes has the override parameter set to True.
"""
if filename in self._overridden_files:
if override:
raise RuntimeError('File %s is set to be overridden twice'
% filename)
else:
logger.info('Skipping %s', filename)
filename = os.devnull
elif filename in self._written_files:
if override:
logger.info('Overwriting %s', filename)
else:
raise RuntimeError('File %s is to be overwritten' % filename)
if override:
self._overridden_files.add(filename)
self._written_files.add(filename)
return open(filename, 'w', encoding=encoding)
def write_feed(self, elements, context, path=None, url=None,
feed_type='atom', override_output=False, feed_title=None):
"""Generate a feed with the list of articles provided
Return the feed. If no path or output_path is specified, just
return the feed object.
:param elements: the articles to put on the feed.
:param context: the context to get the feed metadata.
:param path: the path to output.
:param url: the publicly visible feed URL; if None, path is used
instead
:param feed_type: the feed type to use (atom or rss)
:param override_output: boolean telling if we can override previous
output with the same name (and if next files written with the same
name should be skipped to keep that one)
        :param feed_title: the title of the feed.
"""
if not is_selected_for_writing(self.settings, path):
return
self.site_url = context.get(
'SITEURL', path_to_url(get_relative_path(path)))
self.feed_domain = context.get('FEED_DOMAIN')
self.feed_url = self.urljoiner(self.feed_domain, url if url else path)
feed = self._create_new_feed(feed_type, feed_title, context)
max_items = len(elements)
if self.settings['FEED_MAX_ITEMS']:
max_items = min(self.settings['FEED_MAX_ITEMS'], max_items)
for i in range(max_items):
self._add_item_to_the_feed(feed, elements[i])
signals.feed_generated.send(context, feed=feed)
if path:
complete_path = sanitised_join(self.output_path, path)
try:
os.makedirs(os.path.dirname(complete_path))
except Exception:
pass
with self._open_w(complete_path, 'utf-8', override_output) as fp:
feed.write(fp, 'utf-8')
logger.info('Writing %s', complete_path)
signals.feed_written.send(
complete_path, context=context, feed=feed)
return feed
def write_file(self, name, template, context, relative_urls=False,
paginated=None, template_name=None, override_output=False,
url=None, **kwargs):
"""Render the template and write the file.
:param name: name of the file to output
:param template: template to use to generate the content
:param context: dict to pass to the templates.
:param relative_urls: use relative urls or absolutes ones
:param paginated: dict of article list to paginate - must have the
same length (same list in different orders)
:param template_name: the template name, for pagination
:param override_output: boolean telling if we can override previous
output with the same name (and if next files written with the same
name should be skipped to keep that one)
:param url: url of the file (needed by the paginator)
:param **kwargs: additional variables to pass to the templates
"""
if name is False or \
name == "" or \
not is_selected_for_writing(self.settings,
os.path.join(self.output_path, name)):
return
elif not name:
# other stuff, just return for now
return
def _write_file(template, localcontext, output_path, name, override):
"""Render the template write the file."""
# set localsiteurl for context so that Contents can adjust links
if localcontext['localsiteurl']:
context['localsiteurl'] = localcontext['localsiteurl']
output = template.render(localcontext)
path = sanitised_join(output_path, name)
try:
os.makedirs(os.path.dirname(path))
except Exception:
pass
with self._open_w(path, 'utf-8', override=override) as f:
f.write(output)
logger.info('Writing %s', path)
# Send a signal to say we're writing a file with some specific
# local context.
signals.content_written.send(path, context=localcontext)
def _get_localcontext(context, name, kwargs, relative_urls):
localcontext = context.copy()
localcontext['localsiteurl'] = localcontext.get(
'localsiteurl', None)
if relative_urls:
relative_url = path_to_url(get_relative_path(name))
localcontext['SITEURL'] = relative_url
localcontext['localsiteurl'] = relative_url
localcontext['output_file'] = name
localcontext.update(kwargs)
return localcontext
if paginated is None:
paginated = {key: val for key, val in kwargs.items()
if key in {'articles', 'dates'}}
# pagination
if paginated and template_name in self.settings['PAGINATED_TEMPLATES']:
# pagination needed
per_page = self.settings['PAGINATED_TEMPLATES'][template_name] \
or self.settings['DEFAULT_PAGINATION']
# init paginators
paginators = {key: Paginator(name, url, val, self.settings,
per_page)
for key, val in paginated.items()}
# generated pages, and write
for page_num in range(list(paginators.values())[0].num_pages):
paginated_kwargs = kwargs.copy()
for key in paginators.keys():
paginator = paginators[key]
previous_page = paginator.page(page_num) \
if page_num > 0 else None
page = paginator.page(page_num + 1)
next_page = paginator.page(page_num + 2) \
if page_num + 1 < paginator.num_pages else None
paginated_kwargs.update(
{'%s_paginator' % key: paginator,
'%s_page' % key: page,
'%s_previous_page' % key: previous_page,
'%s_next_page' % key: next_page})
localcontext = _get_localcontext(
context, page.save_as, paginated_kwargs, relative_urls)
_write_file(template, localcontext, self.output_path,
page.save_as, override_output)
else:
# no pagination
localcontext = _get_localcontext(
context, name, kwargs, relative_urls)
_write_file(template, localcontext, self.output_path, name,
override_output)
|
[
"pelican.plugins.signals.feed_generated.send",
"pelican.utils.get_relative_path",
"pelican.plugins.signals.feed_written.send",
"os.path.dirname",
"pelican.paginator.Paginator",
"pelican.utils.sanitised_join",
"markupsafe.Markup",
"feedgenerator.get_tag_uri",
"pelican.utils.is_selected_for_writing",
"os.path.join",
"logging.getLogger",
"pelican.plugins.signals.content_written.send"
] |
[((427, 454), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (444, 454), False, 'import logging\n'), ((5930, 5977), 'pelican.plugins.signals.feed_generated.send', 'signals.feed_generated.send', (['context'], {'feed': 'feed'}), '(context, feed=feed)\n', (5957, 5977), False, 'from pelican.plugins import signals\n'), ((5309, 5353), 'pelican.utils.is_selected_for_writing', 'is_selected_for_writing', (['self.settings', 'path'], {}), '(self.settings, path)\n', (5332, 5353), False, 'from pelican.utils import get_relative_path, is_selected_for_writing, path_to_url, sanitised_join, set_date_tzinfo\n'), ((6023, 6061), 'pelican.utils.sanitised_join', 'sanitised_join', (['self.output_path', 'path'], {}), '(self.output_path, path)\n', (6037, 6061), False, 'from pelican.utils import get_relative_path, is_selected_for_writing, path_to_url, sanitised_join, set_date_tzinfo\n'), ((6380, 6448), 'pelican.plugins.signals.feed_written.send', 'signals.feed_written.send', (['complete_path'], {'context': 'context', 'feed': 'feed'}), '(complete_path, context=context, feed=feed)\n', (6405, 6448), False, 'from pelican.plugins import signals\n'), ((8204, 8237), 'pelican.utils.sanitised_join', 'sanitised_join', (['output_path', 'name'], {}), '(output_path, name)\n', (8218, 8237), False, 'from pelican.utils import get_relative_path, is_selected_for_writing, path_to_url, sanitised_join, set_date_tzinfo\n'), ((8622, 8678), 'pelican.plugins.signals.content_written.send', 'signals.content_written.send', (['path'], {'context': 'localcontext'}), '(path, context=localcontext)\n', (8650, 8678), False, 'from pelican.plugins import signals\n'), ((1651, 1669), 'markupsafe.Markup', 'Markup', (['item.title'], {}), '(item.title)\n', (1657, 1669), False, 'from markupsafe import Markup\n'), ((2955, 2983), 'feedgenerator.get_tag_uri', 'get_tag_uri', (['link', 'item.date'], {}), '(link, item.date)\n', (2966, 2983), False, 'from feedgenerator import Atom1Feed, Rss201rev2Feed, get_tag_uri\n'), ((5447, 5470), 'pelican.utils.get_relative_path', 'get_relative_path', (['path'], {}), '(path)\n', (5464, 5470), False, 'from pelican.utils import get_relative_path, is_selected_for_writing, path_to_url, sanitised_join, set_date_tzinfo\n'), ((9704, 9754), 'pelican.paginator.Paginator', 'Paginator', (['name', 'url', 'val', 'self.settings', 'per_page'], {}), '(name, url, val, self.settings, per_page)\n', (9713, 9754), False, 'from pelican.paginator import Paginator\n'), ((6108, 6138), 'os.path.dirname', 'os.path.dirname', (['complete_path'], {}), '(complete_path)\n', (6123, 6138), False, 'import os\n'), ((7661, 7697), 'os.path.join', 'os.path.join', (['self.output_path', 'name'], {}), '(self.output_path, name)\n', (7673, 7697), False, 'import os\n'), ((8284, 8305), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (8299, 8305), False, 'import os\n'), ((8963, 8986), 'pelican.utils.get_relative_path', 'get_relative_path', (['name'], {}), '(name)\n', (8980, 8986), False, 'from pelican.utils import get_relative_path, is_selected_for_writing, path_to_url, sanitised_join, set_date_tzinfo\n'), ((1344, 1362), 'markupsafe.Markup', 'Markup', (['feed_title'], {}), '(feed_title)\n', (1350, 1362), False, 'from markupsafe import Markup\n')]
|
"""Basic message handler."""
import json
from ...base_handler import BaseHandler, BaseResponder, RequestContext
from ..manager import PresentationManager
from ..messages.presentation_request import PresentationRequest
from ....holder.base import BaseHolder
class PresentationRequestHandler(BaseHandler):
"""Message handler class for presentation requests."""
async def handle(self, context: RequestContext, responder: BaseResponder):
"""
Message handler logic for presentation requests.
Args:
context: request context
responder: responder callback
"""
self._logger.debug(f"PresentationRequestHandler called with context {context}")
assert isinstance(context.message, PresentationRequest)
self._logger.info("Received presentation request: %s", context.message.request)
presentation_manager = PresentationManager(context)
presentation_exchange_record = await presentation_manager.receive_request(
context.message, context.connection_record.connection_id
)
# If auto_respond_presentation_request is set, try to build a presentation
# This will fail and bail out if there isn't exactly one credential returned
# for each requested attribute and predicate. All credential data will be
# revealed.
if context.settings.get("auto_respond_presentation_request"):
holder: BaseHolder = await context.inject(BaseHolder)
credentials_for_presentation = {
"self_attested_attributes": {},
"requested_attributes": {},
"requested_predicates": {},
}
presentation_request = json.loads(context.message.request)
for referent in presentation_request["requested_attributes"]:
                credentials = await holder.get_credentials_for_presentation_request_by_referent(
                    presentation_request, referent, 0, 2, {}
                )
if len(credentials) != 1:
self._logger.warning(
f"Could not automatically construct presentation for"
+ f" presentation request {presentation_request['name']}"
+ f":{presentation_request['version']} because referent "
+ f"{referent} did not produce exactly one credential result."
+ f" {len(credentials)} credentials were returned from the "
+ f"wallet."
)
return
credentials_for_presentation["requested_attributes"][referent] = {
"cred_id": credentials[0]["cred_info"]["referent"],
"revealed": True,
}
for referent in presentation_request["requested_predicates"]:
                credentials = await holder.get_credentials_for_presentation_request_by_referent(
                    presentation_request, referent, 0, 2, {}
                )
if len(credentials) != 1:
self._logger.warning(
f"Could not automatically construct presentation for"
+ f" presentation request {presentation_request['name']}"
+ f":{presentation_request['version']} because referent "
+ f"{referent} did not produce exactly one credential result."
+ f" {len(credentials)} credentials were returned from the "
+ f"wallet."
)
return
credentials_for_presentation["requested_predicates"][referent] = {
"cred_id": credentials[0]["cred_info"]["referent"],
"revealed": True,
}
(
presentation_exchange_record,
presentation_message,
) = await presentation_manager.create_presentation(
presentation_exchange_record, credentials_for_presentation
)
await responder.send_reply(presentation_message)
|
[
"json.loads"
] |
[((1728, 1763), 'json.loads', 'json.loads', (['context.message.request'], {}), '(context.message.request)\n', (1738, 1763), False, 'import json\n')]
|
import glob
import numpy as np
def _word_to_bool(word):
"""convert a string to boolean according the first 2 characters."""
_accepted_bool_prefixes = ("T", ".T")
return word.upper().startswith(_accepted_bool_prefixes)
class Photons:
pass
class Wavelengths:
pass
class Physics:
pass
class Dust:
    def __init__(self):
        self.component = []
class DustComponent:
pass
class Grid:
pass
class Map:
pass
class Zone:
    def __init__(self):
        self.dust = []
class Mol:
    def __init__(self):
        self.molecule = []
class Molecule:
pass
class Star:
pass
class Simu:
version = float()
pass
class Params:
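    """In-memory representation of an MCFOST parameter file (format >= 3.0)."""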
simu = Simu()
phot = Photons()
wavelengths = Wavelengths()
map = Map()
grid = Grid()
zones = []
mol = Mol()
stars = []
_minimum_version = 3.0
    def __init__(self, filename=None, **kwargs):
        self.filename = filename
        self.zones = []  # per-instance lists; class-level defaults would be
        self.stars = []  # shared across Params instances
        self._read(**kwargs)
def _read(self):
with open(self.filename, mode="rt") as file:
f = []
# Reading file and removing comments
for line in file:
# Skipping comments and empty lines
if (not line.startswith("#")) and (len(line.strip()) > 0):
f += [line]
f = iter(f)
# -- Version of the parameter file --
line = next(f).split()
self.simu.version = float(line[0])
if self.simu.version < self._minimum_version - 1e-3:
print("Parameter file version is ", self.simu.version)
raise Exception(
'Parameter file version must be at least {ver:.2f}'.format(
ver=self._minimum_version
)
)
# -- Number of photon packages --
# to support float notations (e.g. "1.28e8" or "64000.0"),
# we read as float but convert to int
line = next(f).split()
self.phot.nphot_T = int(float(line[0]))
line = next(f).split()
self.phot.nphot_SED = int(float(line[0]))
line = next(f).split()
self.phot.nphot_image = int(float(line[0]))
# -- Wavelengths --
line = next(f).split()
self.wavelengths.n_wl = int(line[0])
self.wavelengths.wl_min = float(line[1])
self.wavelengths.wl_max = float(line[2])
line = next(f).split()
self.simu.compute_T = _word_to_bool(line[0])
self.simu.compute_SED = _word_to_bool(line[1])
self.simu.use_default_wl = _word_to_bool(line[2])
line = next(f).split()
self.wavelengths.file = line[0]
line = next(f).split()
self.simu.separate_contrib = _word_to_bool(line[0])
self.simu.separate_pola = _word_to_bool(line[1])
# -- Grid --
line = next(f).split()
self.grid.type = int(line[0])
line = next(f).split()
self.grid.n_rad = int(line[0])
self.grid.nz = int(line[1])
self.grid.n_az = int(line[2])
self.grid.n_rad_in = int(line[3])
# -- Maps --
line = next(f).split()
self.map.nx = int(line[0])
self.map.ny = int(line[1])
self.map.size = float(line[2])
line = next(f).split()
self.map.RT_imin = float(line[0])
self.map.RT_imax = float(line[1])
self.map.RT_ntheta = int(line[2])
self.map.lRT_centered = _word_to_bool(line[3])
line = next(f).split()
self.map.RT_az_min = float(line[0])
self.map.RT_az_max = float(line[1])
self.map.RT_n_az = int(line[2])
line = next(f).split()
self.map.distance = float(line[0])
line = next(f).split()
self.map.PA = float(line[0])
# -- Scattering method --
line = next(f).split()
self.simu.scattering_method = int(line[0])
line = next(f).split()
self.simu.phase_function_method = int(line[0])
# -- Symetries --
line = next(f).split()
self.simu.image_symmetry = _word_to_bool(line[0])
line = next(f).split()
self.simu.central_symmetry = _word_to_bool(line[0])
line = next(f).split()
self.simu.axial_symmetry = _word_to_bool(line[0])
# -- Disk physics --
line = next(f).split()
self.simu.dust_settling_type = int(line[0])
self.simu.dust_settling_exp = float(line[1])
self.simu.a_settling = float(line[2])
line = next(f).split()
self.simu.radial_migration = _word_to_bool(line[0])
line = next(f).split()
self.simu.dust_sublimation = _word_to_bool(line[0])
line = next(f).split()
self.simu.hydrostatic_eq = _word_to_bool(line[0])
line = next(f).split()
self.simu.viscous_heating = _word_to_bool(line[0])
self.simu.viscosity = float(line[1])
# -- Number of zones --
line = next(f).split()
n_zones = int(line[0])
self.simu.n_zones = n_zones
# -- Density structure --
        # one distinct Zone per iteration; a single shared instance would
        # make every zone alias the same object
        for k in range(n_zones):
            self.zones.append(Zone())
line = next(f).split()
self.zones[k].geometry = int(line[0])
line = next(f).split()
self.zones[k].dust_mass = float(line[0])
self.zones[k].gas_to_dust_ratio = float(line[1])
line = next(f).split()
self.zones[k].h0 = float(line[0])
self.zones[k].Rref = float(line[1])
self.zones[k].vertical_exp = float(line[2])
line = next(f).split()
self.zones[k].Rin = float(line[0])
self.zones[k].edge = float(line[1])
self.zones[k].Rout = float(line[2])
self.zones[k].Rc = float(line[3])
line = next(f).split()
self.zones[k].flaring_exp = float(line[0])
line = next(f).split()
self.zones[k].surface_density_exp = float(line[0])
self.zones[k].m_gamma_exp = float(line[1])
# -- Grain properties --
for k in range(n_zones):
line = next(f).split()
n_species = int(line[0])
self.zones[k].n_species = n_species
for j in range(n_species):
                self.zones[k].dust.append(Dust())
line = next(f).split()
self.zones[k].dust[j].type = line[0]
n_components = int(line[1])
self.zones[k].dust[j].n_components = n_components
self.zones[k].dust[j].mixing_rule = int(line[2])
self.zones[k].dust[j].porosity = float(line[3])
self.zones[k].dust[j].mass_fraction = float(line[4])
self.zones[k].dust[j].DHS_Vmax = float(line[5])
                for l in range(n_components):
                    self.zones[k].dust[j].component.append(DustComponent())
line = next(f).split()
self.zones[k].dust[j].component[l].file = line[0]
self.zones[k].dust[j].component[l].volume_fraction = float(line[1])
line = next(f).split()
self.zones[k].dust[j].heating_method = int(line[0])
line = next(f).split()
self.zones[k].dust[j].amin = float(line[0])
self.zones[k].dust[j].amax = float(line[1])
self.zones[k].dust[j].aexp = float(line[2])
self.zones[k].dust[j].n_grains = int(line[3])
# -- Molecular settings --
line = next(f).split()
self.mol.compute_pop = _word_to_bool(line[0])
self.mol.compute_pop_accurate = _word_to_bool(line[1])
self.mol.LTE = _word_to_bool(line[2])
self.mol.profile_width = float(line[3])
line = next(f).split()
self.mol.v_turb = float(line[0])
line = next(f).split()
n_mol = int(line[0])
self.mol.n_mol = n_mol
        for k in range(n_mol):
            self.mol.molecule.append(Molecule())
line = next(f).split()
self.mol.molecule[k].file = line[0]
self.mol.molecule[k].level_max = int(line[1])
line = next(f).split()
self.mol.molecule[k].v_max = float(line[0])
self.mol.molecule[k].nv = int(line[1])
line = next(f).split()
self.mol.molecule[k].cst_abundance = _word_to_bool(line[0])
self.mol.molecule[k].abundance = line[1]
self.mol.molecule[k].abundance_file = line[2]
line = next(f).split()
self.mol.molecule[k].ray_tracing = _word_to_bool(line[0])
nTrans = int(line[1])
self.mol.molecule[k].n_trans = nTrans
line = next(f).split()
self.mol.molecule[k].transitions = list(
map(int, line[0:nTrans])
) # convert list of str to int
# -- Star properties --
line = next(f).split()
n_stars = int(line[0])
self.simu.n_stars = n_stars
        for k in range(n_stars):
            self.stars.append(Star())
line = next(f).split()
self.stars[k].Teff = float(line[0])
self.stars[k].R = float(line[1])
self.stars[k].M = float(line[2])
self.stars[k].x = float(line[3])
self.stars[k].y = float(line[4])
self.stars[k].z = float(line[5])
self.stars[k].is_bb = _word_to_bool(line[6])
line = next(f).split()
self.stars[k].file = line[0]
line = next(f).split()
self.stars[k].fUV = float(line[0])
self.stars[k].slope_UV = float(line[1])
# -- Command line options --
for line in f:
if (len(line) > 0):
line = line.split()
if (len(line) > 0): # we test again in case there were only spaces
if (line[0] == "Executed"):
self.options = " ".join(line[6:])
if (line[0] == "sha"):
self.mcfost_sha = line[2]
def __str__(self):
""" Return a formatted parameter file. Currently returns v3.0 format
"""
# -- Photon packets --
txt = f"""3.0 mcfost version\n
#-- Number of photon packages --
{self.phot.nphot_T:<10.5g} nbr_photons_eq_th : T computation
{self.phot.nphot_SED:<10.5g} nbr_photons_lambda : SED computation
{self.phot.nphot_image:<10.5g} nbr_photons_image : images computation\n\n"""
# -- Wavelengths --
txt += f"""#-- Wavelength --
{self.wavelengths.n_wl:<4d} {self.wavelengths.wl_min:<5.1f} {self.wavelengths.wl_max:<7g} n_lambda, lambda_min, lambda_max [microns]
{self.simu.compute_T} {self.simu.compute_SED} {self.simu.use_default_wl} compute temperature?, compute sed?, use default wavelength grid ?
{self.wavelengths.file} wavelength file (if previous parameter is F)
{self.simu.separate_contrib} {self.simu.separate_pola} separation of different contributions?, stokes parameters?\n\n"""
# -- Grid --
txt += f"""#-- Grid geometry and size --
{self.grid.type:>1d} 1 = cylindrical, 2 = spherical
{self.grid.n_rad} {self.grid.nz} {self.grid.n_az} {self.grid.n_rad_in} n_rad (log distribution), nz (or n_theta), n_az, n_rad_in\n\n"""
# -- Maps --
txt += f"""#-- Maps --
{self.map.nx} {self.map.ny} {self.map.size:5.1f} grid (nx,ny), size [au]
{self.map.RT_imin:<4.1f} {self.map.RT_imax:<4.1f} {self.map.RT_ntheta:>2d} {self.map.lRT_centered} RT: imin, imax, n_incl, centered ?
{self.map.RT_az_min:<4.1f} {self.map.RT_az_max:<4.1f} {self.map.RT_n_az:>2d} RT: az_min, az_max, n_az
{self.map.distance:<6.2f} distance (pc)
{self.map.PA:<6.2f} disk PA\n\n"""
# -- Scattering method --
txt += f"""#-- Scattering method --
{self.simu.scattering_method} 0=auto, 1=grain prop, 2=cell prop
{self.simu.phase_function_method} 1=Mie, 2=hg (2 implies the loss of polarizarion)\n\n"""
# -- Symetries --
txt += f"""#-- Symmetries --
{self.simu.image_symmetry} image symmetry
{self.simu.central_symmetry} central symmetry
{self.simu.axial_symmetry} axial symmetry (important only if N_phi > 1)\n\n"""
# -- Disk physics --
txt += f"""#Disk physics
{self.simu.dust_settling_type} {self.simu.dust_settling_exp:<6.2f} {self.simu.a_settling:<6.2f} dust_settling (0=no settling, 1=parametric, 2=Dubrulle, 3=Fromang), exp_strat, a_strat (for parametric settling)
{self.simu.radial_migration} dust radial migration
{self.simu.dust_sublimation} sublimate dust
{self.simu.hydrostatic_eq} hydrostatic equilibrium
{self.simu.viscous_heating} {self.simu.viscosity:4.1g} viscous heating, alpha_viscosity\n\n"""
# -- Number of zones --
txt += f"""#-- Number of zones -- 1 zone = 1 density structure + corresponding grain properties
{self.simu.n_zones}\n\n"""
# -- Density structure --
txt += f"#-- Density structure --\n"
for k in range(self.simu.n_zones):
txt += f""" {self.zones[k].geometry} zone type : 1 = disk, 2 = tapered-edge disk, 3 = envelope, 4 = debris disk, 5 = wall
{self.zones[k].dust_mass:<10.2e} {self.zones[k].gas_to_dust_ratio:<5.1f} dust mass, gas-to-dust mass ratio
{self.zones[k].h0:<5.1f} {self.zones[k].Rref:<6.1f} {self.zones[k].vertical_exp:<6.1f} scale height, reference radius (AU), unused for envelope, vertical profile exponent (only for debris disk)
{self.zones[k].Rin:<6.1f} {self.zones[k].edge:<6.1f} {self.zones[k].Rout:<6.1f} {self.zones[k].Rc:<6.1f} Rin, edge, Rout, Rc (AU) Rc is only used for tappered-edge & debris disks (Rout set to 8*Rc if Rout==0)
{self.zones[k].flaring_exp:<8.3f} flaring exponent, unused for envelope
{self.zones[k].surface_density_exp} {self.zones[k].m_gamma_exp} surface density exponent (or -gamma for tappered-edge disk or volume density for envelope), usually < 0, -gamma_exp (or alpha_in & alpha_out for debris disk)\n\n"""
txt += f"\n"
# -- Grain properties --
txt += f"#-- Grain properties --\n"
for k in range(self.simu.n_zones):
txt += (
f" {self.zones[k].n_species} Number of species\n"
)
for j in range(self.zones[k].n_species):
txt += f" Mie {self.zones[k].dust[j].n_components} {self.zones[k].dust[j].mixing_rule} {self.zones[k].dust[j].porosity:<5.2f} {self.zones[k].dust[j].mass_fraction:<5.2f} {self.zones[k].dust[j].DHS_Vmax} Grain type (Mie or DHS), N_components, mixing rule (1 = EMT or 2 = coating), porosity, mass fraction, Vmax (for DHS)\n"
for l in range(self.zones[k].dust[j].n_components):
txt += f" {self.zones[k].dust[j].component[l].file} {self.zones[k].dust[j].component[l].volume_fraction} Optical indices file, volume fraction\n"
txt += f""" {self.zones[k].dust[j].heating_method} Heating method : 1 = RE + LTE, 2 = RE + NLTE, 3 = NRE
{self.zones[k].dust[j].amin} {self.zones[k].dust[j].amax} {self.zones[k].dust[j].aexp} {self.zones[k].dust[j].n_grains} amin, amax, aexp, nbr_grains\n\n"""
# -- Molecular settings --
txt += f"""#-- Molecular RT settings --
{self.mol.compute_pop} {self.mol.compute_pop_accurate} {self.mol.LTE} {self.mol.profile_width} lpop, laccurate_pop, LTE, profile width
{self.mol.v_turb} v_turb [km/s]
{self.mol.n_mol} nmol\n"""
for k in range(self.mol.n_mol):
txt += f""" {self.mol.molecule[k].file} {self.mol.molecule[k].level_max} molecular data filename, level_max
{self.mol.molecule[k].v_max} {self.mol.molecule[k].nv} vmax (km.s-1), n_speed
{self.mol.molecule[k].cst_abundance} {self.mol.molecule[k].abundance} {self.mol.molecule[k].abundance_file} cst molecule abundance ?, abundance, abundance file
{self.mol.molecule[k].ray_tracing} {self.mol.molecule[k].n_trans} ray tracing ?, number of lines in ray-tracing\n """
for j in range(self.mol.molecule[k].n_trans):
txt += f" {self.mol.molecule[k].transitions[j]}"
txt += f" transition numbers\n"
txt += f"\n"
# -- Star properties --
txt += f"""#-- Star properties --
{self.simu.n_stars} Number of stars\n"""
for k in range(self.simu.n_stars):
txt += f""" {self.stars[k].Teff} {self.stars[k].R} {self.stars[k].M} {self.stars[k].x} {self.stars[k].y} {self.stars[k].x} {self.stars[k].is_bb} Temp, radius (solar radius),M (solar mass),x,y,z (AU), is a blackbody?
{self.stars[k].file}
{self.stars[k].fUV} {self.stars[k].slope_UV} fUV, slope_UV\n"""
return txt
def writeto(self, outname):
""" Write an MCFOST parameter file to disk. """
with open(outname, mode="wt") as file:
file.write(str(self))
def calc_inclinations(self):
# Calculate the inclinations for the ray-traced SEDs and images
if self.map.RT_ntheta == 1:
return self.map.RT_imin
else:
cos_min, cos_max = np.cos(np.deg2rad([self.map.RT_imin, self.map.RT_imax]))
if self.map.lRT_centered:
return (
np.rad2deg(np.arccos(
cos_min
+ (np.arange(self.map.RT_ntheta) + 0.5)
/ self.map.RT_ntheta
* (cos_max - cos_min)
))
)
else:
return (
np.rad2deg(np.arccos(
cos_min
+ (np.arange(self.map.RT_ntheta))
/ (self.map.RT_ntheta - 1)
* (cos_max - cos_min)
))
)
def find_parameter_file(directory="./"):
    files = glob.glob(directory + "/*.par*")
    if len(files) == 1:
        return files[0]
    elif len(files) > 1:
        raise ValueError("Multiple parameter files found in " + directory)
    else:
        raise ValueError("No parameter files found in " + directory)
|
[
"numpy.deg2rad",
"numpy.arange",
"glob.glob"
] |
[((18379, 18411), 'glob.glob', 'glob.glob', (["(directory + '/*.par*')"], {}), "(directory + '/*.par*')\n", (18388, 18411), False, 'import glob\n'), ((17628, 17676), 'numpy.deg2rad', 'np.deg2rad', (['[self.map.RT_imin, self.map.RT_imax]'], {}), '([self.map.RT_imin, self.map.RT_imax])\n', (17638, 17676), True, 'import numpy as np\n'), ((18155, 18184), 'numpy.arange', 'np.arange', (['self.map.RT_ntheta'], {}), '(self.map.RT_ntheta)\n', (18164, 18184), True, 'import numpy as np\n'), ((17842, 17871), 'numpy.arange', 'np.arange', (['self.map.RT_ntheta'], {}), '(self.map.RT_ntheta)\n', (17851, 17871), True, 'import numpy as np\n')]
|
from django.utils.translation import gettext_lazy as _
from rest_framework import exceptions as rf_exceptions
from rest_framework import serializers
from waldur_core.structure.models import CUSTOMER_DETAILS_FIELDS
from waldur_core.structure.serializers import (
CountrySerializerMixin,
ProjectDetailsSerializerMixin,
)
from waldur_mastermind.marketplace import models as marketplace_models
from waldur_mastermind.marketplace.serializers import BaseItemSerializer
from . import models
class ReviewSerializerMixin(serializers.HyperlinkedModelSerializer):
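    """Expose review state and reviewer/requester metadata shared by request serializers."""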
state = serializers.ReadOnlyField(source='get_state_display')
uuid = serializers.ReadOnlyField(source='flow.uuid')
created = serializers.ReadOnlyField(source='flow.created')
requested_by_full_name = serializers.ReadOnlyField(
source='flow.requested_by.full_name'
)
reviewed_by_full_name = serializers.ReadOnlyField(source='reviewed_by.full_name')
class Meta:
model = models.ReviewMixin
extra_kwargs = {
'reviewed_by': {'lookup_field': 'uuid', 'view_name': 'user-detail'},
}
fields = (
'uuid',
'reviewed_by',
'reviewed_by_full_name',
'requested_by_full_name',
'reviewed_at',
'review_comment',
'state',
'created',
)
class CustomerCreateRequestSerializer(CountrySerializerMixin, ReviewSerializerMixin):
class Meta(ReviewSerializerMixin.Meta):
model = models.CustomerCreateRequest
fields = ReviewSerializerMixin.Meta.fields + CUSTOMER_DETAILS_FIELDS
class ProjectCreateRequestSerializer(
ProjectDetailsSerializerMixin, ReviewSerializerMixin
):
class Meta(ReviewSerializerMixin.Meta):
model = models.ProjectCreateRequest
fields = ReviewSerializerMixin.Meta.fields + ('name', 'description', 'end_date')
class ResourceCreateRequestSerializer(BaseItemSerializer, ReviewSerializerMixin):
uuid = serializers.ReadOnlyField(source='flow.uuid')
class Meta(BaseItemSerializer.Meta):
model = models.ResourceCreateRequest
fields = (
ReviewSerializerMixin.Meta.fields
+ BaseItemSerializer.Meta.fields
+ ('name', 'description', 'end_date')
)
extra_kwargs = {
**BaseItemSerializer.Meta.extra_kwargs,
'reviewed_by': {'lookup_field': 'uuid', 'view_name': 'user-detail'},
}
class FlowSerializer(serializers.HyperlinkedModelSerializer):
state = serializers.ReadOnlyField(source='get_state_display')
customer_create_request = CustomerCreateRequestSerializer(required=False)
customer_name = serializers.ReadOnlyField(source='customer.name')
project_create_request = ProjectCreateRequestSerializer()
resource_create_request = ResourceCreateRequestSerializer()
def get_fields(self):
fields = super().get_fields()
if self.instance is None:
return fields
try:
request = self.context['view'].request
except (KeyError, AttributeError):
return fields
if request.method in ('PUT', 'PATCH'):
fields['resource_create_request'] = ResourceCreateRequestSerializer(
instance=self.instance.resource_create_request
)
return fields
class Meta:
model = models.FlowTracker
fields = (
'uuid',
'url',
'customer',
'customer_name',
'order_item',
'customer_create_request',
'project_create_request',
'resource_create_request',
'state',
)
extra_kwargs = {
'url': {
'lookup_field': 'uuid',
'view_name': 'marketplace-resource-creation-flow-detail',
},
'customer': {'lookup_field': 'uuid', 'view_name': 'customer-detail'},
'order_item': {
'lookup_field': 'uuid',
'view_name': 'marketplace-order-item-detail',
},
}
read_only_fields = ('requested_by', 'order_item')
def create(self, validated_data):
request = self.context['request']
customer = validated_data.get('customer')
customer_create_request_data = validated_data.pop(
'customer_create_request', None
)
project_create_request_data = validated_data.pop('project_create_request')
resource_create_request_data = validated_data.pop('resource_create_request')
if not customer_create_request_data and not customer:
raise serializers.ValidationError(
_('Either customer_create_request or customer should be specified.')
)
if customer_create_request_data and customer:
raise serializers.ValidationError(
_('customer_create_request and customer are mutually exclusive.')
)
if (
customer
and not request.user.is_staff
and request.user not in customer.get_users()
):
raise serializers.ValidationError(
_('User is not connected to this customer.')
)
if not customer:
validated_data[
'customer_create_request'
] = models.CustomerCreateRequest.objects.create(
**customer_create_request_data
)
validated_data[
'project_create_request'
] = models.ProjectCreateRequest.objects.create(**project_create_request_data)
validated_data[
'resource_create_request'
] = models.ResourceCreateRequest.objects.create(**resource_create_request_data)
validated_data['requested_by'] = request.user
return super(FlowSerializer, self).create(validated_data)
def update(self, instance, validated_data):
for field in (
'customer_create_request',
'project_create_request',
'resource_create_request',
):
data = validated_data.pop(field, None)
section = getattr(instance, field)
if data:
for k, v in data.items():
setattr(section, k, v)
if section:
section.save()
return super().update(instance, validated_data)
class OfferingActivateRequestSerializer(serializers.HyperlinkedModelSerializer):
state = serializers.ReadOnlyField(source='get_state_display')
class Meta:
model = models.OfferingStateRequest
fields = (
'reviewed_by',
'reviewed_at',
'review_comment',
'state',
'created',
'url',
'uuid',
'offering',
'requested_by',
)
extra_kwargs = {
'url': {
'lookup_field': 'uuid',
'view_name': 'marketplace-offering-activate-request-detail',
},
'offering': {
'lookup_field': 'uuid',
'view_name': 'marketplace-offering-detail',
},
'reviewed_by': {'lookup_field': 'uuid', 'view_name': 'user-detail'},
'requested_by': {'lookup_field': 'uuid', 'view_name': 'user-detail'},
}
read_only_fields = (
'reviewed_by',
'reviewed_at',
'review_comment',
'state',
'created',
'url',
'uuid',
'requested_by',
'issue',
)
def create(self, validated_data):
request = self.context['request']
validated_data['requested_by'] = request.user
return super(OfferingActivateRequestSerializer, self).create(validated_data)
def validate_offering(self, offering):
if offering.state != marketplace_models.Offering.States.DRAFT:
raise rf_exceptions.ValidationError(_('Offering state must be draft.'))
request = self.context['request']
if models.OfferingStateRequest.objects.filter(
offering=offering,
requested_by=request.user,
state__in=(
models.OfferingStateRequest.States.DRAFT,
models.OfferingStateRequest.States.PENDING,
),
).exists():
raise rf_exceptions.ValidationError(
_('Pending request for this offering already exists.')
)
return offering
|
[
"rest_framework.serializers.ReadOnlyField",
"django.utils.translation.gettext_lazy"
] |
[((577, 630), 'rest_framework.serializers.ReadOnlyField', 'serializers.ReadOnlyField', ([], {'source': '"""get_state_display"""'}), "(source='get_state_display')\n", (602, 630), False, 'from rest_framework import serializers\n'), ((642, 687), 'rest_framework.serializers.ReadOnlyField', 'serializers.ReadOnlyField', ([], {'source': '"""flow.uuid"""'}), "(source='flow.uuid')\n", (667, 687), False, 'from rest_framework import serializers\n'), ((702, 750), 'rest_framework.serializers.ReadOnlyField', 'serializers.ReadOnlyField', ([], {'source': '"""flow.created"""'}), "(source='flow.created')\n", (727, 750), False, 'from rest_framework import serializers\n'), ((780, 843), 'rest_framework.serializers.ReadOnlyField', 'serializers.ReadOnlyField', ([], {'source': '"""flow.requested_by.full_name"""'}), "(source='flow.requested_by.full_name')\n", (805, 843), False, 'from rest_framework import serializers\n'), ((886, 943), 'rest_framework.serializers.ReadOnlyField', 'serializers.ReadOnlyField', ([], {'source': '"""reviewed_by.full_name"""'}), "(source='reviewed_by.full_name')\n", (911, 943), False, 'from rest_framework import serializers\n'), ((1990, 2035), 'rest_framework.serializers.ReadOnlyField', 'serializers.ReadOnlyField', ([], {'source': '"""flow.uuid"""'}), "(source='flow.uuid')\n", (2015, 2035), False, 'from rest_framework import serializers\n'), ((2537, 2590), 'rest_framework.serializers.ReadOnlyField', 'serializers.ReadOnlyField', ([], {'source': '"""get_state_display"""'}), "(source='get_state_display')\n", (2562, 2590), False, 'from rest_framework import serializers\n'), ((2689, 2738), 'rest_framework.serializers.ReadOnlyField', 'serializers.ReadOnlyField', ([], {'source': '"""customer.name"""'}), "(source='customer.name')\n", (2714, 2738), False, 'from rest_framework import serializers\n'), ((6492, 6545), 'rest_framework.serializers.ReadOnlyField', 'serializers.ReadOnlyField', ([], {'source': '"""get_state_display"""'}), "(source='get_state_display')\n", (6517, 6545), False, 'from rest_framework import serializers\n'), ((4697, 4765), 'django.utils.translation.gettext_lazy', '_', (['"""Either customer_create_request or customer should be specified."""'], {}), "('Either customer_create_request or customer should be specified.')\n", (4698, 4765), True, 'from django.utils.translation import gettext_lazy as _\n'), ((4898, 4963), 'django.utils.translation.gettext_lazy', '_', (['"""customer_create_request and customer are mutually exclusive."""'], {}), "('customer_create_request and customer are mutually exclusive.')\n", (4899, 4963), True, 'from django.utils.translation import gettext_lazy as _\n'), ((5186, 5230), 'django.utils.translation.gettext_lazy', '_', (['"""User is not connected to this customer."""'], {}), "('User is not connected to this customer.')\n", (5187, 5230), True, 'from django.utils.translation import gettext_lazy as _\n'), ((7985, 8019), 'django.utils.translation.gettext_lazy', '_', (['"""Offering state must be draft."""'], {}), "('Offering state must be draft.')\n", (7986, 8019), True, 'from django.utils.translation import gettext_lazy as _\n'), ((8431, 8485), 'django.utils.translation.gettext_lazy', '_', (['"""Pending request for this offering already exists."""'], {}), "('Pending request for this offering already exists.')\n", (8432, 8485), True, 'from django.utils.translation import gettext_lazy as _\n')]
|
from django.shortcuts import render,redirect
from django.http import HttpResponse,Http404
from .models import Author,Picture,Category,Location
# Create your views here.
def pics(request):
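    # Landing page: all pictures plus the category and location filter lists.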
category = Category.get_categories()
pictures = Picture.all_pics()
location_pics = Location.get_location()
return render(request,'pics.html',{'pictures': pictures, 'category': category, 'location_pics':location_pics })
def single_pic(request,id):
try:
pic = Picture.objects.get(id = id)
    except Picture.DoesNotExist:
raise Http404()
return render(request,"single_pic.html", {"pic":pic})
def search_results(request):
if 'image' in request.GET and request.GET["image"]:
search_term = request.GET.get('image')
searched_pics = Picture.search_by_name(search_term)
message = f'{search_term}'
return render(request,'search.html',{"message":message,"image":searched_pics})
else:
message = "You have not entered anything to search"
return render(request,'search.html',{"message":message})
def viewPics_by_location(request,location):
locationpic = Picture.view_pictures_by_location(location)
return render(request,"location_pics.html",{"locationpic":locationpic})
def viewPics_by_category(request,category):
photos =Picture.view_pictures_by_category(category)
    return render(request,'category.html',{"photos":photos})
|
[
"django.shortcuts.render",
"django.http.Http404"
] |
[((321, 431), 'django.shortcuts.render', 'render', (['request', '"""pics.html"""', "{'pictures': pictures, 'category': category, 'location_pics': location_pics}"], {}), "(request, 'pics.html', {'pictures': pictures, 'category': category,\n 'location_pics': location_pics})\n", (327, 431), False, 'from django.shortcuts import render, redirect\n'), ((568, 616), 'django.shortcuts.render', 'render', (['request', '"""single_pic.html"""', "{'pic': pic}"], {}), "(request, 'single_pic.html', {'pic': pic})\n", (574, 616), False, 'from django.shortcuts import render, redirect\n'), ((1185, 1252), 'django.shortcuts.render', 'render', (['request', '"""location_pics.html"""', "{'locationpic': locationpic}"], {}), "(request, 'location_pics.html', {'locationpic': locationpic})\n", (1191, 1252), False, 'from django.shortcuts import render, redirect\n'), ((1363, 1415), 'django.shortcuts.render', 'render', (['request', '"""category.html"""', "{'photos': photos}"], {}), "(request, 'category.html', {'photos': photos})\n", (1369, 1415), False, 'from django.shortcuts import render, redirect\n'), ((859, 935), 'django.shortcuts.render', 'render', (['request', '"""search.html"""', "{'message': message, 'image': searched_pics}"], {}), "(request, 'search.html', {'message': message, 'image': searched_pics})\n", (865, 935), False, 'from django.shortcuts import render, redirect\n'), ((1017, 1069), 'django.shortcuts.render', 'render', (['request', '"""search.html"""', "{'message': message}"], {}), "(request, 'search.html', {'message': message})\n", (1023, 1069), False, 'from django.shortcuts import render, redirect\n'), ((547, 556), 'django.http.Http404', 'Http404', ([], {}), '()\n', (554, 556), False, 'from django.http import HttpResponse, Http404\n')]
|
# ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
import logging
from enum import Enum
from typing import Tuple, List, Dict, Any
import numpy as np
from InnerEyeDataQuality.evaluation.metrics import compute_accuracy, compute_label_entropy, total_variation
STAT_FIELDS = ["relabelling_score", "ambiguity", "label_correctness"]
class SelectionType(Enum):
"""
Defines the 5 possible types of selections that can be made in an iteration
"""
MISLABELLED_CASE_SELECTED_CORRECTED = 1
MISLABELLED_CASE_SELECTED_NOT_CORRECTED = 2
AMBIGUOUS_CASE_SELECTED_CORRECTED = 3
AMBIGUOUS_CASE_SELECTED_NOT_CORRECTED = 4
CLEAN_CASE_SELECTED = 5
def compute_selection_type_of_current_iter(sample_id: int,
true_ambiguous_cases: np.ndarray,
true_label_counts: np.ndarray,
mislabelled_ids_current: np.ndarray,
ambiguous_case_ids_current: np.ndarray,
mislabelled_ids_prev: np.ndarray,
ambiguous_case_ids_prev: np.ndarray) -> SelectionType:
"""
Compute the type of selection that occurred between the previous and current iteration.
:param sample_id: The sample id.
:param true_ambiguous_cases: The ids for the true ambiguous samples.
:param true_label_counts: The label counts for the true label distribution.
:param mislabelled_ids_current: The ids for the current iteration remaining not ambiguous mislabelled samples.
:param ambiguous_case_ids_current: The ids for the current iteration remaining ambiguous mislabelled samples.
:param mislabelled_ids_prev: The ids for the previous iteration remaining not ambiguous mislabelled samples.
:param ambiguous_case_ids_prev: The ids for the previous iteration remaining ambiguous mislabelled samples.
:return: An enum representing the selection type that occurred between the previous and current iteration.
"""
if sample_id in true_ambiguous_cases:
if len(set(ambiguous_case_ids_prev) - set(ambiguous_case_ids_current)) > 0:
return SelectionType.AMBIGUOUS_CASE_SELECTED_CORRECTED
else:
return SelectionType.AMBIGUOUS_CASE_SELECTED_NOT_CORRECTED
else:
if len(set(mislabelled_ids_prev) - set(mislabelled_ids_current)) > 0:
return SelectionType.MISLABELLED_CASE_SELECTED_CORRECTED
elif len(np.unique(np.where(true_label_counts[sample_id])[0])) == 1:
return SelectionType.CLEAN_CASE_SELECTED
else:
return SelectionType.MISLABELLED_CASE_SELECTED_NOT_CORRECTED
def get_mislabelled_sample_ids(true_label_counts: np.ndarray, current_label_counts: np.ndarray) -> np.ndarray:
"""
Compute which samples are mislabelled.
:param true_label_counts: The label counts for the true label distribution.
:param current_label_counts: The label counts for the current distribution.
:return: An array with the ids of the mislabeled samples (majority voting)
"""
true_class = np.argmax(true_label_counts, axis=1)
current_class = np.argmax(current_label_counts, axis=1)
return np.where(true_class != current_class)
def get_ambiguous_sample_ids(true_label_counts: np.ndarray, threshold: float = 0.30) -> np.ndarray:
"""
Compute which samples are ambiguous
:param true_label_counts: The label counts for the true label distribution.
:param threshold: The label entropy threshold above which a sample is considered ambiguous
:return: An array with the ids of the ambiguous samples
"""
label_entropy = compute_label_entropy(true_label_counts)
return np.where(label_entropy > threshold)[0]
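# Worked example, assuming compute_label_entropy returns the Shannon entropy of
# the normalised label counts: counts [5, 5] give the maximal two-class entropy
# (ln 2 ~ 0.693 in nats), which exceeds the default threshold of 0.30, so the
# sample is flagged as ambiguous; counts [10, 0] give entropy 0 and the sample
# is treated as unambiguous.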
class SimulationStats:
"""
A class that keeps track of statistics/metrics during the simulation
"""
def __init__(self, name: str, true_label_counts: np.ndarray, initial_labels: np.ndarray):
"""
:param name: The name of the simulation
:param true_label_counts: The label counts for the true label distribution
np.ndarray [num_samples x num_classes]
:param initial_labels: The initial label counts, np.ndarray [num_samples x num_classes]
"""
self.name = name
self.initial_labels = np.copy(initial_labels)
self.true_label_counts = true_label_counts
self.true_ambiguous_cases = get_ambiguous_sample_ids(true_label_counts)
self.true_distribution = true_label_counts / np.sum(true_label_counts, axis=-1, keepdims=True)
self.selected_sample_id: List[int] = list()
self.num_fetches: List[int] = list()
self.accuracy: List[float] = list()
self.avg_total_variation: List[float] = list()
self.selection_type: List[SelectionType] = list()
self.selector_stats: Dict[str, Any] = {key: list() for key in STAT_FIELDS}
mislabelled_ids_current, ambiguous_case_ids_current = self.get_noisy_and_ambiguous_cases(initial_labels)
self.mislabelled_not_ambiguous_sample_ids = [mislabelled_ids_current]
self.mislabelled_ambiguous_sample_ids = [ambiguous_case_ids_current]
self.num_initial_mislabelled_not_ambiguous = self.mislabelled_not_ambiguous_sample_ids[0].size
self.num_initial_mislabelled_ambiguous = self.mislabelled_ambiguous_sample_ids[0].size
self.num_remaining_mislabelled_not_ambiguous: List[int] = list()
self.num_remaining_mislabelled_ambiguous: List[int] = list()
def get_noisy_and_ambiguous_cases(self, current_label_counts: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""
        Compute which of the current labels are still mislabelled, and separate these into
        ambiguous and not ambiguous samples.
:param current_label_counts: The label counts of the current iteration
:return: A tuple containing an array with the current mislabelled not ambiguous sample ids and an array with
the current mislabelled ambiguous sample ids.
"""
# Find the potential label noise and ambiguous cases
label_mismatch_ids_current = get_mislabelled_sample_ids(self.true_label_counts, current_label_counts)
# Split the label mismatch cases into ambiguous and clear label noise types
mislabelled_ids_current = np.setdiff1d(label_mismatch_ids_current, self.true_ambiguous_cases)
ambiguous_case_ids_current = np.array(np.intersect1d(label_mismatch_ids_current, self.true_ambiguous_cases))
return mislabelled_ids_current, ambiguous_case_ids_current
def record_selector_stats(self, selector_stats: Dict[str, Any]) -> None:
"""
"""
if len(selector_stats) == 0:
return
for key in STAT_FIELDS:
if key in selector_stats:
self.selector_stats[key].append(selector_stats[key])
def record_iteration(self, selected_sample_id: int, num_fetches: int, current_label_counts: np.ndarray) -> None:
"""
:param selected_sample_id: The sample id that was selected at this iteration
:param num_fetches: The number of fetches (relabels) it took to achieve a majority
:param current_label_counts: The labels counts for the current iteration
:return:
"""
self.selected_sample_id.append(selected_sample_id)
self.num_fetches.append(num_fetches)
self.accuracy.append(compute_accuracy(current_label_counts, self.true_label_counts))
current_distribution = current_label_counts / np.sum(current_label_counts, axis=-1, keepdims=True)
self.avg_total_variation.append(np.nanmean(total_variation(self.true_distribution, current_distribution)))
mislabelled_ids_current, ambiguous_case_ids_current = self.get_noisy_and_ambiguous_cases(current_label_counts)
mislabelled_ids_prev = self.mislabelled_not_ambiguous_sample_ids[-1]
ambiguous_case_ids_prev = self.mislabelled_ambiguous_sample_ids[-1]
selection_type = compute_selection_type_of_current_iter(selected_sample_id,
self.true_ambiguous_cases,
self.true_label_counts,
mislabelled_ids_current, ambiguous_case_ids_current,
mislabelled_ids_prev, ambiguous_case_ids_prev)
self.selection_type.append(selection_type)
self.num_remaining_mislabelled_not_ambiguous.append(len(mislabelled_ids_current))
self.num_remaining_mislabelled_ambiguous.append(len(ambiguous_case_ids_current))
self.mislabelled_not_ambiguous_sample_ids.append(mislabelled_ids_current)
self.mislabelled_ambiguous_sample_ids.append(ambiguous_case_ids_current)
def log_last_iter(self) -> None:
"""
Log the statistics of the last iteration
:return: None
"""
logging.info(f"Method: {self.name}, selected_id: {self.selected_sample_id[-1]} "
f"accuracy: {self.accuracy[-1]}")
logging.info(f"Remaining label clear noise cases: {self.num_remaining_mislabelled_not_ambiguous[-1]} "
f"and ambiguous noise cases: {self.num_remaining_mislabelled_ambiguous[-1]}")
class SimulationStatsDistribution(object):
"""
A class that takes a list of simulation statistics and creates a distribution over them.
"""
def __init__(self, simulation_stats_list: List[SimulationStats]):
"""
:param simulation_stats_list: A list of SimulationStats objects
"""
self.simulation_stats = simulation_stats_list
end_point = max([np.max(np.cumsum(sim_stats.num_fetches)) for sim_stats in simulation_stats_list])
start_point = min([np.min(np.cumsum(sim_stats.num_fetches)) for sim_stats in simulation_stats_list])
self.num_initial_mislabelled_not_ambiguous = simulation_stats_list[0].num_initial_mislabelled_not_ambiguous
self.num_initial_mislabelled_ambiguous = simulation_stats_list[0].num_initial_mislabelled_ambiguous
self.name = simulation_stats_list[0].name
self.num_fetches = np.arange(start_point, end_point)
self.accuracy = self._interpolate_and_make_dist_array(self.num_fetches, simulation_stats_list, 'accuracy')
self.avg_total_variation = self._interpolate_and_make_dist_array(
self.num_fetches, simulation_stats_list, 'avg_total_variation')
self.num_remaining_mislabelled_not_ambiguous =\
self._interpolate_and_make_dist_array(self.num_fetches, simulation_stats_list,
'num_remaining_mislabelled_not_ambiguous')
self.num_remaining_mislabelled_ambiguous = \
self._interpolate_and_make_dist_array(self.num_fetches, simulation_stats_list,
'num_remaining_mislabelled_ambiguous')
@staticmethod
def _interpolate_and_make_dist_array(num_fetches: np.ndarray,
simulation_stats_list: List[SimulationStats],
fp_attr_name: str) -> np.ndarray:
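        # Interpolate each run's metric onto the shared fetch axis, then stack
        # the runs into a single array of shape [num_runs, len(num_fetches)].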
return np.array([np.interp(num_fetches, np.cumsum(sim_stats.num_fetches),
sim_stats.__getattribute__(fp_attr_name)) for sim_stats in simulation_stats_list])
|
[
"numpy.sum",
"InnerEyeDataQuality.evaluation.metrics.compute_accuracy",
"numpy.copy",
"numpy.argmax",
"InnerEyeDataQuality.evaluation.metrics.total_variation",
"numpy.setdiff1d",
"logging.info",
"InnerEyeDataQuality.evaluation.metrics.compute_label_entropy",
"numpy.where",
"numpy.arange",
"numpy.cumsum",
"numpy.intersect1d",
"dataclasses.dataclass"
] |
[((658, 680), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (667, 680), False, 'from dataclasses import dataclass\n'), ((3503, 3539), 'numpy.argmax', 'np.argmax', (['true_label_counts'], {'axis': '(1)'}), '(true_label_counts, axis=1)\n', (3512, 3539), True, 'import numpy as np\n'), ((3560, 3599), 'numpy.argmax', 'np.argmax', (['current_label_counts'], {'axis': '(1)'}), '(current_label_counts, axis=1)\n', (3569, 3599), True, 'import numpy as np\n'), ((3611, 3648), 'numpy.where', 'np.where', (['(true_class != current_class)'], {}), '(true_class != current_class)\n', (3619, 3648), True, 'import numpy as np\n'), ((4061, 4101), 'InnerEyeDataQuality.evaluation.metrics.compute_label_entropy', 'compute_label_entropy', (['true_label_counts'], {}), '(true_label_counts)\n', (4082, 4101), False, 'from InnerEyeDataQuality.evaluation.metrics import compute_accuracy, compute_label_entropy, total_variation\n'), ((4113, 4148), 'numpy.where', 'np.where', (['(label_entropy > threshold)'], {}), '(label_entropy > threshold)\n', (4121, 4148), True, 'import numpy as np\n'), ((4740, 4763), 'numpy.copy', 'np.copy', (['initial_labels'], {}), '(initial_labels)\n', (4747, 4763), True, 'import numpy as np\n'), ((6757, 6824), 'numpy.setdiff1d', 'np.setdiff1d', (['label_mismatch_ids_current', 'self.true_ambiguous_cases'], {}), '(label_mismatch_ids_current, self.true_ambiguous_cases)\n', (6769, 6824), True, 'import numpy as np\n'), ((9443, 9563), 'logging.info', 'logging.info', (['f"""Method: {self.name}, selected_id: {self.selected_sample_id[-1]} accuracy: {self.accuracy[-1]}"""'], {}), "(\n    f'Method: {self.name}, selected_id: {self.selected_sample_id[-1]} accuracy: {self.accuracy[-1]}'\n    )\n", (9455, 9563), False, 'import logging\n'), ((9587, 9773), 'logging.info', 'logging.info', (['f"""Remaining label clear noise cases: {self.num_remaining_mislabelled_not_ambiguous[-1]} and ambiguous noise cases: {self.num_remaining_mislabelled_ambiguous[-1]}"""'], {}), "(\n    f'Remaining label clear noise cases: {self.num_remaining_mislabelled_not_ambiguous[-1]} and ambiguous noise cases: {self.num_remaining_mislabelled_ambiguous[-1]}'\n    )\n", (9599, 9773), False, 'import logging\n'), ((10683, 10716), 'numpy.arange', 'np.arange', (['start_point', 'end_point'], {}), '(start_point, end_point)\n', (10692, 10716), True, 'import numpy as np\n'), ((4948, 4997), 'numpy.sum', 'np.sum', (['true_label_counts'], {'axis': '(-1)', 'keepdims': '(True)'}), '(true_label_counts, axis=-1, keepdims=True)\n', (4954, 4997), True, 'import numpy as np\n'), ((6871, 6940), 'numpy.intersect1d', 'np.intersect1d', (['label_mismatch_ids_current', 'self.true_ambiguous_cases'], {}), '(label_mismatch_ids_current, self.true_ambiguous_cases)\n', (6885, 6940), True, 'import numpy as np\n'), ((7857, 7919), 'InnerEyeDataQuality.evaluation.metrics.compute_accuracy', 'compute_accuracy', (['current_label_counts', 'self.true_label_counts'], {}), '(current_label_counts, self.true_label_counts)\n', (7873, 7919), False, 'from InnerEyeDataQuality.evaluation.metrics import compute_accuracy, compute_label_entropy, total_variation\n'), ((7975, 8027), 'numpy.sum', 'np.sum', (['current_label_counts'], {'axis': '(-1)', 'keepdims': '(True)'}), '(current_label_counts, axis=-1, keepdims=True)\n', (7981, 8027), True, 'import numpy as np\n'), ((8079, 8140), 'InnerEyeDataQuality.evaluation.metrics.total_variation', 'total_variation', (['self.true_distribution', 'current_distribution'], {}), '(self.true_distribution, current_distribution)\n', (8094, 8140), False, 'from InnerEyeDataQuality.evaluation.metrics import compute_accuracy, compute_label_entropy, total_variation\n'), ((10198, 10230), 'numpy.cumsum', 'np.cumsum', (['sim_stats.num_fetches'], {}), '(sim_stats.num_fetches)\n', (10207, 10230), True, 'import numpy as np\n'), ((10307, 10339), 'numpy.cumsum', 'np.cumsum', (['sim_stats.num_fetches'], {}), '(sim_stats.num_fetches)\n', (10316, 10339), True, 'import numpy as np\n'), ((11751, 11783), 'numpy.cumsum', 'np.cumsum', (['sim_stats.num_fetches'], {}), '(sim_stats.num_fetches)\n', (11760, 11783), True, 'import numpy as np\n'), ((2886, 2924), 'numpy.where', 'np.where', (['true_label_counts[sample_id]'], {}), '(true_label_counts[sample_id])\n', (2894, 2924), True, 'import numpy as np\n')]
|
from FastTextRank.FastTextRank4Word import FastTextRank4Word
from cnlglsearch.utils.legal_doc import LegalDoc
tr4s = FastTextRank4Word(tol=0.0001)
fp = open("aaa.txt",encoding="utf-8")
aa = fp.read()
fp.close()
tr4s.summarize("原告孙高长与被告长沙斯盛消防安全工程有限公司、娄底市三湘房地产开发有限公司追索劳动报酬纠纷民事一审调解书\n 湖南省新化县人民法院\n民 事 裁 定 书\n(2018)湘1322民初3836号\n原告孙某长,男,1972年4月20日出生,汉族,住湖南省新化县。\n委托代理人张正平,男,1987年4月4日出生,汉族,住湖南省新化县。\n代理权限:特别授权。\n被告长沙斯盛消防安全工程有限公司。\n法定代表人李松良。\n被告娄底市三湘房地产开发有限公司。\n法定代表人朱解长。\n原告孙某长诉被告长沙斯盛消防安全工程有限公司、娄底市三湘房地产开发有限公司追索劳动报酬纠纷一案,本院于2018年12月20日立案。原告孙某长于2018年12月21日向本院提出撤诉申请。\n本院认为,原告孙某长以原告与被告长沙斯盛消防安全工程有限公司、娄底市三湘房地产开发有限公司已达成和解为由申请撤回对被告长沙斯盛消防安全工程有限公司、娄底市三湘房地产开发有限公司的起诉,原告孙高长的申请符合法律规定,应予准许。依照《中华人民共和国民事诉讼法》第一百四十五条第一款之规定,裁定如下:\n准许原告孙某长撤诉。\n案件受理费10元,因撤诉减半收取5元,由被告娄底市三湘房地产开发有限公司负担。\n审 判 员 袁桂萍\n二〇一八年十二月二十一日\n法官助理欧阳昭\n代理书记员 曹玉婷", 10)
# l = LegalDoc()
# l.read_from_str(aa)
# # l.to_dict()
# {
# "from": 0,
# "size": 20,
# "query": {
# "match": {
# "major_text": "消防安全工程师"
# }
# },
# "highlight": {
# "pre_tags": "<em>",
# "post_tags": "</em>",
# "fields": {
# "major_text": {}
# }
# }
# }
|
[
"FastTextRank.FastTextRank4Word.FastTextRank4Word"
] |
[((118, 147), 'FastTextRank.FastTextRank4Word.FastTextRank4Word', 'FastTextRank4Word', ([], {'tol': '(0.0001)'}), '(tol=0.0001)\n', (135, 147), False, 'from FastTextRank.FastTextRank4Word import FastTextRank4Word\n')]
|
import msl.package_manager as pm
def test_github():
pkgs = pm.github()
assert pm._PKG_NAME in pkgs
pkg = pkgs[pm._PKG_NAME]
assert len(pkg['description']) > 0
assert len(pkg['version']) > 0
assert len(pkg['tags']) > 10
assert len(pkg['branches']) > 0
|
[
"msl.package_manager.github"
] |
[((65, 76), 'msl.package_manager.github', 'pm.github', ([], {}), '()\n', (74, 76), True, 'import msl.package_manager as pm\n')]
|
import os
import sys
import psycopg2 as sql
from importlib import import_module
db_conf = import_module('0_postgres_db_conf',os.getcwd() + '\\0_postgres_db_conf.py').db_conf
log = import_module('write_log', os.getcwd() + '\\write_log.py').Log('1_load_source_data')
source_data_directory = db_conf['source_data_directory']
TABLE_RESET = True
patient_files = os.listdir(source_data_directory + '\\patient')
practice_files = os.listdir(source_data_directory + '\\practice')
staff_files = os.listdir(source_data_directory + '\\staff')
consultation_files = os.listdir(source_data_directory + '\\consultation')
observation_files = os.listdir(source_data_directory + '\\observation')
drugissue_files = os.listdir(source_data_directory + '\\drugissue')
problem_files = os.listdir(source_data_directory + '\\problem')
referral_files = os.listdir(source_data_directory + '\\referral')
def get_table_count(table_name, console_log):
cur.execute(f'select count(1) from {SOURCE_SCHEMA}.{table_name}')
cnt = cur.fetchone()
if console_log:
log.log_message(f'{table_name} row count: {str(cnt)}')
return cnt
cnx = None
try:
cnx = sql.connect(
user=db_conf['username'],
password=db_conf['password'],
database=db_conf['database']
)
cnx.autocommit = True
cur = cnx.cursor()
SOURCE_SCHEMA = db_conf['source_schema']
if TABLE_RESET:
for query in open(os.getcwd() + '\\sql_scripts\\create_aurum_tables.sql').read().split(';'):
if query != '':
query = query.replace('{SOURCE_SCHEMA}', SOURCE_SCHEMA)
cur.execute(query + ';')
cur.execute('set datestyle to \'ISO,DMY\'')
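    # Bulk-load each extract with COPY. QUOTE E'\b' (backspace) names a quote
    # character that never occurs in the data, so any quotes inside the
    # tab-delimited files are treated as ordinary text.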
for filename in patient_files:
log.log_message('Processing: ' + filename)
cur.execute(f'COPY {SOURCE_SCHEMA}.patient FROM \'{source_data_directory}\\patient\\{filename}\' WITH DELIMITER E\'\t\' CSV HEADER QUOTE E\'\b\'')
get_table_count('patient', console_log=True)
for filename in practice_files:
log.log_message('Processing: ' + filename)
cur.execute(f'COPY {SOURCE_SCHEMA}.practice FROM \'{source_data_directory}\\practice\\{filename}\' WITH DELIMITER E\'\t\' CSV HEADER QUOTE E\'\b\'')
get_table_count('practice', console_log=True)
for filename in staff_files:
log.log_message('Processing: ' + filename)
cur.execute(f'COPY {SOURCE_SCHEMA}.staff FROM \'{source_data_directory}\\staff\\{filename}\' WITH DELIMITER E\'\t\' CSV HEADER QUOTE E\'\b\'')
get_table_count('staff', console_log=True)
for filename in consultation_files:
log.log_message('Processing: ' + filename)
cur.execute(f'COPY {SOURCE_SCHEMA}.consultation FROM \'{source_data_directory}\\consultation\\{filename}\' WITH DELIMITER E\'\t\' CSV HEADER QUOTE E\'\b\'')
get_table_count('consultation', console_log=True)
for filename in observation_files:
log.log_message('Processing: ' + filename)
cur.execute(f'COPY {SOURCE_SCHEMA}.observation FROM \'{source_data_directory}\\observation\\{filename}\' WITH DELIMITER E\'\t\' CSV HEADER QUOTE E\'\b\'')
get_table_count('observation', console_log=True)
for filename in drugissue_files:
log.log_message('Processing: ' + filename)
cur.execute(f'COPY {SOURCE_SCHEMA}.drugissue FROM \'{source_data_directory}\\drugissue\\{filename}\' WITH DELIMITER E\'\t\' CSV HEADER QUOTE E\'\b\'')
get_table_count('drugissue', console_log=True)
for filename in problem_files:
log.log_message('Processing: ' + filename)
cur.execute(f'COPY {SOURCE_SCHEMA}.problem FROM \'{source_data_directory}\\problem\\{filename}\' WITH DELIMITER E\'\t\' CSV HEADER QUOTE E\'\b\'')
get_table_count('problem', console_log=True)
for filename in referral_files:
log.log_message('Processing: ' + filename)
cur.execute(f'COPY {SOURCE_SCHEMA}.referral FROM \'{source_data_directory}\\referral\\{filename}\' WITH DELIMITER E\'\t\' CSV HEADER QUOTE E\'\b\'')
get_table_count('referral', console_log=True)
for query in open(os.getcwd() + '\\sql_scripts\\build_aurum_keys_index.sql').read().split(';'):
query = query.strip()
query = query.replace('{SOURCE_SCHEMA}', SOURCE_SCHEMA)
if query != '':
log.log_message(query)
cur.execute(query)
except Exception:
    log.log_message(str(sys.exc_info()[1]))
finally:
    if cnx is not None:
        cnx.close()
|
[
"os.getcwd",
"os.listdir",
"sys.exc_info",
"psycopg2.connect"
] |
[((365, 412), 'os.listdir', 'os.listdir', (["(source_data_directory + '\\\\patient')"], {}), "(source_data_directory + '\\\\patient')\n", (375, 412), False, 'import os\n'), ((430, 478), 'os.listdir', 'os.listdir', (["(source_data_directory + '\\\\practice')"], {}), "(source_data_directory + '\\\\practice')\n", (440, 478), False, 'import os\n'), ((493, 538), 'os.listdir', 'os.listdir', (["(source_data_directory + '\\\\staff')"], {}), "(source_data_directory + '\\\\staff')\n", (503, 538), False, 'import os\n'), ((560, 612), 'os.listdir', 'os.listdir', (["(source_data_directory + '\\\\consultation')"], {}), "(source_data_directory + '\\\\consultation')\n", (570, 612), False, 'import os\n'), ((633, 684), 'os.listdir', 'os.listdir', (["(source_data_directory + '\\\\observation')"], {}), "(source_data_directory + '\\\\observation')\n", (643, 684), False, 'import os\n'), ((703, 752), 'os.listdir', 'os.listdir', (["(source_data_directory + '\\\\drugissue')"], {}), "(source_data_directory + '\\\\drugissue')\n", (713, 752), False, 'import os\n'), ((769, 816), 'os.listdir', 'os.listdir', (["(source_data_directory + '\\\\problem')"], {}), "(source_data_directory + '\\\\problem')\n", (779, 816), False, 'import os\n'), ((834, 882), 'os.listdir', 'os.listdir', (["(source_data_directory + '\\\\referral')"], {}), "(source_data_directory + '\\\\referral')\n", (844, 882), False, 'import os\n'), ((1140, 1241), 'psycopg2.connect', 'sql.connect', ([], {'user': "db_conf['username']", 'password': "db_conf['password']", 'database': "db_conf['database']"}), "(user=db_conf['username'], password=db_conf['password'],\n database=db_conf['database'])\n", (1151, 1241), True, 'import psycopg2 as sql\n'), ((129, 140), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (138, 140), False, 'import os\n'), ((211, 222), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (220, 222), False, 'import os\n'), ((4352, 4366), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (4364, 4366), False, 'import sys\n'), ((4058, 4069), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4067, 4069), False, 'import os\n'), ((1408, 1419), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1417, 1419), False, 'import os\n')]
|
#!/usr/bin/env python3
"""This Python program stops the Hillview service on the machines specified in the
configuration file."""
# pylint: disable=invalid-name
from argparse import ArgumentParser
from hillviewCommon import ClusterConfiguration, get_config, get_logger
logger = get_logger("stop")
def stop_webserver(config):
"""Stops the Hillview web server"""
assert isinstance(config, ClusterConfiguration)
rh = config.get_webserver()
message = "Stopping web server on " + str(rh)
logger.info(message)
rh.run_remote_shell_command(config.service_folder + "/hillview-webserver-manager.sh stop")
def stop_worker(config, rh):
"""Stops a Hillview worker service on a remote machine"""
rh.run_remote_shell_command(config.service_folder + "/hillview-worker-manager.sh stop")
def stop_aggregator(config, rh):
"""Stops a Hillview aggregator service on a remote machine"""
rh.run_remote_shell_command(config.service_folder + "/hillview-aggregator-manager.sh stop")
def main():
"""Main function"""
parser = ArgumentParser()
parser.add_argument("config", help="json cluster configuration file")
args = parser.parse_args()
config = get_config(parser, args)
stop_webserver(config)
config.run_on_all_workers(lambda rh: stop_worker(config, rh))
config.run_on_all_aggregators(lambda rh: stop_aggregator(config, rh))
if __name__ == "__main__":
main()
|
[
"hillviewCommon.get_config",
"hillviewCommon.get_logger",
"argparse.ArgumentParser"
] |
[((283, 301), 'hillviewCommon.get_logger', 'get_logger', (['"""stop"""'], {}), "('stop')\n", (293, 301), False, 'from hillviewCommon import ClusterConfiguration, get_config, get_logger\n'), ((1055, 1071), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (1069, 1071), False, 'from argparse import ArgumentParser\n'), ((1190, 1214), 'hillviewCommon.get_config', 'get_config', (['parser', 'args'], {}), '(parser, args)\n', (1200, 1214), False, 'from hillviewCommon import ClusterConfiguration, get_config, get_logger\n')]
|
import copy
import logging
from itertools import chain
import numpy as np
import torch
from torch.nn import functional as F
from rltoolkit import config
from rltoolkit.algorithms.ddpg import DDPG
from rltoolkit.algorithms.sac.models import SAC_Actor, SAC_Critic
logger = logging.getLogger(__name__)
class SAC(DDPG):
def __init__(
self,
alpha_lr: float = config.ALPHA_LR,
alpha: float = config.ALPHA,
tau: float = config.TAU,
pi_update_freq: int = config.PI_UPDATE_FREQ,
act_noise: float = 0,
*args,
**kwargs,
):
f"""Soft Actor-Critic implementation
Args:
alpha_lr (float, optional): Learning rate of the alpha.
Defaults to { config.ALPHA_LR }.
alpha (float, optional): Initial alpha value. Defaults to { config.ALPHA }.
pi_update_freq (int, optional): Frequency of policy updates
(in SAC updates). Defaults to { config.PI_UPDATE_FREQ }.
act_noise (float, optional): Actions noise multiplier.
Defaults to { 0 }.
actor_lr (float, optional): Learning rate of the actor.
Defaults to { config.DDPG_LR }.
critic_lr (float, optional): Learning rate of the critic.
Defaults to { config.DDPG_LR }.
tau (float, optional): Tau coefficient for polyak averaging.
Defaults to { config.TAU }.
update_batch_size (int, optional): Batch size for gradient step.
Defaults to { config.UPDATE_BATCH_SIZE }.
buffer_size (int, optional): Size of replay buffer.
Defaults to { config.BUFFER_SIZE }.
random_frames (int, optional): Number of frames with random actions at
                the beginning. Defaults to { config.RANDOM_FRAMES }.
            update_freq (int, optional): Frequency of SAC updates (in frames).
Defaults to { config.UPDATE_FREQ }.
grad_steps (int, optional): Number of SAC updates for one step.
Defaults to { config.GRAD_STEPS }.
env_name (str, optional): Name of the gym environment.
Defaults to { config.ENV_NAME }.
gamma (float, optional): Discount factor. Defaults to { config.GAMMA }.
stats_freq (int, optional): Frequency of logging the progress.
Defaults to { config.STATS_FREQ }.
steps_per_epoch (int, optional): Number of frames used for one algorithm step
(could be higher because batch collection stops when rollout ends).
Defaults to { config.STEPS_PER_EPOCH }.
iterations (int, optional): Number of algorithms iterations.
Defaults to { config.ITERATIONS }.
max_frames (int, optional): Limit of frames for training.
Defaults to { None }.
return_done (Union[int, None], optional): target return, which will stop
training if reached. Defaults to { config.RETURN_DONE }.
log_dir (str, optional): Path for basic logs which includes final model.
Defaults to { config.LOG_DIR }.
use_gpu (bool, optional): Use CUDA. Defaults to { config.USE_GPU }.
tensorboard_dir (Union[str, None], optional): Path to tensorboard logs.
Defaults to { config.TENSORBOARD_DIR }.
tensorboard_comment (str, optional): Comment for tensorboard files.
Defaults to { config.TENSORBOARD_COMMENT }.
verbose (int, optional): Verbose level. Defaults to { config.VERBOSE }.
render (bool, optional): Render rollouts to tensorboard.
Defaults to { config.RENDER }.
"""
super().__init__(*args, **kwargs)
self._critic_1 = None
self.critic_1_optimizer = None
self.critic_1_targ = None
self._critic_2 = None
self.critic_2_optimizer = None
self.critic_2_targ = None
self.alpha_lr = alpha_lr
self.alpha = alpha
self.pi_update_freq = pi_update_freq
self.actor = SAC_Actor(self.ob_dim, self.ac_lim, self.ac_dim, self.discrete)
self.critic_1 = SAC_Critic(self.ob_dim, self.ac_dim, self.discrete)
self.critic_2 = SAC_Critic(self.ob_dim, self.ac_dim, self.discrete)
self.loss = {"actor": 0.0, "critic_1": 0.0, "critic_2": 0.0}
new_hparams = {
"hparams/alpha_lr": self.alpha_lr,
"hparams/alpha": self.alpha,
"hparams/pi_update_freq": self.pi_update_freq,
}
self.hparams.update(new_hparams)
self.target_entropy = -torch.prod(
torch.tensor(self.ac_dim, dtype=torch.float32)
).item()
self.log_alpha = torch.tensor(
np.log(self.alpha), requires_grad=True, device=self.device
)
self.alpha_opt = self.opt([self.log_alpha], lr=alpha_lr)
@property
def actor(self):
return self._actor
@actor.setter
def actor(self, model: torch.nn.Module):
self._actor, self.actor_optimizer = self.set_model(model, self.actor_lr)
@property
def critic_1(self):
return self._critic_1
@critic_1.setter
def critic_1(self, model: torch.nn.Module):
self._critic_1, self.critic_1_optimizer = self.set_model(model, self.critic_lr)
self.critic_1_targ = copy.deepcopy(self._critic_1)
@property
def critic_2(self):
return self._critic_2
@critic_2.setter
def critic_2(self, model: torch.nn.Module):
self._critic_2, self.critic_2_optimizer = self.set_model(model, self.critic_lr)
self.critic_2_targ = copy.deepcopy(self._critic_2)
def compute_qfunc_targ(
self, reward: torch.Tensor, next_obs: torch.Tensor, done: torch.Tensor
):
"""Compute targets for Q-functions
Args:
reward (torch.Tensor): batch of rewards
next_obs (torch.Tensor): batch of next observations
done (torch.Tensor): batch of done
Returns:
torch.Tensor: Q-function targets for the batch
"""
with torch.no_grad():
sampled_next_action, sampled_next_logprob = self._actor(next_obs)
q1_target = self.critic_1_targ(next_obs, sampled_next_action)
q2_target = self.critic_2_targ(next_obs, sampled_next_action)
q_target = torch.min(q1_target, q2_target)
qfunc_target = reward + self.gamma * (1 - done) * (
q_target - self.alpha * sampled_next_logprob
)
return qfunc_target
def compute_pi_loss(
self,
obs: torch.Tensor,
sampled_action: torch.Tensor,
sampled_logprob: torch.Tensor,
):
"""Loss for the policy
Args:
obs (torch.Tensor): batch of observations
sampled_action (torch.Tensor): actions sampled from policy
sampled_logprob (torch.Tensor): log-probabilities of actions
Returns:
torch.Tensor: policy loss
"""
q1 = self._critic_1(obs, sampled_action)
q2 = self._critic_2(obs, sampled_action)
q = torch.min(q1, q2)
loss = (self.alpha * sampled_logprob - q).mean()
return loss
def update_target_q(self):
"""Update target networks with Polyak averaging
"""
with torch.no_grad():
# Polyak averaging:
critics_params = chain(
self._critic_1.parameters(), self._critic_2.parameters()
)
targets_params = chain(
self.critic_1_targ.parameters(), self.critic_2_targ.parameters()
)
for q_params, targ_params in zip(critics_params, targets_params):
targ_params.data.mul_(1 - self.tau)
targ_params.data.add_((self.tau) * q_params.data)
def compute_alpha_loss(self, sampled_logprob: torch.Tensor):
"""Compute loss for temperature update
Args:
sampled_logprob (torch.Tensor): batch of sampled log-probabilities
from the actor
Returns:
torch.Tensor: loss for temperature (alpha)
"""
# alpha_loss = (
# self.log_alpha * (-sampled_logprob.detach() - self.target_entropy)
# ).mean()
sampled_logprob = sampled_logprob.detach()
alpha_loss = self.log_alpha.exp() * (-sampled_logprob - self.target_entropy)
return alpha_loss.mean()
def update(
self,
obs: torch.Tensor,
next_obs: torch.Tensor,
action: torch.Tensor,
reward: torch.Tensor,
done: torch.Tensor,
):
"""Soft Actor-Critic update:
Args:
obs (torch.Tensor): observations tensor
next_obs (torch.Tensor): next observations tensor
action (torch.Tensor): actions tensor
reward (torch.Tensor): rewards tensor
done (torch.Tensor): dones tensor
"""
y = self.compute_qfunc_targ(reward, next_obs, done)
# Update Q-functions by one step
y_q1 = self._critic_1(obs, action)
loss_q1 = F.mse_loss(y_q1, y)
y_q2 = self._critic_2(obs, action)
loss_q2 = F.mse_loss(y_q2, y)
self.loss["critic_1"] = loss_q1.item()
self.loss["critic_2"] = loss_q2.item()
self.critic_1_optimizer.zero_grad()
loss_q1.backward()
self.critic_1_optimizer.step()
self.critic_2_optimizer.zero_grad()
loss_q2.backward()
self.critic_2_optimizer.step()
# Update policy by one step
self._critic_1.eval()
self._critic_2.eval()
sampled_action, sampled_logprob = self._actor(obs)
# if self.stats_logger.frames % (self.update_freq * self.pi_update_freq) == 0:
loss = self.compute_pi_loss(obs, sampled_action, sampled_logprob)
self.loss["actor"] = loss.item()
self.actor_optimizer.zero_grad()
loss.backward()
self.actor_optimizer.step()
# Update target networks
self.update_target_q()
self._critic_1.train()
self._critic_2.train()
# Update temperature
alpha_loss = self.compute_alpha_loss(sampled_logprob)
self.alpha_opt.zero_grad()
alpha_loss.backward()
self.alpha_opt.step()
self.alpha = self.log_alpha.exp().item()
def add_tensorboard_logs(self, *args, **kwargs):
super().add_tensorboard_logs(*args, **kwargs)
if self.debug_mode:
self.tensorboard_writer.log_sac_alpha(self.iteration, self.alpha)
def collect_params_dict(self):
params_dict = {}
params_dict["actor"] = self.actor.state_dict()
params_dict["critic_1"] = self.critic_1.state_dict()
params_dict["critic_2"] = self.critic_2.state_dict()
params_dict["obs_mean"] = self.replay_buffer.obs_mean
params_dict["obs_std"] = self.replay_buffer.obs_std
params_dict["min_obs"] = self.replay_buffer.min_obs
params_dict["max_obs"] = self.replay_buffer.max_obs
return params_dict
def apply_params_dict(self, params_dict):
self.actor.load_state_dict(params_dict["actor"])
self.critic_1.load_state_dict(params_dict["critic_1"])
self.critic_2.load_state_dict(params_dict["critic_2"])
self.obs_mean = params_dict["obs_mean"]
self.obs_std = params_dict["obs_std"]
self.min_obs = params_dict["min_obs"]
self.max_obs = params_dict["max_obs"]
self.replay_buffer.obs_mean = self.obs_mean
self.replay_buffer.obs_std = self.obs_std
self.replay_buffer.min_obs = self.min_obs
self.replay_buffer.max_obs = self.max_obs
def save_model(self, save_path=None) -> str:
if self.filename is None and save_path is None:
raise AttributeError
elif save_path is None:
save_path = str(self.log_path)
torch.save(self._actor.state_dict(), save_path + "_actor_model.pt")
torch.save(self._critic_1.state_dict(), save_path + "_critic_1_model.pt")
torch.save(self._critic_2.state_dict(), save_path + "_critic_2_model.pt")
return save_path
if __name__ == "__main__":
#with torch.cuda.device(1):
model = SAC(
env_name="HalfCheetah-v2",
iterations=200,
gamma=0.99,
steps_per_epoch=1000,
stats_freq=5,
test_episodes=2,
update_batch_size=100,
update_freq=50,
grad_steps=50,
# random_frames=10000,
use_gpu=True,
obs_norm=False,
tensorboard_dir="logs_norm",
tensorboard_comment="",
)
model.train()
|
[
"copy.deepcopy",
"torch.tensor",
"numpy.log",
"torch.nn.functional.mse_loss",
"rltoolkit.algorithms.sac.models.SAC_Actor",
"rltoolkit.algorithms.sac.models.SAC_Critic",
"torch.no_grad",
"torch.min",
"logging.getLogger"
] |
[((274, 301), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (291, 301), False, 'import logging\n'), ((4135, 4198), 'rltoolkit.algorithms.sac.models.SAC_Actor', 'SAC_Actor', (['self.ob_dim', 'self.ac_lim', 'self.ac_dim', 'self.discrete'], {}), '(self.ob_dim, self.ac_lim, self.ac_dim, self.discrete)\n', (4144, 4198), False, 'from rltoolkit.algorithms.sac.models import SAC_Actor, SAC_Critic\n'), ((4223, 4274), 'rltoolkit.algorithms.sac.models.SAC_Critic', 'SAC_Critic', (['self.ob_dim', 'self.ac_dim', 'self.discrete'], {}), '(self.ob_dim, self.ac_dim, self.discrete)\n', (4233, 4274), False, 'from rltoolkit.algorithms.sac.models import SAC_Actor, SAC_Critic\n'), ((4299, 4350), 'rltoolkit.algorithms.sac.models.SAC_Critic', 'SAC_Critic', (['self.ob_dim', 'self.ac_dim', 'self.discrete'], {}), '(self.ob_dim, self.ac_dim, self.discrete)\n', (4309, 4350), False, 'from rltoolkit.algorithms.sac.models import SAC_Actor, SAC_Critic\n'), ((5412, 5441), 'copy.deepcopy', 'copy.deepcopy', (['self._critic_1'], {}), '(self._critic_1)\n', (5425, 5441), False, 'import copy\n'), ((5698, 5727), 'copy.deepcopy', 'copy.deepcopy', (['self._critic_2'], {}), '(self._critic_2)\n', (5711, 5727), False, 'import copy\n'), ((7206, 7223), 'torch.min', 'torch.min', (['q1', 'q2'], {}), '(q1, q2)\n', (7215, 7223), False, 'import torch\n'), ((9203, 9222), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['y_q1', 'y'], {}), '(y_q1, y)\n', (9213, 9222), True, 'from torch.nn import functional as F\n'), ((9284, 9303), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['y_q2', 'y'], {}), '(y_q2, y)\n', (9294, 9303), True, 'from torch.nn import functional as F\n'), ((4814, 4832), 'numpy.log', 'np.log', (['self.alpha'], {}), '(self.alpha)\n', (4820, 4832), True, 'import numpy as np\n'), ((6166, 6181), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6179, 6181), False, 'import torch\n'), ((6432, 6463), 'torch.min', 'torch.min', (['q1_target', 'q2_target'], {}), '(q1_target, q2_target)\n', (6441, 6463), False, 'import torch\n'), ((7415, 7430), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7428, 7430), False, 'import torch\n'), ((4699, 4745), 'torch.tensor', 'torch.tensor', (['self.ac_dim'], {'dtype': 'torch.float32'}), '(self.ac_dim, dtype=torch.float32)\n', (4711, 4745), False, 'import torch\n')]
|
import argparse
import asyncio
import shlex
import sys
import pathlib
from asyncio.subprocess import PIPE
async def tee(stream, streams, prefix):
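    # Fan every line read from `stream` out to each sink in `streams`, prepending
    # the matching prefix; a final fragment without a newline is marked as such.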
line = await stream.readline()
while line.endswith(b'\n'):
for s, p in zip(streams, prefix):
s.write(p + line)
if hasattr(s, 'flush'):
s.flush()
line = await stream.readline()
if line:
for s, p in zip(streams, prefix):
s.write(p + line + b" % No new line\n")
if hasattr(s, 'flush'):
s.flush()
async def show_exit_code(process, prefix):
print(prefix, await process.wait(), sep='')
async def async_main(argv=sys.argv):
parser = argparse.ArgumentParser(pathlib.Path(argv[0]).name)
parser.add_argument("program1", help="Command to execute first program")
parser.add_argument("program2", help="Command to execute second program")
parser.add_argument('--disable-stdout', default=False, action="store_true", help="Do not show stdout")
parser.add_argument("--program1-stdout-prefix",
metavar="PREFIX",
default="Program 1 (stdout): ",
help="Prefix to add before the first program's stdout")
parser.add_argument("--program1-stderr-prefix",
metavar="PREFIX",
default="Program 1 (stderr): ",
help="Prefix to add before the first program's stderr")
parser.add_argument("--program2-stdout-prefix",
metavar="PREFIX",
default="Program 2 (stdout): ",
help="Prefix to add before the second program's stdout")
parser.add_argument("--program2-stderr-prefix",
metavar="PREFIX",
default="Program 2 (stderr): ",
help="Prefix to add before the second program's stderr")
args = parser.parse_args(argv[1:])
process_1 = await asyncio.create_subprocess_exec(*shlex.split(args.program1), stdin=PIPE, stdout=PIPE, stderr=PIPE)
process_2 = await asyncio.create_subprocess_exec(*shlex.split(args.program2), stdin=PIPE, stdout=PIPE, stderr=PIPE)
program1_stdout_prefix = args.program1_stdout_prefix.encode("utf-8")
program1_stderr_prefix = args.program1_stderr_prefix.encode("utf-8")
program2_stdout_prefix = args.program2_stdout_prefix.encode("utf-8")
program2_stderr_prefix = args.program2_stderr_prefix.encode("utf-8")
process_1_stdout_tee = [process_2.stdin]
process_1_stdout_tee_prefixes = [b""]
process_2_stdout_tee = [process_1.stdin]
process_2_stdout_tee_prefixes = [b""]
if not args.disable_stdout:
process_1_stdout_tee.append(sys.stdout.buffer)
process_1_stdout_tee_prefixes.append(program1_stdout_prefix)
process_2_stdout_tee.append(sys.stdout.buffer)
process_2_stdout_tee_prefixes.append(program2_stdout_prefix)
await asyncio.gather(
tee(process_1.stdout, process_1_stdout_tee, process_1_stdout_tee_prefixes),
tee(process_2.stdout, process_2_stdout_tee, process_2_stdout_tee_prefixes),
tee(process_1.stderr, [sys.stdout.buffer], [program1_stderr_prefix]),
tee(process_2.stderr, [sys.stdout.buffer], [program2_stderr_prefix]),
show_exit_code(process_1, "Program 1 Exited with Code: "),
show_exit_code(process_2, "Program 2 Exited with Code: "),
)
def main(argv=sys.argv):
asyncio.run(async_main(argv))
if __name__ == "__main__":
main()
|
[
"pathlib.Path",
"shlex.split"
] |
[((727, 748), 'pathlib.Path', 'pathlib.Path', (['argv[0]'], {}), '(argv[0])\n', (739, 748), False, 'import pathlib\n'), ((2034, 2060), 'shlex.split', 'shlex.split', (['args.program1'], {}), '(args.program1)\n', (2045, 2060), False, 'import shlex\n'), ((2154, 2180), 'shlex.split', 'shlex.split', (['args.program2'], {}), '(args.program2)\n', (2165, 2180), False, 'import shlex\n')]
|
# Copyright 2017 Mycroft AI, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from padatious.match_data import MatchData
class TestMatchData:
def setup(self):
self.match = MatchData('name', ['one', 'two'], {'{word}': ['value', 'tokens']}, 0.5)
def test_detokenize(self):
self.match.detokenize()
assert self.match.sent == 'one two'
correct_match = MatchData('name', 'one two', {'word': 'value tokens'}, 0.5)
assert self.match.__dict__ == correct_match.__dict__
|
[
"padatious.match_data.MatchData"
] |
[((689, 760), 'padatious.match_data.MatchData', 'MatchData', (['"""name"""', "['one', 'two']", "{'{word}': ['value', 'tokens']}", '(0.5)'], {}), "('name', ['one', 'two'], {'{word}': ['value', 'tokens']}, 0.5)\n", (698, 760), False, 'from padatious.match_data import MatchData\n'), ((894, 953), 'padatious.match_data.MatchData', 'MatchData', (['"""name"""', '"""one two"""', "{'word': 'value tokens'}", '(0.5)'], {}), "('name', 'one two', {'word': 'value tokens'}, 0.5)\n", (903, 953), False, 'from padatious.match_data import MatchData\n')]
|
# -*- coding: utf-8 -*-
# snapshottest: v1 - https://goo.gl/zC4yUc
from __future__ import unicode_literals
from snapshottest import Snapshot
snapshots = Snapshot()
snapshots['TestCase01GetPayThroughDetailsAPITestCase::test_case status'] = 200
snapshots['TestCase01GetPayThroughDetailsAPITestCase::test_case body'] = {
'upi_id': '12345678900@SBI'
}
snapshots['TestCase01GetPayThroughDetailsAPITestCase::test_case header_params'] = {
'content-language': [
'Content-Language',
'en'
],
'content-length': [
'29',
'Content-Length'
],
'content-type': [
'Content-Type',
'text/html; charset=utf-8'
],
'vary': [
'Accept-Language, Origin, Cookie',
'Vary'
],
'x-frame-options': [
'SAMEORIGIN',
'X-Frame-Options'
]
}
snapshots['TestCase01GetPayThroughDetailsAPITestCase::test_case upi_id'] = '12345678900@SBI'
|
[
"snapshottest.Snapshot"
] |
[((156, 166), 'snapshottest.Snapshot', 'Snapshot', ([], {}), '()\n', (164, 166), False, 'from snapshottest import Snapshot\n')]
|
# Our approach to model groups is:
# * Build an NFA out of them.
# * Translate our NFA to automata-lib's NFA.
# * Using automata-lib, convert the NFA to a DFA.
# * Using python-automata, minify the DFA.
# * Rename the DFA states.
# * Return the DFA, where it would be emitted as C++ code.
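# A minimal sketch of the first three steps on a toy one-letter NFA (names are
# illustrative only; the real construction lives in dfa_from_group below):
#
#     toy = NFA(states={"q0", "q1"}, input_symbols={"a"},
#               transitions={"q0": {"a": {"q1"}}, "q1": {}},
#               initial_state="q0", final_states={"q1"})
#     toy_dfa = DFA.from_nfa(toy)  # subset construction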
import xmlschema # type: ignore
from xmlschema.validators import ( # type: ignore
XsdElement,
XsdGroup
)
from automata.fa.dfa import DFA # type: ignore
from automata.fa.nfa import NFA # type: ignore
from .third_party.DFA import DFA as pDFA # type: ignore
from itertools import permutations
from typing import List, Tuple, Dict, Set, Union
from pprint import pprint
class XsdDFA:
states: Set[int]
start: int
accepts: Set[int]
alphabet: List[str]
transitions: Dict[int, Dict[str, int]]
def dfa_from_group(t: XsdGroup) -> XsdDFA:
# Fill in a NFA of automata-lib type.
_nfa_states: Set[str] = set()
_nfa_state_transitions: Dict[str, Dict[str, Set[Union[str, None]]]] = {}
_alphabet: List[str] = []
def _new_state() -> str:
x = "q%d" % len(_nfa_states)
_nfa_states.add(x)
return x
def _remove_state(x: str):
_nfa_states.remove(x)
_nfa_state_transitions.pop(x)
def _add_transition(state: str, input: str, next: Union[str, None]):
if _nfa_state_transitions.get(state) is None:
_nfa_state_transitions[state] = {input: {next}}
else:
if _nfa_state_transitions[state].get(input) is None:
_nfa_state_transitions[state][input] = {next}
else:
_nfa_state_transitions[state][input].add(next)
def _patch(state: str, next: str):
for k, v in _nfa_state_transitions[state].items():
_nfa_state_transitions[state][k] = {next if x is None else x for x in v}
# start --a-->
def _nfa_from_element(t: XsdElement) -> Tuple[str, Set[str]]:
x = _new_state()
_add_transition(x, t.name, None)
_alphabet.append(t.name)
return (x, {x})
# start --a-> O --b--> O --c-->
def _nfa_from_sequence(t: XsdGroup) -> Tuple[str, Set[str]]:
init, vacant = _nfa_from_node(t._group[0])
for e in t._group[1:]:
init2, vacant2 = _nfa_from_node(e)
for v in vacant:
_patch(v, init2)
vacant = vacant2
return (init, vacant)
# |-----a->
# start --b->
# |-----c->
def _nfa_from_choice(t: XsdGroup) -> Tuple[str, Set[str]]:
x = _new_state()
vacants: Set[str] = set()
for e in t._group:
init, vacant = _nfa_from_node(e)
_add_transition(x, "", init)
vacants |= vacant
return (x, vacants)
# Generate a NFA from a model group or element.
# Return start state and "before-final" states which include transitions
# to None which denote vacant out-going arrows.
def _nfa_from_node(t: Union[XsdElement, XsdGroup]) -> Tuple[str, Set[str]]:
if isinstance(t, XsdElement):
init, vacant = _nfa_from_element(t)
elif t.model == "sequence":
init, vacant = _nfa_from_sequence(t)
elif t.model == "choice":
init, vacant = _nfa_from_choice(t)
elif t.model == "all":
raise NotImplementedError("Only top-level <xs:all> is supported.")
else:
raise NotImplementedError("I don't know what to do with model group node %s." % t)
if t.occurs == [1, 1]:
return (init, vacant)
elif t.occurs == [0, 1]:
_add_transition(init, "", None)
vacant.add(init)
return (init, vacant)
elif t.occurs == [0, None]:
for v in vacant:
_patch(v, init)
_add_transition(init, "", None)
return (init, {init})
elif t.occurs == [1, None]:
next = _new_state()
for v in vacant:
_patch(v, next)
_add_transition(next, "", init)
_add_transition(next, "", None)
return (init, {next})
else:
raise NotImplementedError("(min_occurs, max_occurs) pair %s is not supported" % t.occurs)
init, finals = _nfa_from_node(t)
final = _new_state()
_nfa_state_transitions[final] = {}
for f in finals:
_patch(f, final)
input_symbols: Set[str] = set()
for v in _nfa_state_transitions.values():
input_symbols |= set(v.keys())
# Remove epsilon from the alphabet.
if "" in input_symbols: input_symbols.remove("")
nfa = NFA(states=_nfa_states,
input_symbols=input_symbols,
transitions=_nfa_state_transitions,
initial_state=init,
final_states={final})
dfa = DFA.from_nfa(nfa)
pdfa = pDFA(dfa.states,
dfa.input_symbols,
lambda q,c: dfa.transitions[q][c],
dfa.initial_state,
dfa.final_states)
pdfa.minimize()
# "{}" comes from automata-lib and means trap state.
# We will filter transitions to the trap state out and emit
# an error message if a state transition is not found for the given input.
#
# Sets are converted to lists, because this is a final format and we need ordering.
state_map = {k: v for v, k in enumerate([x for x in pdfa.states if x != "{}"])}
out = XsdDFA()
out.states = {state_map[x] for x in pdfa.states if x != "{}"}
out.start = state_map[pdfa.start]
out.accepts = {state_map[x] for x in pdfa.accepts if x != "{}"}
out.alphabet = _alphabet
out.transitions = {state_map[q]: {k: state_map[pdfa.delta(q, k)] for k in pdfa.alphabet if pdfa.delta(q, k) != "{}"} for q in pdfa.states if q != "{}"}
return out
|
[
"automata.fa.dfa.DFA.from_nfa",
"automata.fa.nfa.NFA"
] |
[((3998, 4133), 'automata.fa.nfa.NFA', 'NFA', ([], {'states': '_nfa_states', 'input_symbols': 'input_symbols', 'transitions': '_nfa_state_transitions', 'initial_state': 'init', 'final_states': '{final}'}), '(states=_nfa_states, input_symbols=input_symbols, transitions=\n _nfa_state_transitions, initial_state=init, final_states={final})\n', (4001, 4133), False, 'from automata.fa.nfa import NFA\n'), ((4148, 4165), 'automata.fa.dfa.DFA.from_nfa', 'DFA.from_nfa', (['nfa'], {}), '(nfa)\n', (4160, 4165), False, 'from automata.fa.dfa import DFA\n')]
|
import requests
from datetime import datetime, timedelta
import xml.etree.ElementTree as ET
from urllib import parse
class Covid19:
    items = None
    covid19Url = None
    c19Key = None
    covid19QueryParams = None
def __init__(self):
        # Today's date; the check below treats 10:30 (HHMMSS > 103000) as the
        # cut-off after which the current day's figures are requested.
todayDate = datetime.today()
if int(todayDate.strftime("%H%M%S")) > 103000:
startDate = (todayDate - timedelta(days=1)).strftime("%Y%m%d")
endDate = todayDate.strftime("%Y%m%d")
else:
startDate = (todayDate - timedelta(days=2)).strftime("%Y%m%d")
endDate = (todayDate - timedelta(days=1)).strftime("%Y%m%d")
# API URL
self.covid19Url = 'http://openapi.data.go.kr/openapi/service/rest/Covid19/getCovid19InfStateJson'
        # Load the service key
c19KeyFile = open('./covid19/servicekey.txt', 'r')
self.c19Key = c19KeyFile.readline()
# Parameters
covid19QueryParams = '?' + parse.urlencode({parse.quote_plus('ServiceKey'): self.c19Key,
parse.quote_plus('pageNo'): '1',
parse.quote_plus('numOfRows'): '10',
parse.quote_plus('startCreateDt'): startDate,
parse.quote_plus('endCreateDt'): endDate})
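        # The assembled query string has the shape (dates are illustrative):
        #   ?ServiceKey=...&pageNo=1&numOfRows=10&startCreateDt=20210101&endCreateDt=20210102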
# Response
covid19XMLData = requests.get(self.covid19Url + covid19QueryParams).text
# Text to XML
covid19Tree = ET.fromstring(covid19XMLData)
        # Get items (the COVID-19 API returns the most recent data first)
self.items = covid19Tree.findall('body/items/item')
def updateData(self):
        # Today's date
todayDate = datetime.today()
print(todayDate.strftime("%H%M%S"))
if int(todayDate.strftime("%H%M%S")) > 103000:
startDate = (todayDate - timedelta(days=1)).strftime("%Y%m%d")
endDate = todayDate.strftime("%Y%m%d")
else:
startDate = (todayDate - timedelta(days=2)).strftime("%Y%m%d")
endDate = (todayDate - timedelta(days=1)).strftime("%Y%m%d")
covid19QueryParams = '?' + parse.urlencode({parse.quote_plus('ServiceKey'): self.c19Key,
parse.quote_plus('pageNo'): '1',
parse.quote_plus('numOfRows'): '10',
parse.quote_plus('startCreateDt'): startDate,
parse.quote_plus('endCreateDt'): endDate})
# Response
covid19XMLData = requests.get(self.covid19Url + covid19QueryParams).text
# Text to XML
covid19Tree = ET.fromstring(covid19XMLData)
        # Get items (the COVID-19 API returns the most recent data first)
self.items = covid19Tree.findall('body/items/item')
def getTodayDecideCount(self):
return int(self.items[0].find("decideCnt").text) - int(self.items[1].find("decideCnt").text)
def getTodayDeathCount(self):
return int(self.items[0].find("deathCnt").text) - int(self.items[1].find("deathCnt").text)
|
[
"datetime.datetime.today",
"xml.etree.ElementTree.fromstring",
"urllib.parse.quote_plus",
"datetime.timedelta",
"requests.get"
] |
[((304, 320), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (318, 320), False, 'from datetime import datetime, timedelta\n'), ((1553, 1582), 'xml.etree.ElementTree.fromstring', 'ET.fromstring', (['covid19XMLData'], {}), '(covid19XMLData)\n', (1566, 1582), True, 'import xml.etree.ElementTree as ET\n'), ((1756, 1772), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (1770, 1772), False, 'from datetime import datetime, timedelta\n'), ((2774, 2803), 'xml.etree.ElementTree.fromstring', 'ET.fromstring', (['covid19XMLData'], {}), '(covid19XMLData)\n', (2787, 2803), True, 'import xml.etree.ElementTree as ET\n'), ((1452, 1502), 'requests.get', 'requests.get', (['(self.covid19Url + covid19QueryParams)'], {}), '(self.covid19Url + covid19QueryParams)\n', (1464, 1502), False, 'import requests\n'), ((2673, 2723), 'requests.get', 'requests.get', (['(self.covid19Url + covid19QueryParams)'], {}), '(self.covid19Url + covid19QueryParams)\n', (2685, 2723), False, 'import requests\n'), ((995, 1025), 'urllib.parse.quote_plus', 'parse.quote_plus', (['"""ServiceKey"""'], {}), "('ServiceKey')\n", (1011, 1025), False, 'from urllib import parse\n'), ((1092, 1118), 'urllib.parse.quote_plus', 'parse.quote_plus', (['"""pageNo"""'], {}), "('pageNo')\n", (1108, 1118), False, 'from urllib import parse\n'), ((1177, 1206), 'urllib.parse.quote_plus', 'parse.quote_plus', (['"""numOfRows"""'], {}), "('numOfRows')\n", (1193, 1206), False, 'from urllib import parse\n'), ((1266, 1299), 'urllib.parse.quote_plus', 'parse.quote_plus', (['"""startCreateDt"""'], {}), "('startCreateDt')\n", (1282, 1299), False, 'from urllib import parse\n'), ((1364, 1395), 'urllib.parse.quote_plus', 'parse.quote_plus', (['"""endCreateDt"""'], {}), "('endCreateDt')\n", (1380, 1395), False, 'from urllib import parse\n'), ((2216, 2246), 'urllib.parse.quote_plus', 'parse.quote_plus', (['"""ServiceKey"""'], {}), "('ServiceKey')\n", (2232, 2246), False, 'from urllib import parse\n'), ((2313, 2339), 'urllib.parse.quote_plus', 'parse.quote_plus', (['"""pageNo"""'], {}), "('pageNo')\n", (2329, 2339), False, 'from urllib import parse\n'), ((2398, 2427), 'urllib.parse.quote_plus', 'parse.quote_plus', (['"""numOfRows"""'], {}), "('numOfRows')\n", (2414, 2427), False, 'from urllib import parse\n'), ((2487, 2520), 'urllib.parse.quote_plus', 'parse.quote_plus', (['"""startCreateDt"""'], {}), "('startCreateDt')\n", (2503, 2520), False, 'from urllib import parse\n'), ((2585, 2616), 'urllib.parse.quote_plus', 'parse.quote_plus', (['"""endCreateDt"""'], {}), "('endCreateDt')\n", (2601, 2616), False, 'from urllib import parse\n'), ((414, 431), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (423, 431), False, 'from datetime import datetime, timedelta\n'), ((554, 571), 'datetime.timedelta', 'timedelta', ([], {'days': '(2)'}), '(days=2)\n', (563, 571), False, 'from datetime import datetime, timedelta\n'), ((627, 644), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (636, 644), False, 'from datetime import datetime, timedelta\n'), ((1911, 1928), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (1920, 1928), False, 'from datetime import datetime, timedelta\n'), ((2051, 2068), 'datetime.timedelta', 'timedelta', ([], {'days': '(2)'}), '(days=2)\n', (2060, 2068), False, 'from datetime import datetime, timedelta\n'), ((2124, 2141), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (2133, 2141), False, 'from datetime import datetime, timedelta\n')]
|
import asyncio
import copy
import random
import sys
import time
from typing import Callable, Dict, List, NoReturn
from abc import ABCMeta, abstractclassmethod, abstractmethod
import praft.exceptions
from praft.typing import T_URL
from praft.log import Entity, EntityType, ConfChangeType, ConfChangeData
from praft.message import Message, MessageType, HeartBeatReq, HeartBeatRes,\
PrevVoteReq, PrevVoteRes, VoteReq, VoteRes, StartElectionReq,\
StartElectionRes, InstallSnapshotReq, InstallSnapshotRes, LearnReq,\
LearnRes, LearnerJoinReq, LearnerJoinRes, ErrorMessage
from praft.peer import Peer
from praft.conf import Conf
from praft.state_machine import StateMachine
from praft.log_storage import LogStorage
class StateResult(object):
def __init__(self, state: 'State', data: dict):
self.state: 'State' = state
self.data: object = data
class State(metaclass=ABCMeta):
def __init__(self):
self.id: int = None
self.conf: Conf = None
self.peer_factory: Callable[[int, T_URL], Peer] = None
self.state_machine: StateMachine = None
self.peers: List[Peer] = []
self.current_term: int = 0
self.leader_id = 0
self.voted_for: int = None
self.logs: LogStorage = None
        self.commit_index: int = 0  # highest index known to be committed
        self.last_applied: int = 0  # highest index already applied on this node
        self.election_timeout: int = 0  # configured timeout randomised by about 10%
        # Number of election timeouts since a leader was last confirmed; used by
        # the prevote check and reset each time the leader replicates logs to us.
self.election_timeout_time: int = 0
        # observers of state-change events
self.observers: List[Callable[[State, State], NoReturn]] = []
        self.role_changed = False  # set when the role changed but this object was not reclaimed yet.
        # metadata of the most recent snapshot
self.snapshot_installing = False
self.snapshot_last_included_index: int = 0
self.snapshot_last_included_term: int = 0
@classmethod
def new(cls,
id: int,
conf: Conf,
peer_factory: Callable[[int, T_URL], Peer],
state_machine: StateMachine,
log_storage: LogStorage):
state = cls()
state.id = id
state.conf: Conf = conf
state.peer_factory: Callable[[int, T_URL], Peer] = peer_factory
state.state_machine: StateMachine = state_machine
state.peers: List[Peer] = []
state.current_term: int = 0
state.voted_for: int = None
state.logs: LogStorage = log_storage
return state
def get_quorum(self):
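        # Quorum is a strict majority of the voting members. Example: 4 remote
        # peers plus this node means 5 voters, so the quorum is int(5 / 2) + 1 = 3;
        # learners are excluded from the count.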
if hasattr(self, "learners"):
return int((len(self.peers) - len(self.learners) + 1) / 2) + 1
else:
return int((len(self.peers) + 1) / 2) + 1
def get_leader(self):
if self.leader_id < 1:
raise praft.exceptions.NotFoundLeader()
for peer in self.peers:
if peer.get_id() == self.leader_id:
return peer
raise praft.exceptions.NotFoundLeader()
def copy(self, old_state: 'State') -> NoReturn:
properties = vars(old_state)
del(properties["role_changed"])
for p, v in properties.items():
if hasattr(self, p):
setattr(self, p, v)
@abstractclassmethod
def become(cls, old_state: 'State') -> 'State':
pass
@abstractmethod
def message(self, msg: Message):
pass
def add_peer(self, peer: Peer):
if peer.get_id() == self.id:
return
self.peers.append(peer)
def commit(self, index: int):
self.commit_index = index
def apply(self):
if self.commit_index == self.last_applied:
return
for i in self.logs.read_from(
self.last_applied + 1,
(self.commit_index - self.last_applied) + 1):
if i.index > self.commit_index:
break
if i.type is EntityType.conf_change:
self.apply_config_change(i)
            # Fall through: conf-change entries are also stored in the log,
            # which aids monitoring and lets newly joined nodes fetch them.
self.state_machine.apply(i)
self.last_applied = i.index
        if not (self.last_applied % 3):  # compact the log every 3 applied entries
self.create_snapshot(i.index, i.term)
@abstractmethod
def tick(self) -> StateResult:
pass
    def read(self, param) -> object:
        raise praft.exceptions.NeedLeaderHandle("Must be handled by the leader.")
    def propose(self, data: object):
        raise praft.exceptions.NeedLeaderHandle("Must be handled by the leader.")
    def remove_node(self, url: T_URL):
        raise praft.exceptions.NeedLeaderHandle("Must be handled by the leader.")
    def leadership_transfer(self):
        raise praft.exceptions.NeedLeaderHandle("Must be handled by the leader.")
def create_snapshot(self, index: int, term: int):
self.snapshot_last_included_index = index
self.snapshot_last_included_term = term
self.logs.create_snapshot(self.last_applied, self.state_machine)
async def send_snapshot(self, peer: Peer):
for i in self.logs.read_snapshot():
conf = {
"election_timeout": self.conf.election_timeout,
"heartbeat_interval": self.conf.heartbeat_interval,
}
peers = []
for j in self.peers:
peers.append((j.get_id(), *j.get_url()))
            peers.append((self.id, *self.conf.listen_url))  # include this node
msg = InstallSnapshotReq.new(self.current_term, self.leader_id,
self.snapshot_last_included_index,
self.snapshot_last_included_term,
i.offset, i.data, i.done, conf, peers,
self.id, peer.id)
await peer.send(msg)
if hasattr(self, "next_index"):
self.next_index[peer.get_id()] = self.snapshot_last_included_index
def update_term(self, term: int):
self.current_term = term
self.voted_for = 0
def logs_append(self, entity: Entity):
self.logs.append(entity)
def get_heartbeat_interval(self):
return self.conf.heartbeat_interval / 1000
    def get_election_timeout(self) -> float:
        if not self.election_timeout:
            # Add up to +/-10% random jitter, e.g. a configured 1000 ms
            # becomes anywhere in [900, 1100] ms. randint needs ints.
            jitter = int(self.conf.election_timeout * 0.1)
            self.election_timeout = self.conf.election_timeout +\
                random.randint(-jitter, jitter)
        return self.election_timeout / 1000
def apply_config_change(self, entity: Entity):
if entity.type is not EntityType.conf_change:
return
data = entity.data
if data.type is ConfChangeType.add_node:
self.add_peer(self.peer_factory(data.id, data.url))
if data.type is ConfChangeType.remove_node:
for i in self.peers:
                if i.get_id() == data.id:
self.peers.remove(i)
break
def get_now_millisecond(self) -> int:
return int(time.time() * 1000)
def state_change_notify(self, new_state: 'State'):
for observer in self.observers:
observer(self, new_state)
self.role_changed = True
def add_state_observer(self, handler: Callable[['State', 'State'],
NoReturn]):
self.observers.append(handler)
class Learner(State):
def __init__(self):
self.join_url: T_URL = None
self.listen_url: T_URL = None
super().__init__()
@classmethod
def become(cls, s):
pass
def tick(self):
pass
def join(self, listen_url: T_URL, join_url: T_URL):
self.join_url = join_url
self.listen_url = listen_url
teacher = self.peer_factory(0, self.join_url)
msg = LearnReq.new(0, self.listen_url)
asyncio.get_running_loop().call_soon(
lambda: asyncio.create_task(self.learn(teacher, msg)))
async def learn(self, peer: Peer, message: Message):
res = await peer.send(message)
if not res or not hasattr(res, "confirmed") or not res.confirmed:
sys.exit("can't join {}:{}".format(*self.join_url))
def message(self, msg: Message):
if msg.type is MessageType.HEARTBEAT_REQ:
return self.heartbeat(msg)
elif msg.type is MessageType.INSTALL_SNAPSHOT_REQ:
return self.install_snapshot(msg)
else:
return ErrorMessage.new(self.current_term, 100001,
"unknown message type.", self.id,
msg.come_from)
def install_snapshot(self, msg: InstallSnapshotReq):
result = InstallSnapshotRes.new(self.current_term)
if msg.term < self.current_term:
return result
self.current_term = msg.term
self.snapshot_installing = not msg.done
self.snapshot_last_included_index = msg.last_included_index
self.snapshot_last_included_term = msg.last_included_term
if msg.offset == 0:
self.logs.reset(msg.last_included_index, msg.last_included_term)
self.state_machine.load_from_snapshot(msg.offset, msg.data)
self.peers.clear()
for i in msg.peers:
self.add_peer(self.peer_factory(i[0], i[1:]))
self.conf.election_timeout = msg.conf["election_timeout"]
self.conf.heartbeat_interval = msg.conf["heartbeat_interval"]
self.leader_id = msg.leader_id
if msg.done:
asyncio.get_running_loop().call_soon(
lambda: asyncio.create_task(self.join_to_leader()))
return result
async def join_to_leader(self):
leader = None
for i in self.peers:
if i.get_id() == self.leader_id:
leader = i
break
if not leader:
sys.exit("can't join leader id: {}".format(self.leader_id))
msg = LearnerJoinReq.new(self.current_term, self.listen_url)
res = await leader.send(msg)
if not res or not isinstance(res, LearnerJoinRes) or not res.id:
sys.exit("can't join leader {}:{}".format(*leader.get_url()))
self.id = res.id
def heartbeat(self, msg: HeartBeatReq) -> Message:
        # If the leader's term is lower than ours, this response will make
        # the leader step down to follower.
if msg.term < self.current_term:
return HeartBeatRes.new(self.current_term, False, self.id,
msg.come_from)
self.latext_heart_beat_time = self.get_now_millisecond()
if msg.term > self.current_term:
self.update_term(msg.term)
        # Reaching this point means the leader has been confirmed and accepted.
self.leader_id = msg.leader_id
if self.snapshot_installing:
return HeartBeatRes.new(self.current_term, False, self.id,
msg.come_from)
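        # Raft log-consistency check: only append the leader's entries if
        # we hold an entry at prev_log_index whose term matches
        # prev_log_term.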
if msg.prev_log_index > 0:
try:
prev_log = self.logs.get(msg.prev_log_index)
if not prev_log or prev_log.term != msg.prev_log_term:
return HeartBeatRes.new(self.current_term, False, self.id,
msg.come_from)
except praft.exceptions.LogHasDeleted:
for i in msg.entries:
if i.index < self.snapshot_last_included_index:
continue
if i.index == self.snapshot_last_included_index:
if i.term != self.snapshot_last_included_term:
return HeartBeatRes.new(self.current_term, False,
self.id, msg.come_from)
self.logs.replication(msg.entries)
        committed = min(msg.leader_commit, msg.entries[-1].index) if \
            msg.entries else msg.leader_commit
        if committed > self.commit_index:
            self.commit(committed)
self.apply()
if not msg.is_learner:
follower = Follower.become(self)
self.state_change_notify(follower)
return HeartBeatRes.new(self.current_term, True, self.id,
msg.come_from)
class Follower(State):
def __init__(self):
self.latext_heart_beat_time = 0
super().__init__()
@classmethod
def become(cls, old_state: State) -> 'Follower':
s = cls()
s.copy(old_state)
loop = asyncio.get_running_loop()
loop.call_later(s.get_election_timeout(), s.tick)
return s
def message(self, msg: Message):
if msg.type is MessageType.PREV_VOTE_REQ:
return self.pre_vote(msg)
elif msg.type is MessageType.VOTE_REQ:
return self.vote(msg)
elif msg.type is MessageType.HEARTBEAT_REQ:
return self.heartbeat(msg)
elif msg.type is MessageType.START_ELECTION_REQ:
return self.start_election(msg)
elif msg.type is MessageType.INSTALL_SNAPSHOT_REQ:
return self.install_snapshot(msg)
elif msg.type is MessageType.LEARN_REQ:
return self.learn_req(msg)
else:
return ErrorMessage.new(self.current_term, 100001,
"unknown message type.", self.id,
msg.come_from)
def pre_vote(self, msg: PrevVoteReq) -> Message:
if self.current_term > msg.term:
return PrevVoteRes.new(self.current_term, False, self.id,
msg.come_from)
        if self.election_timeout_time < 1:  # grant only after a timeout has occurred
return PrevVoteRes.new(self.current_term, False, self.id,
msg.come_from)
if self.voted_for and self.voted_for != msg.come_from:
return PrevVoteRes.new(self.current_term, False, self.id,
msg.come_from)
        # All checks passed; now validate the candidate's log.
latest_log = self.logs.get_last_entity()
if msg.last_log_index >= max(latest_log.index,
self.snapshot_last_included_index) and\
msg.last_log_term >= max(latest_log.term,
self.snapshot_last_included_term):
return PrevVoteRes.new(self.current_term, True, self.id,
msg.come_from)
return PrevVoteRes.new(self.current_term, False, self.id,
msg.come_from)
def vote(self, msg: VoteReq) -> Message:
if self.current_term > msg.term:
return VoteRes.new(self.current_term, False, self.id,
msg.come_from)
        # adopt the larger incoming term
if msg.term > self.current_term:
self.update_term(msg.term)
if self.voted_for and self.voted_for != msg.come_from:
return VoteRes.new(self.current_term, False, self.id,
msg.come_from)
        # All checks passed; now validate the candidate's log.
latest_log = self.logs.get_last_entity()
if msg.last_log_index >= max(latest_log.index,
self.snapshot_last_included_index) and\
msg.last_log_term >= max(latest_log.term,
self.snapshot_last_included_term):
self.voted_for = msg.come_from
return VoteRes.new(self.current_term, True, self.id,
msg.come_from)
return VoteRes.new(self.current_term, False, self.id,
msg.come_from)
def heartbeat(self, msg: HeartBeatReq) -> Message:
        # If the leader's term is lower than ours, this response will make
        # the leader step down to follower.
if msg.term < self.current_term:
return HeartBeatRes.new(self.current_term, False, self.id,
msg.come_from)
self.latext_heart_beat_time = self.get_now_millisecond()
if msg.term > self.current_term:
self.update_term(msg.term)
        # Reaching this point means the leader has been confirmed and accepted.
self.leader_id = msg.leader_id
if self.snapshot_installing:
return HeartBeatRes.new(self.current_term, False, self.id,
msg.come_from)
if msg.prev_log_index > 0:
try:
prev_log = self.logs.get(msg.prev_log_index)
if not prev_log or prev_log.term != msg.prev_log_term:
return HeartBeatRes.new(self.current_term, False, self.id,
msg.come_from)
except praft.exceptions.LogHasDeleted:
for i in msg.entries:
if i.index < self.snapshot_last_included_index:
continue
if i.index == self.snapshot_last_included_index:
if i.term != self.snapshot_last_included_term:
return HeartBeatRes.new(self.current_term, False,
self.id, msg.come_from)
self.logs.replication(msg.entries)
        committed = min(msg.leader_commit, msg.entries[-1].index) if \
            msg.entries else msg.leader_commit
        if committed > self.commit_index:
            self.commit(committed)
self.apply()
return HeartBeatRes.new(self.current_term, True, self.id,
msg.come_from)
def start_election(self, msg: Message):
result = StartElectionRes.new(self.current_term, False, self.id,
msg.come_from)
if self.current_term != msg.term:
return result
if self.logs.get_last_index() < msg.leader_latest_index:
return result
if self.commit_index < msg.leader_commit:
return result
        # A leadership transfer skips the pre-vote phase and goes straight
        # to Candidate.
candidate = Candidate.become(self)
self.state_change_notify(candidate)
loop = asyncio.get_running_loop()
loop.call_soon(lambda: loop.create_task(candidate.election()))
result.confirmed = True
del(self)
return result
def learn_req(self, msg: LearnReq):
peer = self.peer_factory(0, msg.url)
asyncio.get_running_loop().call_soon(
lambda: asyncio.create_task(self.send_snapshot(peer)))
return LearnRes.new(self.current_term, True)
def install_snapshot(self, msg: InstallSnapshotReq):
result = InstallSnapshotRes(self.current_term)
if msg.term < self.current_term:
return result
self.current_term = msg.term
        # Still installing until the final chunk (msg.done) arrives,
        # matching Learner.install_snapshot above.
        self.snapshot_installing = not msg.done
self.snapshot_last_included_index = msg.last_included_index
self.snapshot_last_included_term = msg.last_included_term
if msg.offset == 0:
self.logs.reset(msg.last_included_index, msg.last_included_term)
        self.state_machine.load_from_snapshot(msg.offset, msg.data)
        return result
def tick(self):
if self.role_changed:
return
self.apply()
now = self.get_now_millisecond()
loop = asyncio.get_running_loop()
if self.latext_heart_beat_time + self.get_election_timeout() * 1000\
> now:
loop.call_later(self.get_election_timeout(), self.tick)
else:
candidate = PreCandidate.become(self)
self.state_change_notify(candidate)
loop.call_soon(lambda: loop.create_task(candidate.election()))
del(self)
class PreCandidate(State):
def __init__(self, **kvargs):
self.vote_timeout: float = 0
self.vote_result = []
self.election_start_at = 0
        # Whether this election round has ended; guards against duplicate
        # handling of asynchronous request callbacks.
self.election_end = False
super().__init__(**kvargs)
def reset(self):
        self.vote_timeout: float = random.randint(150, 500) / 1000  # vote timeout (seconds)
self.vote_result = []
self.election_start_at = 0
self.election_end = False
self.election_timeout_time += 1
def get_vote_timeout(self) -> float:
return self.vote_timeout
def apply(self):
raise praft.exceptions.RoleCanNotDo()
    def commit(self, index: int):
        raise praft.exceptions.RoleCanNotDo()
    def add_peer(self, peer: Peer):
        raise praft.exceptions.RoleCanNotDo()
@classmethod
def become(cls, old_state: State) -> 'PreCandidate':
if not isinstance(old_state, Follower):
            # A PreCandidate can only be created from a Follower.
raise praft.exceptions.UnknownError()
candidate = cls()
candidate.copy(old_state)
candidate.reset()
return candidate
def message(self, msg: Message):
        # Answer pre-vote requests: grant only when the requester's term
        # is not behind ours.
if msg.type is MessageType.PREV_VOTE_REQ:
return PrevVoteRes.new(self.current_term,
msg.term >= self.current_term, self.id,
msg.come_from)
if (msg.term > self.current_term) or \
(msg.term == self.current_term and msg.type is
MessageType.HEARTBEAT_REQ):
follower = Follower.become(self)
self.election_end = True
self.state_change_notify(follower)
del(self)
return follower.message(msg)
        # All other request types are rejected.
if msg.type is MessageType.HEARTBEAT_REQ:
return HeartBeatRes.new(self.current_term, False, self.id,
msg.come_from)
elif msg.type is MessageType.VOTE_REQ:
return VoteRes.new(self.current_term, False, self.id,
msg.come_from)
else:
return ErrorMessage.new(self.current_term, 100001,
"unknown message type.", self.id,
msg.come_from)
async def election(self):
self.election_start_at = self.get_now_millisecond()
asyncio.get_running_loop().call_later(
self.get_vote_timeout(), self.tick)
await self.vote()
async def vote(self):
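        # Pre-vote (the Raft PreVote extension): probe peers with
        # current_term + 1 without actually incrementing our own term, so
        # a node that was partitioned away cannot disrupt a stable leader.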
        self.vote_result.append(True)  # vote for ourselves
last_log = self.logs.get_last_entity()
message = PrevVoteReq.new(self.current_term + 1, self.id,
last_log.index, last_log.term)
for peer in self.peers:
asyncio.create_task(self.talk_peer(peer, copy.copy(message)))
async def talk_peer(self, peer: Peer, message: Message):
message.set_from_to(self.id, peer.get_id())
res = await peer.send(message)
if self.election_end:
return
if not res:
return
if res.term > self.current_term:
follower = Follower.become(self)
self.election_end = True
self.state_change_notify(follower)
del(self)
return
self.vote_result.append(res.vote_granted)
if self.vote_result.count(True) > self.get_quorum():
self.vote_succ()
def tick(self) -> StateResult:
if self.role_changed:
return
if self.election_end:
return
now = self.get_now_millisecond()
        # Timestamps are in milliseconds while get_vote_timeout() is in
        # seconds, so convert before comparing.
        if self.election_start_at + self.get_vote_timeout() * 1000 > now:
return
        # Allowing the election to succeed here mainly supports the
        # single-node deployment case.
if self.vote_result.count(True) < self.get_quorum():
self.reset()
loop = asyncio.get_running_loop()
loop.call_soon(lambda: asyncio.create_task(self.election()))
return
else:
self.vote_succ()
def vote_succ(self):
candidate = Candidate.become(self)
self.election_end = True
self.state_change_notify(candidate)
candidate.reset()
asyncio.get_running_loop().call_soon(
lambda: asyncio.create_task(candidate.election()))
del(self)
class Candidate(PreCandidate):
def reset(self):
self.current_term += 1
super().reset()
@classmethod
def become(cls, pre_candidate: PreCandidate) -> 'Candidate':
candidate = cls()
candidate.copy(pre_candidate)
candidate.reset()
return candidate
async def election(self):
self.vote_result.append(True)
self.election_start_at = self.get_now_millisecond()
asyncio.get_running_loop().call_later(
self.get_vote_timeout(), self.tick)
asyncio.create_task(self.vote())
async def vote(self):
        # Formal voting round.
last_log = self.logs.get_last_entity()
message = VoteReq.new(self.current_term, self.id,
last_log.index, last_log.term)
for peer in self.peers:
asyncio.create_task(self.talk_peer(peer, copy.copy(message)))
async def talk_peer(self, peer: Peer, message: Message):
message.set_from_to(self.id, peer.get_id())
res = await peer.send(message)
if self.election_end:
return
if not res:
return
if res.term > self.current_term:
follower = Follower.become(self)
self.election_end = True
self.state_change_notify(follower)
del(self)
return
self.vote_result.append(res.vote_granted)
if self.vote_result.count(True) > self.get_quorum():
self.vote_succ()
return res
def tick(self) -> StateResult:
if self.role_changed:
return
if self.election_end:
return
now = self.get_now_millisecond()
        # Timestamps are in milliseconds while get_vote_timeout() is in
        # seconds, so convert before comparing.
        if self.election_start_at + self.get_vote_timeout() * 1000 > now:
return
        # Allowing the election to succeed here mainly supports the
        # single-node deployment case.
if self.vote_result.count(True) < self.get_quorum():
self.reset()
loop = asyncio.get_running_loop()
loop.call_soon(lambda: asyncio.create_task(self.election()))
else:
self.vote_succ()
def vote_succ(self):
leader = Leader.become(self)
self.election_end = True
self.state_change_notify(leader)
leader.elected()
del(self)
class Leader(State):
def __init__(self, **kvargs):
self.next_index: Dict[int, int] = dict()
self.match_index: Dict[int, int] = dict()
self.learners: List[Peer] = []
super().__init__(**kvargs)
@classmethod
def become(cls, candidate: Candidate) -> 'Leader':
s = cls()
s.copy(candidate)
s.leader_id = s.id
return s
def read(self, param) -> object:
return self.state_machine.read(param)
def commit(self, index: int):
self.commit_index = index
def elected(self):
        # Start replicating to peers immediately after winning the election.
last_log = self.logs.get_last_entity()
message = HeartBeatReq.new(self.current_term, self.id, last_log.index,
last_log.term, self.commit_index, [])
loop = asyncio.get_running_loop()
        for peer in self.peers:
            # Bind peer per iteration to avoid the late-binding closure
            # pitfall (otherwise every lambda would see the last peer).
            loop.call_soon(lambda peer=peer: asyncio.create_task(
                self.peer_log_append(peer, copy.copy(message))))
loop.call_later(self.get_heartbeat_interval(), self.tick)
async def peer_log_append(self, peer: Peer, message: HeartBeatReq = None,
loop: bool = True) -> bool:
if message is None:
try:
message = self.create_peer_message(peer)
except praft.exceptions.LogHasDeleted:
return False
message.set_from_to(self.id, peer.id)
res = await peer.send(message)
if not res:
return False
if res.term > self.current_term:
follower = Follower.become(self)
self.state_change_notify(follower)
del(self)
            return False
match_index = message.prev_log_index
next_index = match_index + 1
if res.success:
if len(message.entries):
match_index = message.entries[-1].index
next_index = match_index + 1
self.next_index[peer.get_id()] = next_index
self.match_index[peer.get_id()] = match_index
return True
else:
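            # The follower rejected the entries: back off next_index by up
            # to 50 and let the next round retry from there.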
if loop:
next_index = max(0, message.prev_log_index - 50)
self.next_index[peer.get_id()] = next_index
if self.role_changed:
return False
if next_index <= self.snapshot_last_included_index:
                # The follower is too far behind; send a snapshot instead.
asyncio.create_task(self.send_snapshot(peer))
return False
def create_peer_message(self, peer: Peer) -> HeartBeatReq:
next_index = self.next_index.get(peer.id, 0)
entries = self.logs.read_from(next_index, 50)
if entries:
if (entries[0].index - 1) == 0:
prev_log_index = 0
prev_log_term = 0
else:
prev_log = self.logs.get(entries[0].index - 1)
prev_log_index = prev_log.index
prev_log_term = prev_log.term
else:
last_log = self.logs.get_last_entity()
prev_log_index = last_log.index
prev_log_term = last_log.term
is_learner = False
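        # Promote a learner to a full voter once its match_index has
        # caught up with our last log index.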
if peer in self.learners:
is_learner = True
if (peer.get_id() in self.match_index.keys()) and\
self.match_index[peer.get_id()] >= \
self.logs.get_last_index():
is_learner = False
self.learners.remove(peer)
message = HeartBeatReq.new(self.current_term, self.id,
prev_log_index,
prev_log_term,
self.commit_index, entries, is_learner)
return message
def message(self, msg: Message):
if msg.type is MessageType.PREV_VOTE_REQ:
return PrevVoteRes.new(self.current_term, False, self.id,
msg.come_from)
if msg.term > self.current_term:
follower = Follower.become(self)
self.state_change_notify(follower)
return follower.message(msg)
elif msg.type is MessageType.VOTE_REQ:
return VoteRes.new(self.current_term, False, self.id,
msg.come_from)
elif msg.type is MessageType.HEARTBEAT_REQ:
return HeartBeatRes.new(self.current_term, False, self.id,
msg.come_from)
elif msg.type is MessageType.LEARN_REQ:
return self.learn_req(msg)
elif msg.type is MessageType.LEARNER_JOIN_REQ:
return self.learner_join(msg)
else:
return ErrorMessage.new(self.current_term, 100001,
"unknown message type.", self.id,
msg.come_from)
def learn_req(self, msg: LearnReq):
peer = self.peer_factory(0, msg.url)
asyncio.get_running_loop().call_soon(
lambda: asyncio.create_task(self.send_snapshot(peer)))
return LearnRes.new(self.current_term, True)
def get_next_peer_id(self):
max_id = 0
for i in self.peers:
max_id = max(max_id, i.get_id(), self.id)
return max_id + 1
def learner_join(self, msg: LearnerJoinReq):
learner_id = self.get_next_peer_id()
for i in self.peers:
if i.get_url() == msg.url:
return LearnerJoinRes.new(self.current_term, learner_id)
conf_data = ConfChangeData(
ConfChangeType.add_node, learner_id, msg.url)
target_index = self.logs.get_last_index() + 1
entity = Entity(self.current_term, target_index,
EntityType.conf_change, conf_data)
self.logs.append(entity)
self.commit(target_index)
self.apply()
return LearnerJoinRes.new(self.current_term, learner_id)
def leadership_transfer(self):
target_id = 0
latest_index = self.logs.get_last_index()
for peer_id, index in self.match_index.items():
if index >= self.last_applied and index >= latest_index:
target_id = peer_id
break
if not target_id:
raise praft.exceptions.CanNotDoOperation()
        target_peer = None
        for peer in self.peers:
            if peer.get_id() == target_id:
                target_peer = peer
                break
        if not target_peer:
            raise praft.exceptions.CanNotDoOperation()
        msg = StartElectionReq.new(self.current_term, self.commit_index,
                                   latest_index, self.id, target_peer.id)
        res = target_peer.start_election(msg)
        return res.confirmed
async def remove_node(self, url: T_URL):
remove_node = None
for i in self.peers:
if i.get_url() == url:
remove_node = i
break
if not remove_node:
return
target_index = self.logs.get_last_index() + 1
entity = Entity(self.current_term, target_index,
EntityType.conf_change,
ConfChangeData(ConfChangeType.remove_node,
remove_node.get_id(),
remove_node.get_url()))
self.logs.append(entity)
self.match_index[self.id] = target_index
tasks = set()
try:
for peer in self.peers:
tasks.add(asyncio.create_task(self.peer_log_append(peer)))
for peer_result in asyncio.as_completed(tasks):
peer_result = await peer_result
succ_peer_num = sum(map(lambda x: x >= target_index,
self.match_index.values()))
if succ_peer_num >= self.get_quorum():
break
            # Recompute once every request has completed.
succ_peer_num = sum(map(lambda x: x >= target_index,
self.match_index.values()))
if succ_peer_num >= self.get_quorum():
self.commit(target_index)
self.apply()
return
        except praft.exceptions.LeaderDemoted:
            raise
async def propose(self, data: object) -> object:
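        # Append locally, replicate to all peers concurrently, and commit
        # and apply once a quorum of match_index values reaches the new
        # entry's index.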
target_index = self.logs.get_last_index() + 1
entity = Entity(self.current_term, target_index, EntityType.normal,
data)
self.logs.append(entity)
self.match_index[self.id] = target_index
tasks = set()
try:
for peer in self.peers:
tasks.add(asyncio.create_task(self.peer_log_append(peer)))
for peer_result in asyncio.as_completed(tasks):
peer_result = await peer_result
succ_peer_num = sum(map(lambda x: x >= target_index,
self.match_index.values()))
if succ_peer_num >= self.get_quorum():
break
            # Recompute once every request has completed.
succ_peer_num = sum(map(lambda x: x >= target_index,
self.match_index.values()))
if succ_peer_num >= self.get_quorum():
self.commit(target_index)
self.apply()
return data
        except praft.exceptions.LeaderDemoted:
            raise
def tick(self):
if self.role_changed:
return
self.apply()
loop = asyncio.get_running_loop()
for peer in self.peers:
asyncio.create_task(self.peer_log_append(peer))
loop.call_later(
self.get_heartbeat_interval(), self.tick)
|
[
"praft.message.LearnReq.new",
"praft.message.VoteReq.new",
"praft.message.LearnerJoinRes.new",
"asyncio.as_completed",
"praft.message.ErrorMessage.new",
"praft.message.VoteRes.new",
"random.randint",
"praft.message.HeartBeatRes.new",
"praft.message.HeartBeatReq.new",
"praft.message.InstallSnapshotRes.new",
"praft.message.InstallSnapshotReq.new",
"praft.message.LearnRes.new",
"praft.message.StartElectionReq.new",
"praft.message.InstallSnapshotRes",
"praft.message.PrevVoteRes.new",
"praft.log.ConfChangeData",
"copy.copy",
"asyncio.get_running_loop",
"praft.log.Entity",
"time.time",
"praft.message.PrevVoteReq.new",
"praft.message.LearnerJoinReq.new",
"praft.message.StartElectionRes.new"
] |
[((7743, 7775), 'praft.message.LearnReq.new', 'LearnReq.new', (['(0)', 'self.listen_url'], {}), '(0, self.listen_url)\n', (7755, 7775), False, 'from praft.message import Message, MessageType, HeartBeatReq, HeartBeatRes, PrevVoteReq, PrevVoteRes, VoteReq, VoteRes, StartElectionReq, StartElectionRes, InstallSnapshotReq, InstallSnapshotRes, LearnReq, LearnRes, LearnerJoinReq, LearnerJoinRes, ErrorMessage\n'), ((8629, 8670), 'praft.message.InstallSnapshotRes.new', 'InstallSnapshotRes.new', (['self.current_term'], {}), '(self.current_term)\n', (8651, 8670), False, 'from praft.message import Message, MessageType, HeartBeatReq, HeartBeatRes, PrevVoteReq, PrevVoteRes, VoteReq, VoteRes, StartElectionReq, StartElectionRes, InstallSnapshotReq, InstallSnapshotRes, LearnReq, LearnRes, LearnerJoinReq, LearnerJoinRes, ErrorMessage\n'), ((9870, 9924), 'praft.message.LearnerJoinReq.new', 'LearnerJoinReq.new', (['self.current_term', 'self.listen_url'], {}), '(self.current_term, self.listen_url)\n', (9888, 9924), False, 'from praft.message import Message, MessageType, HeartBeatReq, HeartBeatRes, PrevVoteReq, PrevVoteRes, VoteReq, VoteRes, StartElectionReq, StartElectionRes, InstallSnapshotReq, InstallSnapshotRes, LearnReq, LearnRes, LearnerJoinReq, LearnerJoinRes, ErrorMessage\n'), ((11991, 12056), 'praft.message.HeartBeatRes.new', 'HeartBeatRes.new', (['self.current_term', '(True)', 'self.id', 'msg.come_from'], {}), '(self.current_term, True, self.id, msg.come_from)\n', (12007, 12056), False, 'from praft.message import Message, MessageType, HeartBeatReq, HeartBeatRes, PrevVoteReq, PrevVoteRes, VoteReq, VoteRes, StartElectionReq, StartElectionRes, InstallSnapshotReq, InstallSnapshotRes, LearnReq, LearnRes, LearnerJoinReq, LearnerJoinRes, ErrorMessage\n'), ((12336, 12362), 'asyncio.get_running_loop', 'asyncio.get_running_loop', ([], {}), '()\n', (12360, 12362), False, 'import asyncio\n'), ((14273, 14338), 'praft.message.PrevVoteRes.new', 'PrevVoteRes.new', (['self.current_term', '(False)', 'self.id', 'msg.come_from'], {}), '(self.current_term, False, self.id, msg.come_from)\n', (14288, 14338), False, 'from praft.message import Message, MessageType, HeartBeatReq, HeartBeatRes, PrevVoteReq, PrevVoteRes, VoteReq, VoteRes, StartElectionReq, StartElectionRes, InstallSnapshotReq, InstallSnapshotRes, LearnReq, LearnRes, LearnerJoinReq, LearnerJoinRes, ErrorMessage\n'), ((15363, 15424), 'praft.message.VoteRes.new', 'VoteRes.new', (['self.current_term', '(False)', 'self.id', 'msg.come_from'], {}), '(self.current_term, False, self.id, msg.come_from)\n', (15374, 15424), False, 'from praft.message import Message, MessageType, HeartBeatReq, HeartBeatRes, PrevVoteReq, PrevVoteRes, VoteReq, VoteRes, StartElectionReq, StartElectionRes, InstallSnapshotReq, InstallSnapshotRes, LearnReq, LearnRes, LearnerJoinReq, LearnerJoinRes, ErrorMessage\n'), ((17186, 17251), 'praft.message.HeartBeatRes.new', 'HeartBeatRes.new', (['self.current_term', '(True)', 'self.id', 'msg.come_from'], {}), '(self.current_term, True, self.id, msg.come_from)\n', (17202, 17251), False, 'from praft.message import Message, MessageType, HeartBeatReq, HeartBeatRes, PrevVoteReq, PrevVoteRes, VoteReq, VoteRes, StartElectionReq, StartElectionRes, InstallSnapshotReq, InstallSnapshotRes, LearnReq, LearnRes, LearnerJoinReq, LearnerJoinRes, ErrorMessage\n'), ((17346, 17416), 'praft.message.StartElectionRes.new', 'StartElectionRes.new', (['self.current_term', '(False)', 'self.id', 'msg.come_from'], {}), '(self.current_term, False, self.id, msg.come_from)\n', (17366, 
17416), False, 'from praft.message import Message, MessageType, HeartBeatReq, HeartBeatRes, PrevVoteReq, PrevVoteRes, VoteReq, VoteRes, StartElectionReq, StartElectionRes, InstallSnapshotReq, InstallSnapshotRes, LearnReq, LearnRes, LearnerJoinReq, LearnerJoinRes, ErrorMessage\n'), ((17817, 17843), 'asyncio.get_running_loop', 'asyncio.get_running_loop', ([], {}), '()\n', (17841, 17843), False, 'import asyncio\n'), ((18201, 18238), 'praft.message.LearnRes.new', 'LearnRes.new', (['self.current_term', '(True)'], {}), '(self.current_term, True)\n', (18213, 18238), False, 'from praft.message import Message, MessageType, HeartBeatReq, HeartBeatRes, PrevVoteReq, PrevVoteRes, VoteReq, VoteRes, StartElectionReq, StartElectionRes, InstallSnapshotReq, InstallSnapshotRes, LearnReq, LearnRes, LearnerJoinReq, LearnerJoinRes, ErrorMessage\n'), ((18314, 18351), 'praft.message.InstallSnapshotRes', 'InstallSnapshotRes', (['self.current_term'], {}), '(self.current_term)\n', (18332, 18351), False, 'from praft.message import Message, MessageType, HeartBeatReq, HeartBeatRes, PrevVoteReq, PrevVoteRes, VoteReq, VoteRes, StartElectionReq, StartElectionRes, InstallSnapshotReq, InstallSnapshotRes, LearnReq, LearnRes, LearnerJoinReq, LearnerJoinRes, ErrorMessage\n'), ((18954, 18980), 'asyncio.get_running_loop', 'asyncio.get_running_loop', ([], {}), '()\n', (18978, 18980), False, 'import asyncio\n'), ((22015, 22093), 'praft.message.PrevVoteReq.new', 'PrevVoteReq.new', (['(self.current_term + 1)', 'self.id', 'last_log.index', 'last_log.term'], {}), '(self.current_term + 1, self.id, last_log.index, last_log.term)\n', (22030, 22093), False, 'from praft.message import Message, MessageType, HeartBeatReq, HeartBeatRes, PrevVoteReq, PrevVoteRes, VoteReq, VoteRes, StartElectionReq, StartElectionRes, InstallSnapshotReq, InstallSnapshotRes, LearnReq, LearnRes, LearnerJoinReq, LearnerJoinRes, ErrorMessage\n'), ((24368, 24438), 'praft.message.VoteReq.new', 'VoteReq.new', (['self.current_term', 'self.id', 'last_log.index', 'last_log.term'], {}), '(self.current_term, self.id, last_log.index, last_log.term)\n', (24379, 24438), False, 'from praft.message import Message, MessageType, HeartBeatReq, HeartBeatRes, PrevVoteReq, PrevVoteRes, VoteReq, VoteRes, StartElectionReq, StartElectionRes, InstallSnapshotReq, InstallSnapshotRes, LearnReq, LearnRes, LearnerJoinReq, LearnerJoinRes, ErrorMessage\n'), ((26561, 26663), 'praft.message.HeartBeatReq.new', 'HeartBeatReq.new', (['self.current_term', 'self.id', 'last_log.index', 'last_log.term', 'self.commit_index', '[]'], {}), '(self.current_term, self.id, last_log.index, last_log.term,\n self.commit_index, [])\n', (26577, 26663), False, 'from praft.message import Message, MessageType, HeartBeatReq, HeartBeatRes, PrevVoteReq, PrevVoteRes, VoteReq, VoteRes, StartElectionReq, StartElectionRes, InstallSnapshotReq, InstallSnapshotRes, LearnReq, LearnRes, LearnerJoinReq, LearnerJoinRes, ErrorMessage\n'), ((26710, 26736), 'asyncio.get_running_loop', 'asyncio.get_running_loop', ([], {}), '()\n', (26734, 26736), False, 'import asyncio\n'), ((29367, 29486), 'praft.message.HeartBeatReq.new', 'HeartBeatReq.new', (['self.current_term', 'self.id', 'prev_log_index', 'prev_log_term', 'self.commit_index', 'entries', 'is_learner'], {}), '(self.current_term, self.id, prev_log_index, prev_log_term,\n self.commit_index, entries, is_learner)\n', (29383, 29486), False, 'from praft.message import Message, MessageType, HeartBeatReq, HeartBeatRes, PrevVoteReq, PrevVoteRes, VoteReq, VoteRes, StartElectionReq, 
StartElectionRes, InstallSnapshotReq, InstallSnapshotRes, LearnReq, LearnRes, LearnerJoinReq, LearnerJoinRes, ErrorMessage\n'), ((30922, 30959), 'praft.message.LearnRes.new', 'LearnRes.new', (['self.current_term', '(True)'], {}), '(self.current_term, True)\n', (30934, 30959), False, 'from praft.message import Message, MessageType, HeartBeatReq, HeartBeatRes, PrevVoteReq, PrevVoteRes, VoteReq, VoteRes, StartElectionReq, StartElectionRes, InstallSnapshotReq, InstallSnapshotRes, LearnReq, LearnRes, LearnerJoinReq, LearnerJoinRes, ErrorMessage\n'), ((31378, 31438), 'praft.log.ConfChangeData', 'ConfChangeData', (['ConfChangeType.add_node', 'learner_id', 'msg.url'], {}), '(ConfChangeType.add_node, learner_id, msg.url)\n', (31392, 31438), False, 'from praft.log import Entity, EntityType, ConfChangeType, ConfChangeData\n'), ((31523, 31597), 'praft.log.Entity', 'Entity', (['self.current_term', 'target_index', 'EntityType.conf_change', 'conf_data'], {}), '(self.current_term, target_index, EntityType.conf_change, conf_data)\n', (31529, 31597), False, 'from praft.log import Entity, EntityType, ConfChangeType, ConfChangeData\n'), ((31725, 31774), 'praft.message.LearnerJoinRes.new', 'LearnerJoinRes.new', (['self.current_term', 'learner_id'], {}), '(self.current_term, learner_id)\n', (31743, 31774), False, 'from praft.message import Message, MessageType, HeartBeatReq, HeartBeatRes, PrevVoteReq, PrevVoteRes, VoteReq, VoteRes, StartElectionReq, StartElectionRes, InstallSnapshotReq, InstallSnapshotRes, LearnReq, LearnRes, LearnerJoinReq, LearnerJoinRes, ErrorMessage\n'), ((32384, 32478), 'praft.message.StartElectionReq.new', 'StartElectionReq.new', (['self.current_term', 'self.commit_index', 'latest_index', 'self.id', 'peer.id'], {}), '(self.current_term, self.commit_index, latest_index,\n self.id, peer.id)\n', (32404, 32478), False, 'from praft.message import Message, MessageType, HeartBeatReq, HeartBeatRes, PrevVoteReq, PrevVoteRes, VoteReq, VoteRes, StartElectionReq, StartElectionRes, InstallSnapshotReq, InstallSnapshotRes, LearnReq, LearnRes, LearnerJoinReq, LearnerJoinRes, ErrorMessage\n'), ((34243, 34307), 'praft.log.Entity', 'Entity', (['self.current_term', 'target_index', 'EntityType.normal', 'data'], {}), '(self.current_term, target_index, EntityType.normal, data)\n', (34249, 34307), False, 'from praft.log import Entity, EntityType, ConfChangeType, ConfChangeData\n'), ((35395, 35421), 'asyncio.get_running_loop', 'asyncio.get_running_loop', ([], {}), '()\n', (35419, 35421), False, 'import asyncio\n'), ((5371, 5564), 'praft.message.InstallSnapshotReq.new', 'InstallSnapshotReq.new', (['self.current_term', 'self.leader_id', 'self.snapshot_last_included_index', 'self.snapshot_last_included_term', 'i.offset', 'i.data', 'i.done', 'conf', 'peers', 'self.id', 'peer.id'], {}), '(self.current_term, self.leader_id, self.\n snapshot_last_included_index, self.snapshot_last_included_term, i.\n offset, i.data, i.done, conf, peers, self.id, peer.id)\n', (5393, 5564), False, 'from praft.message import Message, MessageType, HeartBeatReq, HeartBeatRes, PrevVoteReq, PrevVoteRes, VoteReq, VoteRes, StartElectionReq, StartElectionRes, InstallSnapshotReq, InstallSnapshotRes, LearnReq, LearnRes, LearnerJoinReq, LearnerJoinRes, ErrorMessage\n'), ((10307, 10373), 'praft.message.HeartBeatRes.new', 'HeartBeatRes.new', (['self.current_term', '(False)', 'self.id', 'msg.come_from'], {}), '(self.current_term, False, self.id, msg.come_from)\n', (10323, 10373), False, 'from praft.message import Message, MessageType, HeartBeatReq, 
HeartBeatRes, PrevVoteReq, PrevVoteRes, VoteReq, VoteRes, StartElectionReq, StartElectionRes, InstallSnapshotReq, InstallSnapshotRes, LearnReq, LearnRes, LearnerJoinReq, LearnerJoinRes, ErrorMessage\n'), ((10683, 10749), 'praft.message.HeartBeatRes.new', 'HeartBeatRes.new', (['self.current_term', '(False)', 'self.id', 'msg.come_from'], {}), '(self.current_term, False, self.id, msg.come_from)\n', (10699, 10749), False, 'from praft.message import Message, MessageType, HeartBeatReq, HeartBeatRes, PrevVoteReq, PrevVoteRes, VoteReq, VoteRes, StartElectionReq, StartElectionRes, InstallSnapshotReq, InstallSnapshotRes, LearnReq, LearnRes, LearnerJoinReq, LearnerJoinRes, ErrorMessage\n'), ((13341, 13406), 'praft.message.PrevVoteRes.new', 'PrevVoteRes.new', (['self.current_term', '(False)', 'self.id', 'msg.come_from'], {}), '(self.current_term, False, self.id, msg.come_from)\n', (13356, 13406), False, 'from praft.message import Message, MessageType, HeartBeatReq, HeartBeatRes, PrevVoteReq, PrevVoteRes, VoteReq, VoteRes, StartElectionReq, StartElectionRes, InstallSnapshotReq, InstallSnapshotRes, LearnReq, LearnRes, LearnerJoinReq, LearnerJoinRes, ErrorMessage\n'), ((13518, 13583), 'praft.message.PrevVoteRes.new', 'PrevVoteRes.new', (['self.current_term', '(False)', 'self.id', 'msg.come_from'], {}), '(self.current_term, False, self.id, msg.come_from)\n', (13533, 13583), False, 'from praft.message import Message, MessageType, HeartBeatReq, HeartBeatRes, PrevVoteReq, PrevVoteRes, VoteReq, VoteRes, StartElectionReq, StartElectionRes, InstallSnapshotReq, InstallSnapshotRes, LearnReq, LearnRes, LearnerJoinReq, LearnerJoinRes, ErrorMessage\n'), ((13701, 13766), 'praft.message.PrevVoteRes.new', 'PrevVoteRes.new', (['self.current_term', '(False)', 'self.id', 'msg.come_from'], {}), '(self.current_term, False, self.id, msg.come_from)\n', (13716, 13766), False, 'from praft.message import Message, MessageType, HeartBeatReq, HeartBeatRes, PrevVoteReq, PrevVoteRes, VoteReq, VoteRes, StartElectionReq, StartElectionRes, InstallSnapshotReq, InstallSnapshotRes, LearnReq, LearnRes, LearnerJoinReq, LearnerJoinRes, ErrorMessage\n'), ((14158, 14222), 'praft.message.PrevVoteRes.new', 'PrevVoteRes.new', (['self.current_term', '(True)', 'self.id', 'msg.come_from'], {}), '(self.current_term, True, self.id, msg.come_from)\n', (14173, 14222), False, 'from praft.message import Message, MessageType, HeartBeatReq, HeartBeatRes, PrevVoteReq, PrevVoteRes, VoteReq, VoteRes, StartElectionReq, StartElectionRes, InstallSnapshotReq, InstallSnapshotRes, LearnReq, LearnRes, LearnerJoinReq, LearnerJoinRes, ErrorMessage\n'), ((14476, 14537), 'praft.message.VoteRes.new', 'VoteRes.new', (['self.current_term', '(False)', 'self.id', 'msg.come_from'], {}), '(self.current_term, False, self.id, msg.come_from)\n', (14487, 14537), False, 'from praft.message import Message, MessageType, HeartBeatReq, HeartBeatRes, PrevVoteReq, PrevVoteRes, VoteReq, VoteRes, StartElectionReq, StartElectionRes, InstallSnapshotReq, InstallSnapshotRes, LearnReq, LearnRes, LearnerJoinReq, LearnerJoinRes, ErrorMessage\n'), ((14764, 14825), 'praft.message.VoteRes.new', 'VoteRes.new', (['self.current_term', '(False)', 'self.id', 'msg.come_from'], {}), '(self.current_term, False, self.id, msg.come_from)\n', (14775, 14825), False, 'from praft.message import Message, MessageType, HeartBeatReq, HeartBeatRes, PrevVoteReq, PrevVoteRes, VoteReq, VoteRes, StartElectionReq, StartElectionRes, InstallSnapshotReq, InstallSnapshotRes, LearnReq, LearnRes, LearnerJoinReq, LearnerJoinRes, 
ErrorMessage\n'), ((15256, 15316), 'praft.message.VoteRes.new', 'VoteRes.new', (['self.current_term', '(True)', 'self.id', 'msg.come_from'], {}), '(self.current_term, True, self.id, msg.come_from)\n', (15267, 15316), False, 'from praft.message import Message, MessageType, HeartBeatReq, HeartBeatRes, PrevVoteReq, PrevVoteRes, VoteReq, VoteRes, StartElectionReq, StartElectionRes, InstallSnapshotReq, InstallSnapshotRes, LearnReq, LearnRes, LearnerJoinReq, LearnerJoinRes, ErrorMessage\n'), ((15624, 15690), 'praft.message.HeartBeatRes.new', 'HeartBeatRes.new', (['self.current_term', '(False)', 'self.id', 'msg.come_from'], {}), '(self.current_term, False, self.id, msg.come_from)\n', (15640, 15690), False, 'from praft.message import Message, MessageType, HeartBeatReq, HeartBeatRes, PrevVoteReq, PrevVoteRes, VoteReq, VoteRes, StartElectionReq, StartElectionRes, InstallSnapshotReq, InstallSnapshotRes, LearnReq, LearnRes, LearnerJoinReq, LearnerJoinRes, ErrorMessage\n'), ((16000, 16066), 'praft.message.HeartBeatRes.new', 'HeartBeatRes.new', (['self.current_term', '(False)', 'self.id', 'msg.come_from'], {}), '(self.current_term, False, self.id, msg.come_from)\n', (16016, 16066), False, 'from praft.message import Message, MessageType, HeartBeatReq, HeartBeatRes, PrevVoteReq, PrevVoteRes, VoteReq, VoteRes, StartElectionReq, StartElectionRes, InstallSnapshotReq, InstallSnapshotRes, LearnReq, LearnRes, LearnerJoinReq, LearnerJoinRes, ErrorMessage\n'), ((19685, 19709), 'random.randint', 'random.randint', (['(150)', '(500)'], {}), '(150, 500)\n', (19699, 19709), False, 'import random\n'), ((20592, 20685), 'praft.message.PrevVoteRes.new', 'PrevVoteRes.new', (['self.current_term', '(msg.term >= self.current_term)', 'self.id', 'msg.come_from'], {}), '(self.current_term, msg.term >= self.current_term, self.id,\n msg.come_from)\n', (20607, 20685), False, 'from praft.message import Message, MessageType, HeartBeatReq, HeartBeatRes, PrevVoteReq, PrevVoteRes, VoteReq, VoteRes, StartElectionReq, StartElectionRes, InstallSnapshotReq, InstallSnapshotRes, LearnReq, LearnRes, LearnerJoinReq, LearnerJoinRes, ErrorMessage\n'), ((21189, 21255), 'praft.message.HeartBeatRes.new', 'HeartBeatRes.new', (['self.current_term', '(False)', 'self.id', 'msg.come_from'], {}), '(self.current_term, False, self.id, msg.come_from)\n', (21205, 21255), False, 'from praft.message import Message, MessageType, HeartBeatReq, HeartBeatRes, PrevVoteReq, PrevVoteRes, VoteReq, VoteRes, StartElectionReq, StartElectionRes, InstallSnapshotReq, InstallSnapshotRes, LearnReq, LearnRes, LearnerJoinReq, LearnerJoinRes, ErrorMessage\n'), ((23225, 23251), 'asyncio.get_running_loop', 'asyncio.get_running_loop', ([], {}), '()\n', (23249, 23251), False, 'import asyncio\n'), ((25587, 25613), 'asyncio.get_running_loop', 'asyncio.get_running_loop', ([], {}), '()\n', (25611, 25613), False, 'import asyncio\n'), ((29718, 29783), 'praft.message.PrevVoteRes.new', 'PrevVoteRes.new', (['self.current_term', '(False)', 'self.id', 'msg.come_from'], {}), '(self.current_term, False, self.id, msg.come_from)\n', (29733, 29783), False, 'from praft.message import Message, MessageType, HeartBeatReq, HeartBeatRes, PrevVoteReq, PrevVoteRes, VoteReq, VoteRes, StartElectionReq, StartElectionRes, InstallSnapshotReq, InstallSnapshotRes, LearnReq, LearnRes, LearnerJoinReq, LearnerJoinRes, ErrorMessage\n'), ((33426, 33453), 'asyncio.as_completed', 'asyncio.as_completed', (['tasks'], {}), '(tasks)\n', (33446, 33453), False, 'import asyncio\n'), ((34591, 34618), 'asyncio.as_completed', 
'asyncio.as_completed', (['tasks'], {}), '(tasks)\n', (34611, 34618), False, 'import asyncio\n'), ((6353, 6383), 'random.randint', 'random.randint', (['(0 - slat)', 'slat'], {}), '(0 - slat, slat)\n', (6367, 6383), False, 'import random\n'), ((6949, 6960), 'time.time', 'time.time', ([], {}), '()\n', (6958, 6960), False, 'import time\n'), ((7784, 7810), 'asyncio.get_running_loop', 'asyncio.get_running_loop', ([], {}), '()\n', (7808, 7810), False, 'import asyncio\n'), ((8389, 8486), 'praft.message.ErrorMessage.new', 'ErrorMessage.new', (['self.current_term', '(100001)', '"""unknown message type."""', 'self.id', 'msg.come_from'], {}), "(self.current_term, 100001, 'unknown message type.', self.\n id, msg.come_from)\n", (8405, 8486), False, 'from praft.message import Message, MessageType, HeartBeatReq, HeartBeatRes, PrevVoteReq, PrevVoteRes, VoteReq, VoteRes, StartElectionReq, StartElectionRes, InstallSnapshotReq, InstallSnapshotRes, LearnReq, LearnRes, LearnerJoinReq, LearnerJoinRes, ErrorMessage\n'), ((18081, 18107), 'asyncio.get_running_loop', 'asyncio.get_running_loop', ([], {}), '()\n', (18105, 18107), False, 'import asyncio\n'), ((21358, 21419), 'praft.message.VoteRes.new', 'VoteRes.new', (['self.current_term', '(False)', 'self.id', 'msg.come_from'], {}), '(self.current_term, False, self.id, msg.come_from)\n', (21369, 21419), False, 'from praft.message import Message, MessageType, HeartBeatReq, HeartBeatRes, PrevVoteReq, PrevVoteRes, VoteReq, VoteRes, StartElectionReq, StartElectionRes, InstallSnapshotReq, InstallSnapshotRes, LearnReq, LearnRes, LearnerJoinReq, LearnerJoinRes, ErrorMessage\n'), ((21484, 21581), 'praft.message.ErrorMessage.new', 'ErrorMessage.new', (['self.current_term', '(100001)', '"""unknown message type."""', 'self.id', 'msg.come_from'], {}), "(self.current_term, 100001, 'unknown message type.', self.\n id, msg.come_from)\n", (21500, 21581), False, 'from praft.message import Message, MessageType, HeartBeatReq, HeartBeatRes, PrevVoteReq, PrevVoteRes, VoteReq, VoteRes, StartElectionReq, StartElectionRes, InstallSnapshotReq, InstallSnapshotRes, LearnReq, LearnRes, LearnerJoinReq, LearnerJoinRes, ErrorMessage\n'), ((21748, 21774), 'asyncio.get_running_loop', 'asyncio.get_running_loop', ([], {}), '()\n', (21772, 21774), False, 'import asyncio\n'), ((23567, 23593), 'asyncio.get_running_loop', 'asyncio.get_running_loop', ([], {}), '()\n', (23591, 23593), False, 'import asyncio\n'), ((24131, 24157), 'asyncio.get_running_loop', 'asyncio.get_running_loop', ([], {}), '()\n', (24155, 24157), False, 'import asyncio\n'), ((30059, 30120), 'praft.message.VoteRes.new', 'VoteRes.new', (['self.current_term', '(False)', 'self.id', 'msg.come_from'], {}), '(self.current_term, False, self.id, msg.come_from)\n', (30070, 30120), False, 'from praft.message import Message, MessageType, HeartBeatReq, HeartBeatRes, PrevVoteReq, PrevVoteRes, VoteReq, VoteRes, StartElectionReq, StartElectionRes, InstallSnapshotReq, InstallSnapshotRes, LearnReq, LearnRes, LearnerJoinReq, LearnerJoinRes, ErrorMessage\n'), ((30802, 30828), 'asyncio.get_running_loop', 'asyncio.get_running_loop', ([], {}), '()\n', (30826, 30828), False, 'import asyncio\n'), ((31307, 31356), 'praft.message.LearnerJoinRes.new', 'LearnerJoinRes.new', (['self.current_term', 'learner_id'], {}), '(self.current_term, learner_id)\n', (31325, 31356), False, 'from praft.message import Message, MessageType, HeartBeatReq, HeartBeatRes, PrevVoteReq, PrevVoteRes, VoteReq, VoteRes, StartElectionReq, StartElectionRes, InstallSnapshotReq, 
InstallSnapshotRes, LearnReq, LearnRes, LearnerJoinReq, LearnerJoinRes, ErrorMessage\n'), ((9451, 9477), 'asyncio.get_running_loop', 'asyncio.get_running_loop', ([], {}), '()\n', (9475, 9477), False, 'import asyncio\n'), ((10997, 11063), 'praft.message.HeartBeatRes.new', 'HeartBeatRes.new', (['self.current_term', '(False)', 'self.id', 'msg.come_from'], {}), '(self.current_term, False, self.id, msg.come_from)\n', (11013, 11063), False, 'from praft.message import Message, MessageType, HeartBeatReq, HeartBeatRes, PrevVoteReq, PrevVoteRes, VoteReq, VoteRes, StartElectionReq, StartElectionRes, InstallSnapshotReq, InstallSnapshotRes, LearnReq, LearnRes, LearnerJoinReq, LearnerJoinRes, ErrorMessage\n'), ((16314, 16380), 'praft.message.HeartBeatRes.new', 'HeartBeatRes.new', (['self.current_term', '(False)', 'self.id', 'msg.come_from'], {}), '(self.current_term, False, self.id, msg.come_from)\n', (16330, 16380), False, 'from praft.message import Message, MessageType, HeartBeatReq, HeartBeatRes, PrevVoteReq, PrevVoteRes, VoteReq, VoteRes, StartElectionReq, StartElectionRes, InstallSnapshotReq, InstallSnapshotRes, LearnReq, LearnRes, LearnerJoinReq, LearnerJoinRes, ErrorMessage\n'), ((22213, 22231), 'copy.copy', 'copy.copy', (['message'], {}), '(message)\n', (22222, 22231), False, 'import copy\n'), ((24554, 24572), 'copy.copy', 'copy.copy', (['message'], {}), '(message)\n', (24563, 24572), False, 'import copy\n'), ((30223, 30289), 'praft.message.HeartBeatRes.new', 'HeartBeatRes.new', (['self.current_term', '(False)', 'self.id', 'msg.come_from'], {}), '(self.current_term, False, self.id, msg.come_from)\n', (30239, 30289), False, 'from praft.message import Message, MessageType, HeartBeatReq, HeartBeatRes, PrevVoteReq, PrevVoteRes, VoteReq, VoteRes, StartElectionReq, StartElectionRes, InstallSnapshotReq, InstallSnapshotRes, LearnReq, LearnRes, LearnerJoinReq, LearnerJoinRes, ErrorMessage\n'), ((26868, 26886), 'copy.copy', 'copy.copy', (['message'], {}), '(message)\n', (26877, 26886), False, 'import copy\n'), ((30543, 30640), 'praft.message.ErrorMessage.new', 'ErrorMessage.new', (['self.current_term', '(100001)', '"""unknown message type."""', 'self.id', 'msg.come_from'], {}), "(self.current_term, 100001, 'unknown message type.', self.\n id, msg.come_from)\n", (30559, 30640), False, 'from praft.message import Message, MessageType, HeartBeatReq, HeartBeatRes, PrevVoteReq, PrevVoteRes, VoteReq, VoteRes, StartElectionReq, StartElectionRes, InstallSnapshotReq, InstallSnapshotRes, LearnReq, LearnRes, LearnerJoinReq, LearnerJoinRes, ErrorMessage\n'), ((11473, 11539), 'praft.message.HeartBeatRes.new', 'HeartBeatRes.new', (['self.current_term', '(False)', 'self.id', 'msg.come_from'], {}), '(self.current_term, False, self.id, msg.come_from)\n', (11489, 11539), False, 'from praft.message import Message, MessageType, HeartBeatReq, HeartBeatRes, PrevVoteReq, PrevVoteRes, VoteReq, VoteRes, StartElectionReq, StartElectionRes, InstallSnapshotReq, InstallSnapshotRes, LearnReq, LearnRes, LearnerJoinReq, LearnerJoinRes, ErrorMessage\n'), ((13062, 13159), 'praft.message.ErrorMessage.new', 'ErrorMessage.new', (['self.current_term', '(100001)', '"""unknown message type."""', 'self.id', 'msg.come_from'], {}), "(self.current_term, 100001, 'unknown message type.', self.\n id, msg.come_from)\n", (13078, 13159), False, 'from praft.message import Message, MessageType, HeartBeatReq, HeartBeatRes, PrevVoteReq, PrevVoteRes, VoteReq, VoteRes, StartElectionReq, StartElectionRes, InstallSnapshotReq, InstallSnapshotRes, LearnReq, 
LearnRes, LearnerJoinReq, LearnerJoinRes, ErrorMessage\n'), ((16790, 16856), 'praft.message.HeartBeatRes.new', 'HeartBeatRes.new', (['self.current_term', '(False)', 'self.id', 'msg.come_from'], {}), '(self.current_term, False, self.id, msg.come_from)\n', (16806, 16856), False, 'from praft.message import Message, MessageType, HeartBeatReq, HeartBeatRes, PrevVoteReq, PrevVoteRes, VoteReq, VoteRes, StartElectionReq, StartElectionRes, InstallSnapshotReq, InstallSnapshotRes, LearnReq, LearnRes, LearnerJoinReq, LearnerJoinRes, ErrorMessage\n')]
|
# -*- coding: utf-8 -*-
# framework
from django.core.urlresolvers import reverse
from django.contrib import admin
from django.contrib.admin.views import main as admin_views
from django.utils.translation import ugettext as _
from django.http import Http404, HttpResponseRedirect
# module local
from . import models, forms, dataapi
class ChangeList(admin_views.ChangeList):
@property
def available_settings_models(self):
return dataapi.data.type_names()
# Additional columns
def get_setting_value(obj):
return dataapi.data.get(obj.name)
get_setting_value.short_description = _('Value')
# end
# Actions
def clear_cache(modeladmin, request, queryset):
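    # Rebuild the cache keys that the caching decorator on data.get and
    # data.exists would have produced (the calls below assume the
    # decorator exposes a _cache_key helper), then purge them in one
    # round trip with delete_many.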
data = dataapi.data
cache_keys = []
add_key = cache_keys.append
for name in queryset.values_list('name', flat=True):
add_key(data.get._cache_key([name], {}))
add_key(data.exists._cache_key([name], {}))
data.cache.delete_many(cache_keys)
clear_cache.short_description = _("Clear cache for settings")
# end
class SettingAdmin(admin.ModelAdmin):
model = models.Setting
form = forms.SettingForm
list_display = ('name', 'setting_type', get_setting_value)
search_fields = ('name', 'setting_type__name')
actions = [
clear_cache
]
def get_setting_model(self, obj, request):
if obj:
return obj.setting_object.__class__
try:
typename = request.REQUEST['typename'] # NOTE: both lines might
return dataapi.data.model_for_name(typename) # raise KeyError
except KeyError:
raise Http404
def get_form(self, request, obj=None, **kwargs):
Form = super(SettingAdmin, self).get_form(request, obj=obj, **kwargs)
Form.setting_model = self.get_setting_model(obj, request)
return Form
def get_changelist(self, request, **kwargs):
return ChangeList
def render_change_form(self, request, context, add=False, change=False, form_url='', obj=None):
setting_model = self.get_setting_model(obj, request)
context['setting_model_name'] = setting_model.__name__
response = super(SettingAdmin, self).render_change_form(
request, context, add=add, change=change, form_url=form_url, obj=obj)
return response
def _response_url(self, url, typename):
return HttpResponseRedirect('%(url)s?typename=%(typename)s' % {
'url': url,
'typename': typename,
})
def response_add(self, request, obj, post_url_continue='../%s/'):
response = super(SettingAdmin, self).response_add(
request, obj, post_url_continue=post_url_continue)
if '_addanother' in request.POST:
typename = self.get_setting_model(obj, request).__name__
return self._response_url(request.path, typename)
return response
def response_change(self, request, obj):
response = super(SettingAdmin, self).response_change(request, obj)
app_label = obj._meta.app_label
module_name = obj._meta.module_name
if '_addanother' in request.POST:
url_name = 'admin:%s_%s_add' % (app_label, module_name)
url = reverse(url_name, current_app=self.admin_site.name)
typename = self.get_setting_model(obj, request).__name__
return self._response_url(url, typename)
return response
admin.site.register(models.Setting, SettingAdmin)
|
[
"django.utils.translation.ugettext",
"django.http.HttpResponseRedirect",
"django.contrib.admin.site.register",
"django.core.urlresolvers.reverse"
] |
[((599, 609), 'django.utils.translation.ugettext', '_', (['"""Value"""'], {}), "('Value')\n", (600, 609), True, 'from django.utils.translation import ugettext as _\n'), ((981, 1010), 'django.utils.translation.ugettext', '_', (['"""Clear cache for settings"""'], {}), "('Clear cache for settings')\n", (982, 1010), True, 'from django.utils.translation import ugettext as _\n'), ((3405, 3454), 'django.contrib.admin.site.register', 'admin.site.register', (['models.Setting', 'SettingAdmin'], {}), '(models.Setting, SettingAdmin)\n', (3424, 3454), False, 'from django.contrib import admin\n'), ((2353, 2447), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (["('%(url)s?typename=%(typename)s' % {'url': url, 'typename': typename})"], {}), "('%(url)s?typename=%(typename)s' % {'url': url,\n 'typename': typename})\n", (2373, 2447), False, 'from django.http import Http404, HttpResponseRedirect\n'), ((3205, 3256), 'django.core.urlresolvers.reverse', 'reverse', (['url_name'], {'current_app': 'self.admin_site.name'}), '(url_name, current_app=self.admin_site.name)\n', (3212, 3256), False, 'from django.core.urlresolvers import reverse\n')]
|
#!/usr/bin/env python
# coding: utf-8
#%% ---- DEPENDENCIES
import matplotlib.pyplot as plt
from skimage import io
import manynames as mn
#%% ---- FUNCTION TO SHOW OBJECT WITH BOUNDING BOX AND NAMES
def show_objects(url, bbox, objname, block_display=True):
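    """Display an image with a labelled bounding box.

    bbox is assumed to be (x, y, width, height) in pixel coordinates,
    following the Visual Genome convention.
    """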
im = io.imread(url)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.imshow(im, aspect='equal')
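    # Nudge zero coordinates to 1 so the box edge is not clipped at the
    # image border.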
if bbox[0] == 0:
bbox[0] = 1
if bbox[1] == 0:
bbox[1] = 1
plt.gca().add_patch(
plt.Rectangle((bbox[0], bbox[1]),
bbox[2], bbox[3], fill=False,
edgecolor='red', linewidth=2, alpha=0.5)
)
plt.gca().text(bbox[0], bbox[1] - 2,
'%s' % (objname),
bbox=dict(facecolor='blue', alpha=0.5),
fontsize=10, color='white')
plt.axis('off')
plt.tight_layout()
plt.draw()
plt.show(block=block_display)
#%% ---- DIRECTLY RUN
if __name__ == "__main__":
manynames = mn.load_cleaned_results()
for image_id in [2417690, 2417892, 2388484, 2417993, 2388471, 65, 413, 2417452]:
        mn_item = manynames[manynames["vg_image_id"] == image_id]
url = mn_item["link_vg"].values[0]
responses = mn_item["responses"].values[0]
mn_objnames = "MN: "+" / ".join(responses.keys())
bbox = mn_item["target_coord"].values[0]
vg_objname = "VG: "+ mn_item["vg_obj_name"].values[0]
image_name = mn_item["vg_image_name"].values[0]
obj_name = mn_objnames + " (%s)" % vg_objname
show_objects(url, bbox, obj_name)
|
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.Rectangle",
"matplotlib.pyplot.draw",
"matplotlib.pyplot.figure",
"manynames.load_cleaned_results",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.tight_layout",
"skimage.io.imread"
] |
[((273, 287), 'skimage.io.imread', 'io.imread', (['url'], {}), '(url)\n', (282, 287), False, 'from skimage import io\n'), ((298, 310), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (308, 310), True, 'import matplotlib.pyplot as plt\n'), ((855, 870), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (863, 870), True, 'import matplotlib.pyplot as plt\n'), ((875, 893), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (891, 893), True, 'import matplotlib.pyplot as plt\n'), ((898, 908), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (906, 908), True, 'import matplotlib.pyplot as plt\n'), ((913, 942), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': 'block_display'}), '(block=block_display)\n', (921, 942), True, 'import matplotlib.pyplot as plt\n'), ((1007, 1032), 'manynames.load_cleaned_results', 'mn.load_cleaned_results', ([], {}), '()\n', (1030, 1032), True, 'import manynames as mn\n'), ((500, 609), 'matplotlib.pyplot.Rectangle', 'plt.Rectangle', (['(bbox[0], bbox[1])', 'bbox[2]', 'bbox[3]'], {'fill': '(False)', 'edgecolor': '"""red"""', 'linewidth': '(2)', 'alpha': '(0.5)'}), "((bbox[0], bbox[1]), bbox[2], bbox[3], fill=False, edgecolor=\n 'red', linewidth=2, alpha=0.5)\n", (513, 609), True, 'import matplotlib.pyplot as plt\n'), ((471, 480), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (478, 480), True, 'import matplotlib.pyplot as plt\n'), ((671, 680), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (678, 680), True, 'import matplotlib.pyplot as plt\n')]
|
"""
Twitter only supports the application-only flow of OAuth2 for certain
API endpoints. This OAuth2 authenticator only supports the application-only
flow right now.
To authenticate with OAuth2, visit the Twitter developer page and create a new
application:
https://dev.twitter.com/apps/new
This will get you a CONSUMER_KEY and CONSUMER_SECRET.
Exchange your CONSUMER_KEY and CONSUMER_SECRET for a bearer token using the
oauth2_dance function.
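
For example, assuming oauth2_dance is importable from this package::

    BEARER_TOKEN = oauth2_dance(CONSUMER_KEY, CONSUMER_SECRET)
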
Finally, you can use the OAuth2 authenticator and your bearer token to connect
to Twitter. In code it goes like this::
twitter = Twitter(auth=OAuth2(bearer_token=BEARER_TOKEN))
# Now work with Twitter
twitter.search.tweets(q='keyword')
"""
from __future__ import print_function
try:
from urllib.parse import quote, urlencode
except ImportError:
from urllib import quote, urlencode
from base64 import b64encode
from .auth import Auth, MissingCredentialsError
def write_bearer_token_file(filename, oauth2_bearer_token):
"""
Write a token file to hold the oauth2 bearer token.
"""
    with open(filename, 'w') as oauth_file:
        print(oauth2_bearer_token, file=oauth_file)
def read_bearer_token_file(filename):
"""
Read a token file and return the oauth2 bearer token.
"""
    with open(filename) as f:
        return f.readline().strip()
class OAuth2(Auth):
"""
An OAuth2 application-only authenticator.
"""
def __init__(self, consumer_key=None, consumer_secret=None,
bearer_token=None):
"""
Create an authenticator. You can supply consumer_key and
consumer_secret if you are requesting a bearer_token. Otherwise
you must supply the bearer_token.
"""
self.bearer_token = bearer_token
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
if not (bearer_token or (consumer_key and consumer_secret)):
raise MissingCredentialsError(
'You must supply either a bearer token, or both a '
'consumer_key and a consumer_secret.')
def encode_params(self, base_url, method, params):
return urlencode(params)
def generate_headers(self):
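        # With a bearer token we can authenticate requests directly; otherwise
        # build the Basic auth header used to request a bearer token.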
if self.bearer_token:
headers = {
b'Authorization': 'Bearer {0}'.format(
self.bearer_token).encode('utf8')
}
else:
headers = {
b'Content-Type': (b'application/x-www-form-urlencoded;'
b'charset=UTF-8'),
b'Authorization': 'Basic {0}'.format(
b64encode('{0}:{1}'.format(
quote(self.consumer_key),
quote(self.consumer_secret)).encode('utf8')
).decode('utf8')
).encode('utf8')
}
return headers
|
[
"urllib.quote",
"urllib.urlencode"
] |
[((2169, 2186), 'urllib.urlencode', 'urlencode', (['params'], {}), '(params)\n', (2178, 2186), False, 'from urllib import quote, urlencode\n'), ((2686, 2710), 'urllib.quote', 'quote', (['self.consumer_key'], {}), '(self.consumer_key)\n', (2691, 2710), False, 'from urllib import quote, urlencode\n'), ((2736, 2763), 'urllib.quote', 'quote', (['self.consumer_secret'], {}), '(self.consumer_secret)\n', (2741, 2763), False, 'from urllib import quote, urlencode\n')]
|
"""Handle OAuth from Google API."""
import argparse
import logging
from oauth2client import tools
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
CLIENT_SECRETS_FILE = "client_id.json"
SCOPE = 'https://www.googleapis.com/auth/youtube.force-ssl'
STORAGE_FILE = 'credentials'
logger = logging.getLogger('auth')
def get_cred():
"""
    Get an authorized credentials object.
    If no credentials are stored, or the stored credentials are invalid,
    the authorization flow will run.
"""
parser = argparse.ArgumentParser(parents=[tools.argparser])
flags = parser.parse_args()
storage = Storage(STORAGE_FILE)
creds = storage.get()
if not creds or creds.invalid:
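        # No valid cached credentials: run the authorization flow and persist the result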
flow = flow_from_clientsecrets(
CLIENT_SECRETS_FILE, scope=SCOPE)
flow.params['access_type'] = 'offline'
creds = tools.run_flow(flow, storage, flags)
storage.put(creds)
return creds
|
[
"oauth2client.file.Storage",
"argparse.ArgumentParser",
"oauth2client.client.flow_from_clientsecrets",
"oauth2client.tools.run_flow",
"logging.getLogger"
] |
[((332, 357), 'logging.getLogger', 'logging.getLogger', (['"""auth"""'], {}), "('auth')\n", (349, 357), False, 'import logging\n'), ((532, 582), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'parents': '[tools.argparser]'}), '(parents=[tools.argparser])\n', (555, 582), False, 'import argparse\n'), ((629, 650), 'oauth2client.file.Storage', 'Storage', (['STORAGE_FILE'], {}), '(STORAGE_FILE)\n', (636, 650), False, 'from oauth2client.file import Storage\n'), ((728, 785), 'oauth2client.client.flow_from_clientsecrets', 'flow_from_clientsecrets', (['CLIENT_SECRETS_FILE'], {'scope': 'SCOPE'}), '(CLIENT_SECRETS_FILE, scope=SCOPE)\n', (751, 785), False, 'from oauth2client.client import flow_from_clientsecrets\n'), ((862, 898), 'oauth2client.tools.run_flow', 'tools.run_flow', (['flow', 'storage', 'flags'], {}), '(flow, storage, flags)\n', (876, 898), False, 'from oauth2client import tools\n')]
|
import os
import random
import json
def separate_data(genre):
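    """Split raw Amazon review JSON for one genre into labelled and unlabelled train/valid/test files."""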
d_path = '/h/nng/data/sentiment/aws'
with open(os.path.join(d_path, genre, 'orig', 'raw.json'), 'r') as ifile, \
open(os.path.join(d_path, genre, 'orig', 'train.raw.input0'), 'w') as t0file, \
open(os.path.join(d_path, genre, 'orig', 'train.raw.label'), 'w') as tlfile, \
open(os.path.join(d_path, genre, 'orig', 'valid.raw.input0'), 'w') as v0file, \
open(os.path.join(d_path, genre, 'orig', 'valid.raw.label'), 'w') as vlfile, \
open(os.path.join(d_path, genre, 'orig', 'test.raw.input0'), 'w') as te0file, \
open(os.path.join(d_path, genre, 'orig', 'test.raw.label'), 'w') as telfile, \
open(os.path.join(d_path, genre, 'unlabelled', 'train.raw.input0'), 'w') as ut0file, \
open(os.path.join(d_path, genre, 'unlabelled', 'train.raw.label'), 'w') as utlfile, \
open(os.path.join(d_path, genre, 'unlabelled', 'valid.raw.input0'), 'w') as uv0file, \
open(os.path.join(d_path, genre, 'unlabelled', 'valid.raw.label'), 'w') as uvlfile, \
open(os.path.join(d_path, genre, 'unlabelled', 'test.raw.input0'), 'w') as ute0file, \
open(os.path.join(d_path, genre, 'unlabelled', 'test.raw.label'), 'w') as utelfile:
orig_in_files = [t0file, v0file, te0file]
orig_l_files = [tlfile, vlfile, telfile]
unl_in_files = [ut0file, uv0file, ute0file]
unl_l_files = [utlfile, uvlfile, utelfile]
print("Processing data for {}".format(genre))
lines = []
for line in ifile:
if len(lines) > 2000000:
break
row = json.loads(line)
if 'reviewText' not in row:
continue
text = row['reviewText'].encode('unicode_escape').decode('utf-8')
if len(text.split()) > 300:
text = ' '.join(text.split()[:300])
score = int(row['overall'])
lines.append([text, score])
print("Shuffling data for {}".format(genre))
random.shuffle(lines)
# 25k train, 2k valid, 2k test
train = lines[:25000]
valid = lines[25000:25000+2000]
test = lines[27000:27000+2000]
oarrays = [train, valid, test]
# 1.5m train, 10k valid, 10k test
utrain = lines[29000:1529000]
uvalid = lines[1529000:1539000]
utest = lines[1539000:1549000]
uarrays = [utrain, uvalid, utest]
print("Writing data for {}".format(genre))
for i in range(3):
for line in oarrays[i]:
orig_in_files[i].write(line[0] + '\n')
orig_l_files[i].write(str(line[1]) + '\n')
for line in uarrays[i]:
unl_in_files[i].write(line[0] + '\n')
unl_l_files[i].write(str(line[1]) + '\n')
if __name__ == "__main__":
random.seed(1)
for genre in ['kindle', 'pets', 'tools', 'books', 'clothing', 'home', 'movies', 'sports', 'tech', 'toys']:
separate_data(genre)
|
[
"random.shuffle",
"random.seed",
"os.path.join",
"json.loads"
] |
[((2900, 2914), 'random.seed', 'random.seed', (['(1)'], {}), '(1)\n', (2911, 2914), False, 'import random\n'), ((2079, 2100), 'random.shuffle', 'random.shuffle', (['lines'], {}), '(lines)\n', (2093, 2100), False, 'import random\n'), ((137, 184), 'os.path.join', 'os.path.join', (['d_path', 'genre', '"""orig"""', '"""raw.json"""'], {}), "(d_path, genre, 'orig', 'raw.json')\n", (149, 184), False, 'import os\n'), ((217, 272), 'os.path.join', 'os.path.join', (['d_path', 'genre', '"""orig"""', '"""train.raw.input0"""'], {}), "(d_path, genre, 'orig', 'train.raw.input0')\n", (229, 272), False, 'import os\n'), ((306, 360), 'os.path.join', 'os.path.join', (['d_path', 'genre', '"""orig"""', '"""train.raw.label"""'], {}), "(d_path, genre, 'orig', 'train.raw.label')\n", (318, 360), False, 'import os\n'), ((394, 449), 'os.path.join', 'os.path.join', (['d_path', 'genre', '"""orig"""', '"""valid.raw.input0"""'], {}), "(d_path, genre, 'orig', 'valid.raw.input0')\n", (406, 449), False, 'import os\n'), ((483, 537), 'os.path.join', 'os.path.join', (['d_path', 'genre', '"""orig"""', '"""valid.raw.label"""'], {}), "(d_path, genre, 'orig', 'valid.raw.label')\n", (495, 537), False, 'import os\n'), ((571, 625), 'os.path.join', 'os.path.join', (['d_path', 'genre', '"""orig"""', '"""test.raw.input0"""'], {}), "(d_path, genre, 'orig', 'test.raw.input0')\n", (583, 625), False, 'import os\n'), ((660, 713), 'os.path.join', 'os.path.join', (['d_path', 'genre', '"""orig"""', '"""test.raw.label"""'], {}), "(d_path, genre, 'orig', 'test.raw.label')\n", (672, 713), False, 'import os\n'), ((748, 809), 'os.path.join', 'os.path.join', (['d_path', 'genre', '"""unlabelled"""', '"""train.raw.input0"""'], {}), "(d_path, genre, 'unlabelled', 'train.raw.input0')\n", (760, 809), False, 'import os\n'), ((844, 904), 'os.path.join', 'os.path.join', (['d_path', 'genre', '"""unlabelled"""', '"""train.raw.label"""'], {}), "(d_path, genre, 'unlabelled', 'train.raw.label')\n", (856, 904), False, 'import os\n'), ((939, 1000), 'os.path.join', 'os.path.join', (['d_path', 'genre', '"""unlabelled"""', '"""valid.raw.input0"""'], {}), "(d_path, genre, 'unlabelled', 'valid.raw.input0')\n", (951, 1000), False, 'import os\n'), ((1035, 1095), 'os.path.join', 'os.path.join', (['d_path', 'genre', '"""unlabelled"""', '"""valid.raw.label"""'], {}), "(d_path, genre, 'unlabelled', 'valid.raw.label')\n", (1047, 1095), False, 'import os\n'), ((1130, 1190), 'os.path.join', 'os.path.join', (['d_path', 'genre', '"""unlabelled"""', '"""test.raw.input0"""'], {}), "(d_path, genre, 'unlabelled', 'test.raw.input0')\n", (1142, 1190), False, 'import os\n'), ((1226, 1285), 'os.path.join', 'os.path.join', (['d_path', 'genre', '"""unlabelled"""', '"""test.raw.label"""'], {}), "(d_path, genre, 'unlabelled', 'test.raw.label')\n", (1238, 1285), False, 'import os\n'), ((1685, 1701), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (1695, 1701), False, 'import json\n')]
|
# -*- coding: utf-8 -*-
"""
Test Generic Map
"""
import os
import pytest
import numpy as np
import astropy.units as u
from astropy.coordinates import SkyCoord
import matplotlib.pyplot as plt
import sunpy
import sunpy.map
import sunpy.coordinates
import sunpy.data.test
from sunpy.tests.helpers import figure_test
testpath = sunpy.data.test.rootdir
@pytest.fixture
def aia171_test_map():
return sunpy.map.Map(os.path.join(testpath, 'aia_171_level1.fits'))
@pytest.fixture
def heliographic_test_map():
return sunpy.map.Map(os.path.join(testpath, 'heliographic_phase_map.fits.gz'))
@pytest.fixture
def aia171_test_map_with_mask(aia171_test_map):
shape = aia171_test_map.data.shape
mask = np.zeros_like(aia171_test_map.data, dtype=bool)
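    # Mask out the top-left quadrant of the test map's data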
mask[0:shape[0] // 2, 0:shape[1] // 2] = True
return sunpy.map.Map(np.ma.array(
aia171_test_map.data, mask=mask),
aia171_test_map.meta)
@figure_test
def test_plot_aia171(aia171_test_map):
aia171_test_map.plot()
@figure_test
def test_plot_aia171_clip(aia171_test_map):
aia171_test_map.plot(clip_interval=(5., 99.)*u.percent)
@figure_test
def test_peek_aia171(aia171_test_map):
aia171_test_map.peek()
@figure_test
def test_peek_grid_aia171(aia171_test_map):
aia171_test_map.peek(draw_grid=True)
@figure_test
def test_peek_grid_spacing_aia171(aia171_test_map):
aia171_test_map.peek(draw_grid=(5, 5) * u.deg)
@figure_test
def test_peek_limb_aia171(aia171_test_map):
aia171_test_map.peek(draw_limb=True)
@figure_test
def test_draw_grid_aia171(aia171_test_map):
aia171_test_map.plot()
aia171_test_map.draw_grid(grid_spacing=(30, 40) * u.deg)
@figure_test
def test_peek_grid_limb_aia171(aia171_test_map):
aia171_test_map.peek(draw_grid=True, draw_limb=True)
@figure_test
def test_plot_aia171_nowcsaxes(aia171_test_map):
ax = plt.gca()
aia171_test_map.plot(axes=ax)
@figure_test
def test_rectangle_aia171(aia171_test_map):
aia171_test_map.plot()
bottom_left = SkyCoord(
0 * u.arcsec, 0 * u.arcsec, frame=aia171_test_map.coordinate_frame)
w = 100 * u.arcsec
h = 100 * u.arcsec
aia171_test_map.draw_rectangle(bottom_left, w, h)
@figure_test
def test_plot_masked_aia171(aia171_test_map_with_mask):
aia171_test_map_with_mask.plot()
@figure_test
def test_plot_masked_aia171_nowcsaxes(aia171_test_map_with_mask):
ax = plt.gca()
aia171_test_map_with_mask.plot(axes=ax)
@figure_test
def test_plot_aia171_superpixel(aia171_test_map):
aia171_test_map.superpixel((9, 7) * u.pix, offset=(4, 4) * u.pix).plot()
@figure_test
def test_plot_aia171_superpixel_nowcsaxes(aia171_test_map):
ax = plt.gca()
aia171_test_map.superpixel(
(9, 7) * u.pix, offset=(4, 4) * u.pix).plot(axes=ax)
@figure_test
def test_plot_masked_aia171_superpixel(aia171_test_map_with_mask):
aia171_test_map_with_mask.superpixel(
(9, 7) * u.pix, offset=(4, 4) * u.pix).plot()
@figure_test
def test_plot_masked_aia171_superpixel_nowcsaxes(aia171_test_map_with_mask):
ax = plt.gca()
aia171_test_map_with_mask.superpixel(
(9, 7) * u.pix, offset=(4, 4) * u.pix).plot(axes=ax)
@figure_test
def test_draw_contours_aia(aia171_test_map):
aia171_test_map.plot()
aia171_test_map.draw_contours(u.Quantity(np.arange(1, 100, 10), 'percent'))
@figure_test
def test_heliographic_peek(heliographic_test_map):
heliographic_test_map.peek()
@figure_test
def test_heliographic_rectangle(heliographic_test_map):
heliographic_test_map.plot()
bottom = SkyCoord(
60 * u.deg, 50 * u.deg, frame=heliographic_test_map.coordinate_frame)
w = 13 * u.deg
h = 13 * u.deg
heliographic_test_map.draw_rectangle(bottom, w, h, color='cyan')
@figure_test
def test_heliographic_grid_annotations(heliographic_test_map):
heliographic_test_map.plot()
heliographic_test_map.draw_grid(annotate=False)
|
[
"numpy.zeros_like",
"os.path.join",
"numpy.ma.array",
"numpy.arange",
"matplotlib.pyplot.gca",
"astropy.coordinates.SkyCoord"
] |
[((711, 758), 'numpy.zeros_like', 'np.zeros_like', (['aia171_test_map.data'], {'dtype': 'bool'}), '(aia171_test_map.data, dtype=bool)\n', (724, 758), True, 'import numpy as np\n'), ((1876, 1885), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1883, 1885), True, 'import matplotlib.pyplot as plt\n'), ((2024, 2100), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['(0 * u.arcsec)', '(0 * u.arcsec)'], {'frame': 'aia171_test_map.coordinate_frame'}), '(0 * u.arcsec, 0 * u.arcsec, frame=aia171_test_map.coordinate_frame)\n', (2032, 2100), False, 'from astropy.coordinates import SkyCoord\n'), ((2408, 2417), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2415, 2417), True, 'import matplotlib.pyplot as plt\n'), ((2688, 2697), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2695, 2697), True, 'import matplotlib.pyplot as plt\n'), ((3070, 3079), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3077, 3079), True, 'import matplotlib.pyplot as plt\n'), ((3566, 3644), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['(60 * u.deg)', '(50 * u.deg)'], {'frame': 'heliographic_test_map.coordinate_frame'}), '(60 * u.deg, 50 * u.deg, frame=heliographic_test_map.coordinate_frame)\n', (3574, 3644), False, 'from astropy.coordinates import SkyCoord\n'), ((418, 463), 'os.path.join', 'os.path.join', (['testpath', '"""aia_171_level1.fits"""'], {}), "(testpath, 'aia_171_level1.fits')\n", (430, 463), False, 'import os\n'), ((537, 593), 'os.path.join', 'os.path.join', (['testpath', '"""heliographic_phase_map.fits.gz"""'], {}), "(testpath, 'heliographic_phase_map.fits.gz')\n", (549, 593), False, 'import os\n'), ((834, 878), 'numpy.ma.array', 'np.ma.array', (['aia171_test_map.data'], {'mask': 'mask'}), '(aia171_test_map.data, mask=mask)\n', (845, 878), True, 'import numpy as np\n'), ((3315, 3336), 'numpy.arange', 'np.arange', (['(1)', '(100)', '(10)'], {}), '(1, 100, 10)\n', (3324, 3336), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe import _
from frappe.utils import flt
from frappe.model.mapper import get_mapped_doc
from erpnext.hr.utils import validate_tax_declaration, get_total_exemption_amount, \
calculate_annual_eligible_hra_exemption, validate_duplicate_exemption_for_payroll_period
class EmployeeTaxExemptionDeclaration(Document):
def validate(self):
validate_tax_declaration(self.declarations)
validate_duplicate_exemption_for_payroll_period(self.doctype, self.name, self.payroll_period, self.employee)
self.set_total_declared_amount()
self.set_total_exemption_amount()
self.calculate_hra_exemption()
def set_total_declared_amount(self):
self.total_declared_amount = 0.0
for d in self.declarations:
self.total_declared_amount += flt(d.amount)
def set_total_exemption_amount(self):
self.total_exemption_amount = get_total_exemption_amount(self.declarations)
def calculate_hra_exemption(self):
self.salary_structure_hra, self.annual_hra_exemption, self.monthly_hra_exemption = 0, 0, 0
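		# HRA exemption is only computed when monthly house rent has been declared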
if self.get("monthly_house_rent"):
hra_exemption = calculate_annual_eligible_hra_exemption(self)
if hra_exemption:
self.total_exemption_amount += hra_exemption["annual_exemption"]
self.salary_structure_hra = hra_exemption["hra_amount"]
self.annual_hra_exemption = hra_exemption["annual_exemption"]
self.monthly_hra_exemption = hra_exemption["monthly_exemption"]
@frappe.whitelist()
def make_proof_submission(source_name, target_doc=None):
doclist = get_mapped_doc("Employee Tax Exemption Declaration", source_name, {
"Employee Tax Exemption Declaration": {
"doctype": "Employee Tax Exemption Proof Submission",
"field_no_map": ["monthly_house_rent", "monthly_hra_exemption"]
},
"Employee Tax Exemption Declaration Category": {
"doctype": "Employee Tax Exemption Proof Submission Detail",
"add_if_empty": True
}
}, target_doc)
return doclist
|
[
"frappe.utils.flt",
"erpnext.hr.utils.get_total_exemption_amount",
"frappe.whitelist",
"frappe.model.mapper.get_mapped_doc",
"erpnext.hr.utils.validate_tax_declaration",
"erpnext.hr.utils.validate_duplicate_exemption_for_payroll_period",
"erpnext.hr.utils.calculate_annual_eligible_hra_exemption"
] |
[((1632, 1650), 'frappe.whitelist', 'frappe.whitelist', ([], {}), '()\n', (1648, 1650), False, 'import frappe\n'), ((1719, 2116), 'frappe.model.mapper.get_mapped_doc', 'get_mapped_doc', (['"""Employee Tax Exemption Declaration"""', 'source_name', "{'Employee Tax Exemption Declaration': {'doctype':\n 'Employee Tax Exemption Proof Submission', 'field_no_map': [\n 'monthly_house_rent', 'monthly_hra_exemption']},\n 'Employee Tax Exemption Declaration Category': {'doctype':\n 'Employee Tax Exemption Proof Submission Detail', 'add_if_empty': True}}", 'target_doc'], {}), "('Employee Tax Exemption Declaration', source_name, {\n 'Employee Tax Exemption Declaration': {'doctype':\n 'Employee Tax Exemption Proof Submission', 'field_no_map': [\n 'monthly_house_rent', 'monthly_hra_exemption']},\n 'Employee Tax Exemption Declaration Category': {'doctype':\n 'Employee Tax Exemption Proof Submission Detail', 'add_if_empty': True}\n }, target_doc)\n", (1733, 2116), False, 'from frappe.model.mapper import get_mapped_doc\n'), ((586, 629), 'erpnext.hr.utils.validate_tax_declaration', 'validate_tax_declaration', (['self.declarations'], {}), '(self.declarations)\n', (610, 629), False, 'from erpnext.hr.utils import validate_tax_declaration, get_total_exemption_amount, calculate_annual_eligible_hra_exemption, validate_duplicate_exemption_for_payroll_period\n'), ((632, 744), 'erpnext.hr.utils.validate_duplicate_exemption_for_payroll_period', 'validate_duplicate_exemption_for_payroll_period', (['self.doctype', 'self.name', 'self.payroll_period', 'self.employee'], {}), '(self.doctype, self.name,\n self.payroll_period, self.employee)\n', (679, 744), False, 'from erpnext.hr.utils import validate_tax_declaration, get_total_exemption_amount, calculate_annual_eligible_hra_exemption, validate_duplicate_exemption_for_payroll_period\n'), ((1068, 1113), 'erpnext.hr.utils.get_total_exemption_amount', 'get_total_exemption_amount', (['self.declarations'], {}), '(self.declarations)\n', (1094, 1113), False, 'from erpnext.hr.utils import validate_tax_declaration, get_total_exemption_amount, calculate_annual_eligible_hra_exemption, validate_duplicate_exemption_for_payroll_period\n'), ((982, 995), 'frappe.utils.flt', 'flt', (['d.amount'], {}), '(d.amount)\n', (985, 995), False, 'from frappe.utils import flt\n'), ((1300, 1345), 'erpnext.hr.utils.calculate_annual_eligible_hra_exemption', 'calculate_annual_eligible_hra_exemption', (['self'], {}), '(self)\n', (1339, 1345), False, 'from erpnext.hr.utils import validate_tax_declaration, get_total_exemption_amount, calculate_annual_eligible_hra_exemption, validate_duplicate_exemption_for_payroll_period\n')]
|
#! /usr/bin/env python
# Public domain; MZMcBride, 2011; Legoktm, 2014
from flask import Flask, request, render_template
from flask_caching import Cache
import re
import requests
import operator
import toolforge
app = Flask(__name__)
cache = Cache(
app,
config={'CACHE_TYPE': 'redis',
'CACHE_REDIS_HOST': 'tools-redis',
'CACHE_KEY_PREFIX': 'tool-checker'}
)
toolforge.set_user_agent('checker')
@cache.cached(timeout=60*60*24)
def database_list():
conn = toolforge.connect('meta_p')
cursor = conn.cursor()
cursor.execute('''
/* checker.py database_list */
SELECT
dbname
FROM wiki
WHERE is_closed = 0
ORDER BY dbname ASC;
''')
databases = cursor.fetchall()
cursor.close()
conn.close()
ret = []
for database in databases:
ret.append(database[0])
return ret
@cache.memoize(timeout=60*60*24)
def choose_host_and_domain(db):
conn = toolforge.connect('meta_p')
cursor = conn.cursor()
cursor.execute('''
/* checker.py choose_host_and_domain */
SELECT
url
FROM wiki
WHERE dbname = %s;
''', (db,))
result = cursor.fetchall()
cursor.close()
conn.close()
if result:
for row in result:
domain = '%s' % row[0]
if domain:
return {'host': db + '.labsdb', 'domain': domain}
return None
@cache.memoize(timeout=60*60*24)
def get_extension_namespaces(domain):
params = {
'action': 'query',
'meta': 'proofreadinfo|siteinfo',
'piprop': 'namespaces',
'siprop': 'namespaces',
'format': 'json'
}
query_url = '%s/w/api.php' % domain
req = requests.get(query_url, params=params)
parsed_content = req.json()
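    # Wikis without the ProofreadPage extension do not report these namespaces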
try:
page_namespace = parsed_content['query']['proofreadnamespaces']['page']['id']
index_namespace = parsed_content['query']['proofreadnamespaces']['index']['id']
except KeyError:
return None
names = parsed_content['query']['namespaces']
return {'page_namespace': page_namespace,
'index_namespace': index_namespace,
'names': names}
def get_page_links(cursor, db, page_namespace, index_namespace, index_page):
page_links = []
cursor.execute('''
/* checker.py get_page_links */
SELECT
pl_title
FROM pagelinks
JOIN page AS p1
ON pl_from = p1.page_id
JOIN page AS p2
ON p2.page_title = pl_title
AND p2.page_namespace = pl_namespace
WHERE pl_namespace = %s
AND p1.page_namespace = %s
AND p1.page_title = %s;
''', (page_namespace, index_namespace, index_page))
for row in cursor.fetchall():
pl_title = row[0].decode()
try:
sort_key = int(row[0].decode().rsplit('/', 1)[1])
except IndexError:
sort_key = 1
page_links.append([pl_title, sort_key])
return page_links
def get_page_status(cursor, db, page_namespace, page):
page_status = {}
# Check if the page has transclusions first
cursor.execute('''
/* checker.py get_page_status */
SELECT
COUNT(*)
FROM templatelinks
JOIN page
ON tl_from = page_id
WHERE tl_namespace = %s
AND tl_title = %s
AND page_namespace = 0;
''', (page_namespace, page))
transclusion_count = cursor.fetchall()
if transclusion_count:
page_status['transclusion_count'] = int(transclusion_count[0][0])
# Then check if the page has been proofread
cursor.execute('''
/* checker.py get_page_status */
SELECT
cl_to
FROM page
JOIN categorylinks
ON cl_from = page_id
WHERE page_id = cl_from
AND page_namespace = %s
AND page_title = %s;
''', (page_namespace, page))
proofread_status = cursor.fetchall()
if proofread_status:
page_status['proofread_status'] = proofread_status[0][0].decode().lower().replace('_', ' ')
return page_status
@app.route('/')
def main():
host = db = domain = extension_dict = None
# Pick a db; make enwikisource the default
if request.args.get('db') is not None:
db = request.args.get('db').replace('_p', '')
else:
db = 'enwikisource'
# All right, now let's pick a host and domain
connection_props = choose_host_and_domain(db)
if connection_props:
host = connection_props['host']
domain = connection_props['domain']
if domain:
extension_dict = get_extension_namespaces(domain)
if extension_dict:
page_namespace_id = extension_dict['page_namespace']
index_namespace_id = extension_dict['index_namespace']
page_namespace_name = extension_dict['names'][str(page_namespace_id)]['*']
index_namespace_name = extension_dict['names'][str(index_namespace_id)]['*']
if 'title' in request.args:
title = request.args.get('title')
else:
title = ''
yes_rows = []
no_rows = []
error = None
if host is not None and title and extension_dict:
conn = toolforge.connect(db)
cursor = conn.cursor()
# Eliminate LTR and RTL marks and strip extra whitespace.
        title = re.sub('[\u200e\u200f]', '', title).strip(' ')
# Prep the title for the query (replace spaces and strip namespace name if present).
clean_title = title.replace(' ', '_').split(index_namespace_name+':', 1)[1]
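        # Fetch all Page-namespace pages linked from this Index page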
page_links = get_page_links(cursor, db+'_p', page_namespace_id, index_namespace_id, clean_title)
if page_links:
# Sort!
page_links = sorted(page_links, key=operator.itemgetter(1))
for item in page_links:
page_link = item[0]
status = get_page_status(cursor, db+'_p', page_namespace_id, page_link)
table_row = {
'domain': domain,
'ns': page_namespace_name,
'title': page_link,
'status': status['proofread_status']
}
if status['transclusion_count'] > 0:
yes_rows.append(table_row)
else:
no_rows.append(table_row)
cursor.close()
conn.close()
show_form = False
if title:
if not (db and host is not None and title and extension_dict):
error = 'There was some sort of error. Sorry. :-('
elif host is None:
error = "You didn't specify an appropriate database name."
else:
show_form = True
return render_template(
'main.html',
error=error,
yes_rows=yes_rows,
no_rows=no_rows,
show_form=show_form,
databases=database_list(),
selected_db=db,
clean=lambda x: x.replace('_', ' ')
)
if __name__ == '__main__':
app.run(debug=True)
|
[
"flask_caching.Cache",
"flask.request.args.get",
"flask.Flask",
"toolforge.connect",
"requests.get",
"toolforge.set_user_agent",
"operator.itemgetter",
"re.sub"
] |
[((220, 235), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (225, 235), False, 'from flask import Flask, request, render_template\n'), ((244, 361), 'flask_caching.Cache', 'Cache', (['app'], {'config': "{'CACHE_TYPE': 'redis', 'CACHE_REDIS_HOST': 'tools-redis',\n 'CACHE_KEY_PREFIX': 'tool-checker'}"}), "(app, config={'CACHE_TYPE': 'redis', 'CACHE_REDIS_HOST': 'tools-redis',\n 'CACHE_KEY_PREFIX': 'tool-checker'})\n", (249, 361), False, 'from flask_caching import Cache\n'), ((392, 427), 'toolforge.set_user_agent', 'toolforge.set_user_agent', (['"""checker"""'], {}), "('checker')\n", (416, 427), False, 'import toolforge\n'), ((494, 521), 'toolforge.connect', 'toolforge.connect', (['"""meta_p"""'], {}), "('meta_p')\n", (511, 521), False, 'import toolforge\n'), ((942, 969), 'toolforge.connect', 'toolforge.connect', (['"""meta_p"""'], {}), "('meta_p')\n", (959, 969), False, 'import toolforge\n'), ((1681, 1719), 'requests.get', 'requests.get', (['query_url'], {'params': 'params'}), '(query_url, params=params)\n', (1693, 1719), False, 'import requests\n'), ((4057, 4079), 'flask.request.args.get', 'request.args.get', (['"""db"""'], {}), "('db')\n", (4073, 4079), False, 'from flask import Flask, request, render_template\n'), ((4860, 4885), 'flask.request.args.get', 'request.args.get', (['"""title"""'], {}), "('title')\n", (4876, 4885), False, 'from flask import Flask, request, render_template\n'), ((5039, 5060), 'toolforge.connect', 'toolforge.connect', (['db'], {}), '(db)\n', (5056, 5060), False, 'import toolforge\n'), ((4106, 4128), 'flask.request.args.get', 'request.args.get', (['"""db"""'], {}), "('db')\n", (4122, 4128), False, 'from flask import Flask, request, render_template\n'), ((5174, 5228), 're.sub', 're.sub', (['"""(\\\\xe2\\\\x80\\\\x8e|\\\\xe2\\\\x80\\\\x8f)"""', '""""""', 'title'], {}), "('(\\\\xe2\\\\x80\\\\x8e|\\\\xe2\\\\x80\\\\x8f)', '', title)\n", (5180, 5228), False, 'import re\n'), ((5608, 5630), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (5627, 5630), False, 'import operator\n')]
|
"""
Contains methods for YOLOv3 models for object detection.
Trained on COCO dataset.
"""
import re
import numpy as np
from collections import defaultdict
from kenning.core.model import ModelWrapper
from kenning.datasets.open_images_dataset import DectObject, compute_iou, Dataset # noqa: E501
import sys
if sys.version_info < (3, 9):
    from importlib_resources import path
else:
    from importlib.resources import path
from kenning.resources import coco_detection
from pathlib import Path
class TVMDarknetCOCOYOLOV3(ModelWrapper):
def __init__(
self,
modelpath,
dataset,
from_file,
class_names: str = "coco"):
self.thresh = 0.2
self.iouthresh = 0.5
super().__init__(modelpath, dataset, from_file)
# for work with dataproviders, this is handling dataset-less operation
if self.dataset is None:
self.batch_size = 1
self.classnames = []
if class_names == 'coco':
with path(coco_detection, 'cocov6.classes') as p:
with open(p, 'r') as f:
for line in f:
self.classnames.append(line.split(',')[1].strip())
            else:
                # pathlib.Path is not a context manager on recent Pythons; open the file directly
                with open(Path(class_names), 'r') as f:
                    for line in f:
                        self.classnames.append(line.split(',')[1].strip())
else:
self.batch_size = self.dataset.batch_size
self.classnames = self.dataset.classnames
self.numclasses = len(self.classnames)
@classmethod
def form_argparse(cls, no_dataset: bool = False):
parser, group = super().form_argparse(no_dataset)
if no_dataset:
group.add_argument(
'--classes',
help='File containing Open Images class IDs and class names in CSV format to use (can be generated using kenning.scenarios.open_images_classes_extractor) or class type', # noqa: E501
type=str,
default='coco'
)
return parser, group
@classmethod
def from_argparse(
cls,
dataset: Dataset,
args,
from_file: bool = True):
return cls(args.model_path, dataset, from_file, args.classes)
def load_model(self, modelpath):
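        # Parse width/height/classes and per-layer masks/anchors from the Darknet .cfg file next to the model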
self.keyparams = {}
self.perlayerparams = defaultdict(list)
keyparamsrgx = re.compile(r'(width|height|classes)=(\d+)')
perlayerrgx = re.compile(r'(mask|anchors|num)=((\d+,?)+)')
with open(self.modelpath.with_suffix('.cfg'), 'r') as config:
for line in config:
line = line.replace(' ', '')
res = keyparamsrgx.match(line)
if res:
self.keyparams[res.group(1)] = int(res.group(2))
continue
res = perlayerrgx.match(line)
if res:
self.perlayerparams[res.group(1)].append(res.group(2))
self.perlayerparams = {
k: [np.array([int(x) for x in s.split(',')]) for s in v]
for k, v in self.perlayerparams.items()
}
def prepare_model(self):
self.load_model(self.modelpath)
def get_input_spec(self):
return {
'data': (
1, 3, self.keyparams['width'], self.keyparams['height']
)
}, 'float32'
def preprocess_input(self, X):
return np.array(X)
def convert_to_dectobject(self, entry):
# array x, y, w, h, classid, score
x1 = entry[0] - entry[2] / 2
x2 = entry[0] + entry[2] / 2
y1 = entry[1] - entry[3] / 2
y2 = entry[1] + entry[3] / 2
return DectObject(
self.classnames[entry[4]],
x1, y1, x2, y2,
entry[5]
)
def parse_outputs(self, data):
# get all bounding boxes with objectness score over given threshold
boxdata = []
for i in range(len(data)):
ids = np.asarray(np.where(data[i][:, 4, :, :] > self.thresh))
ids = np.transpose(ids)
if ids.shape[0] > 0:
ids = np.append([[i]] * ids.shape[0], ids, axis=1)
boxdata.append(ids)
if len(boxdata) > 0:
boxdata = np.concatenate(boxdata)
# each entry in boxdata contains:
# - layer id
# - det id
# - y id
# - x id
bboxes = []
for box in boxdata:
# x and y values from network are coordinates in a chunk
# to get the actual coordinates, we need to compute
# new_coords = (chunk_coords + out_coords) / out_resolution
x = (box[3] + data[box[0]][box[1], 0, box[2], box[3]]) / data[box[0]].shape[2] # noqa: E501
y = (box[2] + data[box[0]][box[1], 1, box[2], box[3]]) / data[box[0]].shape[3] # noqa: E501
# width and height are computed using following formula:
# w = anchor_w * exp(out_w) / input_w
# h = anchor_h * exp(out_h) / input_h
# anchors are computed based on dataset analysis
maskid = self.perlayerparams['mask'][2 - box[0]][box[1]]
anchors = self.perlayerparams['anchors'][box[0]][2 * maskid:2 * maskid + 2] # noqa: E501
w = anchors[0] * np.exp(data[box[0]][box[1], 2, box[2], box[3]]) / self.keyparams['width'] # noqa: E501
h = anchors[1] * np.exp(data[box[0]][box[1], 3, box[2], box[3]]) / self.keyparams['height'] # noqa: E501
# get objectness score
objectness = data[box[0]][box[1], 4, box[2], box[3]]
# get class with highest probability
classid = np.argmax(data[box[0]][box[1], 5:, box[2], box[3]])
            # compute final class score (objectness * class probability)
score = objectness * data[box[0]][box[1], classid + 5, box[2], box[3]] # noqa: E501
# drop the bounding box if final score is below threshold
if score < self.thresh:
continue
bboxes.append([x, y, w, h, classid, score])
# sort the bboxes by score descending
bboxes.sort(key=lambda x: x[5], reverse=True)
bboxes = [self.convert_to_dectobject(b) for b in bboxes]
# group bboxes by class to perform NMS sorting
grouped_bboxes = defaultdict(list)
for item in bboxes:
grouped_bboxes[item.clsname].append(item)
# perform NMS sort to drop overlapping predictions for the same class
cleaned_bboxes = []
for clsbboxes in grouped_bboxes.values():
for i in range(len(clsbboxes)):
# if score equals 0, the bbox is dropped
if clsbboxes[i].score == 0:
continue
# add current bbox to final results
cleaned_bboxes.append(clsbboxes[i])
# look for overlapping bounding boxes with lower probability
# and IoU exceeding specified threshold
for j in range(i + 1, len(clsbboxes)):
if compute_iou(clsbboxes[i], clsbboxes[j]) > self.iouthresh: # noqa: E501
clsbboxes[j] = clsbboxes[j]._replace(score=0)
return cleaned_bboxes
def postprocess_outputs(self, y):
# YOLOv3 has three stages of outputs
# each one contains:
# - real output
# - masks
# - biases
# TVM-based model output provides 12 arrays
# Those are subdivided into three groups containing
# - actual YOLOv3 output
# - masks IDs
# - anchors
# - 6 integers holding number of dects per cluster, actual output
# number of channels, actual output height and width, number of
# classes and unused parameter
# iterate over each group
lastid = 0
outputs = []
for i in range(3):
# first extract the actual output
# each output layer shape follows formula:
# (BS, B * (4 + 1 + C), w / (8 * (i + 1)), h / (8 * (i + 1)))
# BS is the batch size
# w, h are width and height of the input image
# the resolution is reduced over the network, and is 8 times
# smaller in each dimension for each output
# the "pixels" in the outputs are responsible for the chunks of
# image - in the first output each pixel is responsible for 8x8
# squares of input image, the second output covers objects from
# 16x16 chunks etc.
# Each "pixel" can predict up to B bounding boxes.
# Each bounding box is described by its 4 coordinates,
# objectness prediction and per-class predictions
outshape = (
self.batch_size,
len(self.perlayerparams['mask'][i]),
4 + 1 + self.numclasses,
self.keyparams['width'] // (8 * 2 ** i),
self.keyparams['height'] // (8 * 2 ** i)
)
outputs.append(
y[lastid:(lastid + np.prod(outshape))].reshape(outshape)
)
# drop additional info provided in the TVM output
# since it's all 4-bytes values, ignore the insides
lastid += (
np.prod(outshape)
+ len(self.perlayerparams['mask'][i])
+ len(self.perlayerparams['anchors'][i])
+ 6 # layer parameters
)
# change the dimensions so the output format is
# batches layerouts dets params width height
perbatchoutputs = []
for i in range(outputs[0].shape[0]):
perbatchoutputs.append([
outputs[0][i],
outputs[1][i],
outputs[2][i]
])
result = []
# parse the combined outputs for each image in batch, and return result
for out in perbatchoutputs:
result.append(self.parse_outputs(out))
return result
def convert_input_to_bytes(self, inputdata):
return inputdata.tobytes()
def convert_output_from_bytes(self, outputdata):
return np.frombuffer(outputdata, dtype='float32')
def get_framework_and_version(self):
return ('darknet', 'alexeyab')
|
[
"kenning.datasets.open_images_dataset.DectObject",
"numpy.argmax",
"numpy.frombuffer",
"numpy.transpose",
"numpy.prod",
"collections.defaultdict",
"numpy.append",
"pathlib.Path",
"numpy.where",
"numpy.array",
"importlib.resources.path",
"numpy.exp",
"kenning.datasets.open_images_dataset.compute_iou",
"numpy.concatenate",
"re.compile"
] |
[((2457, 2474), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2468, 2474), False, 'from collections import defaultdict\n'), ((2498, 2541), 're.compile', 're.compile', (['"""(width|height|classes)=(\\\\d+)"""'], {}), "('(width|height|classes)=(\\\\d+)')\n", (2508, 2541), False, 'import re\n'), ((2564, 2608), 're.compile', 're.compile', (['"""(mask|anchors|num)=((\\\\d+,?)+)"""'], {}), "('(mask|anchors|num)=((\\\\d+,?)+)')\n", (2574, 2608), False, 'import re\n'), ((3532, 3543), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (3540, 3543), True, 'import numpy as np\n'), ((3795, 3858), 'kenning.datasets.open_images_dataset.DectObject', 'DectObject', (['self.classnames[entry[4]]', 'x1', 'y1', 'x2', 'y2', 'entry[5]'], {}), '(self.classnames[entry[4]], x1, y1, x2, y2, entry[5])\n', (3805, 3858), False, 'from kenning.datasets.open_images_dataset import DectObject, compute_iou, Dataset\n'), ((6445, 6462), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (6456, 6462), False, 'from collections import defaultdict\n'), ((10305, 10347), 'numpy.frombuffer', 'np.frombuffer', (['outputdata'], {'dtype': '"""float32"""'}), "(outputdata, dtype='float32')\n", (10318, 10347), True, 'import numpy as np\n'), ((4165, 4182), 'numpy.transpose', 'np.transpose', (['ids'], {}), '(ids)\n', (4177, 4182), True, 'import numpy as np\n'), ((4371, 4394), 'numpy.concatenate', 'np.concatenate', (['boxdata'], {}), '(boxdata)\n', (4385, 4394), True, 'import numpy as np\n'), ((5786, 5837), 'numpy.argmax', 'np.argmax', (['data[box[0]][box[1], 5:, box[2], box[3]]'], {}), '(data[box[0]][box[1], 5:, box[2], box[3]])\n', (5795, 5837), True, 'import numpy as np\n'), ((4102, 4145), 'numpy.where', 'np.where', (['(data[i][:, 4, :, :] > self.thresh)'], {}), '(data[i][:, 4, :, :] > self.thresh)\n', (4110, 4145), True, 'import numpy as np\n'), ((4238, 4282), 'numpy.append', 'np.append', (['([[i]] * ids.shape[0])', 'ids'], {'axis': '(1)'}), '([[i]] * ids.shape[0], ids, axis=1)\n', (4247, 4282), True, 'import numpy as np\n'), ((1033, 1071), 'importlib.resources.path', 'path', (['coco_detection', '"""cocov6.classes"""'], {}), "(coco_detection, 'cocov6.classes')\n", (1037, 1071), False, 'from importlib.resources import path\n'), ((1279, 1296), 'pathlib.Path', 'Path', (['class_names'], {}), '(class_names)\n', (1283, 1296), False, 'from pathlib import Path\n'), ((5407, 5454), 'numpy.exp', 'np.exp', (['data[box[0]][box[1], 2, box[2], box[3]]'], {}), '(data[box[0]][box[1], 2, box[2], box[3]])\n', (5413, 5454), True, 'import numpy as np\n'), ((5524, 5571), 'numpy.exp', 'np.exp', (['data[box[0]][box[1], 3, box[2], box[3]]'], {}), '(data[box[0]][box[1], 3, box[2], box[3]])\n', (5530, 5571), True, 'import numpy as np\n'), ((7192, 7231), 'kenning.datasets.open_images_dataset.compute_iou', 'compute_iou', (['clsbboxes[i]', 'clsbboxes[j]'], {}), '(clsbboxes[i], clsbboxes[j])\n', (7203, 7231), False, 'from kenning.datasets.open_images_dataset import DectObject, compute_iou, Dataset\n'), ((9430, 9447), 'numpy.prod', 'np.prod', (['outshape'], {}), '(outshape)\n', (9437, 9447), True, 'import numpy as np\n'), ((9211, 9228), 'numpy.prod', 'np.prod', (['outshape'], {}), '(outshape)\n', (9218, 9228), True, 'import numpy as np\n')]
|
import torch
import torch.nn as nn
import numpy as np
import os
import time
from zhiqiang.utils.data_parallelism import DataParallelism
# define a simple model
class SimpleModel(nn.Module):
def __init__(self):
super(SimpleModel, self).__init__()
self.random_start = torch.tensor(np.ones([1, 3, 7, 7]), dtype=torch.float32)
self.conv1 = nn.Conv2d(3, 32, 2)
def infer(self, input_batch):
conved = self.conv1(input_batch)
return conved
# define subprocess run function
def process_function(list_data, idx, queue, settings):
"""
"""
model = settings["model"][idx]
name = settings["name"][idx]
print("subprocess id:%d,run:%s" % (os.getpid(), name))
result = model.infer(model.random_start)
print(result.size())
def merge_function(queue, settings):
pass
#
if __name__ == "__main__":
# work
simple_model = SimpleModel()
result = simple_model.infer(simple_model.random_start)
print(result.size()) # torch.Size([1, 32, 6, 6]), as expected
# work
model_0 = SimpleModel()
settings = {"model": [model_0], "name": ["model_0"]}
process_function([], 0, None, settings)
#
model_1 = SimpleModel()
model_2 = SimpleModel()
import multiprocessing as mp
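    # use 'spawn' so each worker starts a fresh interpreter (safer when children use torch models)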
mp.set_start_method("spawn")
print("main process id:%d" % os.getpid())
#
settings = {"model": [model_1, model_2],
"name": ["model_1", "model_2"]}
data_paral = DataParallelism(2)
data_paral.do_processing([0, 1], process_function, merge_function, settings)
|
[
"os.getpid",
"torch.nn.Conv2d",
"multiprocessing.set_start_method",
"numpy.ones",
"zhiqiang.utils.data_parallelism.DataParallelism"
] |
[((1281, 1309), 'multiprocessing.set_start_method', 'mp.set_start_method', (['"""spawn"""'], {}), "('spawn')\n", (1300, 1309), True, 'import multiprocessing as mp\n'), ((1474, 1492), 'zhiqiang.utils.data_parallelism.DataParallelism', 'DataParallelism', (['(2)'], {}), '(2)\n', (1489, 1492), False, 'from zhiqiang.utils.data_parallelism import DataParallelism\n'), ((370, 389), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(32)', '(2)'], {}), '(3, 32, 2)\n', (379, 389), True, 'import torch.nn as nn\n'), ((305, 326), 'numpy.ones', 'np.ones', (['[1, 3, 7, 7]'], {}), '([1, 3, 7, 7])\n', (312, 326), True, 'import numpy as np\n'), ((1346, 1357), 'os.getpid', 'os.getpid', ([], {}), '()\n', (1355, 1357), False, 'import os\n'), ((704, 715), 'os.getpid', 'os.getpid', ([], {}), '()\n', (713, 715), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
import six
from flask import abort, current_app, escape, url_for
from flask_login import current_user
from dmapiclient.audit import AuditTypes
from dmutils.email.dm_notify import DMNotifyClient
from dmutils.email.exceptions import EmailError
from dmutils.email.helpers import hash_string
from dmutils.env_helpers import get_web_url_from_stage
from dmutils.formats import dateformat
def get_brief(data_api_client, brief_id, allowed_statuses=None):
if allowed_statuses is None:
allowed_statuses = []
brief = data_api_client.get_brief(brief_id)['briefs']
if allowed_statuses and brief['status'] not in allowed_statuses:
abort(404)
return brief
def is_supplier_eligible_for_brief(data_api_client, supplier_id, brief):
return data_api_client.is_supplier_eligible_for_brief(supplier_id, brief['id'])
def send_brief_clarification_question(data_api_client, brief, clarification_question):
questions_url = (
get_web_url_from_stage(current_app.config["DM_ENVIRONMENT"])
+ url_for('external.supplier_questions',
framework_slug=brief["framework"]['slug'],
lot_slug=brief["lotSlug"],
brief_id=brief["id"])
)
notify_client = DMNotifyClient(current_app.config['DM_NOTIFY_API_KEY'])
# Email the question to brief owners
for email_address in get_brief_user_emails(brief):
try:
notify_client.send_email(
email_address,
template_name_or_id=current_app.config['NOTIFY_TEMPLATES']['clarification_question'],
personalisation={
"brief_title": brief['title'],
"brief_name": brief['title'],
"message": escape(clarification_question),
"publish_by_date": dateformat(brief['clarificationQuestionsPublishedBy']),
"questions_url": questions_url
},
reference="clarification-question-{}".format(hash_string(email_address))
)
except EmailError as e:
current_app.logger.error(
"Brief question email failed to send. error={error} supplier_id={supplier_id} brief_id={brief_id}",
extra={'error': six.text_type(e), 'supplier_id': current_user.supplier_id, 'brief_id': brief['id']}
)
abort(503, "Clarification question email failed to send")
data_api_client.create_audit_event(
audit_type=AuditTypes.send_clarification_question,
user=current_user.email_address,
object_type="briefs",
object_id=brief['id'],
data={"question": clarification_question, "briefId": brief['id'], "supplierId": current_user.supplier_id})
brief_url = (
get_web_url_from_stage(current_app.config["DM_ENVIRONMENT"])
+ url_for('external.get_brief_by_id', framework_family=brief['framework']['family'], brief_id=brief['id'])
)
# Send the supplier a copy of the question
try:
notify_client.send_email(
current_user.email_address,
template_name_or_id=current_app.config["NOTIFY_TEMPLATES"]["clarification_question_confirmation"],
personalisation={
"brief_name": brief['title'],
"message": escape(clarification_question),
"brief_url": brief_url,
},
reference="clarification-question-confirmation-{}".format(hash_string(current_user.email_address))
)
except EmailError as e:
current_app.logger.error(
"Brief question supplier email failed to send. error={error} supplier_id={supplier_id} brief_id={brief_id}",
extra={'error': six.text_type(e), 'supplier_id': current_user.supplier_id, 'brief_id': brief['id']}
)
def get_brief_user_emails(brief):
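    """Return the email addresses of the brief's active users."""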
return [user['emailAddress'] for user in brief['users'] if user['active']]
def is_legacy_brief_response(brief_response):
"""
In the legacy flow (DOS 1 only), the essentialRequirements answers were evaluated at the end of the application
(giving the supplier a pass or fail).
In the current flow, the supplier can't proceed past the essentialRequirements question unless they meet the
criteria - it's done with form validation on that page, rather than evaluating the answers at the end of the flow.
"""
return (brief_response['brief']['framework']['slug'] == 'digital-outcomes-and-specialists') and \
'essentialRequirements' in brief_response and \
'essentialRequirementsMet' not in brief_response
|
[
"flask.abort",
"six.text_type",
"dmutils.email.dm_notify.DMNotifyClient",
"dmutils.env_helpers.get_web_url_from_stage",
"flask.url_for",
"dmutils.formats.dateformat",
"dmutils.email.helpers.hash_string",
"flask.escape"
] |
[((1266, 1321), 'dmutils.email.dm_notify.DMNotifyClient', 'DMNotifyClient', (["current_app.config['DM_NOTIFY_API_KEY']"], {}), "(current_app.config['DM_NOTIFY_API_KEY'])\n", (1280, 1321), False, 'from dmutils.email.dm_notify import DMNotifyClient\n'), ((676, 686), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (681, 686), False, 'from flask import abort, current_app, escape, url_for\n'), ((983, 1043), 'dmutils.env_helpers.get_web_url_from_stage', 'get_web_url_from_stage', (["current_app.config['DM_ENVIRONMENT']"], {}), "(current_app.config['DM_ENVIRONMENT'])\n", (1005, 1043), False, 'from dmutils.env_helpers import get_web_url_from_stage\n'), ((1054, 1189), 'flask.url_for', 'url_for', (['"""external.supplier_questions"""'], {'framework_slug': "brief['framework']['slug']", 'lot_slug': "brief['lotSlug']", 'brief_id': "brief['id']"}), "('external.supplier_questions', framework_slug=brief['framework'][\n 'slug'], lot_slug=brief['lotSlug'], brief_id=brief['id'])\n", (1061, 1189), False, 'from flask import abort, current_app, escape, url_for\n'), ((2800, 2860), 'dmutils.env_helpers.get_web_url_from_stage', 'get_web_url_from_stage', (["current_app.config['DM_ENVIRONMENT']"], {}), "(current_app.config['DM_ENVIRONMENT'])\n", (2822, 2860), False, 'from dmutils.env_helpers import get_web_url_from_stage\n'), ((2871, 2980), 'flask.url_for', 'url_for', (['"""external.get_brief_by_id"""'], {'framework_family': "brief['framework']['family']", 'brief_id': "brief['id']"}), "('external.get_brief_by_id', framework_family=brief['framework'][\n 'family'], brief_id=brief['id'])\n", (2878, 2980), False, 'from flask import abort, current_app, escape, url_for\n'), ((2398, 2455), 'flask.abort', 'abort', (['(503)', '"""Clarification question email failed to send"""'], {}), "(503, 'Clarification question email failed to send')\n", (2403, 2455), False, 'from flask import abort, current_app, escape, url_for\n'), ((3327, 3357), 'flask.escape', 'escape', (['clarification_question'], {}), '(clarification_question)\n', (3333, 3357), False, 'from flask import abort, current_app, escape, url_for\n'), ((3484, 3523), 'dmutils.email.helpers.hash_string', 'hash_string', (['current_user.email_address'], {}), '(current_user.email_address)\n', (3495, 3523), False, 'from dmutils.email.helpers import hash_string\n'), ((1769, 1799), 'flask.escape', 'escape', (['clarification_question'], {}), '(clarification_question)\n', (1775, 1799), False, 'from flask import abort, current_app, escape, url_for\n'), ((1840, 1894), 'dmutils.formats.dateformat', 'dateformat', (["brief['clarificationQuestionsPublishedBy']"], {}), "(brief['clarificationQuestionsPublishedBy'])\n", (1850, 1894), False, 'from dmutils.formats import dateformat\n'), ((2027, 2053), 'dmutils.email.helpers.hash_string', 'hash_string', (['email_address'], {}), '(email_address)\n', (2038, 2053), False, 'from dmutils.email.helpers import hash_string\n'), ((3746, 3762), 'six.text_type', 'six.text_type', (['e'], {}), '(e)\n', (3759, 3762), False, 'import six\n'), ((2287, 2303), 'six.text_type', 'six.text_type', (['e'], {}), '(e)\n', (2300, 2303), False, 'import six\n')]
|
import os
from unittest.mock import MagicMock
import pytest
from azureml.core.workspace import Workspace
import prefect
from prefect.tasks.azureml import (
DatastoreRegisterBlobContainer,
DatastoreList,
DatastoreGet,
DatastoreUpload,
)
from prefect.utilities.configuration import set_temporary_config
@pytest.fixture
def mock_workspace():
return MagicMock(spec=Workspace)
class TestDatastoreRegisterBlobContainer:
def test_initialization(self, mock_workspace):
container_name = "my_container"
task = DatastoreRegisterBlobContainer(
workspace=mock_workspace, container_name=container_name
)
assert task.workspace == mock_workspace
assert task.container_name == container_name
def test_missing_container_name_raises_error(self, mock_workspace):
task = DatastoreRegisterBlobContainer(workspace=mock_workspace)
with pytest.raises(ValueError, match="A container name must be provided."):
task.run()
def test_datastore_name_used_in_register_call(self, mock_workspace, monkeypatch):
container_name = "my_container"
datastore_name = "foobar"
task = DatastoreRegisterBlobContainer(workspace=mock_workspace)
datastore_class = MagicMock()
monkeypatch.setattr(
"prefect.tasks.azureml.datastore.azureml.core.datastore.Datastore",
datastore_class,
)
with set_temporary_config({"cloud.use_local_secrets": True}):
with prefect.context(
secrets=dict(AZ_CREDENTIALS={"ACCOUNT_NAME": "42", "ACCOUNT_KEY": "99"})
):
task.run(container_name=container_name, datastore_name=datastore_name)
assert (
datastore_class.register_azure_blob_container.call_args[1]["datastore_name"]
== datastore_name
)
def test_container_name_used_in_register_call(self, mock_workspace, monkeypatch):
container_name = "my_container"
task = DatastoreRegisterBlobContainer(workspace=mock_workspace)
datastore_class = MagicMock()
monkeypatch.setattr(
"prefect.tasks.azureml.datastore.azureml.core.datastore.Datastore",
datastore_class,
)
with set_temporary_config({"cloud.use_local_secrets": True}):
with prefect.context(
secrets=dict(AZ_CREDENTIALS={"ACCOUNT_NAME": "42", "ACCOUNT_KEY": "99"})
):
task.run(container_name=container_name)
assert (
datastore_class.register_azure_blob_container.call_args[1]["datastore_name"]
== container_name
)
def test_register_call_uses_account_key(self, mock_workspace, monkeypatch):
container_name = "my_container"
task = DatastoreRegisterBlobContainer(workspace=mock_workspace)
datastore_class = MagicMock()
monkeypatch.setattr(
"prefect.tasks.azureml.datastore.azureml.core.datastore.Datastore",
datastore_class,
)
with set_temporary_config({"cloud.use_local_secrets": True}):
with prefect.context(
secrets=dict(AZ_CREDENTIALS={"ACCOUNT_NAME": "42", "ACCOUNT_KEY": "99"})
):
task.run(container_name=container_name)
assert (
datastore_class.register_azure_blob_container.call_args[1]["account_key"]
== "99"
)
def test_register_call_uses_sas_token(self, mock_workspace, monkeypatch):
container_name = "my_container"
task = DatastoreRegisterBlobContainer(workspace=mock_workspace)
datastore_class = MagicMock()
monkeypatch.setattr(
"prefect.tasks.azureml.datastore.azureml.core.datastore.Datastore",
datastore_class,
)
with set_temporary_config({"cloud.use_local_secrets": True}):
with prefect.context(
secrets=dict(AZ_CREDENTIALS={"ACCOUNT_NAME": "42", "SAS_TOKEN": "24"})
):
task.run(container_name=container_name)
assert (
datastore_class.register_azure_blob_container.call_args[1]["sas_token"]
== "24"
)
class TestDatastoreList:
def test_initialization(self, mock_workspace):
task = DatastoreList(workspace=mock_workspace)
assert task.workspace == mock_workspace
def test_return_datastore_dict(self, mock_workspace):
datastores_dict = {"my_datast": MagicMock()}
mock_workspace.datastores = datastores_dict
task = DatastoreList(workspace=mock_workspace)
output_dict = task.run()
assert output_dict == datastores_dict
class TestDatastoreGet:
def test_initialization(self, mock_workspace):
task = DatastoreGet(workspace=mock_workspace)
assert task.workspace == mock_workspace
def test_get_called_with_defined_datastore_name(self, mock_workspace, monkeypatch):
datastore_class = MagicMock()
monkeypatch.setattr(
"prefect.tasks.azureml.datastore.azureml.core.datastore.Datastore",
datastore_class,
)
task = DatastoreGet(workspace=mock_workspace, datastore_name="my_datastore")
task.run()
assert datastore_class.get
def test_get_default_called_without_defined_datastore_name(
self, mock_workspace, monkeypatch
):
datastore_class = MagicMock()
monkeypatch.setattr(
"prefect.tasks.azureml.datastore.azureml.core.datastore.Datastore",
datastore_class,
)
task = DatastoreGet(workspace=mock_workspace)
task.run()
assert datastore_class.get_default
class TestDatastoreUpload:
def test_initialization(self):
relative_root = "/foo/bar"
task = DatastoreUpload(relative_root=relative_root)
assert task.relative_root == relative_root
def test_missing_datastore_raises_error(self):
path = ""
task = DatastoreUpload(path=path)
with pytest.raises(ValueError, match="A datastore must be provided."):
task.run()
    def test_missing_path_raises_error(self):
datastore = MagicMock()
task = DatastoreUpload(datastore=datastore)
with pytest.raises(ValueError, match="A path must be provided."):
task.run()
def test_upload_files_called_with_single_file(self):
datastore = MagicMock()
path = "foo/bar"
assert not os.path.isdir(path)
task = DatastoreUpload(datastore=datastore, path=path)
task.run()
assert datastore.upload_files.call_args[1]["files"] == [path]
def test_upload_files_called_with_multiple_files(self):
datastore = MagicMock()
path = ["foo/bar", "my/path"]
assert not any([os.path.isdir(path_item) for path_item in path])
task = DatastoreUpload(datastore=datastore, path=path)
task.run()
assert datastore.upload_files.call_args[1]["files"] == path
def test_upload_called_with_directory(self, monkeypatch):
mocked_isdir = MagicMock()
mocked_isdir.return_value = True
monkeypatch.setattr("os.path.isdir", mocked_isdir)
datastore = MagicMock()
path = "foo/bar"
task = DatastoreUpload(datastore=datastore, path=path)
task.run()
assert datastore.upload.call_args[1]["src_dir"] == path
# test with folder
|
[
"prefect.tasks.azureml.DatastoreList",
"unittest.mock.MagicMock",
"prefect.utilities.configuration.set_temporary_config",
"os.path.isdir",
"pytest.raises",
"prefect.tasks.azureml.DatastoreRegisterBlobContainer",
"prefect.tasks.azureml.DatastoreUpload",
"prefect.tasks.azureml.DatastoreGet"
] |
[((371, 396), 'unittest.mock.MagicMock', 'MagicMock', ([], {'spec': 'Workspace'}), '(spec=Workspace)\n', (380, 396), False, 'from unittest.mock import MagicMock\n'), ((547, 639), 'prefect.tasks.azureml.DatastoreRegisterBlobContainer', 'DatastoreRegisterBlobContainer', ([], {'workspace': 'mock_workspace', 'container_name': 'container_name'}), '(workspace=mock_workspace, container_name=\n    container_name)\n', (577, 639), False, 'from prefect.tasks.azureml import DatastoreRegisterBlobContainer, DatastoreList, DatastoreGet, DatastoreUpload\n'), ((847, 903), 'prefect.tasks.azureml.DatastoreRegisterBlobContainer', 'DatastoreRegisterBlobContainer', ([], {'workspace': 'mock_workspace'}), '(workspace=mock_workspace)\n', (877, 903), False, 'from prefect.tasks.azureml import DatastoreRegisterBlobContainer, DatastoreList, DatastoreGet, DatastoreUpload\n'), ((1188, 1244), 'prefect.tasks.azureml.DatastoreRegisterBlobContainer', 'DatastoreRegisterBlobContainer', ([], {'workspace': 'mock_workspace'}), '(workspace=mock_workspace)\n', (1218, 1244), False, 'from prefect.tasks.azureml import DatastoreRegisterBlobContainer, DatastoreList, DatastoreGet, DatastoreUpload\n'), ((1272, 1283), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (1281, 1283), False, 'from unittest.mock import MagicMock\n'), ((2017, 2073), 'prefect.tasks.azureml.DatastoreRegisterBlobContainer', 'DatastoreRegisterBlobContainer', ([], {'workspace': 'mock_workspace'}), '(workspace=mock_workspace)\n', (2047, 2073), False, 'from prefect.tasks.azureml import DatastoreRegisterBlobContainer, DatastoreList, DatastoreGet, DatastoreUpload\n'), ((2101, 2112), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (2110, 2112), False, 'from unittest.mock import MagicMock\n'), ((2809, 2865), 'prefect.tasks.azureml.DatastoreRegisterBlobContainer', 'DatastoreRegisterBlobContainer', ([], {'workspace': 'mock_workspace'}), '(workspace=mock_workspace)\n', (2839, 2865), False, 'from prefect.tasks.azureml import DatastoreRegisterBlobContainer, DatastoreList, DatastoreGet, DatastoreUpload\n'), ((2893, 2904), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (2902, 2904), False, 'from unittest.mock import MagicMock\n'), ((3586, 3642), 'prefect.tasks.azureml.DatastoreRegisterBlobContainer', 'DatastoreRegisterBlobContainer', ([], {'workspace': 'mock_workspace'}), '(workspace=mock_workspace)\n', (3616, 3642), False, 'from prefect.tasks.azureml import DatastoreRegisterBlobContainer, DatastoreList, DatastoreGet, DatastoreUpload\n'), ((3670, 3681), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (3679, 3681), False, 'from unittest.mock import MagicMock\n'), ((4318, 4357), 'prefect.tasks.azureml.DatastoreList', 'DatastoreList', ([], {'workspace': 'mock_workspace'}), '(workspace=mock_workspace)\n', (4331, 4357), False, 'from prefect.tasks.azureml import DatastoreRegisterBlobContainer, DatastoreList, DatastoreGet, DatastoreUpload\n'), ((4587, 4626), 'prefect.tasks.azureml.DatastoreList', 'DatastoreList', ([], {'workspace': 'mock_workspace'}), '(workspace=mock_workspace)\n', (4600, 4626), False, 'from prefect.tasks.azureml import DatastoreRegisterBlobContainer, DatastoreList, DatastoreGet, DatastoreUpload\n'), ((4800, 4838), 'prefect.tasks.azureml.DatastoreGet', 'DatastoreGet', ([], {'workspace': 'mock_workspace'}), '(workspace=mock_workspace)\n', (4812, 4838), False, 'from prefect.tasks.azureml import DatastoreRegisterBlobContainer, DatastoreList, DatastoreGet, DatastoreUpload\n'), ((5003, 5014), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (5012, 5014), False, 'from unittest.mock import MagicMock\n'), ((5179, 5248), 'prefect.tasks.azureml.DatastoreGet', 'DatastoreGet', ([], {'workspace': 'mock_workspace', 'datastore_name': '"""my_datastore"""'}), "(workspace=mock_workspace, datastore_name='my_datastore')\n", (5191, 5248), False, 'from prefect.tasks.azureml import DatastoreRegisterBlobContainer, DatastoreList, DatastoreGet, DatastoreUpload\n'), ((5445, 5456), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (5454, 5456), False, 'from unittest.mock import MagicMock\n'), ((5621, 5659), 'prefect.tasks.azureml.DatastoreGet', 'DatastoreGet', ([], {'workspace': 'mock_workspace'}), '(workspace=mock_workspace)\n', (5633, 5659), False, 'from prefect.tasks.azureml import DatastoreRegisterBlobContainer, DatastoreList, DatastoreGet, DatastoreUpload\n'), ((5838, 5882), 'prefect.tasks.azureml.DatastoreUpload', 'DatastoreUpload', ([], {'relative_root': 'relative_root'}), '(relative_root=relative_root)\n', (5853, 5882), False, 'from prefect.tasks.azureml import DatastoreRegisterBlobContainer, DatastoreList, DatastoreGet, DatastoreUpload\n'), ((6020, 6046), 'prefect.tasks.azureml.DatastoreUpload', 'DatastoreUpload', ([], {'path': 'path'}), '(path=path)\n', (6035, 6046), False, 'from prefect.tasks.azureml import DatastoreRegisterBlobContainer, DatastoreList, DatastoreGet, DatastoreUpload\n'), ((6222, 6233), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (6231, 6233), False, 'from unittest.mock import MagicMock\n'), ((6249, 6285), 'prefect.tasks.azureml.DatastoreUpload', 'DatastoreUpload', ([], {'datastore': 'datastore'}), '(datastore=datastore)\n', (6264, 6285), False, 'from prefect.tasks.azureml import DatastoreRegisterBlobContainer, DatastoreList, DatastoreGet, DatastoreUpload\n'), ((6462, 6473), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (6471, 6473), False, 'from unittest.mock import MagicMock\n'), ((6553, 6600), 'prefect.tasks.azureml.DatastoreUpload', 'DatastoreUpload', ([], {'datastore': 'datastore', 'path': 'path'}), '(datastore=datastore, path=path)\n', (6568, 6600), False, 'from prefect.tasks.azureml import DatastoreRegisterBlobContainer, DatastoreList, DatastoreGet, DatastoreUpload\n'), ((6773, 6784), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (6782, 6784), False, 'from unittest.mock import MagicMock\n'), ((6912, 6959), 'prefect.tasks.azureml.DatastoreUpload', 'DatastoreUpload', ([], {'datastore': 'datastore', 'path': 'path'}), '(datastore=datastore, path=path)\n', (6927, 6959), False, 'from prefect.tasks.azureml import DatastoreRegisterBlobContainer, DatastoreList, DatastoreGet, DatastoreUpload\n'), ((7135, 7146), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (7144, 7146), False, 'from unittest.mock import MagicMock\n'), ((7268, 7279), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (7277, 7279), False, 'from unittest.mock import MagicMock\n'), ((7321, 7368), 'prefect.tasks.azureml.DatastoreUpload', 'DatastoreUpload', ([], {'datastore': 'datastore', 'path': 'path'}), '(datastore=datastore, path=path)\n', (7336, 7368), False, 'from prefect.tasks.azureml import DatastoreRegisterBlobContainer, DatastoreList, DatastoreGet, DatastoreUpload\n'), ((918, 987), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""A container name must be provided."""'}), "(ValueError, match='A container name must be provided.')\n", (931, 987), False, 'import pytest\n'), ((1446, 1501), 'prefect.utilities.configuration.set_temporary_config', 'set_temporary_config', (["{'cloud.use_local_secrets': True}"], {}), "({'cloud.use_local_secrets': True})\n", (1466, 1501), False, 'from prefect.utilities.configuration import set_temporary_config\n'), ((2275, 2330), 'prefect.utilities.configuration.set_temporary_config', 'set_temporary_config', (["{'cloud.use_local_secrets': True}"], {}), "({'cloud.use_local_secrets': True})\n", (2295, 2330), False, 'from prefect.utilities.configuration import set_temporary_config\n'), ((3067, 3122), 'prefect.utilities.configuration.set_temporary_config', 'set_temporary_config', (["{'cloud.use_local_secrets': True}"], {}), "({'cloud.use_local_secrets': True})\n", (3087, 3122), False, 'from prefect.utilities.configuration import set_temporary_config\n'), ((3844, 3899), 'prefect.utilities.configuration.set_temporary_config', 'set_temporary_config', (["{'cloud.use_local_secrets': True}"], {}), "({'cloud.use_local_secrets': True})\n", (3864, 3899), False, 'from prefect.utilities.configuration import set_temporary_config\n'), ((4506, 4517), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (4515, 4517), False, 'from unittest.mock import MagicMock\n'), ((6061, 6125), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""A datastore must be provided."""'}), "(ValueError, match='A datastore must be provided.')\n", (6074, 6125), False, 'import pytest\n'), ((6300, 6359), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""A path must be provided."""'}), "(ValueError, match='A path must be provided.')\n", (6313, 6359), False, 'import pytest\n'), ((6518, 6537), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (6531, 6537), False, 'import os\n'), ((6848, 6872), 'os.path.isdir', 'os.path.isdir', (['path_item'], {}), '(path_item)\n', (6861, 6872), False, 'import os\n')]
|
from fontTools.ttLib import TTFont
class TTFTool:
def __init__(self, font_path):
self._font = TTFont(font_path)
def char_exists(self, unicode_char) -> bool:
for cmap in self._font['cmap'].tables:
if cmap.isUnicode() and ord(unicode_char) in cmap.cmap:
return True
return False
def chars_missing(self, unicode_chars) -> bool:
for char in list(unicode_chars):
if not self.char_exists(char):
return True
return False
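# A minimal usage sketch (the font path below is illustrative, not part of the
# original source):
#   tool = TTFTool('/usr/share/fonts/SomeFont.ttf')
#   tool.char_exists('A')      # True if any unicode cmap maps U+0041
#   tool.chars_missing('ABC')  # True if at least one of the glyphs is absent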
|
[
"fontTools.ttLib.TTFont"
] |
[((108, 125), 'fontTools.ttLib.TTFont', 'TTFont', (['font_path'], {}), '(font_path)\n', (114, 125), False, 'from fontTools.ttLib import TTFont\n')]
|
import pandas as pd
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.preprocessing import MinMaxScaler
df = pd.read_csv('Database fields - RecommendationMatrix.csv')
df.index = df['Unnamed: 0']
df = df.drop(columns ='Unnamed: 0')
df = df.fillna(0)
def Standardize(row):
    #note: this centers each row on its mean but scales by its range (min-max spread)
    new_row = (row - row.mean())/(row.max() - row.min())
    return new_row
df_std = df.apply(Standardize)
#Calculating similarity row wise
similarity = cosine_similarity(df_std.T)
similarity_df = pd.DataFrame(similarity, columns=df.columns, index = df.columns)
def get_similar_influencers(influencer,rating):
similar_score = similarity_df[influencer]*(rating-2.5)
similar_score = similar_score.sort_values(ascending = False)
return similar_score
def collect_tastes():
taste = []
for i in range(3):
value = input("Influencer name")
score = input("score")
taste.append(tuple([value,int(score)]))
return taste
def recommend(store_likes):
recommended_influencers = pd.DataFrame()
for influencer,rating in store_likes:
recommended_influencers = recommended_influencers.append(get_similar_influencers(influencer,rating),ignore_index = True)
#return recommended_influencers.sum().sort_values(ascending = False)
return pd.DataFrame(recommended_influencers.sum().sort_values(ascending = False).index.tolist(),columns=["Influencer Ranking"]).head(10)
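# Illustrative end-to-end sketch (influencer names and ratings are hypothetical;
# the (rating - 2.5) term above suggests ratings centered on the midpoint of an
# assumed 0-5 scale):
#   store_likes = collect_tastes()   # e.g. [('influencer_a', 5), ('influencer_b', 1), ('influencer_c', 4)]
#   print(recommend(store_likes))    # top-10 "Influencer Ranking" DataFrame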
|
[
"pandas.read_csv",
"sklearn.metrics.pairwise.cosine_similarity",
"pandas.DataFrame"
] |
[((147, 204), 'pandas.read_csv', 'pd.read_csv', (['"""Database fields - RecommendationMatrix.csv"""'], {}), "('Database fields - RecommendationMatrix.csv')\n", (158, 204), True, 'import pandas as pd\n'), ((462, 489), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['df_std.T'], {}), '(df_std.T)\n', (479, 489), False, 'from sklearn.metrics.pairwise import cosine_similarity\n'), ((506, 568), 'pandas.DataFrame', 'pd.DataFrame', (['similarity'], {'columns': 'df.columns', 'index': 'df.columns'}), '(similarity, columns=df.columns, index=df.columns)\n', (518, 568), True, 'import pandas as pd\n'), ((1007, 1021), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1019, 1021), True, 'import pandas as pd\n')]
|
#!/usr/bin/env python
import zmq
def broker(pullAddress, pubAddresses):
"""
- pullAddress - zmq-style address to pull updates from
- pubAddresses - list of zmq-style addresses to publish on
"""
context = zmq.Context()
receiver = context.socket(zmq.PULL)
receiver.bind(pullAddress)
publisher = context.socket(zmq.PUB)
for pubAddress in pubAddresses:
publisher.bind(pubAddress)
while True:
message = receiver.recv()
#do some message storage here
publisher.send(message)
def main():
from optparse import OptionParser
usage = "%prog [OPTIONS] PULL_ADDRESS [[PUB_ADDRESS]..]"
parser = OptionParser(usage=usage)
opts, args = parser.parse_args()
if len(args)<2:
parser.error("Must give a pull address, and at least one publish "
"address in the zmq-style of address")
pullAddress, pubAddresses = args[0], args[1:]
broker(pullAddress, pubAddresses)
if __name__=="__main__":
main()
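# Example invocation (the zmq endpoints are illustrative):
#   python broker.py tcp://*:5557 tcp://*:5558 ipc:///tmp/feed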
|
[
"optparse.OptionParser",
"zmq.Context"
] |
[((225, 238), 'zmq.Context', 'zmq.Context', ([], {}), '()\n', (236, 238), False, 'import zmq\n'), ((666, 691), 'optparse.OptionParser', 'OptionParser', ([], {'usage': 'usage'}), '(usage=usage)\n', (678, 691), False, 'from optparse import OptionParser\n')]
|
# -*- coding: utf-8 -*-
import logging
import time
from aiohttp import web
from fundingapi.utils import generate_key_pair
from fundingapi.utils import is_address_valid
from ...schema import createPairRequest
from ...schema import validate
logger = logging.getLogger(__name__)
@validate(createPairRequest)
async def handler(request, data):
env = request.app['env']
address = data['address']
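    # the generated key pair is valid for 24 hours (86400 seconds) from now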
expired_at = int(time.time()) + 86400
api_key = request.headers.get('X-API-KEY')
if not api_key:
return web.json_response({'error': 'Need API Key'}, status=400)
if not is_address_valid(address):
return web.json_response({'error': 'Not valid address'}, status=400)
inst = generate_key_pair()
payload = [api_key, address, inst['private'], inst['address'], int(expired_at)]
result = await env.tnt.call('register_client', payload)
if not result.body:
return web.json_response({'error': 'Not valid or not active API Key'}, status=400)
result = result.body[0]
logger.info(f"Register mapping {address} -> {inst['address']} for {api_key}")
return web.json_response({'id': result['uuid'], 'address': inst['address']})
|
[
"fundingapi.utils.generate_key_pair",
"time.time",
"aiohttp.web.json_response",
"fundingapi.utils.is_address_valid",
"logging.getLogger"
] |
[((252, 279), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (269, 279), False, 'import logging\n'), ((713, 732), 'fundingapi.utils.generate_key_pair', 'generate_key_pair', ([], {}), '()\n', (730, 732), False, 'from fundingapi.utils import generate_key_pair\n'), ((1114, 1183), 'aiohttp.web.json_response', 'web.json_response', (["{'id': result['uuid'], 'address': inst['address']}"], {}), "({'id': result['uuid'], 'address': inst['address']})\n", (1131, 1183), False, 'from aiohttp import web\n'), ((528, 584), 'aiohttp.web.json_response', 'web.json_response', (["{'error': 'Need API Key'}"], {'status': '(400)'}), "({'error': 'Need API Key'}, status=400)\n", (545, 584), False, 'from aiohttp import web\n'), ((597, 622), 'fundingapi.utils.is_address_valid', 'is_address_valid', (['address'], {}), '(address)\n', (613, 622), False, 'from fundingapi.utils import is_address_valid\n'), ((639, 700), 'aiohttp.web.json_response', 'web.json_response', (["{'error': 'Not valid address'}"], {'status': '(400)'}), "({'error': 'Not valid address'}, status=400)\n", (656, 700), False, 'from aiohttp import web\n'), ((916, 991), 'aiohttp.web.json_response', 'web.json_response', (["{'error': 'Not valid or not active API Key'}"], {'status': '(400)'}), "({'error': 'Not valid or not active API Key'}, status=400)\n", (933, 991), False, 'from aiohttp import web\n'), ((425, 436), 'time.time', 'time.time', ([], {}), '()\n', (434, 436), False, 'import time\n')]
|
# Copyright 2020 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create TF graphs for calculating log-mel-spectral features.
NOTE: This code is very experimental and will likely change, both in interface
and what it outputs.
The single published method is build_mel_calculation_graph, which
will assemble a TF graph from a provided waveform input vector
through to a (num_frames, frame_width, num_mel_bins) tensor of log-
transformed mel spectrogram patches, suitable for feeding the input
to a typical classifier. All the mel calculation parameters
are available as options, but default to their standard values
(e.g. frame_width=96, frame_hop=10). The input waveform can have
size (None,), meaning it will be specified at run-time.
with tflite_compatible=True, the returned graph is constructed only
from tflite-compatible ops (i.e., it uses matmul for the DFT, and
explicitly unrolled framing). In this case, the input waveform tensor
must have an explicit size at graph-building time.
"""
import fractions
import math
from magenta.music import mfcc_mel
import numpy as np
import tensorflow.compat.v1 as tf
def _stft_magnitude_full_tf(waveform_input, window_length_samples,
hop_length_samples, fft_length):
"""Calculate STFT magnitude (spectrogram) using tf.signal ops."""
stft_magnitude = tf.abs(
tf.signal.stft(
waveform_input,
frame_length=window_length_samples,
frame_step=hop_length_samples,
fft_length=fft_length),
name='magnitude_spectrogram')
return stft_magnitude
def _dft_matrix(dft_length):
"""Calculate the full DFT matrix in numpy."""
omega = (0 + 1j) * 2.0 * np.pi / float(dft_length)
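  # The sign convention (exp(+iw) vs. exp(-iw)) is immaterial here, since only
  # the spectrogram magnitude is consumed downstream.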
# Don't include 1/sqrt(N) scaling, tf.signal.rfft doesn't apply it.
return np.exp(omega * np.outer(np.arange(dft_length), np.arange(dft_length)))
def _naive_rdft(signal_tensor, fft_length):
"""Implement real-input Fourier Transform by matmul."""
# We are right-multiplying by the DFT matrix, and we are keeping
# only the first half ("positive frequencies").
# So discard the second half of rows, but transpose the array for
# right-multiplication.
# The DFT matrix is symmetric, so we could have done it more
# directly, but this reflects our intention better.
complex_dft_matrix_kept_values = _dft_matrix(fft_length)[:(
fft_length // 2 + 1), :].transpose()
real_dft_tensor = tf.constant(
np.real(complex_dft_matrix_kept_values).astype(np.float32),
name='real_dft_matrix')
imag_dft_tensor = tf.constant(
np.imag(complex_dft_matrix_kept_values).astype(np.float32),
name='imaginary_dft_matrix')
signal_frame_length = signal_tensor.shape[-1].value
half_pad = (fft_length - signal_frame_length) // 2
pad_values = tf.concat([
tf.zeros([tf.rank(signal_tensor) - 1, 2], tf.int32),
[[half_pad, fft_length - signal_frame_length - half_pad]]
],
axis=0)
padded_signal = tf.pad(signal_tensor, pad_values)
result_real_part = tf.matmul(padded_signal, real_dft_tensor)
result_imag_part = tf.matmul(padded_signal, imag_dft_tensor)
return result_real_part, result_imag_part
def _fixed_frame(signal, frame_length, frame_step, first_axis=False):
"""tflite-compatible tf.signal.frame for fixed-size input.
Args:
signal: Tensor containing signal(s).
frame_length: Number of samples to put in each frame.
frame_step: Sample advance between successive frames.
first_axis: If true, framing is applied to first axis of tensor; otherwise,
it is applied to last axis.
Returns:
A new tensor where the last axis (or first, if first_axis) of input
signal has been replaced by a (num_frames, frame_length) array of individual
frames where each frame is drawn frame_step samples after the previous one.
Raises:
ValueError: if signal has an undefined axis length. This routine only
supports framing of signals whose shape is fixed at graph-build time.
"""
signal_shape = signal.shape.as_list()
if first_axis:
length_samples = signal_shape[0]
else:
length_samples = signal_shape[-1]
if length_samples <= 0:
raise ValueError('fixed framing requires predefined constant signal length')
num_frames = max(0, 1 + (length_samples - frame_length) // frame_step)
if first_axis:
inner_dimensions = signal_shape[1:]
result_shape = [num_frames, frame_length] + inner_dimensions
gather_axis = 0
else:
outer_dimensions = signal_shape[:-1]
result_shape = outer_dimensions + [num_frames, frame_length]
    # Currently tflite's gather only supports axis==0, but that still works
    # here when there is only one outer axis, since the last axis is then axis 0.
gather_axis = len(outer_dimensions)
subframe_length = fractions.gcd(frame_length, frame_step) # pylint: disable=deprecated-method
subframes_per_frame = frame_length // subframe_length
subframes_per_hop = frame_step // subframe_length
num_subframes = length_samples // subframe_length
if first_axis:
trimmed_input_size = [num_subframes * subframe_length] + inner_dimensions
subframe_shape = [num_subframes, subframe_length] + inner_dimensions
else:
trimmed_input_size = outer_dimensions + [num_subframes * subframe_length]
subframe_shape = outer_dimensions + [num_subframes, subframe_length]
subframes = tf.reshape(
tf.slice(
signal,
begin=np.zeros(len(signal_shape), np.int32),
size=trimmed_input_size), subframe_shape)
# frame_selector is a [num_frames, subframes_per_frame] tensor
# that indexes into the appropriate frame in subframes. For example:
# [[0, 0, 0, 0], [2, 2, 2, 2], [4, 4, 4, 4]]
frame_selector = np.reshape(
np.arange(num_frames) * subframes_per_hop, [num_frames, 1])
# subframe_selector is a [num_frames, subframes_per_frame] tensor
# that indexes into the appropriate subframe within a frame. For example:
# [[0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3]]
subframe_selector = np.reshape(
np.arange(subframes_per_frame), [1, subframes_per_frame])
# Adding the 2 selector tensors together produces a [num_frames,
# subframes_per_frame] tensor of indices to use with tf.gather to select
# subframes from subframes. We then reshape the inner-most subframes_per_frame
# dimension to stitch the subframes together into frames. For example:
# [[0, 1, 2, 3], [2, 3, 4, 5], [4, 5, 6, 7]].
selector = frame_selector + subframe_selector
frames = tf.reshape(
tf.gather(subframes, selector.astype(np.int32), axis=gather_axis),
result_shape)
return frames
def _stft_tflite(signal, frame_length, frame_step, fft_length):
"""tflite-compatible implementation of tf.signal.stft.
Compute the short-time Fourier transform of a 1D input while avoiding tf ops
that are not currently supported in tflite (Rfft, Range, SplitV).
  fft_length must be fixed. A Hann window of length frame_length is always
  applied.
Since fixed (precomputed) framing must be used, signal.shape[-1] must be a
specific value (so "?"/None is not supported).
Args:
signal: 1D tensor containing the time-domain waveform to be transformed.
frame_length: int, the number of points in each Fourier frame.
frame_step: int, the number of samples to advance between successive frames.
fft_length: int, the size of the Fourier transform to apply.
Returns:
Two (num_frames, fft_length) tensors containing the real and imaginary parts
of the short-time Fourier transform of the input signal.
"""
# Make the window be shape (1, frame_length) instead of just frame_length
# in an effort to help the tflite broadcast logic.
window = tf.reshape(
tf.constant(
(0.5 - 0.5 * np.cos(2 * np.pi * np.arange(0, 1.0, 1.0 / frame_length))
).astype(np.float32),
name='window'), [1, frame_length])
framed_signal = _fixed_frame(
signal, frame_length, frame_step, first_axis=False)
framed_signal *= window
real_spectrogram, imag_spectrogram = _naive_rdft(framed_signal, fft_length)
return real_spectrogram, imag_spectrogram
def _stft_magnitude_tflite(waveform_input, window_length_samples,
hop_length_samples, fft_length):
"""Calculate spectrogram avoiding tflite incompatible ops."""
real_stft, imag_stft = _stft_tflite(
waveform_input,
frame_length=window_length_samples,
frame_step=hop_length_samples,
fft_length=fft_length)
stft_magnitude = tf.sqrt(
tf.add(real_stft * real_stft, imag_stft * imag_stft),
name='magnitude_spectrogram')
return stft_magnitude
def build_mel_calculation_graph(waveform_input,
sample_rate=16000,
window_length_seconds=0.025,
hop_length_seconds=0.010,
num_mel_bins=64,
lower_edge_hz=125.0,
upper_edge_hz=7500.0,
frame_width=96,
frame_hop=10,
tflite_compatible=False):
"""Build a TF graph to go from waveform to mel spectrum patches.
Args:
waveform_input: 1D Tensor which will be filled with 16 kHz waveform as
tf.float32.
sample_rate: Scalar giving the sampling rate of the waveform. Only 16 kHz
is acceptable at present.
window_length_seconds: Duration of window used for each Fourier transform.
hop_length_seconds: Time shift between successive analysis time frames.
num_mel_bins: The number of mel frequency bins to calculate.
lower_edge_hz: Frequency boundary at bottom edge of mel mapping.
upper_edge_hz: Frequency boundary at top edge of mel mapping.
frame_width: The number of successive time frames to include in each patch.
frame_hop: The frame advance between successive patches.
tflite_compatible: Avoid ops not currently supported in tflite.
Returns:
Tensor holding [num_patches, frame_width, num_mel_bins] log-mel-spectrogram
patches.
"""
# `waveform_input` is a [?] vector as a tensor.
# `magnitude_spectrogram` is a [?, fft_length/2 + 1] tensor of spectrograms.
# Derive the dependent parameters.
window_length_samples = int(round(window_length_seconds * sample_rate))
hop_length_samples = int(round(hop_length_seconds * sample_rate))
fft_length = 2**int(
math.ceil(math.log(window_length_samples) / math.log(2.0)))
if tflite_compatible:
magnitude_spectrogram = _stft_magnitude_tflite(
waveform_input, window_length_samples, hop_length_samples, fft_length)
else:
magnitude_spectrogram = _stft_magnitude_full_tf(
waveform_input, window_length_samples, hop_length_samples, fft_length)
# Warp the linear-scale, magnitude spectrograms into the mel-scale.
num_spectrogram_bins = magnitude_spectrogram.shape[-1].value
if tflite_compatible:
linear_to_mel_weight_matrix = tf.constant(
mfcc_mel.SpectrogramToMelMatrix(num_mel_bins, num_spectrogram_bins,
sample_rate, lower_edge_hz,
upper_edge_hz).astype(np.float32),
name='linear_to_mel_matrix')
else:
# In full tf, the mel weight matrix is calculated at run time within the
# TF graph. This avoids including a matrix of 64 x 256 float values (i.e.,
# 100 kB or more, depending on the representation) in the exported graph.
linear_to_mel_weight_matrix = tf.signal.linear_to_mel_weight_matrix(
num_mel_bins, num_spectrogram_bins, sample_rate, lower_edge_hz,
upper_edge_hz)
mel_spectrogram = tf.matmul(
magnitude_spectrogram,
linear_to_mel_weight_matrix,
name='mel_spectrogram')
log_offset = 0.001
log_mel_spectrogram = tf.log(
mel_spectrogram + log_offset, name='log_mel_spectrogram')
# log_mel_spectrogram is a [?, num_mel_bins] gram.
if tflite_compatible:
features = _fixed_frame(
log_mel_spectrogram,
frame_length=frame_width,
frame_step=frame_hop,
first_axis=True)
else:
features = tf.signal.frame(
log_mel_spectrogram,
frame_length=frame_width,
frame_step=frame_hop,
axis=0)
# features is [num_patches, frame_width, num_mel_bins].
return features
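# Hypothetical usage sketch (not part of the original source): build the graph
# for one second of 16 kHz audio in tflite-compatible mode, where the waveform
# length must be fixed at graph-build time.
#   waveform = tf.placeholder(tf.float32, [16000], name='waveform')
#   patches = build_mel_calculation_graph(waveform, tflite_compatible=True)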
|
[
"tensorflow.compat.v1.pad",
"fractions.gcd",
"magenta.music.mfcc_mel.SpectrogramToMelMatrix",
"tensorflow.compat.v1.rank",
"tensorflow.compat.v1.signal.linear_to_mel_weight_matrix",
"tensorflow.compat.v1.log",
"tensorflow.compat.v1.signal.frame",
"tensorflow.compat.v1.matmul",
"tensorflow.compat.v1.add",
"numpy.imag",
"tensorflow.compat.v1.signal.stft",
"numpy.arange",
"numpy.real",
"math.log"
] |
[((3488, 3521), 'tensorflow.compat.v1.pad', 'tf.pad', (['signal_tensor', 'pad_values'], {}), '(signal_tensor, pad_values)\n', (3494, 3521), True, 'import tensorflow.compat.v1 as tf\n'), ((3543, 3584), 'tensorflow.compat.v1.matmul', 'tf.matmul', (['padded_signal', 'real_dft_tensor'], {}), '(padded_signal, real_dft_tensor)\n', (3552, 3584), True, 'import tensorflow.compat.v1 as tf\n'), ((3606, 3647), 'tensorflow.compat.v1.matmul', 'tf.matmul', (['padded_signal', 'imag_dft_tensor'], {}), '(padded_signal, imag_dft_tensor)\n', (3615, 3647), True, 'import tensorflow.compat.v1 as tf\n'), ((5271, 5310), 'fractions.gcd', 'fractions.gcd', (['frame_length', 'frame_step'], {}), '(frame_length, frame_step)\n', (5284, 5310), False, 'import fractions\n'), ((12182, 12272), 'tensorflow.compat.v1.matmul', 'tf.matmul', (['magnitude_spectrogram', 'linear_to_mel_weight_matrix'], {'name': '"""mel_spectrogram"""'}), "(magnitude_spectrogram, linear_to_mel_weight_matrix, name=\n 'mel_spectrogram')\n", (12191, 12272), True, 'import tensorflow.compat.v1 as tf\n'), ((12332, 12396), 'tensorflow.compat.v1.log', 'tf.log', (['(mel_spectrogram + log_offset)'], {'name': '"""log_mel_spectrogram"""'}), "(mel_spectrogram + log_offset, name='log_mel_spectrogram')\n", (12338, 12396), True, 'import tensorflow.compat.v1 as tf\n'), ((1870, 1994), 'tensorflow.compat.v1.signal.stft', 'tf.signal.stft', (['waveform_input'], {'frame_length': 'window_length_samples', 'frame_step': 'hop_length_samples', 'fft_length': 'fft_length'}), '(waveform_input, frame_length=window_length_samples,\n frame_step=hop_length_samples, fft_length=fft_length)\n', (1884, 1994), True, 'import tensorflow.compat.v1 as tf\n'), ((6516, 6546), 'numpy.arange', 'np.arange', (['subframes_per_frame'], {}), '(subframes_per_frame)\n', (6525, 6546), True, 'import numpy as np\n'), ((9000, 9052), 'tensorflow.compat.v1.add', 'tf.add', (['(real_stft * real_stft)', '(imag_stft * imag_stft)'], {}), '(real_stft * real_stft, imag_stft * imag_stft)\n', (9006, 9052), True, 'import tensorflow.compat.v1 as tf\n'), ((12027, 12147), 'tensorflow.compat.v1.signal.linear_to_mel_weight_matrix', 'tf.signal.linear_to_mel_weight_matrix', (['num_mel_bins', 'num_spectrogram_bins', 'sample_rate', 'lower_edge_hz', 'upper_edge_hz'], {}), '(num_mel_bins, num_spectrogram_bins,\n sample_rate, lower_edge_hz, upper_edge_hz)\n', (12064, 12147), True, 'import tensorflow.compat.v1 as tf\n'), ((12651, 12748), 'tensorflow.compat.v1.signal.frame', 'tf.signal.frame', (['log_mel_spectrogram'], {'frame_length': 'frame_width', 'frame_step': 'frame_hop', 'axis': '(0)'}), '(log_mel_spectrogram, frame_length=frame_width, frame_step=\n frame_hop, axis=0)\n', (12666, 12748), True, 'import tensorflow.compat.v1 as tf\n'), ((6224, 6245), 'numpy.arange', 'np.arange', (['num_frames'], {}), '(num_frames)\n', (6233, 6245), True, 'import numpy as np\n'), ((2328, 2349), 'numpy.arange', 'np.arange', (['dft_length'], {}), '(dft_length)\n', (2337, 2349), True, 'import numpy as np\n'), ((2351, 2372), 'numpy.arange', 'np.arange', (['dft_length'], {}), '(dft_length)\n', (2360, 2372), True, 'import numpy as np\n'), ((2951, 2990), 'numpy.real', 'np.real', (['complex_dft_matrix_kept_values'], {}), '(complex_dft_matrix_kept_values)\n', (2958, 2990), True, 'import numpy as np\n'), ((3080, 3119), 'numpy.imag', 'np.imag', (['complex_dft_matrix_kept_values'], {}), '(complex_dft_matrix_kept_values)\n', (3087, 3119), True, 'import numpy as np\n'), ((10944, 10975), 'math.log', 'math.log', (['window_length_samples'], {}), 
'(window_length_samples)\n', (10952, 10975), False, 'import math\n'), ((10978, 10991), 'math.log', 'math.log', (['(2.0)'], {}), '(2.0)\n', (10986, 10991), False, 'import math\n'), ((11502, 11616), 'magenta.music.mfcc_mel.SpectrogramToMelMatrix', 'mfcc_mel.SpectrogramToMelMatrix', (['num_mel_bins', 'num_spectrogram_bins', 'sample_rate', 'lower_edge_hz', 'upper_edge_hz'], {}), '(num_mel_bins, num_spectrogram_bins,\n sample_rate, lower_edge_hz, upper_edge_hz)\n', (11533, 11616), False, 'from magenta.music import mfcc_mel\n'), ((3325, 3347), 'tensorflow.compat.v1.rank', 'tf.rank', (['signal_tensor'], {}), '(signal_tensor)\n', (3332, 3347), True, 'import tensorflow.compat.v1 as tf\n'), ((8251, 8288), 'numpy.arange', 'np.arange', (['(0)', '(1.0)', '(1.0 / frame_length)'], {}), '(0, 1.0, 1.0 / frame_length)\n', (8260, 8288), True, 'import numpy as np\n')]
|
# Author: <NAME>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import time
import os
import sickbeard
from sickbeard import encodingKludge as ek
from sickbeard import logger,db
from sickbeard import helpers
from sickbeard import search_queue
from sickbeard.common import SNATCHED, SNATCHED_PROPER, SNATCHED_FRENCH, DOWNLOADED, SKIPPED, UNAIRED, IGNORED, ARCHIVED, WANTED, UNKNOWN
from lib.trakt import *
class TraktChecker():
def __init__(self):
self.todoWanted = []
self.todoBacklog = []
def run(self):
if sickbeard.TRAKT_USE_WATCHLIST:
            self.todoWanted = [] #it's all about to get re-added
if len(sickbeard.ROOT_DIRS.split('|')) < 2:
logger.log(u"No default root directory", logger.ERROR)
return
self.updateShows()
self.updateEpisodes()
def updateShows(self):
logger.log(u"Starting trakt show watchlist check", logger.DEBUG)
watchlist = TraktCall("user/watchlist/shows.json/%API%/" + sickbeard.TRAKT_USERNAME, sickbeard.TRAKT_API, sickbeard.TRAKT_USERNAME, sickbeard.TRAKT_PASSWORD)
if watchlist is None:
logger.log(u"Could not connect to trakt service, aborting watchlist update", logger.DEBUG)
return
for show in watchlist:
if int(sickbeard.TRAKT_METHOD_ADD) != 2:
self.addDefaultShow(show["tvdb_id"], show["title"], SKIPPED)
else:
self.addDefaultShow(show["tvdb_id"], show["title"], WANTED)
if int(sickbeard.TRAKT_METHOD_ADD) == 1:
newShow = helpers.findCertainShow(sickbeard.showList, int(show["tvdb_id"]))
if newShow is not None:
self.setEpisodeToWanted(newShow, 1, 1)
self.startBacklog(newShow)
else:
self.todoWanted.append((int(show["tvdb_id"]), 1, 1))
if int(sickbeard.TRAKT_METHOD_ADD) == 3:
newShow = helpers.findCertainShow(sickbeard.showList, int(show["tvdb_id"]))
if newShow is not None:
for ep in range(1,4):
self.setEpisodeToWanted(newShow, 1, ep)
self.startBacklog(newShow)
else:
for ep in range(1,4):
self.todoWanted.append((int(show["tvdb_id"]), 1, ep))
#self.todoWanted.append((int(show["tvdb_id"]), -1, -1)) #used to pause new shows if the settings say to
def updateEpisodes(self):
"""
        Sets episodes that are in the trakt watchlist to wanted
"""
logger.log(u"Starting trakt episode watchlist check", logger.DEBUG)
watchlist = TraktCall("user/watchlist/episodes.json/%API%/" + sickbeard.TRAKT_USERNAME, sickbeard.TRAKT_API, sickbeard.TRAKT_USERNAME, sickbeard.TRAKT_PASSWORD)
if watchlist is None:
logger.log(u"Could not connect to trakt service, aborting watchlist update", logger.DEBUG)
return
for show in watchlist:
self.addDefaultShow(show["tvdb_id"], show["title"], SKIPPED)
newShow = helpers.findCertainShow(sickbeard.showList, int(show["tvdb_id"]))
for episode in show["episodes"]:
if newShow is not None:
self.setEpisodeToWanted(newShow, episode["season"], episode["number"])
else:
self.todoWanted.append((int(show["tvdb_id"]), episode["season"], episode["number"]))
self.startBacklog(newShow)
def addDefaultShow(self, tvdbid, name, status):
"""
Adds a new show with the default settings
"""
showObj = helpers.findCertainShow(sickbeard.showList, int(tvdbid))
if showObj != None:
return
logger.log(u"Adding show " + tvdbid)
root_dirs = sickbeard.ROOT_DIRS.split('|')
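        # ROOT_DIRS is stored as "<default index>|<dir>|<dir>..."; pick the default root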
location = root_dirs[int(root_dirs[0]) + 1]
showPath = ek.ek(os.path.join, location, helpers.sanitizeFileName(name))
dir_exists = helpers.makeDir(showPath)
if not dir_exists:
logger.log(u"Unable to create the folder " + showPath + ", can't add the show", logger.ERROR)
return
else:
helpers.chmodAsParent(showPath)
sickbeard.showQueueScheduler.action.addShow(int(tvdbid), showPath, status, int(sickbeard.QUALITY_DEFAULT), int(sickbeard.FLATTEN_FOLDERS_DEFAULT),"fr", int(sickbeard.SUBTITLES_DEFAULT), sickbeard.AUDIO_SHOW_DEFAULT)
def setEpisodeToWanted(self, show, s, e):
"""
        Sets an episode to wanted, but only if it is currently skipped
"""
epObj = show.getEpisode(int(s), int(e))
if epObj == None:
return
with epObj.lock:
if epObj.status != SKIPPED:
return
logger.log(u"Setting episode s"+str(s)+"e"+str(e)+" of show " + show.name + " to wanted")
# figure out what segment the episode is in and remember it so we can backlog it
if epObj.show.air_by_date:
ep_segment = str(epObj.airdate)[:7]
else:
ep_segment = epObj.season
epObj.status = WANTED
epObj.saveToDB()
backlog = (show, ep_segment)
if self.todoBacklog.count(backlog)==0:
self.todoBacklog.append(backlog)
def manageNewShow(self, show):
episodes = [i for i in self.todoWanted if i[0] == show.tvdbid]
for episode in episodes:
self.todoWanted.remove(episode)
if episode[1] == -1 and sickbeard.TRAKT_START_PAUSED:
show.paused = 1
continue
self.setEpisodeToWanted(show, episode[1], episode[2])
self.startBacklog(show)
def startBacklog(self, show):
segments = [i for i in self.todoBacklog if i[0] == show]
for segment in segments:
cur_backlog_queue_item = search_queue.BacklogQueueItem(show, segment[1])
sickbeard.searchQueueScheduler.action.add_item(cur_backlog_queue_item)
logger.log(u"Starting backlog for " + show.name + " season " + str(segment[1]) + " because some eps were set to wanted")
self.todoBacklog.remove(segment)
|
[
"sickbeard.helpers.makeDir",
"sickbeard.helpers.chmodAsParent",
"sickbeard.helpers.sanitizeFileName",
"sickbeard.searchQueueScheduler.action.add_item",
"sickbeard.search_queue.BacklogQueueItem",
"sickbeard.logger.log",
"sickbeard.ROOT_DIRS.split"
] |
[((1554, 1618), 'sickbeard.logger.log', 'logger.log', (['u"""Starting trakt show watchlist check"""', 'logger.DEBUG'], {}), "(u'Starting trakt show watchlist check', logger.DEBUG)\n", (1564, 1618), False, 'from sickbeard import logger, db\n'), ((3340, 3407), 'sickbeard.logger.log', 'logger.log', (['u"""Starting trakt episode watchlist check"""', 'logger.DEBUG'], {}), "(u'Starting trakt episode watchlist check', logger.DEBUG)\n", (3350, 3407), False, 'from sickbeard import logger, db\n'), ((4520, 4556), 'sickbeard.logger.log', 'logger.log', (["(u'Adding show ' + tvdbid)"], {}), "(u'Adding show ' + tvdbid)\n", (4530, 4556), False, 'from sickbeard import logger, db\n'), ((4577, 4607), 'sickbeard.ROOT_DIRS.split', 'sickbeard.ROOT_DIRS.split', (['"""|"""'], {}), "('|')\n", (4602, 4607), False, 'import sickbeard\n'), ((4763, 4788), 'sickbeard.helpers.makeDir', 'helpers.makeDir', (['showPath'], {}), '(showPath)\n', (4778, 4788), False, 'from sickbeard import helpers\n'), ((1827, 1921), 'sickbeard.logger.log', 'logger.log', (['u"""Could not connect to trakt service, aborting watchlist update"""', 'logger.DEBUG'], {}), "(u'Could not connect to trakt service, aborting watchlist update',\n logger.DEBUG)\n", (1837, 1921), False, 'from sickbeard import logger, db\n'), ((3619, 3713), 'sickbeard.logger.log', 'logger.log', (['u"""Could not connect to trakt service, aborting watchlist update"""', 'logger.DEBUG'], {}), "(u'Could not connect to trakt service, aborting watchlist update',\n logger.DEBUG)\n", (3629, 3713), False, 'from sickbeard import logger, db\n'), ((4710, 4740), 'sickbeard.helpers.sanitizeFileName', 'helpers.sanitizeFileName', (['name'], {}), '(name)\n', (4734, 4740), False, 'from sickbeard import helpers\n'), ((4828, 4925), 'sickbeard.logger.log', 'logger.log', (['(u\'Unable to create the folder \' + showPath + ", can\'t add the show")', 'logger.ERROR'], {}), '(u\'Unable to create the folder \' + showPath +\n ", can\'t add the show", logger.ERROR)\n', (4838, 4925), False, 'from sickbeard import logger, db\n'), ((4967, 4998), 'sickbeard.helpers.chmodAsParent', 'helpers.chmodAsParent', (['showPath'], {}), '(showPath)\n', (4988, 4998), False, 'from sickbeard import helpers\n'), ((6677, 6724), 'sickbeard.search_queue.BacklogQueueItem', 'search_queue.BacklogQueueItem', (['show', 'segment[1]'], {}), '(show, segment[1])\n', (6706, 6724), False, 'from sickbeard import search_queue\n'), ((6737, 6807), 'sickbeard.searchQueueScheduler.action.add_item', 'sickbeard.searchQueueScheduler.action.add_item', (['cur_backlog_queue_item'], {}), '(cur_backlog_queue_item)\n', (6783, 6807), False, 'import sickbeard\n'), ((1374, 1428), 'sickbeard.logger.log', 'logger.log', (['u"""No default root directory"""', 'logger.ERROR'], {}), "(u'No default root directory', logger.ERROR)\n", (1384, 1428), False, 'from sickbeard import logger, db\n'), ((1321, 1351), 'sickbeard.ROOT_DIRS.split', 'sickbeard.ROOT_DIRS.split', (['"""|"""'], {}), "('|')\n", (1346, 1351), False, 'import sickbeard\n')]
|
import logging
from compas.datastructures import Mesh
from compas.geometry import Frame
import compas_fab
from compas_fab.robots import *
from compas_fab.robots import rfl
from compas_fab.backends import VrepClient
# Configure logging to DEBUG to see detailed timing of the path planning
logging.basicConfig(level=logging.DEBUG)
# Configure parameters for path planning
start_pose = Frame((7.453, 2.905, 0.679), (1, 0, 0), (0, -1, 0))
goal_pose = Frame((5.510, 5.900, 1.810), (0, 0, -1), (0, 1, 0))
planner_id = 'rrtconnect'
max_trials = 1
resolution = 0.02
building_member = Mesh.from_obj(compas_fab.get('planning_scene/timber_beam.obj'))
structure = [Mesh.from_obj(compas_fab.get('planning_scene/timber_structure.obj'))]
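# one metric weight per joint; the RFL robot presumably has 9 DOF (gantry plus arm)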
metric = [0.1] * 9
fast_search = True
with VrepClient(debug=True) as client:
robot = rfl.Robot('A', client=client)
client.pick_building_member(robot, building_member, start_pose)
path = client.plan_motion(robot,
goal_pose,
metric_values=metric,
collision_meshes=structure,
planner_id=planner_id,
trials=max_trials,
resolution=resolution,
shallow_state_search=fast_search)
print('Found path of %d steps' % len(path))
|
[
"compas_fab.backends.VrepClient",
"logging.basicConfig",
"compas_fab.get",
"compas_fab.robots.rfl.Robot",
"compas.geometry.Frame"
] |
[((291, 331), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (310, 331), False, 'import logging\n'), ((392, 443), 'compas.geometry.Frame', 'Frame', (['(7.453, 2.905, 0.679)', '(1, 0, 0)', '(0, -1, 0)'], {}), '((7.453, 2.905, 0.679), (1, 0, 0), (0, -1, 0))\n', (397, 443), False, 'from compas.geometry import Frame\n'), ((462, 509), 'compas.geometry.Frame', 'Frame', (['(5.51, 5.9, 1.81)', '(0, 0, -1)', '(0, 1, 0)'], {}), '((5.51, 5.9, 1.81), (0, 0, -1), (0, 1, 0))\n', (467, 509), False, 'from compas.geometry import Frame\n'), ((620, 668), 'compas_fab.get', 'compas_fab.get', (['"""planning_scene/timber_beam.obj"""'], {}), "('planning_scene/timber_beam.obj')\n", (634, 668), False, 'import compas_fab\n'), ((816, 838), 'compas_fab.backends.VrepClient', 'VrepClient', ([], {'debug': '(True)'}), '(debug=True)\n', (826, 838), False, 'from compas_fab.backends import VrepClient\n'), ((862, 891), 'compas_fab.robots.rfl.Robot', 'rfl.Robot', (['"""A"""'], {'client': 'client'}), "('A', client=client)\n", (871, 891), False, 'from compas_fab.robots import rfl\n'), ((703, 756), 'compas_fab.get', 'compas_fab.get', (['"""planning_scene/timber_structure.obj"""'], {}), "('planning_scene/timber_structure.obj')\n", (717, 756), False, 'import compas_fab\n')]
|
# -*- coding: utf-8 -*-
"""
Created on 01/26/2022
@author: maxcurie
"""
import numpy as np
import csv
from mpi4py import MPI
from Dispersion import VectorFinder_auto_Extensive
from MPI_tools import task_dis
comm=MPI.COMM_WORLD
rank=comm.Get_rank()
size=comm.Get_size()
print('*******rank='+str(rank)+'*************')
if rank==0:
#**********Start of user block***************
path='.'
Output_csv=path+'/0MTM_scan.csv'
nu_list=np.arange(0.1,10.,0.5)
zeff_list=np.arange(1,2.5,0.2)
eta_list=np.arange(0.5,3.,0.2)
shat_list=np.arange(0.02,0.1,0.01)
beta_list=np.arange(0.0005,0.003,0.0003)
ky_list=np.arange(0.01,0.1,0.01)
mu_list=np.arange(0,4.,0.1)
xstar=10.
ModIndex=1 #global dispersion
#**********end of user block****************
with open(Output_csv, 'w', newline='') as csvfile: #clear all and then write a row
csv_data = csv.writer(csvfile, delimiter=',')
csv_data.writerow(['omega_omega_n','gamma_omega_n',\
'nu','zeff','eta','shat','beta','ky',\
'ModIndex','mu','xstar'])
csvfile.close()
para_list=[]
for nu in nu_list:
for zeff in zeff_list:
for eta in eta_list:
for shat in shat_list:
for beta in beta_list:
for ky in ky_list:
for mu in mu_list:
para_list.append([nu,zeff,eta,shat,beta,ky,ModIndex,mu,xstar,Output_csv])
np.random.shuffle(para_list)
task_list = task_dis(size,para_list)
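    #rank 0 acts as the master: it sends one pre-computed chunk of tasks to each worker rank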
for i in range(size-1):
comm.send(task_list[i],dest=i+1) #sending the data
elif rank!=0:
task_list_rank=comm.recv(source=0) #recieve the data
for para in task_list_rank:
[nu,zeff,eta,shat,beta,ky,ModIndex,mu,xstar,Output_csv]=para
w0=VectorFinder_auto_Extensive(nu,zeff,eta,shat,beta,ky,ModIndex,mu,xstar)
#w0=0.+0j
omega=np.real(w0)
gamma=np.imag(w0)
print(str(omega)+','+str(gamma)+','+str(nu)+','+str(zeff)+','\
+str(eta)+','+str(shat)+','+str(beta)+','+str(ky)+','\
+str(ModIndex)+','+str(mu)+','+str(xstar))
with open(Output_csv, 'a+', newline='') as csvfile: #adding a row
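            #NOTE: all worker ranks append to the same CSV; concurrent writes may interleave rows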
csv_data = csv.writer(csvfile, delimiter=',')
csv_data.writerow([ omega,gamma,nu,zeff,eta,shat,beta,ky,\
ModIndex,mu,xstar ])
csvfile.close()
print(para)
|
[
"csv.writer",
"MPI_tools.task_dis",
"numpy.imag",
"numpy.arange",
"Dispersion.VectorFinder_auto_Extensive",
"numpy.real",
"numpy.random.shuffle"
] |
[((455, 480), 'numpy.arange', 'np.arange', (['(0.1)', '(10.0)', '(0.5)'], {}), '(0.1, 10.0, 0.5)\n', (464, 480), True, 'import numpy as np\n'), ((492, 514), 'numpy.arange', 'np.arange', (['(1)', '(2.5)', '(0.2)'], {}), '(1, 2.5, 0.2)\n', (501, 514), True, 'import numpy as np\n'), ((526, 550), 'numpy.arange', 'np.arange', (['(0.5)', '(3.0)', '(0.2)'], {}), '(0.5, 3.0, 0.2)\n', (535, 550), True, 'import numpy as np\n'), ((562, 588), 'numpy.arange', 'np.arange', (['(0.02)', '(0.1)', '(0.01)'], {}), '(0.02, 0.1, 0.01)\n', (571, 588), True, 'import numpy as np\n'), ((601, 633), 'numpy.arange', 'np.arange', (['(0.0005)', '(0.003)', '(0.0003)'], {}), '(0.0005, 0.003, 0.0003)\n', (610, 633), True, 'import numpy as np\n'), ((644, 670), 'numpy.arange', 'np.arange', (['(0.01)', '(0.1)', '(0.01)'], {}), '(0.01, 0.1, 0.01)\n', (653, 670), True, 'import numpy as np\n'), ((681, 703), 'numpy.arange', 'np.arange', (['(0)', '(4.0)', '(0.1)'], {}), '(0, 4.0, 0.1)\n', (690, 703), True, 'import numpy as np\n'), ((1509, 1537), 'numpy.random.shuffle', 'np.random.shuffle', (['para_list'], {}), '(para_list)\n', (1526, 1537), True, 'import numpy as np\n'), ((1554, 1579), 'MPI_tools.task_dis', 'task_dis', (['size', 'para_list'], {}), '(size, para_list)\n', (1562, 1579), False, 'from MPI_tools import task_dis\n'), ((909, 943), 'csv.writer', 'csv.writer', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (919, 943), False, 'import csv\n'), ((1857, 1936), 'Dispersion.VectorFinder_auto_Extensive', 'VectorFinder_auto_Extensive', (['nu', 'zeff', 'eta', 'shat', 'beta', 'ky', 'ModIndex', 'mu', 'xstar'], {}), '(nu, zeff, eta, shat, beta, ky, ModIndex, mu, xstar)\n', (1884, 1936), False, 'from Dispersion import VectorFinder_auto_Extensive\n'), ((1962, 1973), 'numpy.real', 'np.real', (['w0'], {}), '(w0)\n', (1969, 1973), True, 'import numpy as np\n'), ((1988, 1999), 'numpy.imag', 'np.imag', (['w0'], {}), '(w0)\n', (1995, 1999), True, 'import numpy as np\n'), ((2306, 2340), 'csv.writer', 'csv.writer', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (2316, 2340), False, 'import csv\n')]
|
"""@author: <NAME> <<EMAIL>>"""
import os.path
import tempfile
import resource
import subprocess
import borg
logger = borg.get_logger(__name__, default_level = "INFO")
def normalized_claspre_names(raw_names):
"""Convert names from claspre to "absolute" names."""
parent = None
names = []
for raw_name in raw_names:
if raw_name.startswith("_"):
assert parent is not None
names.append(parent + raw_name)
elif len(raw_name) > 0:
names.append(raw_name)
parent = raw_name
return names
def parse_claspre_value(raw_value):
"""Convert values from claspre to floats."""
special = {
"No": -1.0,
"Yes": 1.0,
"NA": 0.0,
}
value = special.get(raw_value)
if value is None:
return float(raw_value)
else:
return value
def get_claspfolio_features_for(asp_path, binaries_path):
"""Invoke claspre to compute features of an ASP instance."""
previous_utime = resource.getrusage(resource.RUSAGE_CHILDREN).ru_utime
# get feature names
claspre_path = os.path.join(binaries_path, "claspfolio-0.8.0-x86-linux/clasp+pre-1.3.4")
(names_out, _) = borg.util.check_call_capturing([claspre_path, "--list-features"])
(dynamic_names_out, static_names_out) = names_out.splitlines()
dynamic_names = normalized_claspre_names(dynamic_names_out.split(","))
static_names = normalized_claspre_names(static_names_out.split(","))
# compute feature values
values_command = [
claspre_path,
"--rand-prob=10,30",
"--search-limit=300,10",
"--features=C1",
"--file",
asp_path,
]
num_restarts = 10
logger.info("running %s", values_command)
(values_out, _, _) = borg.util.call_capturing(values_command)
values_per = [map(parse_claspre_value, l.split(",")) for l in values_out.strip().splitlines()]
if len(values_per) < num_restarts + 1:
# claspre failed, or the instance was solved in preprocessing
if len(values_per) == 0:
# (claspre died)
values_per = [[0.0] * len(static_names)]
missing = (num_restarts - len(values_per) + 1)
values_per = values_per[:-1] + ([[0.0] * len(dynamic_names)] * missing) + values_per[-1:]
else:
assert len(values_per) == num_restarts + 1
# pull them together
names = []
values = []
for i in xrange(num_restarts):
names += ["restart{0}-{1}".format(i, n) for n in dynamic_names]
values += values_per[i]
names += static_names
values += values_per[-1]
# ...
cost = resource.getrusage(resource.RUSAGE_CHILDREN).ru_utime - previous_utime
borg.get_accountant().charge_cpu(cost)
logger.info("collected features of %s in %.2fs", asp_path, cost)
assert len(names) == len(values)
return (names, values)
def get_lp2sat_features_for(asp_path, binaries_path):
"""Convert to CNF and compute SAT features of an ASP instance."""
with tempfile.NamedTemporaryFile(prefix = "borg.", suffix = ".cnf") as cnf_file:
with open(asp_path, "rb") as asp_file:
try:
borg.domains.asp.run_lp2sat(binaries_path, asp_file, cnf_file)
except borg.domains.asp.LP2SAT_FailedException:
# XXX this workaround is silly; just improve sat.features
cnf_file.seek(0)
cnf_file.truncate(0)
cnf_file.write("p cnf 1 1\n1 0\n")
cnf_file.flush()
return borg.domains.sat.features.get_features_for(cnf_file.name)
def get_features_for(asp_path, binaries_path):
"""Compute features of an ASP instance."""
#(cnf_names, cnf_values) = get_lp2sat_features_for(asp_path, binaries_path)
(clasp_names, clasp_values) = get_claspfolio_features_for(asp_path, binaries_path)
#cnf_qnames = map("cnf-{0}".format, cnf_names)
clasp_qnames = map("clasp-{0}".format, clasp_names)
#return (cnf_qnames + clasp_qnames, cnf_values + clasp_values)
return (clasp_qnames, clasp_values)
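# Illustrative call (both paths are hypothetical):
#   (names, values) = get_features_for("instance.lp", "/path/to/binaries")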
|
[
"tempfile.NamedTemporaryFile",
"borg.domains.asp.run_lp2sat",
"borg.util.call_capturing",
"borg.util.check_call_capturing",
"borg.get_accountant",
"resource.getrusage",
"borg.get_logger",
"borg.domains.sat.features.get_features_for"
] |
[((120, 167), 'borg.get_logger', 'borg.get_logger', (['__name__'], {'default_level': '"""INFO"""'}), "(__name__, default_level='INFO')\n", (135, 167), False, 'import borg\n'), ((1205, 1270), 'borg.util.check_call_capturing', 'borg.util.check_call_capturing', (["[claspre_path, '--list-features']"], {}), "([claspre_path, '--list-features'])\n", (1235, 1270), False, 'import borg\n'), ((1789, 1829), 'borg.util.call_capturing', 'borg.util.call_capturing', (['values_command'], {}), '(values_command)\n', (1813, 1829), False, 'import borg\n'), ((1012, 1056), 'resource.getrusage', 'resource.getrusage', (['resource.RUSAGE_CHILDREN'], {}), '(resource.RUSAGE_CHILDREN)\n', (1030, 1056), False, 'import resource\n'), ((3034, 3092), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'prefix': '"""borg."""', 'suffix': '""".cnf"""'}), "(prefix='borg.', suffix='.cnf')\n", (3061, 3092), False, 'import tempfile\n'), ((2648, 2692), 'resource.getrusage', 'resource.getrusage', (['resource.RUSAGE_CHILDREN'], {}), '(resource.RUSAGE_CHILDREN)\n', (2666, 2692), False, 'import resource\n'), ((2724, 2745), 'borg.get_accountant', 'borg.get_accountant', ([], {}), '()\n', (2743, 2745), False, 'import borg\n'), ((3561, 3618), 'borg.domains.sat.features.get_features_for', 'borg.domains.sat.features.get_features_for', (['cnf_file.name'], {}), '(cnf_file.name)\n', (3603, 3618), False, 'import borg\n'), ((3190, 3252), 'borg.domains.asp.run_lp2sat', 'borg.domains.asp.run_lp2sat', (['binaries_path', 'asp_file', 'cnf_file'], {}), '(binaries_path, asp_file, cnf_file)\n', (3217, 3252), False, 'import borg\n')]
|
# The MIT License (MIT)
#
# Copyright (c) 2019 <NAME> for Adafruit Industries LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`adafruit_featherwing.neopixel_featherwing`
====================================================
Helper for using the `NeoPixel FeatherWing <https://www.adafruit.com/product/2945>`_.
* Author(s): <NAME>
"""
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_FeatherWing.git"
import board
import neopixel
from adafruit_featherwing.pixelmatrix import PixelMatrix
class NeoPixelFeatherWing(PixelMatrix):
"""Class representing a `NeoPixel FeatherWing
<https://www.adafruit.com/product/2945>`_.
The feather uses pins D6 by default"""
def __init__(self, pixel_pin=board.D6, brightness=0.1):
"""
:param pin pixel_pin: The pin for the featherwing
:param float brightness: Optional brightness (0.0-1.0) that defaults to 1.0
"""
super().__init__()
self.rows = 4
self.columns = 8
self._matrix = neopixel.NeoPixel(pixel_pin, self.rows * self.columns,
brightness=brightness, auto_write=False,
pixel_order=neopixel.GRB)
def shift_up(self, rotate=False):
"""
Shift all pixels up
:param rotate: (Optional) Rotate the shifted pixels to bottom (default=False)
This example shifts 2 pixels up
.. code-block:: python
import time
from adafruit_featherwing import neopixel_featherwing
neopixel = neopixel_featherwing.NeoPixelFeatherWing()
# Draw Red and Green Pixels
neopixel[4, 1] = (255, 0, 0)
neopixel[5, 1] = (0, 255, 0)
# Rotate it off the screen
for i in range(0, neopixel.rows - 1):
neopixel.shift_up(True)
time.sleep(.1)
time.sleep(1)
# Shift it off the screen
for i in range(0, neopixel.rows - 1):
neopixel.shift_up()
time.sleep(.1)
"""
super().shift_down(rotate) # Up and down are reversed
def shift_down(self, rotate=False):
"""
Shift all pixels down.
:param rotate: (Optional) Rotate the shifted pixels to top (default=False)
This example shifts 2 pixels down
.. code-block:: python
import time
from adafruit_featherwing import neopixel_featherwing
neopixel = neopixel_featherwing.NeoPixelFeatherWing()
# Draw Red and Green Pixels
neopixel[4, 1] = (255, 0, 0)
neopixel[5, 1] = (0, 255, 0)
# Rotate it off the screen
for i in range(0, neopixel.rows - 1):
neopixel.shift_down(True)
time.sleep(.1)
time.sleep(1)
# Shift it off the screen
for i in range(0, neopixel.rows - 1):
neopixel.shift_down()
time.sleep(.1)
"""
super().shift_up(rotate) # Up and down are reversed
|
[
"neopixel.NeoPixel"
] |
[((2068, 2194), 'neopixel.NeoPixel', 'neopixel.NeoPixel', (['pixel_pin', '(self.rows * self.columns)'], {'brightness': 'brightness', 'auto_write': '(False)', 'pixel_order': 'neopixel.GRB'}), '(pixel_pin, self.rows * self.columns, brightness=\n brightness, auto_write=False, pixel_order=neopixel.GRB)\n', (2085, 2194), False, 'import neopixel\n')]
|
from __future__ import unicode_literals
import unittest, vmraid
from vmraid.modules import patch_handler
class TestPatches(unittest.TestCase):
def test_patch_module_names(self):
vmraid.flags.final_patches = []
vmraid.flags.in_install = True
for patchmodule in patch_handler.get_all_patches():
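			# "execute:" entries are inline statements; everything else is an importable patch module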
if patchmodule.startswith("execute:"):
pass
else:
if patchmodule.startswith("finally:"):
patchmodule = patchmodule.split('finally:')[-1]
self.assertTrue(vmraid.get_attr(patchmodule.split()[0] + ".execute"))
vmraid.flags.in_install = False
|
[
"vmraid.modules.patch_handler.get_all_patches"
] |
[((268, 299), 'vmraid.modules.patch_handler.get_all_patches', 'patch_handler.get_all_patches', ([], {}), '()\n', (297, 299), False, 'from vmraid.modules import patch_handler\n')]
|
import os, sys
import numpy as np
import time
import glob
import random
import math
from typing import Any, Callable, cast, Dict, List, Optional, Tuple
from PIL import Image
import pickle
import torch as tc
from torchvision import transforms
from torchvision import datasets
#from torchvision.datasets.folder import default_loader
from torch.utils.data import DataLoader, Dataset
from data import get_aug_tforms
def default_loader(path: str) -> Any:
from torchvision import get_image_backend
if get_image_backend() == 'accimage':
return accimage_loader(path)
else:
return pil_loader(path)
# TODO: specify the return type
def accimage_loader(path: str) -> Any:
import accimage
try:
return accimage.Image(path)
except IOError:
# Potentially a decoding problem, fall back to PIL.Image
return pil_loader(path)
def pil_loader(path: str) -> Image.Image:
# open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
with open(path, 'rb') as f:
img = Image.open(f)
img = img.convert('RGB')
f.close()
return img
## legacy?
def shuffle_initial_data_order(ld, seed):
n_data = len(ld.dataset)
np.random.seed(seed)
idx_rnd = np.random.permutation(n_data)
##TODO: generalize
if hasattr(ld.dataset, "samples"):
ld.dataset.samples = [ld.dataset.samples[i] for i in idx_rnd]
if hasattr(ld.dataset, "targets"):
ld.dataset.targets = [ld.dataset.targets[i] for i in idx_rnd]
if hasattr(ld.dataset, "imgs"):
ld.dataset.imgs = [ld.dataset.imgs[i] for i in idx_rnd]
if hasattr(ld.dataset, "frames_pair"):
ld.dataset.frames_pair = [ld.dataset.frames_pair[i] for i in idx_rnd]
if hasattr(ld.dataset, "fn"):
ld.dataset.fn = [ld.dataset.fn[i] for i in idx_rnd]
np.random.seed(int(time.time()%2**32))
def shuffle_list(list_ori, seed):
random.seed(seed)
random.shuffle(list_ori)
return list_ori
def split_list(split_ratio, list_ori):
list_split = []
n_start = 0
for i, ratio in enumerate(split_ratio):
n = math.floor(len(list_ori)*ratio)
if i+1 == len(split_ratio):
list_split.append(list_ori[n_start:])
else:
list_split.append(list_ori[n_start:n_start+n])
n_start += n
random.seed(time.time())
return list_split
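# e.g., split_list([0.8, 0.1, 0.1], list(range(10))) -> [[0,...,7], [8], [9]]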
def get_split_list(split_ratio, root, ext, seed):
fns_train = glob.glob(os.path.join(root, 'train', '**', '**.'+ext)) ##TODO:
fns_val = glob.glob(os.path.join(root, 'val', '**', '**.'+ext))
fns_test = glob.glob(os.path.join(root, 'test', '**', '**.'+ext))
## shuffle list since usually it's sorted
random.seed(seed)
random.shuffle(fns_train)
random.seed(seed)
random.shuffle(fns_val)
random.seed(seed)
random.shuffle(fns_test)
## set splits
## NB: assignments through vars()/locals() inside a function are not guaranteed to
## persist in CPython (and do not under PEP 667), so explicit dicts are used instead.
fns_all = {'train': fns_train, 'val': fns_val, 'test': fns_test}
splits = {}
fns_split = []
for name, ratio in split_ratio.items():
if ratio is None:
splits[name] = fns_all[name]
else:
fns_split += fns_all[name]
## random split
random.seed(seed)
random.shuffle(fns_split)
n_start = 0
for name, ratio in split_ratio.items():
if ratio is None:
continue
n = math.floor(len(fns_split)*ratio)
splits[name] = fns_split[n_start:n_start+n]
n_start += n
random.seed(time.time())
return {'train': splits['train'], 'val': splits['val'], 'test': splits['test']}
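## Example (hypothetical values): with split_ratio = {'train': None, 'val': 0.5, 'test': 0.5},
## the predefined train/ files are kept as-is, while the val/ and test/ files are pooled,
## shuffled with `seed`, and re-split 50/50. A ratio of None therefore means "keep the
## directory's own split"; numeric ratios are fractions of the pooled file list.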
def split_data(split_ratio, data, seed):
## shuffle data
np.random.seed(seed)
random.shuffle(data)
np.random.seed(int(time.time()))
## split data
ratio_list = [(name, ratio) for name, ratio in split_ratio.items()]
name_list = [name for name, _ in ratio_list]
n_list = [math.floor(len(data)*ratio) for _, ratio in ratio_list[:-1]]
n_list = n_list + [len(data) - np.sum(n_list)]
data_split = np.split(data, np.cumsum(n_list))[:-1]
return {n: v for n, v in zip(name_list, data_split)}
# def split_data(data_fns, val_ratio, test_ratio, seed):
# n_data = len(data_fn)
# n_val = round(n_data*val_ratio)
# n_test = round(n_data*test_ratio)
# n_train = n_data - n_val - n_test
# np.random.seed(seed)
# idx_rnd = np.random.permutation(n_data)
# train_fns = data_fns[idx_rnd[:n_train]]
# val_fns = data_fns[idx_rnd[n_train:n_train+n_val]]
# test_fns = data_fns[idx_rnd[n_train+n_val:n_train+n_val+n_test]]
# return train_fns, val_fns, test_fns
def init_loader(dataset_fn, split_list, classes, class_to_idx, domain_label, tforms, rnd, batch_size, num_workers):
dataset = dataset_fn(split_list, classes, class_to_idx, transform=transforms.Compose(tforms), domain_label=domain_label)
loader = DataLoader(dataset, batch_size=batch_size, shuffle=rnd, num_workers=num_workers)
return loader
def init_loader_reg(dataset_fn, data_split, tforms, tforms_y, rnd, batch_size, num_workers):
dataset = dataset_fn(data_split, transform_x=transforms.Compose(tforms), transform_y=transforms.Compose(tforms_y))
loader = DataLoader(dataset, batch_size=batch_size, shuffle=rnd, num_workers=num_workers)
return loader
def find_classes(root):
classes = [d.name for s in ['train', 'val', 'test'] for d in os.scandir(os.path.join(root, s)) if d.is_dir()]
classes = list(set(classes))
classes.sort()
class_to_idx = {cls_name: i for i, cls_name in enumerate(classes)}
return classes, class_to_idx
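## Example (hypothetical layout): with root/train/cat, root/val/dog and root/test/cat,
## find_classes returns classes = ['cat', 'dog'] and class_to_idx = {'cat': 0, 'dog': 1},
## i.e. indices are assigned after sorting the union of class folder names across splits.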
def get_class_name(fn):
return fn.split('/')[-2]
def make_dataset(fn_list, class_to_idx):
instances = []
for fn in fn_list:
class_idx = class_to_idx[get_class_name(fn)]
item = fn, class_idx
instances.append(item)
return instances
class JointLoader:
def __init__(self, lds):
self.lds = lds
def __iter__(self):
self.iters = [iter(ld) for ld in self.lds]
self.iter_end = [False for ld in self.lds]
return self
def __next__(self):
x_list, y_list = [], []
for i, it in enumerate(self.iters):
try:
x, y = next(it)
except StopIteration:
self.iter_end[i] = True
if all(self.iter_end):
raise StopIteration
else:
self.iters[i] = iter(self.lds[i])
x, y = next(self.iters[i])
x_list.append(x)
y_list.append(y)
# maintain the same batch size
bs_min = min([o.shape[0] for o in x_list])
x_list = [o[:bs_min] for o in x_list]
x_list = tc.cat(x_list, 0)
y_list = [o[:bs_min] for o in y_list]
y_list = tc.cat(y_list, 0)
return x_list, y_list
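## NB: iteration stops only after every wrapped loader has been exhausted at least once;
## shorter loaders are restarted along the way, so their samples can repeat within one pass.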
class DomainData:
def __init__(self, dsld_src, dsld_tar):
self.train = JointLoader([dsld_src.train, dsld_tar.train])
self.val = JointLoader([dsld_src.val, dsld_tar.val])
self.test = JointLoader([dsld_src.test, dsld_tar.test])
class ImageList:
def __init__(self, fn_list, classes, class_to_idx, transform=None, target_transform=None, loader=default_loader, domain_label=None):
self.loader = loader
self.classes = classes
self.class_to_idx = class_to_idx
self.transform = transform
self.target_transform = target_transform
self.samples = make_dataset(fn_list, class_to_idx)
self.domain_label = domain_label
def __getitem__(self, index):
path, target = self.samples[index]
target = target if self.domain_label is None else self.domain_label
sample = self.loader(path)
if self.transform is not None:
sample = self.transform(sample)
if self.target_transform is not None:
target = self.target_transform(target)
return sample, target
def __len__(self):
return len(self.samples)
class ClassificationData:
def __init__(self, root, batch_size,
dataset_fn,
split_ratio,
sample_size,
domain_label,
train_rnd, val_rnd, test_rnd,
train_aug, val_aug, test_aug,
aug_types,
num_workers,
tforms_dft, tforms_dft_rnd,
ext,
seed,
):
## data augmentation tforms
tforms_aug = get_aug_tforms(aug_types)
## tforms for each data split
tforms_train = tforms_dft_rnd if train_rnd else tforms_dft
tforms_train = tforms_train + tforms_aug if train_aug else tforms_train
tforms_val = tforms_dft_rnd if val_rnd else tforms_dft
tforms_val = tforms_val + tforms_aug if val_aug else tforms_val
tforms_test = tforms_dft_rnd if test_rnd else tforms_dft
tforms_test = tforms_test + tforms_aug if test_aug else tforms_test
print("[tforms_train] ", tforms_train)
print("[tforms_val] ", tforms_val)
print("[tforms_test] ", tforms_test)
## splits
split_list = get_split_list(split_ratio, root, ext, seed)
classes, class_to_idx = find_classes(root)
## truncate samples
for name, value in split_list.items():
if sample_size[name] is None:
continue
split_list[name] = value[:sample_size[name]]
## create loaders
self.train = init_loader(dataset_fn, split_list['train'], classes, class_to_idx, domain_label, tforms_train, train_rnd, batch_size, num_workers)
self.val = init_loader(dataset_fn, split_list['val'], classes, class_to_idx, domain_label, tforms_val, val_rnd, batch_size, num_workers)
self.test = init_loader(dataset_fn, split_list['test'], classes, class_to_idx, domain_label, tforms_test, test_rnd, batch_size, num_workers)
class DetectionListDataset:
def __init__(self, split, transform=None, target_transform=None, loader=default_loader, domain_label=None):
self.loader = loader
self.transform = transform
self.target_transform = target_transform
self.samples = [(fn, label) for fn, label in zip(split['fn'], split['label'])]
self.domain_label = domain_label
def __getitem__(self, index):
path, target = self.samples[index]
target = target if self.domain_label is None else self.domain_label
sample = self.loader(path)
if self.transform is not None:
sample = self.transform(sample)
if self.target_transform is not None:
target = self.target_transform(target)
return sample, target
def __len__(self):
return len(self.samples)
class DetectionData:
def __init__(self, root, batch_size,
dataset_fn,
data_split,
#split_ratio,
sample_size,
domain_label,
train_rnd, val_rnd, test_rnd,
train_aug, val_aug, test_aug,
aug_types,
num_workers,
tforms_dft, tforms_dft_rnd,
collate_fn=None,
#ext,
#seed,
):
## data augmentation tforms
tforms_aug = get_aug_tforms(aug_types)
## tforms for each data split
tforms_train = tforms_dft_rnd if train_rnd else tforms_dft
tforms_train = tforms_train + tforms_aug if train_aug else tforms_train
tforms_val = tforms_dft_rnd if val_rnd else tforms_dft
tforms_val = tforms_val + tforms_aug if val_aug else tforms_val
tforms_test = tforms_dft_rnd if test_rnd else tforms_dft
tforms_test = tforms_test + tforms_aug if test_aug else tforms_test
print("[tforms_train] ", tforms_train)
print("[tforms_val] ", tforms_val)
print("[tforms_test] ", tforms_test)
# ## splits
# split_list = get_split_list(split_ratio, root, ext, seed)
# classes, class_to_idx = find_classes(root)
## truncate samples
for name, value in data_split.items():
if sample_size[name] is None:
continue
data_split[name] = {k: v[:sample_size[name]] for k, v in value.items()}
## create loaders
dataset = dataset_fn(data_split['train'], transform=transforms.Compose(tforms_train), domain_label=domain_label)
self.train = DataLoader(dataset, batch_size=batch_size, shuffle=train_rnd, num_workers=num_workers, collate_fn=collate_fn)
dataset = dataset_fn(data_split['val'], transform=transforms.Compose(tforms_val), domain_label=domain_label)
self.val = DataLoader(dataset, batch_size=batch_size, shuffle=val_rnd, num_workers=num_workers, collate_fn=collate_fn)
dataset = dataset_fn(data_split['test'], transform=transforms.Compose(tforms_test), domain_label=domain_label)
self.test = DataLoader(dataset, batch_size=batch_size, shuffle=test_rnd, num_workers=num_workers, collate_fn=collate_fn)
# class Data_old:
# def __init__(self, root, batch_size,
# dataset_fn,
# train_rnd, val_rnd, test_rnd,
# train_aug, val_aug, test_aug,
# aug_types,
# num_workers,
# tforms_dft, tforms_dft_rnd,
# seed=0,
# ):
# ## data augmentation tforms
# tforms_aug = get_aug_tforms(aug_types)
# ## tforms for each data split
# tforms_train = tforms_dft_rnd if train_rnd else tforms_dft
# tforms_train += tforms_aug if train_aug else []
# tforms_val = tforms_dft if val_rnd else tforms_dft
# tforms_val += tforms_aug if val_aug else []
# tforms_test = tforms_dft if test_rnd else tforms_dft
# tforms_test += tforms_aug if test_aug else []
# ## create loaders
# subroot = os.path.join(root, "train")
# if os.path.exists(subroot):
# dataset = dataset_fn(subroot, transform=tforms.Compose(tforms_train))
# self.train = DataLoader(dataset, batch_size=batch_size, shuffle=train_rnd, num_workers=num_workers)
# shuffle_initial_data_order(self.train, seed)
# else:
# self.train = None
# subroot = os.path.join(root, "val")
# if os.path.exists(subroot):
# dataset = dataset_fn(subroot, transform=tforms.Compose(tforms_val))
# self.val = DataLoader(dataset, batch_size=batch_size, shuffle=val_rnd, num_workers=num_workers)
# shuffle_initial_data_order(self.val, seed)
# else:
# self.val = None
# subroot = os.path.join(root, "test")
# if os.path.exists(subroot):
# dataset = dataset_fn(subroot, transform=tforms.Compose(tforms_test))
# self.test = DataLoader(dataset, batch_size=batch_size, shuffle=test_rnd, num_workers=num_workers)
# shuffle_initial_data_order(self.test, seed)
# else:
# self.test = None
class ImageDataset(datasets.ImageFolder):
def __init__(self, root, transform, domain_label=None):
super().__init__(root, transform=transform)
self.domain_label = domain_label
def __getitem__(self, index):
sample, target = super().__getitem__(index)
target = target if self.domain_label is None else self.domain_label
return sample, target
class ImageData:
def __init__(self, root, batch_size,
train_rnd, val_rnd, test_rnd,
train_aug, val_aug, test_aug,
aug_types,
num_workers,
tforms_dft, tforms_dft_rnd,
domain_label=None,
seed=0,
):
## data augmentation tforms
tforms_aug = get_aug_tforms(aug_types)
## tforms for each data split
tforms_train = tforms_dft_rnd if train_rnd else tforms_dft
tforms_train = tforms_train + tforms_aug if train_aug else tforms_train
tforms_val = tforms_dft_rnd if val_rnd else tforms_dft
tforms_val = tforms_val + tforms_aug if val_aug else tforms_val
tforms_test = tforms_dft_rnd if test_rnd else tforms_dft
tforms_test = tforms_test + tforms_aug if test_aug else tforms_test
## create loaders
#dataset = datasets.ImageFolder(os.path.join(root, "train"), transform=transforms.Compose(tforms_train))
dataset = ImageDataset(os.path.join(root, "train"), transform=transforms.Compose(tforms_train), domain_label=domain_label)
self.train = DataLoader(dataset, batch_size=batch_size, shuffle=train_rnd, num_workers=num_workers)
#dataset = datasets.ImageFolder(os.path.join(root, "val"), transform=transforms.Compose(tforms_val))
dataset = ImageDataset(os.path.join(root, "val"), transform=transforms.Compose(tforms_val), domain_label=domain_label)
self.val = DataLoader(dataset, batch_size=batch_size, shuffle=val_rnd, num_workers=num_workers)
#dataset = datasets.ImageFolder(os.path.join(root, "test"), transform=transforms.Compose(tforms_test))
dataset = ImageDataset(os.path.join(root, "test"), transform=transforms.Compose(tforms_test), domain_label=domain_label)
self.test = DataLoader(dataset, batch_size=batch_size, shuffle=test_rnd, num_workers=num_workers)
## shuffle initial order
shuffle_initial_data_order(self.train, seed)
shuffle_initial_data_order(self.val, seed)
shuffle_initial_data_order(self.test, seed)
##
## regression
##
class RegressionDatasetLight(Dataset):
def __init__(self, data, transform_x, transform_y, label_index=-1):
self.label_index = label_index
self.data = data
self.transform_x = transform_x
self.transform_y = transform_y
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
data_i = self.data[idx]
y = [data_i[self.label_index]]
x = np.delete(data_i, self.label_index)
return self.transform_x(x), self.transform_y(y)
class RegressionDataset(Dataset):
def __init__(self, root, singlefile=False, label_index=-1):
self.singlefile = singlefile
self.label_index = label_index
if self.singlefile:
fn = glob.glob(os.path.join(root, "*.pk"))[0]
with open(fn, 'rb') as f:
self.data = pickle.load(f)
else:
self.fns = glob.glob(os.path.join(root, "*.pk"))
def __len__(self):
## singlefile keeps every example in self.data; otherwise there is one pickle file per example
return len(self.data) if self.singlefile else len(self.fns)
def __getitem__(self, idx):
if self.singlefile:
data_i = self.data[idx]
y = [data_i[self.label_index]] ## mirrors RegressionDatasetLight's label handling
x = np.delete(data_i, self.label_index)
return x, y
else:
with open(self.fns[idx], "rb") as f:
return pickle.load(f)
class RegressionDataLight:
def __init__(self, root, batch_size,
dataset_fn,
split_ratio,
sample_size,
#domain_label,
train_rnd, val_rnd, test_rnd,
train_aug, val_aug, test_aug,
aug_types,
num_workers,
tforms_x_dft, tforms_x_dft_rnd,
tforms_y_dft, tforms_y_dft_rnd,
#ext,
seed,
):
## data augmentation tforms
tforms_aug = get_aug_tforms(aug_types)
## tforms for each data split
tforms_train = tforms_x_dft_rnd if train_rnd else tforms_x_dft
tforms_train = tforms_train + tforms_aug if train_aug else tforms_train
tforms_val = tforms_x_dft_rnd if val_rnd else tforms_x_dft
tforms_val = tforms_val + tforms_aug if val_aug else tforms_val
tforms_test = tforms_x_dft_rnd if test_rnd else tforms_x_dft
tforms_test = tforms_test + tforms_aug if test_aug else tforms_test
tforms_y_train = tforms_y_dft_rnd if train_rnd else tforms_y_dft
tforms_y_val = tforms_y_dft_rnd if val_rnd else tforms_y_dft
tforms_y_test = tforms_y_dft_rnd if test_rnd else tforms_y_dft
print("[tforms_train] ", tforms_train)
print("[tforms_val] ", tforms_val)
print("[tforms_test] ", tforms_test)
## load data
fn = glob.glob(os.path.join(root, "*.pk"))[0]
data = pickle.load(open(fn, 'rb'))
## splits
data_split = split_data(split_ratio, data, seed)
## truncate samples
for name, value in data_split.items():
if sample_size[name] is None:
continue
data_split[name] = value[:sample_size[name]]
## create loaders
self.train = init_loader_reg(dataset_fn, data_split['train'], tforms_train, tforms_y_train, train_rnd, batch_size, num_workers)
self.val = init_loader_reg(dataset_fn, data_split['val'], tforms_val, tforms_y_val, val_rnd, batch_size, num_workers)
self.test = init_loader_reg(dataset_fn, data_split['test'], tforms_test, tforms_y_test, test_rnd, batch_size, num_workers)
def compute_num_exs(ld, verbose=False):
n = 0
t = time.time()
for x, _ in ld:
n += x.shape[0]
if verbose:
print("[%f sec.] n = %d"%(time.time()-t, n))
t = time.time()
return n
def xywh2xyxy(xywh):
xyxy = xywh.clone()
if len(xyxy.size()) == 2:
xyxy[:, 2:] = xywh[:, :2] + xywh[:, 2:]
else:
xyxy[2:] = xywh[:2] + xywh[2:]
return xyxy
def xyxy2xywh(xyxy):
xywh = xyxy.clone()
xywh[:, 2:] = xyxy[:, 2:] - xyxy[:, :2]
return xywh
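## Worked example (values assumed): a 1-D box xywh = [10, 20, 30, 40] maps to
## xyxy = [10, 20, 40, 60], i.e. width/height are added to the top-left corner;
## xyxy2xywh inverts this for batched [N, 4] boxes.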
def plot_bb(img, bb_xywh, fn=None):
img_PIL = transforms.ToPILImage()(img)
draw = ImageDraw.Draw(img_PIL)
draw.rectangle((*bb_xywh[:2], *(bb_xywh[:2]+bb_xywh[2:])), outline="white", width=2)
if fn is not None:
img_PIL.save(fn)
else:
return img_PIL
|
[
"numpy.random.seed",
"numpy.sum",
"torch.utils.data.DataLoader",
"random.shuffle",
"accimage.Image",
"torchvision.transforms.ToPILImage",
"torch.cat",
"time.time",
"PIL.Image.open",
"numpy.cumsum",
"data.get_aug_tforms",
"random.seed",
"torchvision.transforms.Compose",
"pickle.load",
"torchvision.get_image_backend",
"numpy.random.permutation",
"os.path.join",
"numpy.delete"
] |
[((1242, 1262), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1256, 1262), True, 'import numpy as np\n'), ((1277, 1306), 'numpy.random.permutation', 'np.random.permutation', (['n_data'], {}), '(n_data)\n', (1298, 1306), True, 'import numpy as np\n'), ((1951, 1968), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (1962, 1968), False, 'import random\n'), ((1973, 1997), 'random.shuffle', 'random.shuffle', (['list_ori'], {}), '(list_ori)\n', (1987, 1997), False, 'import random\n'), ((2748, 2765), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (2759, 2765), False, 'import random\n'), ((2770, 2795), 'random.shuffle', 'random.shuffle', (['fns_train'], {}), '(fns_train)\n', (2784, 2795), False, 'import random\n'), ((2800, 2817), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (2811, 2817), False, 'import random\n'), ((2822, 2845), 'random.shuffle', 'random.shuffle', (['fns_val'], {}), '(fns_val)\n', (2836, 2845), False, 'import random\n'), ((2850, 2867), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (2861, 2867), False, 'import random\n'), ((2872, 2896), 'random.shuffle', 'random.shuffle', (['fns_test'], {}), '(fns_test)\n', (2886, 2896), False, 'import random\n'), ((3157, 3174), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (3168, 3174), False, 'import random\n'), ((3179, 3204), 'random.shuffle', 'random.shuffle', (['fns_split'], {}), '(fns_split)\n', (3193, 3204), False, 'import random\n'), ((3638, 3658), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (3652, 3658), True, 'import numpy as np\n'), ((3663, 3683), 'random.shuffle', 'random.shuffle', (['data'], {}), '(data)\n', (3677, 3683), False, 'import random\n'), ((4872, 4957), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'batch_size', 'shuffle': 'rnd', 'num_workers': 'num_workers'}), '(dataset, batch_size=batch_size, shuffle=rnd, num_workers=num_workers\n )\n', (4882, 4957), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((5198, 5283), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'batch_size', 'shuffle': 'rnd', 'num_workers': 'num_workers'}), '(dataset, batch_size=batch_size, shuffle=rnd, num_workers=num_workers\n )\n', (5208, 5283), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((21443, 21454), 'time.time', 'time.time', ([], {}), '()\n', (21452, 21454), False, 'import time\n'), ((508, 527), 'torchvision.get_image_backend', 'get_image_backend', ([], {}), '()\n', (525, 527), False, 'from torchvision import get_image_backend\n'), ((739, 759), 'accimage.Image', 'accimage.Image', (['path'], {}), '(path)\n', (753, 759), False, 'import accimage\n'), ((1073, 1086), 'PIL.Image.open', 'Image.open', (['f'], {}), '(f)\n', (1083, 1086), False, 'from PIL import Image\n'), ((2391, 2402), 'time.time', 'time.time', ([], {}), '()\n', (2400, 2402), False, 'import time\n'), ((2504, 2550), 'os.path.join', 'os.path.join', (['root', '"""train"""', '"""**"""', "('**.' + ext)"], {}), "(root, 'train', '**', '**.' + ext)\n", (2516, 2550), False, 'import os, sys\n'), ((2583, 2627), 'os.path.join', 'os.path.join', (['root', '"""val"""', '"""**"""', "('**.' + ext)"], {}), "(root, 'val', '**', '**.' + ext)\n", (2595, 2627), False, 'import os, sys\n'), ((2652, 2697), 'os.path.join', 'os.path.join', (['root', '"""test"""', '"""**"""', "('**.' + ext)"], {}), "(root, 'test', '**', '**.' + ext)\n", (2664, 2697), False, 'import os, sys\n'), ((3455, 3466), 'time.time', 'time.time', ([], {}), '()\n', (3464, 3466), False, 'import time\n'), ((6766, 6783), 'torch.cat', 'tc.cat', (['x_list', '(0)'], {}), '(x_list, 0)\n', (6772, 6783), True, 'import torch as tc\n'), ((6847, 6864), 'torch.cat', 'tc.cat', (['y_list', '(0)'], {}), '(y_list, 0)\n', (6853, 6864), True, 'import torch as tc\n'), ((8578, 8603), 'data.get_aug_tforms', 'get_aug_tforms', (['aug_types'], {}), '(aug_types)\n', (8592, 8603), False, 'from data import get_aug_tforms\n'), ((11440, 11465), 'data.get_aug_tforms', 'get_aug_tforms', (['aug_types'], {}), '(aug_types)\n', (11454, 11465), False, 'from data import get_aug_tforms\n'), ((12610, 12724), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'batch_size', 'shuffle': 'train_rnd', 'num_workers': 'num_workers', 'collate_fn': 'collate_fn'}), '(dataset, batch_size=batch_size, shuffle=train_rnd, num_workers=\n num_workers, collate_fn=collate_fn)\n', (12620, 12724), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((12857, 12969), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'batch_size', 'shuffle': 'val_rnd', 'num_workers': 'num_workers', 'collate_fn': 'collate_fn'}), '(dataset, batch_size=batch_size, shuffle=val_rnd, num_workers=\n num_workers, collate_fn=collate_fn)\n', (12867, 12969), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((13105, 13218), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'batch_size', 'shuffle': 'test_rnd', 'num_workers': 'num_workers', 'collate_fn': 'collate_fn'}), '(dataset, batch_size=batch_size, shuffle=test_rnd, num_workers=\n num_workers, collate_fn=collate_fn)\n', (13115, 13218), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((16046, 16071), 'data.get_aug_tforms', 'get_aug_tforms', (['aug_types'], {}), '(aug_types)\n', (16060, 16071), False, 'from data import get_aug_tforms\n'), ((16760, 16851), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'batch_size', 'shuffle': 'train_rnd', 'num_workers': 'num_workers'}), '(dataset, batch_size=batch_size, shuffle=train_rnd, num_workers=\n num_workers)\n', (16770, 16851), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((17102, 17191), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'batch_size', 'shuffle': 'val_rnd', 'num_workers': 'num_workers'}), '(dataset, batch_size=batch_size, shuffle=val_rnd, num_workers=\n num_workers)\n', (17112, 17191), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((17447, 17537), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'batch_size', 'shuffle': 'test_rnd', 'num_workers': 'num_workers'}), '(dataset, batch_size=batch_size, shuffle=test_rnd, num_workers=\n num_workers)\n', (17457, 17537), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((18202, 18237), 'numpy.delete', 'np.delete', (['data_i', 'self.label_index'], {}), '(data_i, self.label_index)\n', (18211, 18237), True, 'import numpy as np\n'), ((19711, 19736), 'data.get_aug_tforms', 'get_aug_tforms', (['aug_types'], {}), '(aug_types)\n', (19725, 19736), False, 'from data import get_aug_tforms\n'), ((21965, 21988), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (21986, 21988), False, 'from torchvision import transforms\n'), ((3707, 3718), 'time.time', 'time.time', ([], {}), '()\n', (3716, 3718), False, 'import time\n'), ((4024, 4041), 'numpy.cumsum', 'np.cumsum', (['n_list'], {}), '(n_list)\n', (4033, 4041), True, 'import numpy as np\n'), ((4804, 4830), 'torchvision.transforms.Compose', 'transforms.Compose', (['tforms'], {}), '(tforms)\n', (4822, 4830), False, 'from torchvision import transforms\n'), ((5115, 5141), 'torchvision.transforms.Compose', 'transforms.Compose', (['tforms'], {}), '(tforms)\n', (5133, 5141), False, 'from torchvision import transforms\n'), ((5155, 5183), 'torchvision.transforms.Compose', 'transforms.Compose', (['tforms_y'], {}), '(tforms_y)\n', (5173, 5183), False, 'from torchvision import transforms\n'), ((16639, 16666), 'os.path.join', 'os.path.join', (['root', '"""train"""'], {}), "(root, 'train')\n", (16651, 16666), False, 'import os, sys\n'), ((16987, 17012), 'os.path.join', 'os.path.join', (['root', '"""val"""'], {}), "(root, 'val')\n", (16999, 17012), False, 'import os, sys\n'), ((17329, 17355), 'os.path.join', 'os.path.join', (['root', '"""test"""'], {}), "(root, 'test')\n", (17341, 17355), False, 'import os, sys\n'), ((19094, 19127), 'numpy.delete', 'np.delete', (['data', 'self.label_index'], {}), '(data, self.label_index)\n', (19103, 19127), True, 'import numpy as np\n'), ((21592, 21603), 'time.time', 'time.time', ([], {}), '()\n', (21601, 21603), False, 'import time\n'), ((1887, 1898), 'time.time', 'time.time', ([], {}), '()\n', (1896, 1898), False, 'import time\n'), ((3975, 3989), 'numpy.sum', 'np.sum', (['n_list'], {}), '(n_list)\n', (3981, 3989), True, 'import numpy as np\n'), ((5399, 5420), 'os.path.join', 'os.path.join', (['root', 's'], {}), '(root, s)\n', (5411, 5420), False, 'import os, sys\n'), ((12528, 12560), 'torchvision.transforms.Compose', 'transforms.Compose', (['tforms_train'], {}), '(tforms_train)\n', (12546, 12560), False, 'from torchvision import transforms\n'), ((12779, 12809), 'torchvision.transforms.Compose', 'transforms.Compose', (['tforms_val'], {}), '(tforms_val)\n', (12797, 12809), False, 'from torchvision import transforms\n'), ((13025, 13056), 'torchvision.transforms.Compose', 'transforms.Compose', (['tforms_test'], {}), '(tforms_test)\n', (13043, 13056), False, 'from torchvision import transforms\n'), ((16678, 16710), 'torchvision.transforms.Compose', 'transforms.Compose', (['tforms_train'], {}), '(tforms_train)\n', (16696, 16710), False, 'from torchvision import transforms\n'), ((17024, 17054), 'torchvision.transforms.Compose', 'transforms.Compose', (['tforms_val'], {}), '(tforms_val)\n', (17042, 17054), False, 'from torchvision import transforms\n'), ((17367, 17398), 'torchvision.transforms.Compose', 'transforms.Compose', (['tforms_test'], {}), '(tforms_test)\n', (17385, 17398), False, 'from torchvision import transforms\n'), ((18663, 18689), 'os.path.join', 'os.path.join', (['root', '"""*.pk"""'], {}), "(root, '*.pk')\n", (18675, 18689), False, 'import os, sys\n'), ((18979, 18993), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (18990, 18993), False, 'import pickle\n'), ((20605, 20631), 'os.path.join', 'os.path.join', (['root', '"""*.pk"""'], {}), "(root, '*.pk')\n", (20617, 20631), False, 'import os, sys\n'), ((18533, 18559), 'os.path.join', 'os.path.join', (['root', '"""*.pk"""'], {}), "(root, '*.pk')\n", (18545, 18559), False, 'import os, sys\n'), ((21557, 21568), 'time.time', 'time.time', ([], {}), '()\n', (21566, 21568), False, 'import time\n')]
|
# -*- coding: utf-8 -*-
# Copyright 2020 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The get_path lookup plugin
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
name: get_path
author: <NAME> (@cidrblock)
version_added: "1.0.0"
short_description: Retrieve the value in a variable using a path
description:
- Use a I(path) to retrieve a nested value from a I(var)
- B(get_path) is also available as a B(filter plugin) for convenience
- Using the parameters below: C(lookup('ansible.utils.get_path', var, path, wantlist))
options:
var:
description: The variable from which the value should be extracted.
type: raw
required: True
path:
description: >
The I(path) in the I(var) to retrieve the value of.
The I(path) needs to be a valid jinja path.
type: str
required: True
wantlist:
description: >
If set to C(True), the return value will always be a list.
This can also be accomplished using C(query) or C(q) instead of C(lookup).
U(https://docs.ansible.com/ansible/latest/plugins/lookup.html).
type: bool
notes:
"""
EXAMPLES = r"""
- ansible.builtin.set_fact:
a:
b:
c:
d:
- 0
- 1
e:
- True
- False
- name: Retrieve a value deep inside a using a path
ansible.builtin.set_fact:
value: "{{ lookup('ansible.utils.get_path', a, path) }}"
vars:
path: b.c.d[0]
# TASK [Retrieve a value deep inside a using a path] ******************
# ok: [localhost] => changed=false
# ansible_facts:
# value: '0'
#### Working with hostvars
- name: Retrieve a value deep inside all of the host's vars
ansible.builtin.set_fact:
value: "{{ lookup('ansible.utils.get_path', look_in, look_for) }}"
vars:
look_in: "{{ hostvars[inventory_hostname] }}"
look_for: a.b.c.d[0]
# TASK [Retrieve a value deep inside all of the host's vars] ********
# ok: [nxos101] => changed=false
# ansible_facts:
# as_filter: '0'
# as_lookup: '0'
#### Used alongside ansible.utils.to_paths
- name: Get the paths for the object
ansible.builtin.set_fact:
paths: "{{ lookup('ansible.utils.to_paths', a, prepend='a') }}"
- name: Retrieve the value of each path from vars
ansible.builtin.debug:
msg: "The value of path {{ path }} in vars is {{ value }}"
loop: "{{ paths.keys()|list }}"
loop_control:
label: "{{ item }}"
vars:
path: "{{ item }}"
value: "{{ lookup('ansible.utils.get_path', hostvars[inventory_hostname], item) }}"
# TASK [Get the paths for the object] *******************************
# ok: [nxos101] => changed=false
# ansible_facts:
# paths:
# a.b.c.d[0]: 0
# a.b.c.d[1]: 1
# a.b.c.e[0]: True
# a.b.c.e[1]: False
# TASK [Retrieve the value of each path from vars] ******************
# ok: [nxos101] => (item=a.b.c.d[0]) =>
# msg: The value of path a.b.c.d[0] in vars is 0
# ok: [nxos101] => (item=a.b.c.d[1]) =>
# msg: The value of path a.b.c.d[1] in vars is 1
# ok: [nxos101] => (item=a.b.c.e[0]) =>
# msg: The value of path a.b.c.e[0] in vars is True
# ok: [nxos101] => (item=a.b.c.e[1]) =>
# msg: The value of path a.b.c.e[1] in vars is False
#### Working with complex structures and transforming results
- name: Retrieve the current interface config
cisco.nxos.nxos_interfaces:
state: gathered
register: interfaces
- name: Get the description of several interfaces
ansible.builtin.debug:
msg: "{{ lookup('ansible.utils.get_path', rekeyed, item) }}"
vars:
rekeyed:
by_name: "{{ interfaces.gathered|ansible.builtin.rekey_on_member('name') }}"
loop:
- by_name['Ethernet1/1'].description
- by_name['Ethernet1/2'].description|upper
- by_name['Ethernet1/3'].description|default('')
# TASK [Get the description of several interfaces] ******************
# ok: [nxos101] => (item=by_name['Ethernet1/1'].description) => changed=false
# msg: Configured by ansible
# ok: [nxos101] => (item=by_name['Ethernet1/2'].description|upper) => changed=false
# msg: CONFIGURED BY ANSIBLE
# ok: [nxos101] => (item=by_name['Ethernet1/3'].description|default('')) => changed=false
# msg: ''
"""
RETURN = """
_raw:
description:
- The value at the I(path) in the I(var).
- See C(wantlist) if a list is always required.
"""
from ansible.errors import AnsibleLookupError
from ansible.plugins.lookup import LookupBase
from ansible_collections.ansible.utils.plugins.module_utils.common.get_path import (
get_path,
)
from ansible_collections.ansible.utils.plugins.module_utils.common.argspec_validate import (
AnsibleArgSpecValidator,
)
class LookupModule(LookupBase):
def run(self, terms, variables, **kwargs):
if isinstance(terms, list):
keys = ["var", "path"]
terms = dict(zip(keys, terms))
terms.update(kwargs)
aav = AnsibleArgSpecValidator(
data=terms, schema=DOCUMENTATION, name="get_path"
)
valid, errors, updated_data = aav.validate()
if not valid:
raise AnsibleLookupError(errors)
updated_data["wantlist"] = True
updated_data["environment"] = self._templar.environment
res = get_path(**updated_data)
return res
|
[
"ansible_collections.ansible.utils.plugins.module_utils.common.argspec_validate.AnsibleArgSpecValidator",
"ansible_collections.ansible.utils.plugins.module_utils.common.get_path.get_path",
"ansible.errors.AnsibleLookupError"
] |
[((5129, 5203), 'ansible_collections.ansible.utils.plugins.module_utils.common.argspec_validate.AnsibleArgSpecValidator', 'AnsibleArgSpecValidator', ([], {'data': 'terms', 'schema': 'DOCUMENTATION', 'name': '"""get_path"""'}), "(data=terms, schema=DOCUMENTATION, name='get_path')\n", (5152, 5203), False, 'from ansible_collections.ansible.utils.plugins.module_utils.common.argspec_validate import AnsibleArgSpecValidator\n'), ((5464, 5488), 'ansible_collections.ansible.utils.plugins.module_utils.common.get_path.get_path', 'get_path', ([], {}), '(**updated_data)\n', (5472, 5488), False, 'from ansible_collections.ansible.utils.plugins.module_utils.common.get_path import get_path\n'), ((5319, 5345), 'ansible.errors.AnsibleLookupError', 'AnsibleLookupError', (['errors'], {}), '(errors)\n', (5337, 5345), False, 'from ansible.errors import AnsibleLookupError\n')]
|
"""
Functions to load TEM data from measurement files
"""
import numpy as np
import matplotlib.pyplot as plt
from heuslertools.tools.measurement import Measurement
from heuslertools.tem.ser_reader import serReader
class TEMMeasurement(Measurement):
"""Object representing a Measurement
Parameters
----------
file : str
path of file
integration_time : float, optional
integration time of measurement, if given cps are calculated, by default `None`
"""
def __init__(self, file, **kwargs):
super().__init__(file, "", **kwargs)
def _load_data(self):
return serReader(self.file)
def _generate_names(self):
for name in self.data:
self.names[name] = {"short_name": name, "unit": "a.u."}
def append_measurement(self, file):
"""Append data from another file.
Parameters
----------
file : str
path of file to append
"""
self.data = np.append(self.data, serReader(file))
def tem_xy_ticks(self, ax):
ax.tick_params(
axis='both', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
left=False,
right=False,
labelbottom=False,
labelleft=False)
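# Minimal usage sketch (the .ser paths below are hypothetical):
# m = TEMMeasurement("scan_1.ser")
# m.append_measurement("scan_2.ser")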
|
[
"heuslertools.tem.ser_reader.serReader"
] |
[((621, 641), 'heuslertools.tem.ser_reader.serReader', 'serReader', (['self.file'], {}), '(self.file)\n', (630, 641), False, 'from heuslertools.tem.ser_reader import serReader\n'), ((1002, 1017), 'heuslertools.tem.ser_reader.serReader', 'serReader', (['file'], {}), '(file)\n', (1011, 1017), False, 'from heuslertools.tem.ser_reader import serReader\n')]
|
import math
import sys
def merge(arr, l, m, r):
n1 = m - l + 1
n2 = r - m
L = [0] * n1
R = [0] * n2
for i in range(0, n1):
L[i] = arr[l + i]
for j in range(0, n2):
R[j] = arr[m + 1 + j]
i = 0
j = 0
k = l
while i < n1 and j < n2:
if L[i] <= R[j]:
arr[k] = L[i]
i += 1
else:
arr[k] = R[j]
j += 1
k += 1
while i < n1:
arr[k] = L[i]
i += 1
k += 1
while j < n2:
arr[k] = R[j]
j += 1
k += 1
def mergeSort(arr,l,r):
if l < r:
m = math.floor((l+(r-1))/2)
mergeSort(arr, l, m)
mergeSort(arr, m+1, r)
merge(arr, l, m, r)
strm = ''
num = sys.stdin.readlines()
for item in num:
strm += item
arr = [int(x) for x in strm.split()]
length = arr.pop(0)
mergeSort(arr, 0, length - 1)
strn = ''
for item in arr:
strn += str(item) + ' '
print(strn)
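# Usage sketch (assuming the script is saved as merge_sort.py): the first whitespace-
# separated number on stdin is the element count, the remaining numbers are the values:
# printf '5 4 1 3 5 2' | python merge_sort.py -> prints the sorted values "1 2 3 4 5"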
|
[
"math.floor",
"sys.stdin.readlines"
] |
[((627, 648), 'sys.stdin.readlines', 'sys.stdin.readlines', ([], {}), '()\n', (646, 648), False, 'import sys\n'), ((516, 545), 'math.floor', 'math.floor', (['((l + (r - 1)) / 2)'], {}), '((l + (r - 1)) / 2)\n', (526, 545), False, 'import math\n')]
|
# Copyright 2021 Dice Finding Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Calculates best-fit dice poses from an image and estimated bounding boxes, classes, and y-rotations of dice and dots."""
import numpy as np
import tensorflow.compat.v2 as tf
import cv2
from pycpd import AffineRegistration, RigidRegistration
from collections import defaultdict
import copy
import DiceConfig
from CoordsHelper import Pose
from typing import Dict, List, Sequence
from DiceProjection import get_local_dots_facing_camera_from_eye_space_pose
from scipy.optimize import linear_sum_assignment
from scipy.spatial.distance import cdist
distance_scale_px = 8.0 #Distance scale for calculating non-linear loss functions, similar to distance functions used in robust nonlinear regression, like https://scipy-cookbook.readthedocs.io/items/robust_regression.html
approx_distance_score_cutoff_fraction = 1.25 #Fraction of minimum Pose score (of approximate poses) that should be compared by full pose calculation
rounded_rectangle_radius = 0.35 #Radius of rounded rectangle for each die's bounding box. Dots at the corners outside the rounded radius are excluded, at they will belong to other die.
inlier_cutoff_px = 10
class PoseResult(object):
"""Represents the result of a pose fit, given in both pyrender and opencv coordinates.
Also calculates comparison against found dot positions in the image."""
def __init__(self, pose : Pose, additional_data : Dict, in_pyrender_coords : bool, distance_scale : float = distance_scale_px):
if in_pyrender_coords:
self.pose_pyrender = pose
self.pose_cv = pose.get_converted_between_cv_and_pyrender_coords()
else:
self.pose_cv = pose
self.pose_pyrender = pose.get_converted_between_cv_and_pyrender_coords()
self.additional_data = additional_data
self.distance_scale = distance_scale
self.comparison_points_cv = None
self.comparison_camera_matrix = None
self.comparison_distortion_coefficients = None
self.projected_points = None
self.comparison_projected_distances = None
self.comparison_soft_l1_distances = None
self.comparison_cauchy_distances = None
self.comparison_arctan_distances = None
self.assignment_score_function = None
self.assignment_scores = None
self.comparison_indices = None
self.projected_indices = None
self.matched_scores = None
self.matched_scores_rms = None
self.calculate_inliers_within_bounding_box = False
@property
def has_comparison(self):
"""If comparison has been calculated."""
return self.comparison_points_cv is not None
def calculate_comparison(self, dot_centers_cv : np.ndarray, camera_matrix : np.ndarray, distortion_coefficients : np.ndarray):
"""Calculates comparison against the given found dot center points, using the given camera matrix and distortion coefficients to perform projection."""
self.comparison_points_cv = dot_centers_cv
self.comparison_camera_matrix = camera_matrix
self.comparison_distortion_coefficients = distortion_coefficients
self._perform_comparison()
def get_soft_l1_distance_scores(self, distances : np.ndarray):
"""Calculates soft-l1 distance scores for a set of distances."""
return self.distance_scale * 2.0 * (np.sqrt(1.0 + (distances/self.distance_scale)**2) - 1.0)
def get_cauchy_distance_scores(self, distances : np.ndarray):
"""Calculates Cauchy distance scores for a set of distances."""
return self.distance_scale * np.log1p((distances/self.distance_scale)**2)
def get_arctan_distance_scores(self, distances : np.ndarray):
"""Calculates arctan distance scores for a set of distances."""
return self.distance_scale * np.arctan((distances/self.distance_scale)**2)
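#Rough comparison of the three robust losses at the default 8 px scale (values approximate):
#an 8 px residual scores ~6.6 (soft-L1), ~5.5 (Cauchy) and ~6.3 (arctan); all three grow
#sub-linearly with distance, damping the influence of outlier matches.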
def _perform_comparison(self):
"""Performs comparison against the found dot center points, using the given camera matrix and distortion coefficients to perform projection."""
local_dots_visible_in_eye_space = get_local_dots_facing_camera_from_eye_space_pose(self.pose_pyrender)
pose_points, pose_points_jacobian = cv2.projectPoints(local_dots_visible_in_eye_space, self.pose_cv.rotation_rodrigues, self.pose_cv.translation, self.comparison_camera_matrix, self.comparison_distortion_coefficients)
self.projected_points = np.squeeze(pose_points, axis = 1)
#For matching points: See https://stackoverflow.com/questions/41936760/how-to-find-a-unique-set-of-closest-pairs-of-points which suggests using https://en.wikipedia.org/wiki/Hungarian_algorithm on assignment problem. See https://docs.scipy.org/doc/scipy-0.18.1/reference/generated/scipy.optimize.linear_sum_assignment.html
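#Toy illustration (hypothetical numbers): for the cost matrix [[1, 9], [8, 2]],
#linear_sum_assignment returns row indices [0, 1] matched to column indices [0, 1] with
#total cost 3, i.e. each found dot is paired with at most one projected dot and vice versa.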
self.comparison_projected_distances = cdist(self.comparison_points_cv, self.projected_points)#MxN matrix with M comparison_points and N projected points
self.comparison_soft_l1_distances = self.get_soft_l1_distance_scores(self.comparison_projected_distances)
self.comparison_cauchy_distances = self.get_cauchy_distance_scores(self.comparison_projected_distances)
self.comparison_arctan_distances = self.get_arctan_distance_scores(self.comparison_projected_distances)
self.assignment_score_function = lambda d : self.get_arctan_distance_scores(d)
self.assignment_scores = self.assignment_score_function(self.comparison_projected_distances)
self.comparison_indices, self.projected_indices = linear_sum_assignment(self.assignment_scores)#Returns (row_indices, column_indices)
self.matched_scores = self.assignment_scores[self.comparison_indices, self.projected_indices]
self.matched_scores_rms = np.sqrt(np.mean(np.square(self.matched_scores)))
def calculate_inliers(self, dot_bounding_boxes_sizes_cv):
"""Calculates inliers of projected against found points, potentially including the found dot bounding box sizes in addition to their positions."""
projected_points_ordered = self.projected_points[self.projected_indices, :]
found_points_ordered = self.comparison_points_cv[self.comparison_indices, :]
differences = projected_points_ordered - found_points_ordered
if self.calculate_inliers_within_bounding_box:
found_bb_sizes_ordered = dot_bounding_boxes_sizes_cv[self.comparison_indices, :]
difference_bb_fractions = differences / found_bb_sizes_ordered
abs_bb_fractions = np.abs(difference_bb_fractions)
max_bb_fraction = np.max(abs_bb_fractions, axis = -1)
inliers = max_bb_fraction < 1.0
else:
center_distances = np.linalg.norm(differences, axis = -1)
inliers = center_distances < inlier_cutoff_px
self.comparison_inlier_indices = tf.boolean_mask(self.comparison_indices, inliers)
self.projected_inlier_indices = tf.boolean_mask(self.projected_indices, inliers)
def _delete_tf(tensor, idx, axis=0):
"""Deletes from a tensor along an axis at the given index."""
n = tf.shape(tensor)[axis]
t = tf.ones_like(idx, dtype=tf.bool)
m = ~tf.scatter_nd(tf.expand_dims(idx, 1), t, [n])
return tf.boolean_mask(tensor, m, axis=axis)
def _bounding_box_intersection(bounding_box_cv_1 : np.ndarray, bounding_box_cv_2 : np.ndarray):
"""Gets the intersection of two OpenCV-coordinate bounding boxes."""
x_min = np.maximum(bounding_box_cv_1[0], bounding_box_cv_2[0])
y_min = np.maximum(bounding_box_cv_1[1], bounding_box_cv_2[1])
x_max = np.minimum(bounding_box_cv_1[2], bounding_box_cv_2[2])
y_max = np.minimum(bounding_box_cv_1[3], bounding_box_cv_2[3])
intersection = np.array([x_min, y_min, x_max, y_max])
return intersection
def _bounding_box_area(bounding_box_cv : np.ndarray, clip_negative = True):
"""Gets the area of a bounding box."""
width = bounding_box_cv[2] - bounding_box_cv[0]
height = bounding_box_cv[3] - bounding_box_cv[1]
if clip_negative:
width = np.maximum(width, 0.0)
height = np.maximum(height, 0.0)
area = width * height
return area
def _intersection_over_union(bounding_box_cv_1 : np.ndarray, bounding_box_cv_2 : np.ndarray):
"""Calculates the intersection over union for two bounding boxes in OpenCV coordinates."""
intersection_bb = _bounding_box_intersection(bounding_box_cv_1, bounding_box_cv_2)
area_intersection = _bounding_box_area(intersection_bb)
area_1 = _bounding_box_area(bounding_box_cv_1)
area_2 = _bounding_box_area(bounding_box_cv_2)
iou = area_intersection / (area_1 + area_2 - area_intersection)
return iou
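#Worked example (boxes assumed as [x_min, y_min, x_max, y_max]): [0, 0, 10, 10] and
#[5, 5, 15, 15] intersect in [5, 5, 10, 10] (area 25); each box has area 100, so
#IoU = 25 / (100 + 100 - 25) = 1/7, roughly 0.14.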
def _get_dot_centers(dot_bounding_boxes):
"""Gets the center of Tensor bounding boxes."""
axis_0 = tf.gather(dot_bounding_boxes, [0, 2], axis = -1)
axis_1 = tf.gather(dot_bounding_boxes, [1, 3], axis = -1)
axis_0_average = tf.reduce_mean(axis_0, axis = -1)
axis_1_average = tf.reduce_mean(axis_1, axis = -1)
centers = tf.stack([axis_0_average, axis_1_average], axis = -1)
return centers
def _get_dot_sizes(dot_bounding_boxes):
"""Gets the size of Tensor bounding boxes."""
axis_0 = dot_bounding_boxes[:, 2] - dot_bounding_boxes[:, 0]
axis_1 = dot_bounding_boxes[:, 3] - dot_bounding_boxes[:, 1]
sizes = tf.stack([axis_0, axis_1], axis = -1)
return sizes
def _get_die_local_up_forward_right_axes(die_class : int):
"""Gets the local up, forward and right axes defined for a given die class (top face)."""
die_local_face_normals = DiceConfig.get_local_face_normals()
die_local_forward_axis = DiceConfig.get_local_face_forward(die_class)
die_local_up_axis = die_local_face_normals[:, die_class - 1]
die_local_right_axis = np.cross(die_local_up_axis, die_local_forward_axis)
return die_local_up_axis, die_local_forward_axis, die_local_right_axis
def _get_approximate_die_pose(die_class : int, y_angle_deg : float, bounding_box_pose_result : PoseResult, x_axis_rotations_deg : Sequence[float], y_rot_offsets_deg : Sequence[float]) -> PoseResult:
"""Gets an approximate pose given the die class, rotation around vertical axes, and a rough pose result estimated from the bounding box.
Checks the given set of rotation offsets rotations around x and y axes."""
bb_pnp_res, bb_rotation, bb_translation = bounding_box_pose_result.pose_cv
if bb_pnp_res:
from scipy.spatial.transform import Rotation
bb_translation_pyrender_coords = bb_translation * np.array([1, -1, -1])[:,np.newaxis]
angle_of_translation_deg = np.rad2deg(np.arctan2(-bb_translation_pyrender_coords[0], -bb_translation_pyrender_coords[2]))
y_angle_deg_with_position_offset = y_angle_deg + angle_of_translation_deg
pose_results = []
die_local_up_axis, die_local_forward_axis, die_local_right_axis = _get_die_local_up_forward_right_axes(die_class)
for y_rot_offset_deg in y_rot_offsets_deg:
y_angle_final = y_angle_deg_with_position_offset + y_rot_offset_deg
angle_cos = np.cos(np.deg2rad(y_angle_final))
angle_sin = np.sin(np.deg2rad(y_angle_final))
die_local_to_scene_rotation = np.eye(3)
die_local_to_scene_rotation[0, 0:3] = angle_cos * die_local_right_axis + angle_sin * die_local_forward_axis
die_local_to_scene_rotation[1, 0:3] = die_local_up_axis
die_local_to_scene_rotation[2, 0:3] = - angle_sin * die_local_right_axis + angle_cos * die_local_forward_axis
for x_axis_rotation_deg in x_axis_rotations_deg:
x_axis_rotation = Rotation.from_euler('x', x_axis_rotation_deg, degrees = True)
x_axis_rotation_matrix = x_axis_rotation.as_matrix()
die_combined_rotation = x_axis_rotation_matrix @ die_local_to_scene_rotation
die_rotation_rodrigues, die_rotation_rodrigues_jacobian = cv2.Rodrigues(die_combined_rotation)
#NB Return in 'pyrender' coordinates
approx_pose = Pose(True, die_rotation_rodrigues, bb_translation_pyrender_coords)
pose_result = PoseResult(approx_pose, {}, in_pyrender_coords = True)
pose_result.y_angle = y_angle_final
pose_result.y_angle_rel = y_angle_deg + y_rot_offset_deg
pose_result.x_angle = x_axis_rotation_deg
pose_results.append(pose_result)
return pose_results
else:
raise NotImplementedError("Cannot get approximate die pose if bounding box PNP was not found.")
def _match_points_with_point_cloud_registration(dot_centers_cv : np.ndarray, local_dots_to_project : np.ndarray, approximate_pose_result : PoseResult, camera_matrix : np.ndarray, distortion_coefficients : np.ndarray, matching_style = 'rigid_registration'):
"""Matches projected (from approximate pose result) with found dot center points, using Coherent Point Drift Algorithm registration between the 2D sets of points.
Returns lists of projected and found indices for matching points."""
#How to match (in 2D, in particular)? Affine transformations? etc? Match by (relative) dot-to-dot distances?
#See, for example, https://en.wikipedia.org/wiki/Point_set_registration, http://greatpanic.com/pntmatch.html, or Affine Consistency Check of Features in KLT Tracker http://cecas.clemson.edu/~stb/klt/
#Implementations can be found in https://pypi.org/project/pycpd/, ...
#Here we'll try to use RigidRegistration or AffineRegistration as an alternative to matching points directly by distance function
#While this type of approach can perform fairly well with only a rough approximate pose, we find that a multi-stage matching and pose estimation approach is more consistent.
local_dots_projected, local_dots_projected_jacobian = cv2.projectPoints(local_dots_to_project, approximate_pose_result.pose_cv.rotation_rodrigues, approximate_pose_result.pose_cv.translation, camera_matrix, distortion_coefficients)
local_dots_projected = np.squeeze(local_dots_projected)
registration_dictionary = {'X': dot_centers_cv, 'Y': local_dots_projected}
if matching_style == 'affine_registration':
registration = AffineRegistration(**registration_dictionary)
else:
registration = RigidRegistration(**registration_dictionary)
registration.register()
registration.transform_point_cloud()
dot_centers_transformed = registration.TY
match_cutoff_score = 0.9
#Registration.P has shape M, N where M is num projected, N num found (in dot_centers_cv)
projected_indices_assigned, found_indices_assigned = linear_sum_assignment(1 - registration.P)#Returns (row_indices, column_indices)
registration_match_score_mask = registration.P[projected_indices_assigned, found_indices_assigned] > match_cutoff_score
projected_indices, found_indices = projected_indices_assigned[registration_match_score_mask], found_indices_assigned[registration_match_score_mask]
additional_data = { 'dot_centers_transformed' : dot_centers_transformed, 'local_dots_projected' : local_dots_projected}
return projected_indices, found_indices, additional_data
def _get_die_pose_from_projected(bounding_box_cv : np.ndarray, dot_centers_cv : np.ndarray, local_dots_to_project : np.ndarray, approximate_pose_result : PoseResult, camera_matrix : np.ndarray, distortion_coefficients : np.ndarray) -> PoseResult:
"""Determines a die's pose that matches projection of local dots to found dot_centers_cv to solve Perspective-n-Point problem. Uses an approximate pose result as initial guess."""
#Note that we need a candidate pose to start performing matching, since are knows are model points (in 3D) and image-space dot locations (in 2D).
#Therefore, we must either project the model points onto the image to 2D (given a candidate pose), or reproject the dot locations back to 3D (again, given a candidate pose).
#Since the former might yield some model-space dots incorrectly omitted (since they face away from camera in candidate pose), the 3D matching problem gives more flexibility.
#However, without knowing the dot size (image, and model space) there's no way to determine each dot's z-distance from the camera, so we'd need a projective matching/registration algorithm, which would supercede doing something like PNP.
if approximate_pose_result.comparison_indices is not None and approximate_pose_result.projected_indices is not None:
projected_indices = approximate_pose_result.projected_indices
found_indices = approximate_pose_result.comparison_indices
additional_data = {}
#Note that poorer matches here will be handled with Ransac/outlier exclusion below.
else:
projected_indices, found_indices, additional_data = _match_points_with_point_cloud_registration(dot_centers_cv, local_dots_to_project, approximate_pose_result, camera_matrix, distortion_coefficients)
local_dots_for_pnp_masked = local_dots_to_project[projected_indices, :]
dot_centers_cv_masked = dot_centers_cv[found_indices, :]
extrinsic_rvec = approximate_pose_result.pose_cv.rotation_rodrigues.copy()
extrinsic_tvec = approximate_pose_result.pose_cv.translation.copy()
num_dots_min = len(found_indices)
inlier_distance = inlier_cutoff_px
perform_iterative = False
#NB It seems SolvePNP may not work for < 4 points, even in Iterative/useExtrinsicGuess case it claims to handle, and correspondingly the RANSAC version needs at least one more point to be meaningful.
if num_dots_min >= 5:
pnp_flags = cv2.SOLVEPNP_ITERATIVE
matched_pnp = cv2.solvePnPRansac(local_dots_for_pnp_masked, dot_centers_cv_masked, camera_matrix, distortion_coefficients, reprojectionError = inlier_distance, rvec = extrinsic_rvec, tvec = extrinsic_tvec, useExtrinsicGuess = True, flags = pnp_flags)
pose_cv = Pose.create_from_cv_results(matched_pnp)
if pose_cv:
pose_result = PoseResult(pose_cv, additional_data, in_pyrender_coords = False)
else:
perform_iterative = True
elif num_dots_min == 4:
four_dot_pnp_flags = cv2.SOLVEPNP_AP3P
four_dot_pnp = cv2.solvePnP(local_dots_for_pnp_masked, dot_centers_cv_masked, camera_matrix, distortion_coefficients, flags = four_dot_pnp_flags)
four_dot_pnp_pose_cv = Pose.create_from_cv_results(four_dot_pnp)
if four_dot_pnp_pose_cv:
pose_result = PoseResult(four_dot_pnp_pose_cv, additional_data, in_pyrender_coords = False)
pose_result.calculate_comparison(dot_centers_cv_masked, camera_matrix, distortion_coefficients)
pose_result_distances = pose_result.comparison_projected_distances[pose_result.comparison_indices, pose_result.projected_indices]
all_distances_inliers = np.all(pose_result_distances < inlier_distance)
perform_iterative = not all_distances_inliers
else:
perform_iterative = True
else:
perform_iterative = True
if perform_iterative:
iterative_pose_result = _get_die_pose_iterative(bounding_box_cv, dot_centers_cv, approximate_pose_result, camera_matrix, distortion_coefficients, get_reprojection_error_sum_assignment_arctan)
pose_result = iterative_pose_result
return pose_result
def _get_die_pose_from_visible_dots(bounding_box_cv : np.ndarray, dot_centers_cv : np.ndarray, approximate_pose_result : PoseResult, camera_matrix : np.ndarray, distortion_coefficients : np.ndarray) -> PoseResult:
"""Gets the die's pose from projection of visible dots matched to found dot_centers_cv, given initial approximate pose result."""
local_dots_facing_pyrender = get_local_dots_facing_camera_from_eye_space_pose(approximate_pose_result.pose_pyrender)
local_dots_to_project = local_dots_facing_pyrender
return _get_die_pose_from_projected(bounding_box_cv, dot_centers_cv, local_dots_to_project, approximate_pose_result, camera_matrix, distortion_coefficients)
def _get_local_dots_projected(trial_pose_cv : Pose, camera_matrix : np.ndarray, distortion_coefficients : np.ndarray):
"""Gets the projected visible dot positions of a die given its pose and camera information."""
local_dots_facing_pyrender = get_local_dots_facing_camera_from_eye_space_pose(trial_pose_cv.get_converted_between_cv_and_pyrender_coords())
local_dots_to_project = local_dots_facing_pyrender
local_dots_projected, local_dots_projected_jacobian = cv2.projectPoints(local_dots_to_project, trial_pose_cv.rotation_rodrigues, trial_pose_cv.translation, camera_matrix, distortion_coefficients)
local_dots_projected = np.squeeze(local_dots_projected, axis = 1)
return local_dots_projected
def get_reprojection_error_sum_assignment_arctan(bounding_box_cv : np.ndarray, dot_centers_cv : np.ndarray, trial_pose_cv : Pose, camera_matrix : np.ndarray, distortion_coefficients : np.ndarray):
"""Gets a reprojection error based on the sum of arctan distances between projected and found points, matched using linear sum assignment."""
local_dots_projected = _get_local_dots_projected(trial_pose_cv, camera_matrix, distortion_coefficients)
comparison_projected_distances = cdist(dot_centers_cv, local_dots_projected)#MxN matrix with M comparison_points and N projected points
distance_scale = distance_scale_px
arctan_scores = distance_scale * np.arctan((comparison_projected_distances/distance_scale)**2)
comparison_indices, projected_indices = linear_sum_assignment(arctan_scores)#Returns (row_indices, column_indices)
matched_scores = arctan_scores[comparison_indices, projected_indices]
matched_scores_rms = np.sqrt(np.mean(np.square(matched_scores)))
projected_dots_distance_outside_dice_bb = np.maximum(np.maximum(0, local_dots_projected - bounding_box_cv[2:4]), np.maximum(0, bounding_box_cv[0:2] - local_dots_projected))
max_distance_outside_dice_bb = np.max(projected_dots_distance_outside_dice_bb)
final_score = matched_scores_rms + max_distance_outside_dice_bb
return final_score
def _get_die_pose_iterative(bounding_box_cv : np.ndarray, dot_centers_cv : np.ndarray, approximate_pose_result : PoseResult, camera_matrix : np.ndarray, distortion_coefficients : np.ndarray, reprojection_error_function) -> PoseResult:
"""Gets a die pose iteratively given a reprojection error function, initial pose estimate, camera information, bounding box and found dot positions dot_centers_cv."""
def get_reprojection_error(trial_pose_rodrigues_translation_cv):
trial_pose_rodrigues_cv = trial_pose_rodrigues_translation_cv[0:3]
trial_pose_translation_cv = trial_pose_rodrigues_translation_cv[3:]
trial_pose_cv = Pose(True, trial_pose_rodrigues_cv, trial_pose_translation_cv)
reproj_error = reprojection_error_function(bounding_box_cv, dot_centers_cv, trial_pose_cv, camera_matrix, distortion_coefficients)
return reproj_error
from scipy.optimize import minimize
initial_guess = approximate_pose_result.pose_cv.as_numpy_array()
minimization_results = minimize(get_reprojection_error, initial_guess, method = 'Nelder-Mead')#NB Nelder-Mead may not be the most efficient method, but a gradient-free method seems best to handle this particular cost function.
minimized_pose = Pose.create_from_numpy_array(minimization_results.x)
return PoseResult(minimized_pose, {}, in_pyrender_coords = False)
def _convert_tensorflow_points_to_opencv(tensor, transpose = False):
"""Converts a tensor of points to an OpenCV-coordinate numpy array."""
tensor_points = tensor.numpy().astype(np.float32)
if transpose:
tensor_points = tensor_points.T
for j in range(tensor_points.shape[1] // 2):
        i = j * 2
tensor_points[:, [i, i+1]] = tensor_points[:, [i + 1, i]]#Since TF returns y (row) coordinates first, we need to switch x and y for use with OpenCV
return tensor_points
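# Hedged illustration (not part of the original module): the column swap above on a
# concrete point. TensorFlow-style detections come back (y, x); OpenCV expects (x, y).
def _demo_yx_to_xy():
    pts_yx = np.array([[2.0, 5.0]], dtype = np.float32)  # one point: y=2 (row), x=5 (column)
    return pts_yx[:, [1, 0]]                            # -> [[5.0, 2.0]] for OpenCV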
def _get_die_image_bounding_box_pose(bounding_box, camera_matrix : np.ndarray, distortion_coefficients : np.ndarray) -> PoseResult:
"""Get an approximate pose (particularly for translation) by solving PNP problem of the corners of an image-space bounding box."""
box_size = tf.gather(bounding_box, [2, 3], axis = -1) - tf.gather(bounding_box, [0, 1], axis = -1)
max_box_dimension = tf.math.reduce_max(tf.math.abs(box_size))
dice_local_bb_min_max_abs = tf.math.abs(DiceConfig.get_local_bounding_box_min_max())
dice_local_bb_extent = tf.math.reduce_max(dice_local_bb_min_max_abs)
dice_local_bb_extent = tf.cast(dice_local_bb_extent, tf.float32)
#NB Since distance between dots on face 2 is actually slightly greater than to neighbouring dots on other faces, we can't simply cluster based on dot-to-dot distance within each face.
quad_scaling_factor = 1.2#Fitting a slightly larger quad than the dice extent will tend to give more accurate distance results when fitting a quad to the image bounding box.
quad_with_dice_size = (np.array([[-1, -1, 0],[-1, 1, 0], [1, -1, 0], [1, 1, 0]]) * quad_scaling_factor * dice_local_bb_extent.numpy()).astype(np.float32)
bounding_box_corners = tf.stack([tf.gather(bounding_box, [0, 1], axis = -1), tf.gather(bounding_box, [2, 1], axis = -1), tf.gather(bounding_box, [0, 3], axis = -1), tf.gather(bounding_box, [2, 3], axis = -1)], axis = -1)
bounding_box_points = _convert_tensorflow_points_to_opencv(bounding_box_corners, transpose = True)
quad_pnp_results = cv2.solvePnP(quad_with_dice_size, bounding_box_points, camera_matrix, distortion_coefficients)
quad_pnp_pose_cv = Pose.create_from_cv_results(quad_pnp_results)
quad_pnp_pose_results = PoseResult(quad_pnp_pose_cv, {}, in_pyrender_coords = False)
return quad_pnp_pose_results
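# Hedged illustration (not part of the original module): the PnP step above on
# made-up data -- four coplanar object-space corners and their image projections
# recover a rotation/translation. Intrinsics (fx=fy=800, cx=320, cy=240) are assumed.
def _demo_quad_pnp():
    object_quad = np.array([[-1, -1, 0], [-1, 1, 0], [1, -1, 0], [1, 1, 0]], dtype = np.float32)
    image_quad = np.array([[300, 220], [300, 260], [340, 220], [340, 260]], dtype = np.float32)
    camera_matrix = np.array([[800.0, 0.0, 320.0], [0.0, 800.0, 240.0], [0.0, 0.0, 1.0]])
    results = cv2.solvePnP(object_quad, image_quad, camera_matrix, None)  # no distortion
    return Pose.create_from_cv_results(results)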
def _get_die_pose(bounding_box, die_class, die_y_angle, dot_centers_cv : np.ndarray, bounding_box_pose_result : PoseResult, approximate_up_vector_pyrender : np.ndarray, camera_matrix : np.ndarray, distortion_coefficients : np.ndarray) -> PoseResult:
"""Gets the die pose, given its bounding box, class, estimated rotation angle around vertical axis, found dot center points, approximate pose result and up vector, and camera information."""
die_class_np = die_class.numpy()
die_y_angle_deg_np = np.rad2deg(die_y_angle.numpy())
#TODO Handle case of tilted camera (around its forward axis). Up vector could be approximated from plane estimate (if >= 3 dice are found). This would affect the order in which dice bounding box are processed (in order of which die are likely to be in front of others).
x_rotation_from_up_vector_deg = np.rad2deg(np.arctan2(approximate_up_vector_pyrender[2], approximate_up_vector_pyrender[1]))
potential_die_pose_approx_results = _get_approximate_die_pose(die_class_np, die_y_angle_deg_np, bounding_box_pose_result, x_axis_rotations_deg = np.array([0, 7, -7, 15, -15, 30, -30, 45, -45]) + x_rotation_from_up_vector_deg, y_rot_offsets_deg = [0, 10, -10, 20, -20, 45, -45, 60, -60])
#NB OpenCV coords are x right, y down, z forward. pyrender's are x right, y up, z backwards. Both are right-handed but a 180-degree rotation around the x-axis different.
bounding_box_cv = bounding_box.numpy()
bounding_box_cv = bounding_box_cv[[1,0,3,2]]
bounding_box_cv_size = bounding_box_cv[2:] - bounding_box_cv[0:2]
for potential_results in potential_die_pose_approx_results:
potential_results.calculate_comparison(dot_centers_cv, camera_matrix, distortion_coefficients)
max_num_correlations = max([len(r.matched_scores) for r in potential_die_pose_approx_results])
def get_distance_score_with_missing_residuals(pr : PoseResult, num_correlations):
distance_scores = pr.matched_scores
missing_residual_score = pr.assignment_score_function(np.max(bounding_box_cv_size) * 0.5)
return np.sum(distance_scores) + (num_correlations - len(distance_scores)) * missing_residual_score
approx_pose_results_per_projected_indices_set = defaultdict(list)
for potential_results in potential_die_pose_approx_results:
potential_results.distance_score_with_missing_residuals = get_distance_score_with_missing_residuals(potential_results, max_num_correlations)
potential_results.projected_indices_set = frozenset(potential_results.projected_indices)
approx_pose_results_per_projected_indices_set[potential_results.projected_indices_set].append(potential_results)
best_approx_pose_result_per_projected_indices_set = { indices : min(corresponding_pose_results, key = lambda r : r.distance_score_with_missing_residuals) for (indices, corresponding_pose_results) in approx_pose_results_per_projected_indices_set.items() }
best_approx_pose_result = min(best_approx_pose_result_per_projected_indices_set.values(), key = lambda r : r.distance_score_with_missing_residuals)
distance_score_cutoff = best_approx_pose_result.distance_score_with_missing_residuals * approx_distance_score_cutoff_fraction
visible_fit_pose_result_per_projected_indices_set = { indices : _get_die_pose_from_visible_dots(bounding_box_cv, dot_centers_cv, pose_result, camera_matrix, distortion_coefficients) for (indices, pose_result) in best_approx_pose_result_per_projected_indices_set.items() if pose_result.distance_score_with_missing_residuals < distance_score_cutoff}
potential_visible_fit_pose_results = list(visible_fit_pose_result_per_projected_indices_set.values())
    if len(potential_visible_fit_pose_results) > 0:
for pr in potential_visible_fit_pose_results:
pr.calculate_comparison(dot_centers_cv, camera_matrix, distortion_coefficients)
max_num_correlations_vis = max([len(r.matched_scores) for r in potential_visible_fit_pose_results])
for pr in potential_visible_fit_pose_results:
pr.assignment_inlier_cutoff = pr.assignment_score_function(inlier_cutoff_px)
inlier_matched_scores = pr.matched_scores[pr.matched_scores < pr.assignment_inlier_cutoff]
pr.inlier_matched_scores_rms = np.sqrt(np.mean(np.square(inlier_matched_scores)))
            pr.distance_score_with_missing_residuals = get_distance_score_with_missing_residuals(pr, max_num_correlations_vis)
visible_fit_pose_results = min(potential_visible_fit_pose_results, key = lambda r : r.inlier_matched_scores_rms)
visible_fit_pose_results.additional_data['approx_pose_result'] = best_approx_pose_result
else:
visible_fit_pose_results = copy.deepcopy(bounding_box_pose_result)
visible_fit_pose_results.additional_data['approx_pose_result'] = best_approx_pose_result
return visible_fit_pose_results
def _get_normal_up_vector(points_roughly_planar : np.ndarray) -> np.ndarray:
"""
Estimates the normal upward-facing vector given a set of points roughly defining a horizontal plane
points_roughly_planar is a 3xN matrix of points roughly defining a plane.
The plane's upward-facing normal will be returned, or a vector most upwardly-pointing in the case of two points.
"""
normal_up = None
if points_roughly_planar.shape[1] < 2:
normal_up = np.array([0, 1, 0])
elif points_roughly_planar.shape[1] == 2:
difference = points_roughly_planar[:, 1] - points_roughly_planar[:, 0]
direction = difference / np.linalg.norm(difference)
up = np.array([0, 1, 0])
plane_other_direction = np.cross(direction, up)
normal = np.cross(direction, plane_other_direction)
normal_up = normal * np.sign(normal[1])
else:
mean = points_roughly_planar.mean(axis=1)
point_differences = points_roughly_planar - mean[:,np.newaxis]
covariance = np.cov(point_differences)
svd = np.linalg.svd(covariance)
normal = svd[0][:,-1]
normal_up = normal * np.sign(normal[1])
return normal_up
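# Hedged illustration (not part of the original module): the SVD branch above
# recovers the plane normal as the left-singular direction with the smallest
# singular value. These made-up points lie in the y=1 plane, so it returns ~[0, 1, 0].
def _demo_plane_normal():
    points = np.array([[0.0, 1.0, 2.0, 3.0],   # x
                       [1.0, 1.0, 1.0, 1.0],  # y (constant height -> horizontal plane)
                       [0.0, 2.0, 1.0, 3.0]]) # z
    return _get_normal_up_vector(points)       # ~array([0., 1., 0.])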
def _get_approximate_dice_up_vector(bounding_box_pose_results : List[PoseResult], in_pyrender_coords : bool) -> np.ndarray:
"""Gets an approximate up vector for the die, given approximate pose translations, assuming all die are lying flat on the same plane, thereby pointing upwards."""
if len(bounding_box_pose_results) > 0:
if in_pyrender_coords:
dice_translations = np.hstack([pr.pose_pyrender.translation for pr in bounding_box_pose_results])
else:
dice_translations = np.hstack([pr.pose_cv.translation for pr in bounding_box_pose_results])
else:
        dice_translations = np.zeros((3, 0))
#3xN matrix of points at center of die
approx_up = _get_normal_up_vector(dice_translations)
return approx_up
def get_dice_pose_results(bounding_boxes, classes, scores, y_rotation_angles, camera_matrix : np.ndarray, distortion_coefficients : np.ndarray, score_threshold : float = 0.5):
"""Estimates pose results for all die, given estimates for bounding box, die (top face) classes, scores and threshold, rotation angles around vertical axes, and camera information."""
scores_in_threshold = tf.math.greater(scores, score_threshold)
classes_in_score = tf.boolean_mask(classes, scores_in_threshold)
boxes_in_scores = tf.boolean_mask(bounding_boxes, scores_in_threshold)
y_angles_in_scores = tf.boolean_mask(y_rotation_angles, scores_in_threshold)
classes_are_dots = tf.equal(classes_in_score, 0)
classes_are_dice = tf.logical_not(classes_are_dots)
dice_bounding_boxes = tf.boolean_mask(boxes_in_scores, classes_are_dice)
dice_y_angles = tf.boolean_mask(y_angles_in_scores, classes_are_dice)
dice_classes = tf.boolean_mask(classes_in_score, classes_are_dice)
dot_bounding_boxes = tf.boolean_mask(boxes_in_scores, classes_are_dots)
dot_centers = _get_dot_centers(dot_bounding_boxes)
dot_sizes = _get_dot_sizes(dot_bounding_boxes)
#NB Largest box[2] is the box lower bound
dice_bb_lower_y = dice_bounding_boxes[:,2]
dice_indices = tf.argsort(dice_bb_lower_y, axis = -1, direction='DESCENDING')
def get_area(bb):
return tf.math.maximum(bb[:, 3] - bb[:, 1], 0) * tf.math.maximum(bb[:, 2] - bb[:, 0], 0)
dice_indices_np = dice_indices.numpy()
bounding_box_pose_results = [_get_die_image_bounding_box_pose(dice_bounding_boxes[index, :], camera_matrix, distortion_coefficients) for index in dice_indices_np]
approximate_dice_up_vector_pyrender = _get_approximate_dice_up_vector(bounding_box_pose_results, in_pyrender_coords=True)
pose_results = []
for index, bounding_box_pose_result in zip(dice_indices_np, bounding_box_pose_results):
die_box = dice_bounding_boxes[index, :]
die_y_angle = dice_y_angles[index]
die_class = dice_classes[index]
die_box_size = (-die_box[0:2] + die_box[2:4])
dot_centers_fraction_of_die_box = (dot_centers - die_box[0:2]) / die_box_size
dot_centers_rounded_rectangle_distance = tf.norm(tf.math.maximum(tf.math.abs(dot_centers_fraction_of_die_box - 0.5) - 0.5 + rounded_rectangle_radius,0.0), axis = -1) - rounded_rectangle_radius
dots_are_in_rounded_rectangle = dot_centers_rounded_rectangle_distance < 0
dot_bb_intersection_left = tf.math.maximum(dot_bounding_boxes[:, 1], die_box[1])
dot_bb_intersection_right = tf.math.minimum(dot_bounding_boxes[:, 3], die_box[3])
dot_bb_intersection_top = tf.math.maximum(dot_bounding_boxes[:, 0], die_box[0])
dot_bb_intersection_bottom = tf.math.minimum(dot_bounding_boxes[:, 2], die_box[2])
dot_bb_intersection = tf.stack([dot_bb_intersection_top, dot_bb_intersection_left, dot_bb_intersection_bottom, dot_bb_intersection_right], axis = 1)
dot_bb_intersection_area = get_area(dot_bb_intersection)
dot_bb_area = get_area(dot_bounding_boxes)
dot_bb_intersection_over_area = dot_bb_intersection_area / dot_bb_area
dots_have_sufficient_bb_intersection_over_area = tf.greater(dot_bb_intersection_over_area, 0.9)
dots_are_in_box = tf.logical_and(dots_have_sufficient_bb_intersection_over_area, dots_are_in_rounded_rectangle)
dot_centers_in_box = tf.boolean_mask(dot_centers, dots_are_in_box)
dot_centers_cv = _convert_tensorflow_points_to_opencv(dot_centers_in_box)
die_pose_result = _get_die_pose(die_box, die_class, die_y_angle, dot_centers_cv, bounding_box_pose_result, approximate_dice_up_vector_pyrender, camera_matrix, distortion_coefficients)
die_pose_result.calculate_comparison(dot_centers_cv, camera_matrix, distortion_coefficients)
die_pose_result.calculate_inliers(_convert_tensorflow_points_to_opencv(dot_sizes))
pose_results.append(die_pose_result)
indices_in_box = tf.where(dots_are_in_box)
inlier_indices_in_box = tf.gather(indices_in_box, die_pose_result.comparison_inlier_indices)
dot_centers = _delete_tf(dot_centers, inlier_indices_in_box)
dot_sizes = _delete_tf(dot_sizes, inlier_indices_in_box)
dot_bounding_boxes = _delete_tf(dot_bounding_boxes, inlier_indices_in_box)
return pose_results
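# Hedged usage sketch (not part of the original module): how the pipeline above
# would typically be driven. The detector outputs and the pinhole intrinsics below
# are placeholders (fx=fy=800, cx=320, cy=240 are assumptions for illustration only).
def _demo_get_dice_pose_results(boxes, classes, scores, y_angles):
    camera_matrix = np.array([[800.0, 0.0, 320.0],
                              [0.0, 800.0, 240.0],
                              [0.0, 0.0, 1.0]])
    distortion_coefficients = np.zeros(5)  # assume an undistorted image
    return get_dice_pose_results(boxes, classes, scores, y_angles,
                                  camera_matrix, distortion_coefficients,
                                  score_threshold = 0.5)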
|
[
"numpy.maximum",
"numpy.arctan2",
"numpy.abs",
"numpy.sum",
"cv2.solvePnP",
"collections.defaultdict",
"numpy.linalg.svd",
"numpy.linalg.norm",
"tensorflow.compat.v2.expand_dims",
"DiceConfig.get_local_face_forward",
"tensorflow.compat.v2.math.greater",
"DiceProjection.get_local_dots_facing_camera_from_eye_space_pose",
"scipy.optimize.minimize",
"tensorflow.compat.v2.math.abs",
"tensorflow.compat.v2.math.maximum",
"pycpd.AffineRegistration",
"DiceConfig.get_local_bounding_box_min_max",
"tensorflow.compat.v2.shape",
"cv2.solvePnPRansac",
"numpy.max",
"tensorflow.compat.v2.boolean_mask",
"DiceConfig.get_local_face_normals",
"CoordsHelper.Pose.create_from_cv_results",
"CoordsHelper.Pose",
"numpy.cov",
"numpy.log1p",
"scipy.spatial.distance.cdist",
"tensorflow.compat.v2.math.minimum",
"copy.deepcopy",
"tensorflow.compat.v2.argsort",
"tensorflow.compat.v2.where",
"tensorflow.compat.v2.stack",
"tensorflow.compat.v2.gather",
"numpy.square",
"numpy.cross",
"cv2.projectPoints",
"tensorflow.compat.v2.math.reduce_max",
"tensorflow.compat.v2.cast",
"numpy.hstack",
"numpy.min",
"cv2.Rodrigues",
"numpy.squeeze",
"numpy.arctan",
"tensorflow.compat.v2.equal",
"tensorflow.compat.v2.greater",
"tensorflow.compat.v2.reduce_mean",
"numpy.all",
"scipy.optimize.linear_sum_assignment",
"tensorflow.compat.v2.logical_and",
"CoordsHelper.Pose.create_from_numpy_array",
"pycpd.RigidRegistration",
"numpy.deg2rad",
"numpy.zeros",
"tensorflow.compat.v2.logical_not",
"numpy.array",
"tensorflow.compat.v2.ones_like",
"numpy.sign",
"numpy.eye",
"scipy.spatial.transform.Rotation.from_euler",
"numpy.sqrt"
] |
[((7780, 7812), 'tensorflow.compat.v2.ones_like', 'tf.ones_like', (['idx'], {'dtype': 'tf.bool'}), '(idx, dtype=tf.bool)\n', (7792, 7812), True, 'import tensorflow.compat.v2 as tf\n'), ((7879, 7916), 'tensorflow.compat.v2.boolean_mask', 'tf.boolean_mask', (['tensor', 'm'], {'axis': 'axis'}), '(tensor, m, axis=axis)\n', (7894, 7916), True, 'import tensorflow.compat.v2 as tf\n'), ((8099, 8149), 'numpy.max', 'np.max', (['bounding_box_cv_1[0]', 'bounding_box_cv_2[0]'], {}), '(bounding_box_cv_1[0], bounding_box_cv_2[0])\n', (8105, 8149), True, 'import numpy as np\n'), ((8162, 8212), 'numpy.max', 'np.max', (['bounding_box_cv_1[1]', 'bounding_box_cv_2[1]'], {}), '(bounding_box_cv_1[1], bounding_box_cv_2[1])\n', (8168, 8212), True, 'import numpy as np\n'), ((8225, 8275), 'numpy.min', 'np.min', (['bounding_box_cv_1[2]', 'bounding_box_cv_2[2]'], {}), '(bounding_box_cv_1[2], bounding_box_cv_2[2])\n', (8231, 8275), True, 'import numpy as np\n'), ((8288, 8338), 'numpy.min', 'np.min', (['bounding_box_cv_1[3]', 'bounding_box_cv_2[3]'], {}), '(bounding_box_cv_1[3], bounding_box_cv_2[3])\n', (8294, 8338), True, 'import numpy as np\n'), ((8358, 8396), 'numpy.array', 'np.array', (['[x_min, y_min, x_max, y_max]'], {}), '([x_min, y_min, x_max, y_max])\n', (8366, 8396), True, 'import numpy as np\n'), ((9412, 9458), 'tensorflow.compat.v2.gather', 'tf.gather', (['dot_bounding_boxes', '[0, 2]'], {'axis': '(-1)'}), '(dot_bounding_boxes, [0, 2], axis=-1)\n', (9421, 9458), True, 'import tensorflow.compat.v2 as tf\n'), ((9474, 9520), 'tensorflow.compat.v2.gather', 'tf.gather', (['dot_bounding_boxes', '[1, 3]'], {'axis': '(-1)'}), '(dot_bounding_boxes, [1, 3], axis=-1)\n', (9483, 9520), True, 'import tensorflow.compat.v2 as tf\n'), ((9544, 9575), 'tensorflow.compat.v2.reduce_mean', 'tf.reduce_mean', (['axis_0'], {'axis': '(-1)'}), '(axis_0, axis=-1)\n', (9558, 9575), True, 'import tensorflow.compat.v2 as tf\n'), ((9599, 9630), 'tensorflow.compat.v2.reduce_mean', 'tf.reduce_mean', (['axis_1'], {'axis': '(-1)'}), '(axis_1, axis=-1)\n', (9613, 9630), True, 'import tensorflow.compat.v2 as tf\n'), ((9647, 9698), 'tensorflow.compat.v2.stack', 'tf.stack', (['[axis_0_average, axis_1_average]'], {'axis': '(-1)'}), '([axis_0_average, axis_1_average], axis=-1)\n', (9655, 9698), True, 'import tensorflow.compat.v2 as tf\n'), ((9954, 9989), 'tensorflow.compat.v2.stack', 'tf.stack', (['[axis_0, axis_1]'], {'axis': '(-1)'}), '([axis_0, axis_1], axis=-1)\n', (9962, 9989), True, 'import tensorflow.compat.v2 as tf\n'), ((10192, 10227), 'DiceConfig.get_local_face_normals', 'DiceConfig.get_local_face_normals', ([], {}), '()\n', (10225, 10227), False, 'import DiceConfig\n'), ((10257, 10301), 'DiceConfig.get_local_face_forward', 'DiceConfig.get_local_face_forward', (['die_class'], {}), '(die_class)\n', (10290, 10301), False, 'import DiceConfig\n'), ((10394, 10445), 'numpy.cross', 'np.cross', (['die_local_up_axis', 'die_local_forward_axis'], {}), '(die_local_up_axis, die_local_forward_axis)\n', (10402, 10445), True, 'import numpy as np\n'), ((14459, 14645), 'cv2.projectPoints', 'cv2.projectPoints', (['local_dots_to_project', 'approximate_pose_result.pose_cv.rotation_rodrigues', 'approximate_pose_result.pose_cv.translation', 'camera_matrix', 'distortion_coefficients'], {}), '(local_dots_to_project, approximate_pose_result.pose_cv.\n rotation_rodrigues, approximate_pose_result.pose_cv.translation,\n camera_matrix, distortion_coefficients)\n', (14476, 14645), False, 'import cv2\n'), ((14664, 14696), 'numpy.squeeze', 'np.squeeze', 
(['local_dots_projected'], {}), '(local_dots_projected)\n', (14674, 14696), True, 'import numpy as np\n'), ((15281, 15322), 'scipy.optimize.linear_sum_assignment', 'linear_sum_assignment', (['(1 - registration.P)'], {}), '(1 - registration.P)\n', (15302, 15322), False, 'from scipy.optimize import linear_sum_assignment\n'), ((20358, 20450), 'DiceProjection.get_local_dots_facing_camera_from_eye_space_pose', 'get_local_dots_facing_camera_from_eye_space_pose', (['approximate_pose_result.pose_pyrender'], {}), '(approximate_pose_result.\n pose_pyrender)\n', (20406, 20450), False, 'from DiceProjection import get_local_dots_facing_camera_from_eye_space_pose\n'), ((21139, 21284), 'cv2.projectPoints', 'cv2.projectPoints', (['local_dots_to_project', 'trial_pose_cv.rotation_rodrigues', 'trial_pose_cv.translation', 'camera_matrix', 'distortion_coefficients'], {}), '(local_dots_to_project, trial_pose_cv.rotation_rodrigues,\n trial_pose_cv.translation, camera_matrix, distortion_coefficients)\n', (21156, 21284), False, 'import cv2\n'), ((21308, 21348), 'numpy.squeeze', 'np.squeeze', (['local_dots_projected'], {'axis': '(1)'}), '(local_dots_projected, axis=1)\n', (21318, 21348), True, 'import numpy as np\n'), ((21872, 21915), 'scipy.spatial.distance.cdist', 'cdist', (['dot_centers_cv', 'local_dots_projected'], {}), '(dot_centers_cv, local_dots_projected)\n', (21877, 21915), False, 'from scipy.spatial.distance import cdist\n'), ((22166, 22202), 'scipy.optimize.linear_sum_assignment', 'linear_sum_assignment', (['arctan_scores'], {}), '(arctan_scores)\n', (22187, 22202), False, 'from scipy.optimize import linear_sum_assignment\n'), ((22597, 22644), 'numpy.max', 'np.max', (['projected_dots_distance_outside_dice_bb'], {}), '(projected_dots_distance_outside_dice_bb)\n', (22603, 22644), True, 'import numpy as np\n'), ((23755, 23824), 'scipy.optimize.minimize', 'minimize', (['get_reprojection_error', 'initial_guess'], {'method': '"""Nelder-Mead"""'}), "(get_reprojection_error, initial_guess, method='Nelder-Mead')\n", (23763, 23824), False, 'from scipy.optimize import minimize\n'), ((23980, 24032), 'CoordsHelper.Pose.create_from_numpy_array', 'Pose.create_from_numpy_array', (['minimization_results.x'], {}), '(minimization_results.x)\n', (24008, 24032), False, 'from CoordsHelper import Pose\n'), ((25159, 25204), 'tensorflow.compat.v2.math.reduce_max', 'tf.math.reduce_max', (['dice_local_bb_min_max_abs'], {}), '(dice_local_bb_min_max_abs)\n', (25177, 25204), True, 'import tensorflow.compat.v2 as tf\n'), ((25232, 25273), 'tensorflow.compat.v2.cast', 'tf.cast', (['dice_local_bb_extent', 'tf.float32'], {}), '(dice_local_bb_extent, tf.float32)\n', (25239, 25273), True, 'import tensorflow.compat.v2 as tf\n'), ((26151, 26249), 'cv2.solvePnP', 'cv2.solvePnP', (['quad_with_dice_size', 'bounding_box_points', 'camera_matrix', 'distortion_coefficients'], {}), '(quad_with_dice_size, bounding_box_points, camera_matrix,\n distortion_coefficients)\n', (26163, 26249), False, 'import cv2\n'), ((26269, 26314), 'CoordsHelper.Pose.create_from_cv_results', 'Pose.create_from_cv_results', (['quad_pnp_results'], {}), '(quad_pnp_results)\n', (26296, 26314), False, 'from CoordsHelper import Pose\n'), ((28663, 28680), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (28674, 28680), False, 'from collections import defaultdict\n'), ((33696, 33736), 'tensorflow.compat.v2.math.greater', 'tf.math.greater', (['scores', 'score_threshold'], {}), '(scores, score_threshold)\n', (33711, 33736), True, 'import tensorflow.compat.v2 as 
tf\n'), ((33760, 33805), 'tensorflow.compat.v2.boolean_mask', 'tf.boolean_mask', (['classes', 'scores_in_threshold'], {}), '(classes, scores_in_threshold)\n', (33775, 33805), True, 'import tensorflow.compat.v2 as tf\n'), ((33828, 33880), 'tensorflow.compat.v2.boolean_mask', 'tf.boolean_mask', (['bounding_boxes', 'scores_in_threshold'], {}), '(bounding_boxes, scores_in_threshold)\n', (33843, 33880), True, 'import tensorflow.compat.v2 as tf\n'), ((33906, 33961), 'tensorflow.compat.v2.boolean_mask', 'tf.boolean_mask', (['y_rotation_angles', 'scores_in_threshold'], {}), '(y_rotation_angles, scores_in_threshold)\n', (33921, 33961), True, 'import tensorflow.compat.v2 as tf\n'), ((33986, 34015), 'tensorflow.compat.v2.equal', 'tf.equal', (['classes_in_score', '(0)'], {}), '(classes_in_score, 0)\n', (33994, 34015), True, 'import tensorflow.compat.v2 as tf\n'), ((34039, 34071), 'tensorflow.compat.v2.logical_not', 'tf.logical_not', (['classes_are_dots'], {}), '(classes_are_dots)\n', (34053, 34071), True, 'import tensorflow.compat.v2 as tf\n'), ((34098, 34148), 'tensorflow.compat.v2.boolean_mask', 'tf.boolean_mask', (['boxes_in_scores', 'classes_are_dice'], {}), '(boxes_in_scores, classes_are_dice)\n', (34113, 34148), True, 'import tensorflow.compat.v2 as tf\n'), ((34169, 34222), 'tensorflow.compat.v2.boolean_mask', 'tf.boolean_mask', (['y_angles_in_scores', 'classes_are_dice'], {}), '(y_angles_in_scores, classes_are_dice)\n', (34184, 34222), True, 'import tensorflow.compat.v2 as tf\n'), ((34242, 34293), 'tensorflow.compat.v2.boolean_mask', 'tf.boolean_mask', (['classes_in_score', 'classes_are_dice'], {}), '(classes_in_score, classes_are_dice)\n', (34257, 34293), True, 'import tensorflow.compat.v2 as tf\n'), ((34319, 34369), 'tensorflow.compat.v2.boolean_mask', 'tf.boolean_mask', (['boxes_in_scores', 'classes_are_dots'], {}), '(boxes_in_scores, classes_are_dots)\n', (34334, 34369), True, 'import tensorflow.compat.v2 as tf\n'), ((34591, 34651), 'tensorflow.compat.v2.argsort', 'tf.argsort', (['dice_bb_lower_y'], {'axis': '(-1)', 'direction': '"""DESCENDING"""'}), "(dice_bb_lower_y, axis=-1, direction='DESCENDING')\n", (34601, 34651), True, 'import tensorflow.compat.v2 as tf\n'), ((4747, 4815), 'DiceProjection.get_local_dots_facing_camera_from_eye_space_pose', 'get_local_dots_facing_camera_from_eye_space_pose', (['self.pose_pyrender'], {}), '(self.pose_pyrender)\n', (4795, 4815), False, 'from DiceProjection import get_local_dots_facing_camera_from_eye_space_pose\n'), ((4860, 5051), 'cv2.projectPoints', 'cv2.projectPoints', (['local_dots_visible_in_eye_space', 'self.pose_cv.rotation_rodrigues', 'self.pose_cv.translation', 'self.comparison_camera_matrix', 'self.comparison_distortion_coefficients'], {}), '(local_dots_visible_in_eye_space, self.pose_cv.\n rotation_rodrigues, self.pose_cv.translation, self.\n comparison_camera_matrix, self.comparison_distortion_coefficients)\n', (4877, 5051), False, 'import cv2\n'), ((5074, 5105), 'numpy.squeeze', 'np.squeeze', (['pose_points'], {'axis': '(1)'}), '(pose_points, axis=1)\n', (5084, 5105), True, 'import numpy as np\n'), ((5485, 5540), 'scipy.spatial.distance.cdist', 'cdist', (['self.comparison_points_cv', 'self.projected_points'], {}), '(self.comparison_points_cv, self.projected_points)\n', (5490, 5540), False, 'from scipy.spatial.distance import cdist\n'), ((6193, 6238), 'scipy.optimize.linear_sum_assignment', 'linear_sum_assignment', (['self.assignment_scores'], {}), '(self.assignment_scores)\n', (6214, 6238), False, 'from scipy.optimize import 
linear_sum_assignment\n'), ((7498, 7547), 'tensorflow.compat.v2.boolean_mask', 'tf.boolean_mask', (['self.comparison_indices', 'inliers'], {}), '(self.comparison_indices, inliers)\n', (7513, 7547), True, 'import tensorflow.compat.v2 as tf\n'), ((7588, 7636), 'tensorflow.compat.v2.boolean_mask', 'tf.boolean_mask', (['self.projected_indices', 'inliers'], {}), '(self.projected_indices, inliers)\n', (7603, 7636), True, 'import tensorflow.compat.v2 as tf\n'), ((7749, 7765), 'tensorflow.compat.v2.shape', 'tf.shape', (['tensor'], {}), '(tensor)\n', (7757, 7765), True, 'import tensorflow.compat.v2 as tf\n'), ((8684, 8702), 'numpy.max', 'np.max', (['width', '(0.0)'], {}), '(width, 0.0)\n', (8690, 8702), True, 'import numpy as np\n'), ((8720, 8739), 'numpy.max', 'np.max', (['height', '(0.0)'], {}), '(height, 0.0)\n', (8726, 8739), True, 'import numpy as np\n'), ((14852, 14897), 'pycpd.AffineRegistration', 'AffineRegistration', ([], {}), '(**registration_dictionary)\n', (14870, 14897), False, 'from pycpd import AffineRegistration, RigidRegistration\n'), ((14931, 14975), 'pycpd.RigidRegistration', 'RigidRegistration', ([], {}), '(**registration_dictionary)\n', (14948, 14975), False, 'from pycpd import AffineRegistration, RigidRegistration\n'), ((18294, 18533), 'cv2.solvePnPRansac', 'cv2.solvePnPRansac', (['local_dots_for_pnp_masked', 'dot_centers_cv_masked', 'camera_matrix', 'distortion_coefficients'], {'reprojectionError': 'inlier_distance', 'rvec': 'extrinsic_rvec', 'tvec': 'extrinsic_tvec', 'useExtrinsicGuess': '(True)', 'flags': 'pnp_flags'}), '(local_dots_for_pnp_masked, dot_centers_cv_masked,\n camera_matrix, distortion_coefficients, reprojectionError=\n inlier_distance, rvec=extrinsic_rvec, tvec=extrinsic_tvec,\n useExtrinsicGuess=True, flags=pnp_flags)\n', (18312, 18533), False, 'import cv2\n'), ((18549, 18589), 'CoordsHelper.Pose.create_from_cv_results', 'Pose.create_from_cv_results', (['matched_pnp'], {}), '(matched_pnp)\n', (18576, 18589), False, 'from CoordsHelper import Pose\n'), ((22051, 22116), 'numpy.arctan', 'np.arctan', (['((comparison_projected_distances / distance_scale) ** 2)'], {}), '((comparison_projected_distances / distance_scale) ** 2)\n', (22060, 22116), True, 'import numpy as np\n'), ((22442, 22500), 'numpy.maximum', 'np.maximum', (['(0)', '(local_dots_projected - bounding_box_cv[2:4])'], {}), '(0, local_dots_projected - bounding_box_cv[2:4])\n', (22452, 22500), True, 'import numpy as np\n'), ((22502, 22560), 'numpy.maximum', 'np.maximum', (['(0)', '(bounding_box_cv[0:2] - local_dots_projected)'], {}), '(0, bounding_box_cv[0:2] - local_dots_projected)\n', (22512, 22560), True, 'import numpy as np\n'), ((23388, 23450), 'CoordsHelper.Pose', 'Pose', (['(True)', 'trial_pose_rodrigues_cv', 'trial_pose_translation_cv'], {}), '(True, trial_pose_rodrigues_cv, trial_pose_translation_cv)\n', (23392, 23450), False, 'from CoordsHelper import Pose\n'), ((24889, 24929), 'tensorflow.compat.v2.gather', 'tf.gather', (['bounding_box', '[2, 3]'], {'axis': '(-1)'}), '(bounding_box, [2, 3], axis=-1)\n', (24898, 24929), True, 'import tensorflow.compat.v2 as tf\n'), ((24934, 24974), 'tensorflow.compat.v2.gather', 'tf.gather', (['bounding_box', '[0, 1]'], {'axis': '(-1)'}), '(bounding_box, [0, 1], axis=-1)\n', (24943, 24974), True, 'import tensorflow.compat.v2 as tf\n'), ((25020, 25041), 'tensorflow.compat.v2.math.abs', 'tf.math.abs', (['box_size'], {}), '(box_size)\n', (25031, 25041), True, 'import tensorflow.compat.v2 as tf\n'), ((25087, 25130), 'DiceConfig.get_local_bounding_box_min_max', 
'DiceConfig.get_local_bounding_box_min_max', ([], {}), '()\n', (25128, 25130), False, 'import DiceConfig\n'), ((27298, 27383), 'numpy.arctan2', 'np.arctan2', (['approximate_up_vector_pyrender[2]', 'approximate_up_vector_pyrender[1]'], {}), '(approximate_up_vector_pyrender[2], approximate_up_vector_pyrender[1]\n )\n', (27308, 27383), True, 'import numpy as np\n'), ((31165, 31204), 'copy.deepcopy', 'copy.deepcopy', (['bounding_box_pose_result'], {}), '(bounding_box_pose_result)\n', (31178, 31204), False, 'import copy\n'), ((31816, 31835), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (31824, 31835), True, 'import numpy as np\n'), ((33167, 33183), 'numpy.zeros', 'np.zeros', (['(0, 0)'], {}), '((0, 0))\n', (33175, 33183), True, 'import numpy as np\n'), ((35817, 35870), 'tensorflow.compat.v2.math.maximum', 'tf.math.maximum', (['dot_bounding_boxes[:, 1]', 'die_box[1]'], {}), '(dot_bounding_boxes[:, 1], die_box[1])\n', (35832, 35870), True, 'import tensorflow.compat.v2 as tf\n'), ((35907, 35960), 'tensorflow.compat.v2.math.minimum', 'tf.math.minimum', (['dot_bounding_boxes[:, 3]', 'die_box[3]'], {}), '(dot_bounding_boxes[:, 3], die_box[3])\n', (35922, 35960), True, 'import tensorflow.compat.v2 as tf\n'), ((35995, 36048), 'tensorflow.compat.v2.math.maximum', 'tf.math.maximum', (['dot_bounding_boxes[:, 0]', 'die_box[0]'], {}), '(dot_bounding_boxes[:, 0], die_box[0])\n', (36010, 36048), True, 'import tensorflow.compat.v2 as tf\n'), ((36086, 36139), 'tensorflow.compat.v2.math.minimum', 'tf.math.minimum', (['dot_bounding_boxes[:, 2]', 'die_box[2]'], {}), '(dot_bounding_boxes[:, 2], die_box[2])\n', (36101, 36139), True, 'import tensorflow.compat.v2 as tf\n'), ((36170, 36298), 'tensorflow.compat.v2.stack', 'tf.stack', (['[dot_bb_intersection_top, dot_bb_intersection_left,\n dot_bb_intersection_bottom, dot_bb_intersection_right]'], {'axis': '(1)'}), '([dot_bb_intersection_top, dot_bb_intersection_left,\n dot_bb_intersection_bottom, dot_bb_intersection_right], axis=1)\n', (36178, 36298), True, 'import tensorflow.compat.v2 as tf\n'), ((36549, 36595), 'tensorflow.compat.v2.greater', 'tf.greater', (['dot_bb_intersection_over_area', '(0.9)'], {}), '(dot_bb_intersection_over_area, 0.9)\n', (36559, 36595), True, 'import tensorflow.compat.v2 as tf\n'), ((36631, 36728), 'tensorflow.compat.v2.logical_and', 'tf.logical_and', (['dots_have_sufficient_bb_intersection_over_area', 'dots_are_in_rounded_rectangle'], {}), '(dots_have_sufficient_bb_intersection_over_area,\n dots_are_in_rounded_rectangle)\n', (36645, 36728), True, 'import tensorflow.compat.v2 as tf\n'), ((36755, 36800), 'tensorflow.compat.v2.boolean_mask', 'tf.boolean_mask', (['dot_centers', 'dots_are_in_box'], {}), '(dot_centers, dots_are_in_box)\n', (36770, 36800), True, 'import tensorflow.compat.v2 as tf\n'), ((37338, 37363), 'tensorflow.compat.v2.where', 'tf.where', (['dots_are_in_box'], {}), '(dots_are_in_box)\n', (37346, 37363), True, 'import tensorflow.compat.v2 as tf\n'), ((37396, 37464), 'tensorflow.compat.v2.gather', 'tf.gather', (['indices_in_box', 'die_pose_result.comparison_inlier_indices'], {}), '(indices_in_box, die_pose_result.comparison_inlier_indices)\n', (37405, 37464), True, 'import tensorflow.compat.v2 as tf\n'), ((4250, 4298), 'numpy.log1p', 'np.log1p', (['((distances / self.distance_scale) ** 2)'], {}), '((distances / self.distance_scale) ** 2)\n', (4258, 4298), True, 'import numpy as np\n'), ((4471, 4520), 'numpy.arctan', 'np.arctan', (['((distances / self.distance_scale) ** 2)'], {}), '((distances / 
self.distance_scale) ** 2)\n', (4480, 4520), True, 'import numpy as np\n'), ((7173, 7204), 'numpy.abs', 'np.abs', (['difference_bb_fractions'], {}), '(difference_bb_fractions)\n', (7179, 7204), True, 'import numpy as np\n'), ((7235, 7268), 'numpy.max', 'np.max', (['abs_bb_fractions'], {'axis': '(-1)'}), '(abs_bb_fractions, axis=-1)\n', (7241, 7268), True, 'import numpy as np\n'), ((7360, 7396), 'numpy.linalg.norm', 'np.linalg.norm', (['differences'], {'axis': '(-1)'}), '(differences, axis=-1)\n', (7374, 7396), True, 'import numpy as np\n'), ((7836, 7858), 'tensorflow.compat.v2.expand_dims', 'tf.expand_dims', (['idx', '(1)'], {}), '(idx, 1)\n', (7850, 7858), True, 'import tensorflow.compat.v2 as tf\n'), ((11233, 11320), 'numpy.arctan2', 'np.arctan2', (['(-bb_translation_pyrender_coords[0])', '(-bb_translation_pyrender_coords[2])'], {}), '(-bb_translation_pyrender_coords[0], -\n bb_translation_pyrender_coords[2])\n', (11243, 11320), True, 'import numpy as np\n'), ((11837, 11846), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (11843, 11846), True, 'import numpy as np\n'), ((18850, 18982), 'cv2.solvePnP', 'cv2.solvePnP', (['local_dots_for_pnp_masked', 'dot_centers_cv_masked', 'camera_matrix', 'distortion_coefficients'], {'flags': 'four_dot_pnp_flags'}), '(local_dots_for_pnp_masked, dot_centers_cv_masked,\n camera_matrix, distortion_coefficients, flags=four_dot_pnp_flags)\n', (18862, 18982), False, 'import cv2\n'), ((19012, 19053), 'CoordsHelper.Pose.create_from_cv_results', 'Pose.create_from_cv_results', (['four_dot_pnp'], {}), '(four_dot_pnp)\n', (19039, 19053), False, 'from CoordsHelper import Pose\n'), ((22356, 22381), 'numpy.square', 'np.square', (['matched_scores'], {}), '(matched_scores)\n', (22365, 22381), True, 'import numpy as np\n'), ((25837, 25877), 'tensorflow.compat.v2.gather', 'tf.gather', (['bounding_box', '[0, 1]'], {'axis': '(-1)'}), '(bounding_box, [0, 1], axis=-1)\n', (25846, 25877), True, 'import tensorflow.compat.v2 as tf\n'), ((25881, 25921), 'tensorflow.compat.v2.gather', 'tf.gather', (['bounding_box', '[2, 1]'], {'axis': '(-1)'}), '(bounding_box, [2, 1], axis=-1)\n', (25890, 25921), True, 'import tensorflow.compat.v2 as tf\n'), ((25925, 25965), 'tensorflow.compat.v2.gather', 'tf.gather', (['bounding_box', '[0, 3]'], {'axis': '(-1)'}), '(bounding_box, [0, 3], axis=-1)\n', (25934, 25965), True, 'import tensorflow.compat.v2 as tf\n'), ((25969, 26009), 'tensorflow.compat.v2.gather', 'tf.gather', (['bounding_box', '[2, 3]'], {'axis': '(-1)'}), '(bounding_box, [2, 3], axis=-1)\n', (25978, 26009), True, 'import tensorflow.compat.v2 as tf\n'), ((28518, 28541), 'numpy.sum', 'np.sum', (['distance_scores'], {}), '(distance_scores)\n', (28524, 28541), True, 'import numpy as np\n'), ((32034, 32053), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (32042, 32053), True, 'import numpy as np\n'), ((32086, 32109), 'numpy.cross', 'np.cross', (['direction', 'up'], {}), '(direction, up)\n', (32094, 32109), True, 'import numpy as np\n'), ((32127, 32169), 'numpy.cross', 'np.cross', (['direction', 'plane_other_direction'], {}), '(direction, plane_other_direction)\n', (32135, 32169), True, 'import numpy as np\n'), ((32370, 32395), 'numpy.cov', 'np.cov', (['point_differences'], {}), '(point_differences)\n', (32376, 32395), True, 'import numpy as np\n'), ((32410, 32435), 'numpy.linalg.svd', 'np.linalg.svd', (['covariance'], {}), '(covariance)\n', (32423, 32435), True, 'import numpy as np\n'), ((32933, 33010), 'numpy.hstack', 'np.hstack', (['[pr.pose_pyrender.translation for pr 
in bounding_box_pose_results]'], {}), '([pr.pose_pyrender.translation for pr in bounding_box_pose_results])\n', (32942, 33010), True, 'import numpy as np\n'), ((33057, 33128), 'numpy.hstack', 'np.hstack', (['[pr.pose_cv.translation for pr in bounding_box_pose_results]'], {}), '([pr.pose_cv.translation for pr in bounding_box_pose_results])\n', (33066, 33128), True, 'import numpy as np\n'), ((34692, 34731), 'tensorflow.compat.v2.math.maximum', 'tf.math.maximum', (['(bb[:, 3] - bb[:, 1])', '(0)'], {}), '(bb[:, 3] - bb[:, 1], 0)\n', (34707, 34731), True, 'import tensorflow.compat.v2 as tf\n'), ((34734, 34773), 'tensorflow.compat.v2.math.maximum', 'tf.math.maximum', (['(bb[:, 2] - bb[:, 0])', '(0)'], {}), '(bb[:, 2] - bb[:, 0], 0)\n', (34749, 34773), True, 'import tensorflow.compat.v2 as tf\n'), ((4017, 4070), 'numpy.sqrt', 'np.sqrt', (['(1.0 + (distances / self.distance_scale) ** 2)'], {}), '(1.0 + (distances / self.distance_scale) ** 2)\n', (4024, 4070), True, 'import numpy as np\n'), ((6429, 6459), 'numpy.square', 'np.square', (['self.matched_scores'], {}), '(self.matched_scores)\n', (6438, 6459), True, 'import numpy as np\n'), ((11151, 11172), 'numpy.array', 'np.array', (['[1, -1, -1]'], {}), '([1, -1, -1])\n', (11159, 11172), True, 'import numpy as np\n'), ((11710, 11735), 'numpy.deg2rad', 'np.deg2rad', (['y_angle_final'], {}), '(y_angle_final)\n', (11720, 11735), True, 'import numpy as np\n'), ((11768, 11793), 'numpy.deg2rad', 'np.deg2rad', (['y_angle_final'], {}), '(y_angle_final)\n', (11778, 11793), True, 'import numpy as np\n'), ((12254, 12313), 'scipy.spatial.transform.Rotation.from_euler', 'Rotation.from_euler', (['"""x"""', 'x_axis_rotation_deg'], {'degrees': '(True)'}), "('x', x_axis_rotation_deg, degrees=True)\n", (12273, 12313), False, 'from scipy.spatial.transform import Rotation\n'), ((12553, 12589), 'cv2.Rodrigues', 'cv2.Rodrigues', (['die_combined_rotation'], {}), '(die_combined_rotation)\n', (12566, 12589), False, 'import cv2\n'), ((12673, 12739), 'CoordsHelper.Pose', 'Pose', (['(True)', 'die_rotation_rodrigues', 'bb_translation_pyrender_coords'], {}), '(True, die_rotation_rodrigues, bb_translation_pyrender_coords)\n', (12677, 12739), False, 'from CoordsHelper import Pose\n'), ((19477, 19524), 'numpy.all', 'np.all', (['(pose_result_distances < inlier_distance)'], {}), '(pose_result_distances < inlier_distance)\n', (19483, 19524), True, 'import numpy as np\n'), ((27529, 27576), 'numpy.array', 'np.array', (['[0, 7, -7, 15, -15, 30, -30, 45, -45]'], {}), '([0, 7, -7, 15, -15, 30, -30, 45, -45])\n', (27537, 27576), True, 'import numpy as np\n'), ((28467, 28495), 'numpy.max', 'np.max', (['bounding_box_cv_size'], {}), '(bounding_box_cv_size)\n', (28473, 28495), True, 'import numpy as np\n'), ((31994, 32020), 'numpy.linalg.norm', 'np.linalg.norm', (['difference'], {}), '(difference)\n', (32008, 32020), True, 'import numpy as np\n'), ((32199, 32217), 'numpy.sign', 'np.sign', (['normal[1]'], {}), '(normal[1])\n', (32206, 32217), True, 'import numpy as np\n'), ((32495, 32513), 'numpy.sign', 'np.sign', (['normal[1]'], {}), '(normal[1])\n', (32502, 32513), True, 'import numpy as np\n'), ((25669, 25727), 'numpy.array', 'np.array', (['[[-1, -1, 0], [-1, 1, 0], [1, -1, 0], [1, 1, 0]]'], {}), '([[-1, -1, 0], [-1, 1, 0], [1, -1, 0], [1, 1, 0]])\n', (25677, 25727), True, 'import numpy as np\n'), ((30724, 30756), 'numpy.square', 'np.square', (['inlier_matched_scores'], {}), '(inlier_matched_scores)\n', (30733, 30756), True, 'import numpy as np\n'), ((35570, 35620), 
'tensorflow.compat.v2.math.abs', 'tf.math.abs', (['(dot_centers_fraction_of_die_box - 0.5)'], {}), '(dot_centers_fraction_of_die_box - 0.5)\n', (35581, 35620), True, 'import tensorflow.compat.v2 as tf\n')]
|
import numpy as np
from scipy import optimize
def circle_fit(coords):
"""
Find the least squares circle fitting a set of 2D points ``(x,y)``.
Parameters
----------
coords : (N, 2) ndarray
Set of ``x`` and ``y`` coordinates.
Returns
-------
    centre_i : (2,) ndarray
        The 2D coordinates of the centre of the circle.
    r_i : float
        The radius of the circle.
References
----------
.. [1] http://www.scipy.org/Cookbook/Least_Squares_Circle
"""
def r_sq_of_circle(coords, centre):
return np.mean(np.sum((coords - centre) ** 2, axis=1))
def residuals(p, x, y):
x_c, y_c = p
err = np.sqrt((x - x_c) ** 2 + (y - y_c) ** 2)
return err - err.mean()
c_est = np.mean(coords, axis=0)
centre_i, ier = optimize.leastsq(residuals, c_est,
args=(coords[:, 0], coords[:, 1]))
r_i = np.sqrt(r_sq_of_circle(coords, centre_i))
return centre_i, r_i
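# Hedged usage sketch (not part of the original snippet): fitting noiseless points
# sampled from a known circle; the centre (3, -1) and radius 2 below are made up.
def _demo_circle_fit():
    theta = np.linspace(0, 2 * np.pi, 20, endpoint=False)
    coords = np.column_stack((3.0 + 2.0 * np.cos(theta),
                              -1.0 + 2.0 * np.sin(theta)))
    centre, radius = circle_fit(coords)  # -> approx. array([3., -1.]), 2.0
    return centre, radius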
|
[
"numpy.mean",
"numpy.sqrt",
"numpy.sum",
"scipy.optimize.leastsq"
] |
[((762, 785), 'numpy.mean', 'np.mean', (['coords'], {'axis': '(0)'}), '(coords, axis=0)\n', (769, 785), True, 'import numpy as np\n'), ((849, 918), 'scipy.optimize.leastsq', 'optimize.leastsq', (['residuals', 'c_est'], {'args': '(coords[:, 0], coords[:, 1])'}), '(residuals, c_est, args=(coords[:, 0], coords[:, 1]))\n', (865, 918), False, 'from scipy import optimize\n'), ((676, 716), 'numpy.sqrt', 'np.sqrt', (['((x - x_c) ** 2 + (y - y_c) ** 2)'], {}), '((x - x_c) ** 2 + (y - y_c) ** 2)\n', (683, 716), True, 'import numpy as np\n'), ((572, 610), 'numpy.sum', 'np.sum', (['((coords - centre) ** 2)'], {'axis': '(1)'}), '((coords - centre) ** 2, axis=1)\n', (578, 610), True, 'import numpy as np\n')]
|
#
# Results generators and caching
#
import json
import gevent
import Database
import Options
import RHUtils
import logging
from monotonic import monotonic
from Language import __
from eventmanager import Evt, EventManager
from RHRace import WinCondition
Events = EventManager()
logger = logging.getLogger(__name__)
class CacheStatus:
INVALID = 'invalid'
VALID = 'valid'
def invalidate_all_caches(DB):
    ''' Invalidate all results caches '''
for race in Database.SavedRaceMeta.query.all():
race.cacheStatus = CacheStatus.INVALID
for heat in Database.Heat.query.all():
heat.cacheStatus = CacheStatus.INVALID
for race_class in Database.RaceClass.query.all():
race_class.cacheStatus = CacheStatus.INVALID
DB.session.commit()
Options.set("eventResults_cacheStatus", CacheStatus.INVALID)
global FULL_RESULTS_CACHE_VALID
FULL_RESULTS_CACHE_VALID = False
Events.trigger(Evt.CACHE_CLEAR)
logger.debug('All Result caches invalidated')
def normalize_cache_status(DB):
''' Check all caches and invalidate any paused builds '''
for race in Database.SavedRaceMeta.query.all():
if race.cacheStatus != CacheStatus.VALID:
race.cacheStatus = CacheStatus.INVALID
for heat in Database.Heat.query.all():
if heat.cacheStatus != CacheStatus.VALID:
heat.cacheStatus = CacheStatus.INVALID
for race_class in Database.RaceClass.query.all():
if race_class.cacheStatus != CacheStatus.VALID:
race_class.cacheStatus = CacheStatus.INVALID
if Options.get("eventResults_cacheStatus") != CacheStatus.VALID:
Options.set("eventResults_cacheStatus", CacheStatus.INVALID)
DB.session.commit()
global FULL_RESULTS_CACHE_VALID
FULL_RESULTS_CACHE_VALID = False
logger.debug('All Result caches normalized')
def build_result_cache(DB, **params):
return {
'results': calc_leaderboard(DB, **params),
'cacheStatus': CacheStatus.VALID
}
def build_race_results_caches(DB, params):
    global FULL_RESULTS_CACHE_VALID
    FULL_RESULTS_CACHE_VALID = False
token = monotonic()
race = Database.SavedRaceMeta.query.get(params['race_id'])
heat = Database.Heat.query.get(params['heat_id'])
if heat.class_id != Database.CLASS_ID_NONE:
race_class = Database.RaceClass.query.get(heat.class_id)
race.cacheStatus = token
heat.cacheStatus = token
if heat.class_id != Database.CLASS_ID_NONE:
race_class.cacheStatus = token
Options.set("eventResults_cacheStatus", token)
DB.session.commit()
# rebuild race result
gevent.sleep()
if race.cacheStatus == token:
raceResult = build_result_cache(DB, heat_id=params['heat_id'], round_id=params['round_id'])
race.results = raceResult['results']
race.cacheStatus = raceResult['cacheStatus']
DB.session.commit()
# rebuild heat summary
gevent.sleep()
if heat.cacheStatus == token:
heatResult = build_result_cache(DB, heat_id=params['heat_id'])
heat.results = heatResult['results']
heat.cacheStatus = heatResult['cacheStatus']
DB.session.commit()
# rebuild class summary
if heat.class_id != Database.CLASS_ID_NONE:
if race_class.cacheStatus == token:
gevent.sleep()
classResult = build_result_cache(DB, class_id=heat.class_id)
race_class.results = classResult['results']
race_class.cacheStatus = classResult['cacheStatus']
DB.session.commit()
# rebuild event summary
gevent.sleep()
Options.set("eventResults", json.dumps(calc_leaderboard(DB)))
Options.set("eventResults_cacheStatus", CacheStatus.VALID)
logger.debug('Built result caches: Race {0}, Heat {1}, Class {2}, Event'.format(params['race_id'], params['heat_id'], heat.class_id))
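# Hedged illustration (not part of the original module) of the token pattern above:
# each rebuild stamps cacheStatus with a fresh monotonic token, and a stage only
# publishes its result if its token is still the latest -- a newer rebuild or an
# invalidation that re-stamps the record silently cancels the stale build.
def _demo_token_guard(record):
    token = monotonic()
    record.cacheStatus = token          # claim the build
    gevent.sleep()                      # yield; another build may re-stamp here
    if record.cacheStatus == token:     # still ours -> safe to publish
        record.cacheStatus = CacheStatus.VALID
        return True
    return False                        # superseded; discard this result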
def calc_leaderboard(DB, **params):
''' Generates leaderboards '''
USE_CURRENT = False
USE_ROUND = None
USE_HEAT = None
USE_CLASS = None
if ('current_race' in params):
USE_CURRENT = True
if ('class_id' in params):
USE_CLASS = params['class_id']
elif ('round_id' in params and 'heat_id' in params):
USE_ROUND = params['round_id']
USE_HEAT = params['heat_id']
elif ('heat_id' in params):
USE_ROUND = None
USE_HEAT = params['heat_id']
# Get profile (current), frequencies (current), race query (saved), and race format (all)
if USE_CURRENT:
profile = params['current_profile']
profile_freqs = json.loads(profile.frequencies)
RACE = params['current_race']
race_format = RACE.format
else:
if USE_CLASS:
race_query = Database.SavedRaceMeta.query.filter_by(class_id=USE_CLASS)
if race_query.count() >= 1:
current_format = Database.RaceClass.query.get(USE_CLASS).format_id
else:
current_format = None
elif USE_HEAT:
if USE_ROUND:
race_query = Database.SavedRaceMeta.query.filter_by(heat_id=USE_HEAT, round_id=USE_ROUND)
current_format = race_query.first().format_id
else:
race_query = Database.SavedRaceMeta.query.filter_by(heat_id=USE_HEAT)
if race_query.count() >= 1:
heat_class = race_query.first().class_id
if heat_class:
current_format = Database.RaceClass.query.get(heat_class).format_id
else:
current_format = None
else:
current_format = None
else:
race_query = Database.SavedRaceMeta.query
current_format = None
selected_races = race_query.all()
racelist = [r.id for r in selected_races]
if current_format:
race_format = Database.RaceFormat.query.get(current_format)
else:
race_format = None
gevent.sleep()
# Get the pilot ids for all relevant races
# Add pilot callsigns
# Add pilot team names
# Get total laps for each pilot
# Get hole shot laps
pilot_ids = []
callsigns = []
nodes = []
team_names = []
max_laps = []
current_laps = []
holeshots = []
for pilot in Database.Pilot.query.filter(Database.Pilot.id != Database.PILOT_ID_NONE):
gevent.sleep()
if USE_CURRENT:
laps = []
for node_index in RACE.node_pilots:
if RACE.node_pilots[node_index] == pilot.id and node_index < RACE.num_nodes:
laps = RACE.get_active_laps()[node_index]
break
if laps:
max_lap = len(laps) - 1
else:
max_lap = 0
current_heat = Database.HeatNode.query.filter_by(heat_id=RACE.current_heat, pilot_id=pilot.id).first()
if current_heat and profile_freqs["f"][current_heat.node_index] != RHUtils.FREQUENCY_ID_NONE:
pilot_ids.append(pilot.id)
callsigns.append(pilot.callsign)
nodes.append(current_heat.node_index)
team_names.append(pilot.team)
max_laps.append(max_lap)
current_laps.append(laps)
else:
# find hole shots
holeshot_laps = []
pilotnode = None
for race in racelist:
pilotraces = Database.SavedPilotRace.query \
.filter(Database.SavedPilotRace.pilot_id == pilot.id, \
Database.SavedPilotRace.race_id == race \
).all()
if len(pilotraces):
pilotnode = pilotraces[-1].node_index
for pilotrace in pilotraces:
gevent.sleep()
holeshot_lap = Database.SavedRaceLap.query \
.filter(Database.SavedRaceLap.pilotrace_id == pilotrace.id, \
Database.SavedRaceLap.deleted != 1, \
).order_by(Database.SavedRaceLap.lap_time_stamp).first()
if holeshot_lap:
holeshot_laps.append(holeshot_lap.id)
# get total laps
stat_query = DB.session.query(DB.func.count(Database.SavedRaceLap.id)) \
.filter(Database.SavedRaceLap.pilot_id == pilot.id, \
Database.SavedRaceLap.deleted != 1, \
Database.SavedRaceLap.race_id.in_(racelist), \
~Database.SavedRaceLap.id.in_(holeshot_laps))
max_lap = stat_query.scalar()
if max_lap > 0:
pilot_ids.append(pilot.id)
callsigns.append(pilot.callsign)
team_names.append(pilot.team)
max_laps.append(max_lap)
holeshots.append(holeshot_laps)
nodes.append(pilotnode)
total_time = []
total_time_laps = []
last_lap = []
average_lap = []
fastest_lap = []
consecutives = []
fastest_lap_source = []
consecutives_source = []
for i, pilot in enumerate(pilot_ids):
gevent.sleep()
# Get the total race time for each pilot
if USE_CURRENT:
race_total = 0
laps_total = 0
for lap in current_laps[i]:
race_total += lap['lap_time']
                if lap['lap_number'] > 0:  # exclude the holeshot (lap 0)
laps_total += lap['lap_time']
total_time.append(race_total)
total_time_laps.append(laps_total)
else:
stat_query = DB.session.query(DB.func.sum(Database.SavedRaceLap.lap_time)) \
.filter(Database.SavedRaceLap.pilot_id == pilot, \
Database.SavedRaceLap.deleted != 1, \
Database.SavedRaceLap.race_id.in_(racelist))
if stat_query.scalar():
total_time.append(stat_query.scalar())
else:
total_time.append(0)
stat_query = DB.session.query(DB.func.sum(Database.SavedRaceLap.lap_time)) \
.filter(Database.SavedRaceLap.pilot_id == pilot, \
Database.SavedRaceLap.deleted != 1, \
Database.SavedRaceLap.race_id.in_(racelist), \
~Database.SavedRaceLap.id.in_(holeshots[i]))
if stat_query.scalar():
total_time_laps.append(stat_query.scalar())
else:
total_time_laps.append(0)
gevent.sleep()
# Get the last lap for each pilot (current race only)
if max_laps[i] == 0:
last_lap.append(None) # Add zero if no laps completed
else:
if USE_CURRENT:
last_lap.append(current_laps[i][-1]['lap_time'])
else:
last_lap.append(None)
gevent.sleep()
# Get the average lap time for each pilot
if max_laps[i] == 0:
average_lap.append(0) # Add zero if no laps completed
else:
if USE_CURRENT:
avg_lap = (current_laps[i][-1]['lap_time_stamp'] - current_laps[i][0]['lap_time_stamp']) / (len(current_laps[i]) - 1)
'''
timed_laps = filter(lambda x : x['lap_number'] > 0, current_laps[i])
lap_total = 0
for lap in timed_laps:
lap_total += lap['lap_time']
avg_lap = lap_total / len(timed_laps)
'''
else:
stat_query = DB.session.query(DB.func.avg(Database.SavedRaceLap.lap_time)) \
.filter(Database.SavedRaceLap.pilot_id == pilot, \
Database.SavedRaceLap.deleted != 1, \
Database.SavedRaceLap.race_id.in_(racelist), \
~Database.SavedRaceLap.id.in_(holeshots[i]))
avg_lap = stat_query.scalar()
average_lap.append(avg_lap)
gevent.sleep()
# Get the fastest lap time for each pilot
if max_laps[i] == 0:
fastest_lap.append(0) # Add zero if no laps completed
fastest_lap_source.append(None)
else:
if USE_CURRENT:
timed_laps = filter(lambda x : x['lap_number'] > 0, current_laps[i])
fast_lap = sorted(timed_laps, key=lambda val : val['lap_time'])[0]['lap_time']
fastest_lap_source.append(None)
else:
stat_query = DB.session.query(DB.func.min(Database.SavedRaceLap.lap_time).label('time'), Database.SavedRaceLap.race_id) \
.filter(Database.SavedRaceLap.pilot_id == pilot, \
Database.SavedRaceLap.deleted != 1, \
Database.SavedRaceLap.race_id.in_(racelist), \
~Database.SavedRaceLap.id.in_(holeshots[i])).one()
fast_lap = stat_query.time
if USE_HEAT:
fastest_lap_source.append(None)
else:
source_query = Database.SavedRaceMeta.query.get(stat_query.race_id)
fast_lap_round = source_query.round_id
fast_lap_heat = source_query.heat_id
fast_lap_heatnote = Database.Heat.query.get(fast_lap_heat).note
if fast_lap_heatnote:
source_text = fast_lap_heatnote + ' / ' + __('Round') + ' ' + str(fast_lap_round)
else:
source_text = __('Heat') + ' ' + str(fast_lap_heat) + ' / ' + __('Round') + ' ' + str(fast_lap_round)
fastest_lap_source.append(source_text)
fastest_lap.append(fast_lap)
gevent.sleep()
# find best consecutive 3 laps
if max_laps[i] < 3:
consecutives.append(None)
consecutives_source.append(None)
else:
all_consecutives = []
if USE_CURRENT:
thisrace = current_laps[i][1:]
for j in range(len(thisrace) - 2):
gevent.sleep()
all_consecutives.append({
'time': thisrace[j]['lap_time'] + thisrace[j+1]['lap_time'] + thisrace[j+2]['lap_time'],
'race_id': None,
})
else:
for race_id in racelist:
gevent.sleep()
thisrace = DB.session.query(Database.SavedRaceLap.lap_time) \
.filter(Database.SavedRaceLap.pilot_id == pilot, \
Database.SavedRaceLap.race_id == race_id, \
Database.SavedRaceLap.deleted != 1, \
~Database.SavedRaceLap.id.in_(holeshots[i]) \
).all()
if len(thisrace) >= 3:
for j in range(len(thisrace) - 2):
gevent.sleep()
all_consecutives.append({
'time': thisrace[j].lap_time + thisrace[j+1].lap_time + thisrace[j+2].lap_time,
'race_id': race_id
})
# Sort consecutives
all_consecutives.sort(key = lambda x: (x['time'] is None, x['time']))
# Get lowest not-none value (if any)
if all_consecutives:
consecutives.append(all_consecutives[0]['time'])
if USE_CURRENT:
consecutives_source.append(None)
else:
source_query = Database.SavedRaceMeta.query.get(all_consecutives[0]['race_id'])
if source_query:
fast_lap_round = source_query.round_id
fast_lap_heat = source_query.heat_id
fast_lap_heatnote = Database.Heat.query.get(fast_lap_heat).note
if fast_lap_heatnote:
source_text = fast_lap_heatnote + ' / ' + __('Round') + ' ' + str(fast_lap_round)
else:
source_text = __('Heat') + ' ' + str(fast_lap_heat) + ' / ' + __('Round') + ' ' + str(fast_lap_round)
consecutives_source.append(source_text)
else:
consecutives_source.append(None)
else:
consecutives.append(None)
consecutives_source.append(None)
gevent.sleep()
# Combine for sorting
    leaderboard = list(zip(callsigns, max_laps, total_time, average_lap, fastest_lap, team_names, consecutives, fastest_lap_source, consecutives_source, last_lap, pilot_ids, nodes, total_time_laps))
# Reverse sort max_laps x[1], then sort on total time x[2]
leaderboard_by_race_time = sorted(leaderboard, key = lambda x: (-x[1], x[2] if x[2] > 0 else float('inf')))
leaderboard_total_data = []
last_rank = '-'
last_rank_laps = 0
last_rank_time = RHUtils.time_format(0)
for i, row in enumerate(leaderboard_by_race_time, start=1):
pos = i
total_time = RHUtils.time_format(row[2])
if last_rank_laps == row[1] and last_rank_time == total_time:
pos = last_rank
last_rank = pos
last_rank_laps = row[1]
last_rank_time = total_time
leaderboard_total_data.append({
'position': pos,
'callsign': row[0],
'laps': row[1],
'behind': (leaderboard_by_race_time[0][1] - row[1]),
'total_time': RHUtils.time_format(row[2]),
'total_time_raw': row[2],
'total_time_laps': RHUtils.time_format(row[12]),
'total_time_laps_raw': row[12],
'average_lap': RHUtils.time_format(row[3]),
'fastest_lap': RHUtils.time_format(row[4]),
'fastest_lap_raw': row[4],
'team_name': row[5],
'consecutives': RHUtils.time_format(row[6]),
'consecutives_raw': row[6],
'fastest_lap_source': row[7],
'consecutives_source': row[8],
'last_lap': RHUtils.time_format(row[9]),
'last_lap_raw': row[9],
'pilot_id': row[10],
'node': row[11],
})
gevent.sleep()
# Sort fastest_laps x[4]
leaderboard_by_fastest_lap = sorted(leaderboard, key = lambda x: (x[4] if x[4] > 0 else float('inf')))
leaderboard_fast_lap_data = []
last_rank = '-'
last_rank_lap = 0
for i, row in enumerate(leaderboard_by_fastest_lap, start=1):
pos = i
fast_lap = RHUtils.time_format(row[4])
if last_rank_lap == fast_lap:
pos = last_rank
last_rank = pos
        last_rank_lap = fast_lap
leaderboard_fast_lap_data.append({
'position': pos,
'callsign': row[0],
'laps': row[1],
'total_time': RHUtils.time_format(row[2]),
'total_time_raw': row[2],
'total_time_laps': RHUtils.time_format(row[12]),
'total_time_laps_raw': row[12],
'average_lap': RHUtils.time_format(row[3]),
'fastest_lap': RHUtils.time_format(row[4]),
'fastest_lap_raw': row[4],
'team_name': row[5],
'consecutives': RHUtils.time_format(row[6]),
'consecutives_raw': row[6],
'fastest_lap_source': row[7],
'consecutives_source': row[8],
'last_lap': RHUtils.time_format(row[9]),
'last_lap_raw': row[9],
'pilot_id': row[10],
'node': row[11],
})
gevent.sleep()
# Sort consecutives x[6]
leaderboard_by_consecutives = sorted(leaderboard, key = lambda x: (x[6] if x[6] > 0 else float('inf')))
leaderboard_consecutives_data = []
last_rank = '-'
last_rank_consecutive = 0
for i, row in enumerate(leaderboard_by_consecutives, start=1):
pos = i
        fast_consecutive = RHUtils.time_format(row[6])
if last_rank_consecutive == fast_consecutive:
pos = last_rank
last_rank = pos
last_rank_consecutive = fast_consecutive
leaderboard_consecutives_data.append({
            'position': pos,
'callsign': row[0],
'laps': row[1],
'total_time': RHUtils.time_format(row[2]),
'total_time_raw': row[2],
'total_time_laps': RHUtils.time_format(row[12]),
'total_time_laps_raw': row[12],
'average_lap': RHUtils.time_format(row[3]),
'fastest_lap': RHUtils.time_format(row[4]),
'fastest_lap_raw': row[4],
'team_name': row[5],
'consecutives': RHUtils.time_format(row[6]),
'consecutives_raw': row[6],
'fastest_lap_source': row[7],
'consecutives_source': row[8],
'last_lap': RHUtils.time_format(row[9]),
'last_lap_raw': row[9],
'pilot_id': row[10],
'node': row[11],
})
leaderboard_output = {
'by_race_time': leaderboard_total_data,
'by_fastest_lap': leaderboard_fast_lap_data,
'by_consecutives': leaderboard_consecutives_data
}
if race_format:
if race_format.win_condition == WinCondition.FASTEST_3_CONSECUTIVE:
primary_leaderboard = 'by_consecutives'
elif race_format.win_condition == WinCondition.FASTEST_LAP:
primary_leaderboard = 'by_fastest_lap'
else:
# WinCondition.NONE
# WinCondition.MOST_LAPS
# WinCondition.FIRST_TO_LAP_X
primary_leaderboard = 'by_race_time'
leaderboard_output['meta'] = {
'primary_leaderboard': primary_leaderboard,
'win_condition': race_format.win_condition,
'team_racing_mode': race_format.team_racing_mode,
}
else:
leaderboard_output['meta'] = {
'primary_leaderboard': 'by_race_time',
'win_condition': WinCondition.NONE,
'team_racing_mode': False
}
return leaderboard_output
|
[
"Database.HeatNode.query.filter_by",
"Database.Heat.query.get",
"Database.SavedRaceMeta.query.filter_by",
"Language.__",
"Options.set",
"monotonic.monotonic",
"Database.Pilot.query.filter",
"Database.SavedRaceLap.id.in_",
"json.loads",
"Database.RaceClass.query.get",
"Database.SavedRaceLap.query.filter",
"Database.SavedPilotRace.query.filter",
"eventmanager.EventManager",
"Options.get",
"Database.Heat.query.all",
"gevent.sleep",
"Database.SavedRaceLap.race_id.in_",
"Database.RaceClass.query.all",
"Database.SavedRaceMeta.query.all",
"Database.SavedRaceMeta.query.get",
"RHUtils.time_format",
"logging.getLogger",
"Database.RaceFormat.query.get"
] |
[((266, 280), 'eventmanager.EventManager', 'EventManager', ([], {}), '()\n', (278, 280), False, 'from eventmanager import Evt, EventManager\n'), ((291, 318), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (308, 318), False, 'import logging\n'), ((493, 527), 'Database.SavedRaceMeta.query.all', 'Database.SavedRaceMeta.query.all', ([], {}), '()\n', (525, 527), False, 'import Database\n'), ((593, 618), 'Database.Heat.query.all', 'Database.Heat.query.all', ([], {}), '()\n', (616, 618), False, 'import Database\n'), ((690, 720), 'Database.RaceClass.query.all', 'Database.RaceClass.query.all', ([], {}), '()\n', (718, 720), False, 'import Database\n'), ((805, 865), 'Options.set', 'Options.set', (['"""eventResults_cacheStatus"""', 'CacheStatus.INVALID'], {}), "('eventResults_cacheStatus', CacheStatus.INVALID)\n", (816, 865), False, 'import Options\n'), ((1139, 1173), 'Database.SavedRaceMeta.query.all', 'Database.SavedRaceMeta.query.all', ([], {}), '()\n', (1171, 1173), False, 'import Database\n'), ((1293, 1318), 'Database.Heat.query.all', 'Database.Heat.query.all', ([], {}), '()\n', (1316, 1318), False, 'import Database\n'), ((1444, 1474), 'Database.RaceClass.query.all', 'Database.RaceClass.query.all', ([], {}), '()\n', (1472, 1474), False, 'import Database\n'), ((2144, 2155), 'monotonic.monotonic', 'monotonic', ([], {}), '()\n', (2153, 2155), False, 'from monotonic import monotonic\n'), ((2168, 2219), 'Database.SavedRaceMeta.query.get', 'Database.SavedRaceMeta.query.get', (["params['race_id']"], {}), "(params['race_id'])\n", (2200, 2219), False, 'import Database\n'), ((2231, 2273), 'Database.Heat.query.get', 'Database.Heat.query.get', (["params['heat_id']"], {}), "(params['heat_id'])\n", (2254, 2273), False, 'import Database\n'), ((2537, 2583), 'Options.set', 'Options.set', (['"""eventResults_cacheStatus"""', 'token'], {}), "('eventResults_cacheStatus', token)\n", (2548, 2583), False, 'import Options\n'), ((2639, 2653), 'gevent.sleep', 'gevent.sleep', ([], {}), '()\n', (2651, 2653), False, 'import gevent\n'), ((2946, 2960), 'gevent.sleep', 'gevent.sleep', ([], {}), '()\n', (2958, 2960), False, 'import gevent\n'), ((3598, 3612), 'gevent.sleep', 'gevent.sleep', ([], {}), '()\n', (3610, 3612), False, 'import gevent\n'), ((3683, 3741), 'Options.set', 'Options.set', (['"""eventResults_cacheStatus"""', 'CacheStatus.VALID'], {}), "('eventResults_cacheStatus', CacheStatus.VALID)\n", (3694, 3741), False, 'import Options\n'), ((6016, 6030), 'gevent.sleep', 'gevent.sleep', ([], {}), '()\n', (6028, 6030), False, 'import gevent\n'), ((6342, 6414), 'Database.Pilot.query.filter', 'Database.Pilot.query.filter', (['(Database.Pilot.id != Database.PILOT_ID_NONE)'], {}), '(Database.Pilot.id != Database.PILOT_ID_NONE)\n', (6369, 6414), False, 'import Database\n'), ((16576, 16590), 'gevent.sleep', 'gevent.sleep', ([], {}), '()\n', (16588, 16590), False, 'import gevent\n'), ((17083, 17105), 'RHUtils.time_format', 'RHUtils.time_format', (['(0)'], {}), '(0)\n', (17102, 17105), False, 'import RHUtils\n'), ((18351, 18365), 'gevent.sleep', 'gevent.sleep', ([], {}), '()\n', (18363, 18365), False, 'import gevent\n'), ((19697, 19711), 'gevent.sleep', 'gevent.sleep', ([], {}), '()\n', (19709, 19711), False, 'import gevent\n'), ((1597, 1636), 'Options.get', 'Options.get', (['"""eventResults_cacheStatus"""'], {}), "('eventResults_cacheStatus')\n", (1608, 1636), False, 'import Options\n'), ((1667, 1727), 'Options.set', 'Options.set', (['"""eventResults_cacheStatus"""', 'CacheStatus.INVALID'], {}), 
"('eventResults_cacheStatus', CacheStatus.INVALID)\n", (1678, 1727), False, 'import Options\n'), ((2343, 2386), 'Database.RaceClass.query.get', 'Database.RaceClass.query.get', (['heat.class_id'], {}), '(heat.class_id)\n', (2371, 2386), False, 'import Database\n'), ((4583, 4614), 'json.loads', 'json.loads', (['profile.frequencies'], {}), '(profile.frequencies)\n', (4593, 4614), False, 'import json\n'), ((6424, 6438), 'gevent.sleep', 'gevent.sleep', ([], {}), '()\n', (6436, 6438), False, 'import gevent\n'), ((9212, 9226), 'gevent.sleep', 'gevent.sleep', ([], {}), '()\n', (9224, 9226), False, 'import gevent\n'), ((10563, 10577), 'gevent.sleep', 'gevent.sleep', ([], {}), '()\n', (10575, 10577), False, 'import gevent\n'), ((10907, 10921), 'gevent.sleep', 'gevent.sleep', ([], {}), '()\n', (10919, 10921), False, 'import gevent\n'), ((12025, 12039), 'gevent.sleep', 'gevent.sleep', ([], {}), '()\n', (12037, 12039), False, 'import gevent\n'), ((13783, 13797), 'gevent.sleep', 'gevent.sleep', ([], {}), '()\n', (13795, 13797), False, 'import gevent\n'), ((17207, 17234), 'RHUtils.time_format', 'RHUtils.time_format', (['row[2]'], {}), '(row[2])\n', (17226, 17234), False, 'import RHUtils\n'), ((18681, 18708), 'RHUtils.time_format', 'RHUtils.time_format', (['row[4]'], {}), '(row[4])\n', (18700, 18708), False, 'import RHUtils\n'), ((20049, 20076), 'RHUtils.time_format', 'RHUtils.time_format', (['row[4]'], {}), '(row[4])\n', (20068, 20076), False, 'import RHUtils\n'), ((3325, 3339), 'gevent.sleep', 'gevent.sleep', ([], {}), '()\n', (3337, 3339), False, 'import gevent\n'), ((4744, 4802), 'Database.SavedRaceMeta.query.filter_by', 'Database.SavedRaceMeta.query.filter_by', ([], {'class_id': 'USE_CLASS'}), '(class_id=USE_CLASS)\n', (4782, 4802), False, 'import Database\n'), ((5920, 5965), 'Database.RaceFormat.query.get', 'Database.RaceFormat.query.get', (['current_format'], {}), '(current_format)\n', (5949, 5965), False, 'import Database\n'), ((8525, 8568), 'Database.SavedRaceLap.race_id.in_', 'Database.SavedRaceLap.race_id.in_', (['racelist'], {}), '(racelist)\n', (8558, 8568), False, 'import Database\n'), ((9857, 9900), 'Database.SavedRaceLap.race_id.in_', 'Database.SavedRaceLap.race_id.in_', (['racelist'], {}), '(racelist)\n', (9890, 9900), False, 'import Database\n'), ((10284, 10327), 'Database.SavedRaceLap.race_id.in_', 'Database.SavedRaceLap.race_id.in_', (['racelist'], {}), '(racelist)\n', (10317, 10327), False, 'import Database\n'), ((17646, 17673), 'RHUtils.time_format', 'RHUtils.time_format', (['row[2]'], {}), '(row[2])\n', (17665, 17673), False, 'import RHUtils\n'), ((17744, 17772), 'RHUtils.time_format', 'RHUtils.time_format', (['row[12]'], {}), '(row[12])\n', (17763, 17772), False, 'import RHUtils\n'), ((17845, 17872), 'RHUtils.time_format', 'RHUtils.time_format', (['row[3]'], {}), '(row[3])\n', (17864, 17872), False, 'import RHUtils\n'), ((17901, 17928), 'RHUtils.time_format', 'RHUtils.time_format', (['row[4]'], {}), '(row[4])\n', (17920, 17928), False, 'import RHUtils\n'), ((18030, 18057), 'RHUtils.time_format', 'RHUtils.time_format', (['row[6]'], {}), '(row[6])\n', (18049, 18057), False, 'import RHUtils\n'), ((18208, 18235), 'RHUtils.time_format', 'RHUtils.time_format', (['row[9]'], {}), '(row[9])\n', (18227, 18235), False, 'import RHUtils\n'), ((18992, 19019), 'RHUtils.time_format', 'RHUtils.time_format', (['row[2]'], {}), '(row[2])\n', (19011, 19019), False, 'import RHUtils\n'), ((19090, 19118), 'RHUtils.time_format', 'RHUtils.time_format', (['row[12]'], {}), '(row[12])\n', (19109, 19118), 
False, 'import RHUtils\n'), ((19191, 19218), 'RHUtils.time_format', 'RHUtils.time_format', (['row[3]'], {}), '(row[3])\n', (19210, 19218), False, 'import RHUtils\n'), ((19247, 19274), 'RHUtils.time_format', 'RHUtils.time_format', (['row[4]'], {}), '(row[4])\n', (19266, 19274), False, 'import RHUtils\n'), ((19376, 19403), 'RHUtils.time_format', 'RHUtils.time_format', (['row[6]'], {}), '(row[6])\n', (19395, 19403), False, 'import RHUtils\n'), ((19554, 19581), 'RHUtils.time_format', 'RHUtils.time_format', (['row[9]'], {}), '(row[9])\n', (19573, 19581), False, 'import RHUtils\n'), ((20393, 20420), 'RHUtils.time_format', 'RHUtils.time_format', (['row[2]'], {}), '(row[2])\n', (20412, 20420), False, 'import RHUtils\n'), ((20491, 20519), 'RHUtils.time_format', 'RHUtils.time_format', (['row[12]'], {}), '(row[12])\n', (20510, 20519), False, 'import RHUtils\n'), ((20592, 20619), 'RHUtils.time_format', 'RHUtils.time_format', (['row[3]'], {}), '(row[3])\n', (20611, 20619), False, 'import RHUtils\n'), ((20648, 20675), 'RHUtils.time_format', 'RHUtils.time_format', (['row[4]'], {}), '(row[4])\n', (20667, 20675), False, 'import RHUtils\n'), ((20777, 20804), 'RHUtils.time_format', 'RHUtils.time_format', (['row[6]'], {}), '(row[6])\n', (20796, 20804), False, 'import RHUtils\n'), ((20955, 20982), 'RHUtils.time_format', 'RHUtils.time_format', (['row[9]'], {}), '(row[9])\n', (20974, 20982), False, 'import RHUtils\n'), ((4876, 4915), 'Database.RaceClass.query.get', 'Database.RaceClass.query.get', (['USE_CLASS'], {}), '(USE_CLASS)\n', (4904, 4915), False, 'import Database\n'), ((5060, 5136), 'Database.SavedRaceMeta.query.filter_by', 'Database.SavedRaceMeta.query.filter_by', ([], {'heat_id': 'USE_HEAT', 'round_id': 'USE_ROUND'}), '(heat_id=USE_HEAT, round_id=USE_ROUND)\n', (5098, 5136), False, 'import Database\n'), ((5246, 5302), 'Database.SavedRaceMeta.query.filter_by', 'Database.SavedRaceMeta.query.filter_by', ([], {'heat_id': 'USE_HEAT'}), '(heat_id=USE_HEAT)\n', (5284, 5302), False, 'import Database\n'), ((6850, 6929), 'Database.HeatNode.query.filter_by', 'Database.HeatNode.query.filter_by', ([], {'heat_id': 'RACE.current_heat', 'pilot_id': 'pilot.id'}), '(heat_id=RACE.current_heat, pilot_id=pilot.id)\n', (6883, 6929), False, 'import Database\n'), ((7845, 7859), 'gevent.sleep', 'gevent.sleep', ([], {}), '()\n', (7857, 7859), False, 'import gevent\n'), ((8593, 8636), 'Database.SavedRaceLap.id.in_', 'Database.SavedRaceLap.id.in_', (['holeshot_laps'], {}), '(holeshot_laps)\n', (8621, 8636), False, 'import Database\n'), ((10352, 10394), 'Database.SavedRaceLap.id.in_', 'Database.SavedRaceLap.id.in_', (['holeshots[i]'], {}), '(holeshots[i])\n', (10380, 10394), False, 'import Database\n'), ((11812, 11855), 'Database.SavedRaceLap.race_id.in_', 'Database.SavedRaceLap.race_id.in_', (['racelist'], {}), '(racelist)\n', (11845, 11855), False, 'import Database\n'), ((13118, 13170), 'Database.SavedRaceMeta.query.get', 'Database.SavedRaceMeta.query.get', (['stat_query.race_id'], {}), '(stat_query.race_id)\n', (13150, 13170), False, 'import Database\n'), ((14144, 14158), 'gevent.sleep', 'gevent.sleep', ([], {}), '()\n', (14156, 14158), False, 'import gevent\n'), ((14462, 14476), 'gevent.sleep', 'gevent.sleep', ([], {}), '()\n', (14474, 14476), False, 'import gevent\n'), ((15682, 15746), 'Database.SavedRaceMeta.query.get', 'Database.SavedRaceMeta.query.get', (["all_consecutives[0]['race_id']"], {}), "(all_consecutives[0]['race_id'])\n", (15714, 15746), False, 'import Database\n'), ((7486, 7613), 
'Database.SavedPilotRace.query.filter', 'Database.SavedPilotRace.query.filter', (['(Database.SavedPilotRace.pilot_id == pilot.id)', '(Database.SavedPilotRace.race_id == race)'], {}), '(Database.SavedPilotRace.pilot_id ==\n pilot.id, Database.SavedPilotRace.race_id == race)\n', (7522, 7613), False, 'import Database\n'), ((11884, 11926), 'Database.SavedRaceLap.id.in_', 'Database.SavedRaceLap.id.in_', (['holeshots[i]'], {}), '(holeshots[i])\n', (11912, 11926), False, 'import Database\n'), ((13327, 13365), 'Database.Heat.query.get', 'Database.Heat.query.get', (['fast_lap_heat'], {}), '(fast_lap_heat)\n', (13350, 13365), False, 'import Database\n'), ((12813, 12856), 'Database.SavedRaceLap.race_id.in_', 'Database.SavedRaceLap.race_id.in_', (['racelist'], {}), '(racelist)\n', (12846, 12856), False, 'import Database\n'), ((15013, 15027), 'gevent.sleep', 'gevent.sleep', ([], {}), '()\n', (15025, 15027), False, 'import gevent\n'), ((15952, 15990), 'Database.Heat.query.get', 'Database.Heat.query.get', (['fast_lap_heat'], {}), '(fast_lap_heat)\n', (15975, 15990), False, 'import Database\n'), ((5484, 5524), 'Database.RaceClass.query.get', 'Database.RaceClass.query.get', (['heat_class'], {}), '(heat_class)\n', (5512, 5524), False, 'import Database\n'), ((12885, 12927), 'Database.SavedRaceLap.id.in_', 'Database.SavedRaceLap.id.in_', (['holeshots[i]'], {}), '(holeshots[i])\n', (12913, 12927), False, 'import Database\n'), ((7895, 8021), 'Database.SavedRaceLap.query.filter', 'Database.SavedRaceLap.query.filter', (['(Database.SavedRaceLap.pilotrace_id == pilotrace.id)', '(Database.SavedRaceLap.deleted != 1)'], {}), '(Database.SavedRaceLap.pilotrace_id ==\n pilotrace.id, Database.SavedRaceLap.deleted != 1)\n', (7929, 8021), False, 'import Database\n'), ((13480, 13491), 'Language.__', '__', (['"""Round"""'], {}), "('Round')\n", (13482, 13491), False, 'from Language import __\n'), ((13632, 13643), 'Language.__', '__', (['"""Round"""'], {}), "('Round')\n", (13634, 13643), False, 'from Language import __\n'), ((14801, 14843), 'Database.SavedRaceLap.id.in_', 'Database.SavedRaceLap.id.in_', (['holeshots[i]'], {}), '(holeshots[i])\n', (14829, 14843), False, 'import Database\n'), ((16113, 16124), 'Language.__', '__', (['"""Round"""'], {}), "('Round')\n", (16115, 16124), False, 'from Language import __\n'), ((16273, 16284), 'Language.__', '__', (['"""Round"""'], {}), "('Round')\n", (16275, 16284), False, 'from Language import __\n'), ((13584, 13594), 'Language.__', '__', (['"""Heat"""'], {}), "('Heat')\n", (13586, 13594), False, 'from Language import __\n'), ((16225, 16235), 'Language.__', '__', (['"""Heat"""'], {}), "('Heat')\n", (16227, 16235), False, 'from Language import __\n')]
|
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from unittest import TestCase
from uw_canvas.utilities import fdao_canvas_override
from uw_canvas.sis_import import SISImport
from uw_canvas.models import SISImport as SISImportModel
from uw_canvas import MissingAccountID
import mock
class CanvasTestSISImportMissingAccount(TestCase):
def test_import_str(self):
canvas = SISImport()
self.assertRaises(MissingAccountID, canvas.import_str, 'a,b,c,d,e,f')
def test_import_archive(self):
canvas = SISImport()
self.assertRaises(MissingAccountID, canvas.import_archive, None)
@fdao_canvas_override
class CanvasTestSISImport(TestCase):
@mock.patch.object(SISImport, '_post_resource')
def test_import_str(self, mock_post):
canvas = SISImport()
canvas.import_str('a,b,c,d,e,f')
mock_post.assert_called_with((
'/api/v1/accounts/12345/sis_imports.json?'
'import_type=instructure_csv'), {
'Content-Type': 'text/csv'
}, 'a,b,c,d,e,f')
# With extra params
canvas.import_str('a,b,c,d,e,f',
params={'override_sis_stickiness': '1'})
mock_post.assert_called_with((
'/api/v1/accounts/12345/sis_imports.json?import_type='
'instructure_csv&override_sis_stickiness=1'), {
'Content-Type': 'text/csv'
}, 'a,b,c,d,e,f')
@mock.patch.object(SISImport, '_post_resource')
def test_import_archive(self, mock_post):
canvas = SISImport()
canvas.import_archive('')
mock_post.assert_called_with((
'/api/v1/accounts/12345/sis_imports.json?'
'import_type=instructure_csv'), {
'Content-Type': 'application/zip'
}, '')
@mock.patch.object(SISImport, '_post_resource')
@mock.patch.object(SISImport, '_build_archive')
def test_import_dir(self, mock_build, mock_post):
mock_build.return_value = ''
canvas = SISImport()
canvas.import_dir('/path/to/csv')
mock_post.assert_called_with((
'/api/v1/accounts/12345/sis_imports.json?'
'import_type=instructure_csv'), {
'Content-Type': 'application/zip'
}, '')
def test_get_import_status(self):
canvas = SISImport()
sis_import = canvas.get_import_status(self._setup_sis_import())
        self.assertEqual(sis_import.import_id, 1)
        self.assertEqual(sis_import.workflow_state, "imported")
        self.assertEqual(sis_import.progress, "100")
def _setup_sis_import(self):
return SISImportModel(import_id=1)
|
[
"mock.patch.object",
"uw_canvas.sis_import.SISImport",
"uw_canvas.models.SISImport"
] |
[((717, 763), 'mock.patch.object', 'mock.patch.object', (['SISImport', '"""_post_resource"""'], {}), "(SISImport, '_post_resource')\n", (734, 763), False, 'import mock\n'), ((1471, 1517), 'mock.patch.object', 'mock.patch.object', (['SISImport', '"""_post_resource"""'], {}), "(SISImport, '_post_resource')\n", (1488, 1517), False, 'import mock\n'), ((1842, 1888), 'mock.patch.object', 'mock.patch.object', (['SISImport', '"""_post_resource"""'], {}), "(SISImport, '_post_resource')\n", (1859, 1888), False, 'import mock\n'), ((1894, 1940), 'mock.patch.object', 'mock.patch.object', (['SISImport', '"""_build_archive"""'], {}), "(SISImport, '_build_archive')\n", (1911, 1940), False, 'import mock\n'), ((423, 434), 'uw_canvas.sis_import.SISImport', 'SISImport', ([], {}), '()\n', (432, 434), False, 'from uw_canvas.sis_import import SISImport\n'), ((566, 577), 'uw_canvas.sis_import.SISImport', 'SISImport', ([], {}), '()\n', (575, 577), False, 'from uw_canvas.sis_import import SISImport\n'), ((823, 834), 'uw_canvas.sis_import.SISImport', 'SISImport', ([], {}), '()\n', (832, 834), False, 'from uw_canvas.sis_import import SISImport\n'), ((1581, 1592), 'uw_canvas.sis_import.SISImport', 'SISImport', ([], {}), '()\n', (1590, 1592), False, 'from uw_canvas.sis_import import SISImport\n'), ((2049, 2060), 'uw_canvas.sis_import.SISImport', 'SISImport', ([], {}), '()\n', (2058, 2060), False, 'from uw_canvas.sis_import import SISImport\n'), ((2368, 2379), 'uw_canvas.sis_import.SISImport', 'SISImport', ([], {}), '()\n', (2377, 2379), False, 'from uw_canvas.sis_import import SISImport\n'), ((2671, 2698), 'uw_canvas.models.SISImport', 'SISImportModel', ([], {'import_id': '(1)'}), '(import_id=1)\n', (2685, 2698), True, 'from uw_canvas.models import SISImport as SISImportModel\n')]
|
import numpy as np
class KITTICategory(object):
CLASSES = ['Car', 'Pedestrian', 'Cyclist']
CLASS_MEAN_SIZE = {
'Car': np.array([3.88311640418, 1.62856739989, 1.52563191462]),
'Pedestrian': np.array([0.84422524, 0.66068622, 1.76255119]),
'Cyclist': np.array([1.76282397, 0.59706367, 1.73698127]),
}
NUM_SIZE_CLUSTER = len(CLASSES)
    MEAN_SIZE_ARRAY = np.zeros((NUM_SIZE_CLUSTER, 3))  # size clusters
for i in range(NUM_SIZE_CLUSTER):
MEAN_SIZE_ARRAY[i, :] = CLASS_MEAN_SIZE[CLASSES[i]]
|
[
"numpy.array",
"numpy.zeros"
] |
[((400, 431), 'numpy.zeros', 'np.zeros', (['(NUM_SIZE_CLUSTER, 3)'], {}), '((NUM_SIZE_CLUSTER, 3))\n', (408, 431), True, 'import numpy as np\n'), ((139, 194), 'numpy.array', 'np.array', (['[3.88311640418, 1.62856739989, 1.52563191462]'], {}), '([3.88311640418, 1.62856739989, 1.52563191462])\n', (147, 194), True, 'import numpy as np\n'), ((218, 264), 'numpy.array', 'np.array', (['[0.84422524, 0.66068622, 1.76255119]'], {}), '([0.84422524, 0.66068622, 1.76255119])\n', (226, 264), True, 'import numpy as np\n'), ((285, 331), 'numpy.array', 'np.array', (['[1.76282397, 0.59706367, 1.73698127]'], {}), '([1.76282397, 0.59706367, 1.73698127])\n', (293, 331), True, 'import numpy as np\n')]
|
import numpy as np
x = np.ones((2,2))
print("Original array:")
print(x)
print("0 on the border and 1 inside in the array")
x = np.pad(x, pad_width=1, mode='constant', constant_values=0)
print(x)
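# Expected result: a 4x4 array with a border of zeros around the 2x2 block of ones.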
|
[
"numpy.pad",
"numpy.ones"
] |
[((24, 39), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (31, 39), True, 'import numpy as np\n'), ((132, 190), 'numpy.pad', 'np.pad', (['x'], {'pad_width': '(1)', 'mode': '"""constant"""', 'constant_values': '(0)'}), "(x, pad_width=1, mode='constant', constant_values=0)\n", (138, 190), True, 'import numpy as np\n')]
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# +
from pathlib import Path
import geopandas as gpd
import pandas as pd
from shapely.geometry import box
# +
data_dir = Path("data")
FILEPATHS = {
"small_area_boundaries": data_dir
/ "external"
/ "dublin_small_area_boundaries_in_routing_keys.gpkg",
"nta": data_dir
/ "external"
/ "NTA_grid_boundaries"
/ "ERM_E5R13_Outputs_100Grid.shp",
}
# +
def get_geometries_within(left, right):
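    # representative_point() is guaranteed to lie inside its geometry, so the
    # "within" join below matches each left geometry to the right polygon
    # containing that point, avoiding boundary-overlap ambiguity.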
left_representative_point = (
left.geometry.representative_point().rename("geometry").to_frame()
)
return (
gpd.sjoin(left_representative_point, right, op="within")
.drop(columns=["geometry", "index_right"])
.merge(left, left_index=True, right_index=True)
.reset_index(drop=True)
)
# + [markdown]
# # Read input data
#
# - Dublin Boundary
# - NTA Boundaries
# - 2016 Small Area Boundaries
#
# **Notes:**
# - Convert spatial data to ITM (epsg=2157) so both layers share the same Coordinate Reference System
# - **Don't forget to mount Google Drive; otherwise this notebook won't be able to access the data**
# +
nta_grid_boundaries = gpd.read_file(FILEPATHS["nta"]).to_crs(epsg=2157)
# +
dublin_bounding_box = (
gpd.GeoSeries(box(695000, 712500, 740000, 771000), crs=2157)
.rename("geometry")
.to_frame()
)
# +
dublin_nta_grid_boundaries = get_geometries_within(
nta_grid_boundaries, dublin_bounding_box
)
# +
small_area_boundaries = gpd.read_file(FILEPATHS["small_area_boundaries"]).to_crs(
epsg=2157
)
# + [markdown]
# # Amalgamate NTA grid emissions to Small Areas
# + [markdown]
# ## Join Centroids within NTA Grid
# +
dublin_nta_grid_points_in_small_areas = get_geometries_within(
dublin_nta_grid_boundaries,
small_area_boundaries,
).loc[
:,
[
"NOX",
"NO2",
"PM10",
"PM25",
"HC",
"CO",
"CO2",
"Benz",
"Meth",
"Butad",
"small_area",
"geometry",
],
]
# + [markdown]
# ## Convert CO2 to Energy (TFC & TPER)
# +
CO2_TFC = 0.00384527383473325
TFC_TPER = 1.1
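# NOTE: factors as given in the source; CO2_TFC converts the grid CO2 figure
# to total final consumption (TFC) in kWh, and TFC_TPER (1.1) scales final
# consumption up to total primary energy requirement (TPER).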
# +
dublin_nta_grid_points_in_small_areas["TFC_kWh"] = (
dublin_nta_grid_points_in_small_areas["CO2"] * CO2_TFC
)
dublin_nta_grid_points_in_small_areas["TPER_kWh"] = (
dublin_nta_grid_points_in_small_areas["TFC_kWh"] * TFC_TPER
)
# +
total_TFC_GWh = dublin_nta_grid_points_in_small_areas["TFC_kWh"].sum() / 1000000
total_TFC_GWh
# + [markdown]
# # Estimate All-of-Dublin Road Transport Energy
# +
CO2_TFC = 0.00384527383473325
TFC_TPER = 1.1
# +
dublin_nta_grid_boundaries["TFC_kWh"] = dublin_nta_grid_boundaries["CO2"] * CO2_TFC
dublin_nta_grid_boundaries["TPER_kWh"] = (
dublin_nta_grid_boundaries["TFC_kWh"] * TFC_TPER
)
# +
dublin_nta_grid_boundaries["TPER_kWh"].multiply(10 ** -3).sum()
# +
dublin_nta_grid_boundaries
|
[
"pathlib.Path",
"geopandas.sjoin",
"shapely.geometry.box",
"geopandas.read_file"
] |
[((366, 378), 'pathlib.Path', 'Path', (['"""data"""'], {}), "('data')\n", (370, 378), False, 'from pathlib import Path\n'), ((1339, 1370), 'geopandas.read_file', 'gpd.read_file', (["FILEPATHS['nta']"], {}), "(FILEPATHS['nta'])\n", (1352, 1370), True, 'import geopandas as gpd\n'), ((1658, 1707), 'geopandas.read_file', 'gpd.read_file', (["FILEPATHS['small_area_boundaries']"], {}), "(FILEPATHS['small_area_boundaries'])\n", (1671, 1707), True, 'import geopandas as gpd\n'), ((1436, 1471), 'shapely.geometry.box', 'box', (['(695000)', '(712500)', '(740000)', '(771000)'], {}), '(695000, 712500, 740000, 771000)\n', (1439, 1471), False, 'from shapely.geometry import box\n'), ((794, 850), 'geopandas.sjoin', 'gpd.sjoin', (['left_representative_point', 'right'], {'op': '"""within"""'}), "(left_representative_point, right, op='within')\n", (803, 850), True, 'import geopandas as gpd\n')]
|
#!/usr/bin/env python3
# Copyright 2019 <NAME>. All rights reserved.
# Use of this source code is governed by the GNU-GPL
# license that can be found in the LICENSE file.
# Date created: May 21, 2018
from setuptools import setup, find_packages
from extension import __version__
with open('README.md', 'r') as file:
long_description = file.read()
setup(
name='extension',
version=__version__.__version__,
author='<NAME>',
author_email='<EMAIL>',
description='Utilities for python.',
long_description=long_description,
long_description_content_type='text/markdown',
packages=find_packages(),
package_data={
'extension': ['emojis.json', 'resources/*.svg']
},
install_requires=[
'flask>=1.0.2',
'markdown>=3.0.1'
],
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Operating System :: OS Independent',
'Intended Audience :: Developers',
'Natural Language :: English',
]
)
|
[
"setuptools.find_packages"
] |
[((618, 633), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (631, 633), False, 'from setuptools import setup, find_packages\n')]
|
#!/usr/bin/env python3
"""
Test all Mappings
"""
from unittest import TestCase, main
from g2p.mappings import Mapping
from g2p.mappings.create_ipa_mapping import create_mapping
from g2p.transducer import Transducer
class MappingCreationTest(TestCase):
def setUp(self):
self.mappings = [
{"in": "ɑ", "out": "AA"},
{"in": "eː", "out": "EY"},
{"in": "i", "out": "IY"},
{"in": "u", "out": "UW"},
{"in": "tʃ", "out": "CH"},
{"in": "p", "out": "P"},
{"in": "t", "out": "T"},
{"in": "k", "out": "K"},
{"in": "w", "out": "W"},
]
self.target_mapping = Mapping(
self.mappings, in_lang='eng-ipa', out_lang='eng-arpabet', out_delimiter=' ')
def test_unigram_mappings(self):
src_mappings = [
{"in": "ᐃ", "out": "i"},
{"in": "ᐅ", "out": "u"},
{"in": "ᐊ", "out": "a"},
]
src_mapping = Mapping(src_mappings, in_lang='crj', out_lang='crj-ipa')
mapping = create_mapping(src_mapping, self.target_mapping)
transducer = Transducer(mapping)
self.assertEqual(transducer('a').output_string, 'ɑ')
self.assertEqual(transducer('i').output_string, 'i')
self.assertEqual(transducer('u').output_string, 'u')
def test_bigram_mappings(self):
src_mappings = [
{"in": "ᐱ", "out": "pi"},
{"in": "ᑎ", "out": "ti"},
{"in": "ᑭ", "out": "ki"},
]
src_mapping = Mapping(src_mappings, in_lang='crj', out_lang='crj-ipa')
mapping = create_mapping(src_mapping, self.target_mapping)
transducer = Transducer(mapping)
self.assertEqual(transducer('pi').output_string, 'pi')
self.assertEqual(transducer('ti').output_string, 'ti')
self.assertEqual(transducer('ki').output_string, 'ki')
def test_trigram_mappings(self):
src_mappings = [
{"in": "ᒋ", "out": "t͡ʃi"},
{"in": "ᒍ", "out": "t͡ʃu"},
{"in": "ᒐ", "out": "t͡ʃa"},
]
src_mapping = Mapping(src_mappings, in_lang='crj', out_lang='crj-ipa')
mapping = create_mapping(src_mapping, self.target_mapping)
transducer = Transducer(mapping)
self.assertEqual(transducer('t͡ʃi').output_string, 'tʃi')
self.assertEqual(transducer('t͡ʃu').output_string, 'tʃu')
self.assertEqual(transducer('t͡ʃa').output_string, 'tʃɑ')
def test_long_mappings(self):
src_mappings = [
{"in": "ᐧᐯ", "out": "pʷeː"},
{"in": "ᐧᑌ", "out": "tʷeː"},
{"in": "ᐧᑫ", "out": "kʷeː"},
]
src_mapping = Mapping(src_mappings, in_lang='crj', out_lang='crj-ipa')
mapping = create_mapping(src_mapping, self.target_mapping)
transducer = Transducer(mapping)
self.assertEqual(transducer('pʷeː').output_string, 'pweː')
self.assertEqual(transducer('tʷeː').output_string, 'tweː')
self.assertEqual(transducer('kʷeː').output_string, 'kweː')
if __name__ == '__main__':
main()
|
[
"unittest.main",
"g2p.mappings.create_ipa_mapping.create_mapping",
"g2p.mappings.Mapping",
"g2p.transducer.Transducer"
] |
[((3106, 3112), 'unittest.main', 'main', ([], {}), '()\n', (3110, 3112), False, 'from unittest import TestCase, main\n'), ((684, 772), 'g2p.mappings.Mapping', 'Mapping', (['self.mappings'], {'in_lang': '"""eng-ipa"""', 'out_lang': '"""eng-arpabet"""', 'out_delimiter': '""" """'}), "(self.mappings, in_lang='eng-ipa', out_lang='eng-arpabet',\n out_delimiter=' ')\n", (691, 772), False, 'from g2p.mappings import Mapping\n'), ((988, 1044), 'g2p.mappings.Mapping', 'Mapping', (['src_mappings'], {'in_lang': '"""crj"""', 'out_lang': '"""crj-ipa"""'}), "(src_mappings, in_lang='crj', out_lang='crj-ipa')\n", (995, 1044), False, 'from g2p.mappings import Mapping\n'), ((1063, 1111), 'g2p.mappings.create_ipa_mapping.create_mapping', 'create_mapping', (['src_mapping', 'self.target_mapping'], {}), '(src_mapping, self.target_mapping)\n', (1077, 1111), False, 'from g2p.mappings.create_ipa_mapping import create_mapping\n'), ((1133, 1152), 'g2p.transducer.Transducer', 'Transducer', (['mapping'], {}), '(mapping)\n', (1143, 1152), False, 'from g2p.transducer import Transducer\n'), ((1544, 1600), 'g2p.mappings.Mapping', 'Mapping', (['src_mappings'], {'in_lang': '"""crj"""', 'out_lang': '"""crj-ipa"""'}), "(src_mappings, in_lang='crj', out_lang='crj-ipa')\n", (1551, 1600), False, 'from g2p.mappings import Mapping\n'), ((1619, 1667), 'g2p.mappings.create_ipa_mapping.create_mapping', 'create_mapping', (['src_mapping', 'self.target_mapping'], {}), '(src_mapping, self.target_mapping)\n', (1633, 1667), False, 'from g2p.mappings.create_ipa_mapping import create_mapping\n'), ((1689, 1708), 'g2p.transducer.Transducer', 'Transducer', (['mapping'], {}), '(mapping)\n', (1699, 1708), False, 'from g2p.transducer import Transducer\n'), ((2113, 2169), 'g2p.mappings.Mapping', 'Mapping', (['src_mappings'], {'in_lang': '"""crj"""', 'out_lang': '"""crj-ipa"""'}), "(src_mappings, in_lang='crj', out_lang='crj-ipa')\n", (2120, 2169), False, 'from g2p.mappings import Mapping\n'), ((2188, 2236), 'g2p.mappings.create_ipa_mapping.create_mapping', 'create_mapping', (['src_mapping', 'self.target_mapping'], {}), '(src_mapping, self.target_mapping)\n', (2202, 2236), False, 'from g2p.mappings.create_ipa_mapping import create_mapping\n'), ((2258, 2277), 'g2p.transducer.Transducer', 'Transducer', (['mapping'], {}), '(mapping)\n', (2268, 2277), False, 'from g2p.transducer import Transducer\n'), ((2707, 2763), 'g2p.mappings.Mapping', 'Mapping', (['src_mappings'], {'in_lang': '"""crj"""', 'out_lang': '"""crj-ipa"""'}), "(src_mappings, in_lang='crj', out_lang='crj-ipa')\n", (2714, 2763), False, 'from g2p.mappings import Mapping\n'), ((2782, 2830), 'g2p.mappings.create_ipa_mapping.create_mapping', 'create_mapping', (['src_mapping', 'self.target_mapping'], {}), '(src_mapping, self.target_mapping)\n', (2796, 2830), False, 'from g2p.mappings.create_ipa_mapping import create_mapping\n'), ((2852, 2871), 'g2p.transducer.Transducer', 'Transducer', (['mapping'], {}), '(mapping)\n', (2862, 2871), False, 'from g2p.transducer import Transducer\n')]
|
"""Steps for features of Oneprovider's spaces.
"""
from selenium.common.exceptions import NoSuchElementException
from pytest_bdd import parsers, given, when, then
from tests.gui.utils.generic import implicit_wait, repeat_failed
from tests.gui.conftest import SELENIUM_IMPLICIT_WAIT, WAIT_BACKEND
__author__ = "<NAME>, <NAME>"
__copyright__ = "Copyright (C) 2016 ACK CYFRONET AGH"
__license__ = "This software is released under the MIT license cited in " \
"LICENSE.txt"
@repeat_failed(attempts=WAIT_BACKEND, timeout=True)
def _is_home_space(driver, space_name):
spaces = driver.find_elements_by_css_selector('ul.spaces-list '
'li:not([class~="clickable"])')
displayed_name = ''
with implicit_wait(driver, 0.01, SELENIUM_IMPLICIT_WAIT):
for space in spaces:
try:
space.find_element_by_css_selector('.oneicon-space-home')
except NoSuchElementException:
continue
else:
displayed_name = space.find_element_by_css_selector('.item-'
'label')
    assert displayed_name != '', 'no home space icon found in spaces list'
    err_msg = 'home space is {} instead of {}'.format(displayed_name.text,
                                                      space_name)
    assert displayed_name.text == space_name, err_msg
@given(parsers.parse('user of {browser_id} seen that home space icon '
'was displayed next to name of space "{space_name}" '
'in spaces list'))
def g_assert_home_space_is_space_named(selenium, browser_id, space_name):
driver = selenium[browser_id]
_is_home_space(driver, space_name)
@when(parsers.parse('user of {browser_id} sees that home space icon '
'is displayed next to name of space '
'"{space_name}" in spaces list'))
@then(parsers.parse('user of {browser_id} sees that home space icon '
'is displayed next to name of space '
'"{space_name}" in spaces list'))
@when(parsers.parse('user of {browser_id} sees that home space icon '
'has appeared next to displayed '
'name of space "{space_name}" in spaces list'))
@then(parsers.parse('user of {browser_id} sees that home space icon '
'has appeared next to displayed '
'name of space "{space_name}" in spaces list'))
def wt_assert_home_space_is_space_named(selenium, browser_id, space_name):
driver = selenium[browser_id]
_is_home_space(driver, space_name)
|
[
"tests.gui.utils.generic.implicit_wait",
"tests.gui.utils.generic.repeat_failed",
"pytest_bdd.parsers.parse"
] |
[((492, 542), 'tests.gui.utils.generic.repeat_failed', 'repeat_failed', ([], {'attempts': 'WAIT_BACKEND', 'timeout': '(True)'}), '(attempts=WAIT_BACKEND, timeout=True)\n', (505, 542), False, 'from tests.gui.utils.generic import implicit_wait, repeat_failed\n'), ((1383, 1522), 'pytest_bdd.parsers.parse', 'parsers.parse', (['"""user of {browser_id} seen that home space icon was displayed next to name of space "{space_name}" in spaces list"""'], {}), '(\n \'user of {browser_id} seen that home space icon was displayed next to name of space "{space_name}" in spaces list\'\n )\n', (1396, 1522), False, 'from pytest_bdd import parsers, given, when, then\n'), ((1717, 1855), 'pytest_bdd.parsers.parse', 'parsers.parse', (['"""user of {browser_id} sees that home space icon is displayed next to name of space "{space_name}" in spaces list"""'], {}), '(\n \'user of {browser_id} sees that home space icon is displayed next to name of space "{space_name}" in spaces list\'\n )\n', (1730, 1855), False, 'from pytest_bdd import parsers, given, when, then\n'), ((1899, 2037), 'pytest_bdd.parsers.parse', 'parsers.parse', (['"""user of {browser_id} sees that home space icon is displayed next to name of space "{space_name}" in spaces list"""'], {}), '(\n \'user of {browser_id} sees that home space icon is displayed next to name of space "{space_name}" in spaces list\'\n )\n', (1912, 2037), False, 'from pytest_bdd import parsers, given, when, then\n'), ((2081, 2229), 'pytest_bdd.parsers.parse', 'parsers.parse', (['"""user of {browser_id} sees that home space icon has appeared next to displayed name of space "{space_name}" in spaces list"""'], {}), '(\n \'user of {browser_id} sees that home space icon has appeared next to displayed name of space "{space_name}" in spaces list\'\n )\n', (2094, 2229), False, 'from pytest_bdd import parsers, given, when, then\n'), ((2273, 2421), 'pytest_bdd.parsers.parse', 'parsers.parse', (['"""user of {browser_id} sees that home space icon has appeared next to displayed name of space "{space_name}" in spaces list"""'], {}), '(\n \'user of {browser_id} sees that home space icon has appeared next to displayed name of space "{space_name}" in spaces list\'\n )\n', (2286, 2421), False, 'from pytest_bdd import parsers, given, when, then\n'), ((766, 817), 'tests.gui.utils.generic.implicit_wait', 'implicit_wait', (['driver', '(0.01)', 'SELENIUM_IMPLICIT_WAIT'], {}), '(driver, 0.01, SELENIUM_IMPLICIT_WAIT)\n', (779, 817), False, 'from tests.gui.utils.generic import implicit_wait, repeat_failed\n')]
|
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Integration test for list_env.py
GOOGLE_APPLICATION_CREDENTIALS must be set to a Service Account for a project
that has enabled the Monitoring API.
Currently the TEST_PROJECT_ID is hard-coded to run using the project created
for this test, but it could be changed to a different project.
"""
import os
import re
import googleapiclient.discovery
import pytest
import list_resources
PROJECT = os.environ['GCLOUD_PROJECT']
METRIC = 'compute.googleapis.com/instance/cpu/usage_time'
@pytest.fixture(scope='module')
def client():
return googleapiclient.discovery.build('monitoring', 'v3')
@pytest.mark.flaky
def test_list_monitored_resources(client, capsys):
PROJECT_RESOURCE = "projects/{}".format(PROJECT)
list_resources.list_monitored_resource_descriptors(
client, PROJECT_RESOURCE)
stdout, _ = capsys.readouterr()
regex = re.compile(
'An application running', re.I)
assert regex.search(stdout) is not None
@pytest.mark.flaky
def test_list_metrics(client, capsys):
PROJECT_RESOURCE = "projects/{}".format(PROJECT)
list_resources.list_metric_descriptors(
client, PROJECT_RESOURCE, METRIC)
stdout, _ = capsys.readouterr()
regex = re.compile(
u'Delta', re.I)
assert regex.search(stdout) is not None
@pytest.mark.flaky
def test_list_timeseries(client, capsys):
PROJECT_RESOURCE = "projects/{}".format(PROJECT)
list_resources.list_timeseries(
client, PROJECT_RESOURCE, METRIC)
stdout, _ = capsys.readouterr()
regex = re.compile(u'list_timeseries response:\n', re.I)
assert regex.search(stdout) is not None
|
[
"list_resources.list_metric_descriptors",
"pytest.fixture",
"list_resources.list_monitored_resource_descriptors",
"list_resources.list_timeseries",
"re.compile"
] |
[((1054, 1084), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (1068, 1084), False, 'import pytest\n'), ((1291, 1367), 'list_resources.list_monitored_resource_descriptors', 'list_resources.list_monitored_resource_descriptors', (['client', 'PROJECT_RESOURCE'], {}), '(client, PROJECT_RESOURCE)\n', (1341, 1367), False, 'import list_resources\n'), ((1425, 1467), 're.compile', 're.compile', (['"""An application running"""', 're.I'], {}), "('An application running', re.I)\n", (1435, 1467), False, 'import re\n'), ((1638, 1710), 'list_resources.list_metric_descriptors', 'list_resources.list_metric_descriptors', (['client', 'PROJECT_RESOURCE', 'METRIC'], {}), '(client, PROJECT_RESOURCE, METRIC)\n', (1676, 1710), False, 'import list_resources\n'), ((1768, 1794), 're.compile', 're.compile', (['u"""Delta"""', 're.I'], {}), "(u'Delta', re.I)\n", (1778, 1794), False, 'import re\n'), ((1968, 2032), 'list_resources.list_timeseries', 'list_resources.list_timeseries', (['client', 'PROJECT_RESOURCE', 'METRIC'], {}), '(client, PROJECT_RESOURCE, METRIC)\n', (1998, 2032), False, 'import list_resources\n'), ((2090, 2138), 're.compile', 're.compile', (['u"""list_timeseries response:\n"""', 're.I'], {}), "(u'list_timeseries response:\\n', re.I)\n", (2100, 2138), False, 'import re\n')]
|
import cv2
import numpy as np
'''
This block shows how to capture video from the webcam
and display it until 'q' is pressed:
cap = cv2.VideoCapture(0)
cap.set(3,640) #width
cap.set(4,480) #height
cap.set(10,100) #brightness
while True:
success, img = cap.read()
cv2.imshow('Video',img)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
'''
img = cv2.imread(r"D:\Github\python\emilia_small.jpg")
kernal = np.ones((5,5),np.uint8)
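# 5x5 all-ones kernel used as the structuring element for the dilation and
# erosion operations below.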
imgGray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
imgBlur = cv2.GaussianBlur(imgGray,(7,7),0)
imgCanny = cv2.Canny(img,150,100)
imgDilation = cv2.dilate(imgCanny,kernal,iterations=1)
imgEroded = cv2.erode(imgDilation,kernal,iterations=1)
##resizing and cropping
height, length, channels = img.shape
imgResize = cv2.resize(img,(600,400))
#cv2.imshow('resized',imgResize)
imgCropped = imgResize[:int(height/2),:int(length/2)]
cv2.imshow('cropped',imgCropped)
#cv2.imshow("gray",imgGray)
#cv2.imshow("Blur",imgBlur)
#cv2.imshow("Canny",imgCanny)
#cv2.imshow("dilation",imgDilation)
#cv2.imshow("Erode",imgEroded)
cv2.waitKey(0)
|
[
"cv2.GaussianBlur",
"cv2.Canny",
"cv2.dilate",
"cv2.cvtColor",
"cv2.waitKey",
"numpy.ones",
"cv2.imread",
"cv2.erode",
"cv2.imshow",
"cv2.resize"
] |
[((354, 404), 'cv2.imread', 'cv2.imread', (['"""D:\\\\Github\\\\python\\\\emilia_small.jpg"""'], {}), "('D:\\\\Github\\\\python\\\\emilia_small.jpg')\n", (364, 404), False, 'import cv2\n'), ((411, 436), 'numpy.ones', 'np.ones', (['(5, 5)', 'np.uint8'], {}), '((5, 5), np.uint8)\n', (418, 436), True, 'import numpy as np\n'), ((446, 483), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (458, 483), False, 'import cv2\n'), ((493, 529), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['imgGray', '(7, 7)', '(0)'], {}), '(imgGray, (7, 7), 0)\n', (509, 529), False, 'import cv2\n'), ((538, 562), 'cv2.Canny', 'cv2.Canny', (['img', '(150)', '(100)'], {}), '(img, 150, 100)\n', (547, 562), False, 'import cv2\n'), ((575, 617), 'cv2.dilate', 'cv2.dilate', (['imgCanny', 'kernal'], {'iterations': '(1)'}), '(imgCanny, kernal, iterations=1)\n', (585, 617), False, 'import cv2\n'), ((628, 672), 'cv2.erode', 'cv2.erode', (['imgDilation', 'kernal'], {'iterations': '(1)'}), '(imgDilation, kernal, iterations=1)\n', (637, 672), False, 'import cv2\n'), ((745, 772), 'cv2.resize', 'cv2.resize', (['img', '(600, 400)'], {}), '(img, (600, 400))\n', (755, 772), False, 'import cv2\n'), ((858, 891), 'cv2.imshow', 'cv2.imshow', (['"""cropped"""', 'imgCropped'], {}), "('cropped', imgCropped)\n", (868, 891), False, 'import cv2\n'), ((1045, 1059), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1056, 1059), False, 'import cv2\n')]
|
import numpy as np
from .agent import Agent
class RandMinQlearning(Agent):
def __init__(self, env, thres, discount=0.9, learning_rate=0.01, epsilon=0.1):
super().__init__(env, discount, learning_rate, epsilon)
self.name = "RandMin" + str(thres)
self.q = np.random.uniform(low=-1, high=1, size=(self.n_states, self.n_actions))
self.old_values = np.random.uniform(
low=-1, high=1, size=(self.n_states, self.n_actions)
)
self.thres = thres
def choose_best_action(self, state):
return np.argmax(self.old_values[state])
def calculate_diff(self):
return self.env.get_result(self.q, self.discount)
def update(self, state, action, r, ns):
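        # RandMin-style update (as implemented here): self.q always takes the
        # TD step, but self.old_values -- used for both action selection and
        # bootstrapping the target -- only absorbs the new estimate when it
        # decreased, or otherwise with probability self.thres, which dampens
        # overestimation.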
q_estimate = np.max(self.old_values[ns])
td_target = r + self.discount * q_estimate
td_delta = td_target - self.q[state, action]
self.q[state, action] += self.lr * td_delta
if (
self.q[state, action] <= self.old_values[state, action]
or np.random.rand() < self.thres
):
self.old_values[state, action] = self.q[state, action]
|
[
"numpy.random.rand",
"numpy.random.uniform",
"numpy.max",
"numpy.argmax"
] |
[((285, 356), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-1)', 'high': '(1)', 'size': '(self.n_states, self.n_actions)'}), '(low=-1, high=1, size=(self.n_states, self.n_actions))\n', (302, 356), True, 'import numpy as np\n'), ((383, 454), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-1)', 'high': '(1)', 'size': '(self.n_states, self.n_actions)'}), '(low=-1, high=1, size=(self.n_states, self.n_actions))\n', (400, 454), True, 'import numpy as np\n'), ((561, 594), 'numpy.argmax', 'np.argmax', (['self.old_values[state]'], {}), '(self.old_values[state])\n', (570, 594), True, 'import numpy as np\n'), ((750, 777), 'numpy.max', 'np.max', (['self.old_values[ns]'], {}), '(self.old_values[ns])\n', (756, 777), True, 'import numpy as np\n'), ((1030, 1046), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1044, 1046), True, 'import numpy as np\n')]
|
import warnings
from collections import namedtuple
from typing import List, Union, Tuple, Callable
from openeo.util import deep_get
from openeo.internal.jupyter import render_component
class MetadataException(Exception):
pass
class Dimension:
"""Base class for dimensions."""
def __init__(self, type: str, name: str):
self.type = type
self.name = name
def __repr__(self):
return "{c}({f})".format(
c=self.__class__.__name__,
f=", ".join("{k!s}={v!r}".format(k=k, v=v) for (k, v) in self.__dict__.items())
)
def __eq__(self, other):
return self.__class__ == other.__class__ and self.__dict__ == other.__dict__
def rename(self, name) -> 'Dimension':
"""Create new dimension with new name."""
return Dimension(type=self.type, name=name)
def rename_labels(self, target, source) -> 'Dimension':
"""
Rename labels, if the type of dimension allows it.
@param target: List of target labels
@param source: Source labels, or empty list
@return: A new dimension with modified labels, or the same if no change is applied.
"""
raise MetadataException("Trying to rename labels of dimension %s of type %s, which is not supported." % (self.name,self.type))
class SpatialDimension(Dimension):
DEFAULT_CRS = 4326
    def __init__(self, name: str, extent: Union[Tuple[float, float], List[float]], crs: Union[str, int] = DEFAULT_CRS, step=None):
"""
@param name:
@param extent:
@param crs:
@param step: The space between the values. Use null for irregularly spaced steps.
"""
super().__init__(type="spatial", name=name)
self.extent = extent
self.crs = crs
self.step = step
def rename(self, name) -> 'Dimension':
return SpatialDimension(name=name, extent=self.extent, crs=self.crs, step=self.step)
class TemporalDimension(Dimension):
def __init__(self, name: str, extent: Union[Tuple[str, str], List[str]]):
super().__init__(type="temporal", name=name)
self.extent = extent
def rename(self, name) -> 'Dimension':
return TemporalDimension(name=name, extent=self.extent)
# Simple container class for band metadata (name, common name, wavelength in micrometer)
Band = namedtuple("Band", ["name", "common_name", "wavelength_um", "aliases"])
Band.__new__.__defaults__ = (None,)
class BandDimension(Dimension):
def __init__(self, name: str, bands: List[Band]):
super().__init__(type="bands", name=name)
self.bands = bands
@property
def band_names(self) -> List[str]:
return [b.name for b in self.bands]
@property
def band_aliases(self) -> List[List[str]]:
return [b.aliases for b in self.bands]
@property
def common_names(self) -> List[str]:
return [b.common_name for b in self.bands]
def band_index(self, band: Union[int, str]) -> int:
"""
Resolve a given band (common) name/index to band index
:param band: band name, common name or index
:return int: band index
"""
band_names = self.band_names
if isinstance(band, int) and 0 <= band < len(band_names):
return band
elif isinstance(band, str):
common_names = self.common_names
# First try common names if possible
if band in common_names:
return common_names.index(band)
if band in band_names:
return band_names.index(band)
# Check band aliases to still support old band names
aliases = [True if aliases and band in aliases else False for aliases in self.band_aliases]
if any(aliases):
return aliases.index(True)
raise ValueError("Invalid band name/index {b!r}. Valid names: {n!r}".format(b=band, n=band_names))
def band_name(self, band: Union[str, int], allow_common=True) -> str:
"""Resolve (common) name or index to a valid (common) name"""
if isinstance(band, str):
if band in self.band_names:
return band
elif band in self.common_names:
if allow_common:
return band
else:
return self.band_names[self.common_names.index(band)]
elif any([True if aliases and band in aliases else False for aliases in self.band_aliases]):
return self.band_names[self.band_index(band)]
elif isinstance(band, int) and 0 <= band < len(self.bands):
return self.band_names[band]
raise ValueError("Invalid band name/index {b!r}. Valid names: {n!r}".format(b=band, n=self.band_names))
def filter_bands(self, bands: List[Union[int, str]]) -> 'BandDimension':
"""
Construct new BandDimension with subset of bands,
based on given band indices or (common) names
"""
return BandDimension(
name=self.name,
bands=[self.bands[self.band_index(b)] for b in bands]
)
def append_band(self, band: Band) -> 'BandDimension':
"""Create new BandDimension with appended band."""
if band.name in self.band_names:
raise ValueError("Duplicate band {b!r}".format(b=band))
return BandDimension(
name=self.name,
bands=self.bands + [band]
)
def rename_labels(self, target, source) -> 'Dimension':
if not source or len(source) == 0:
source = None
elif len(target) != len(source):
raise ValueError('In rename_labels, the number of labels in target should equal length of source, or the number of original labels in the dimension. Received target labels: %s and source: %s' % (str(target),str(source)))
if source:
new_bands = self.bands.copy()
            for old_name, new_name in zip(source, target):
                band_index = self.band_index(old_name)
                the_band = new_bands[band_index]
                new_bands[band_index] = Band(new_name, the_band.common_name, the_band.wavelength_um, the_band.aliases)
else:
new_bands = []
for new_name in target:
                new_bands.append(Band(name=new_name, common_name=None, wavelength_um=None))
return BandDimension(self.name,new_bands)
class CollectionMetadata:
"""
Wrapper for Image Collection metadata.
Simplifies getting values from deeply nested mappings,
allows additional parsing and normalizing compatibility issues.
Metadata is expected to follow format defined by
https://openeo.org/documentation/1.0/developers/api/reference.html#operation/describe-collection
(with partial support for older versions)
"""
def __init__(self, metadata: dict, dimensions: List[Dimension] = None):
# Original collection metadata (actual cube metadata might be altered through processes)
self._orig_metadata = metadata
self._dimensions = dimensions or self._parse_dimensions(self._orig_metadata)
self._band_dimension = None
self._temporal_dimension = None
for dim in self._dimensions:
# TODO: here we blindly pick last bands or temporal dimension if multiple. Let user choose?
if dim.type == "bands":
self._band_dimension = dim
if dim.type == "temporal":
self._temporal_dimension = dim
@classmethod
def get_or_create(cls, metadata: Union[dict, 'CollectionMetadata', None]) -> 'CollectionMetadata':
"""Get or create CollectionMetadata from given argument."""
if isinstance(metadata, cls):
return metadata
else:
return cls(metadata=metadata or {})
def _clone_and_update(self, metadata: dict = None, dimensions: List[Dimension] = None, **kwargs) -> 'CollectionMetadata':
"""Create a new instance (of same class) with copied/updated fields."""
cls = type(self)
return cls(metadata=metadata or self._orig_metadata, dimensions=dimensions or self._dimensions, **kwargs)
@classmethod
def _parse_dimensions(cls, spec: dict, complain: Callable[[str], None] = warnings.warn) -> List[Dimension]:
"""
Extract data cube dimension metadata from STAC-like description of a collection.
Dimension metadata comes from different places in spec:
- 'cube:dimensions' has dimension names (e.g. 'x', 'y', 't'), dimension extent info
and band names for band dimensions
- 'eo:bands' has more detailed band information like "common" name and wavelength info
This helper tries to normalize/combine these sources.
:param spec: STAC like collection metadata dict
:param complain: handler for warnings
:return list: list of `Dimension` objects
"""
# Dimension info is in `cube:dimensions` (or 0.4-style `properties/cube:dimensions`)
cube_dimensions = (
deep_get(spec, 'cube:dimensions', default=None)
or deep_get(spec, 'properties', 'cube:dimensions', default=None)
or {}
)
if not cube_dimensions:
complain("No cube:dimensions metadata")
dimensions = []
for name, info in cube_dimensions.items():
dim_type = info.get("type")
if dim_type == "spatial":
dimensions.append(SpatialDimension(
name=name, extent=info.get("extent"), crs=info.get("reference_system", SpatialDimension.DEFAULT_CRS), step=info.get("step",None)
))
elif dim_type == "temporal":
dimensions.append(TemporalDimension(name=name, extent=info.get("extent")))
elif dim_type == "bands":
bands = [Band(b, None, None) for b in info.get("values", [])]
if not bands:
complain("No band names in dimension {d!r}".format(d=name))
dimensions.append(BandDimension(name=name, bands=bands))
else:
complain("Unknown dimension type {t!r}".format(t=dim_type))
dimensions.append(Dimension(name=name, type=dim_type))
# Detailed band information: `summaries/eo:bands` (and 0.4 style `properties/eo:bands`)
eo_bands = (
deep_get(spec, "summaries", "eo:bands", default=None)
or deep_get(spec, "properties", "eo:bands", default=None)
)
if eo_bands:
# center_wavelength is in micrometer according to spec
bands_detailed = [Band(b['name'], b.get('common_name'), b.get('center_wavelength'), b.get('aliases'))
for b in eo_bands]
# Update band dimension with more detailed info
band_dimensions = [d for d in dimensions if d.type == "bands"]
if len(band_dimensions) == 1:
dim = band_dimensions[0]
# Update band values from 'cube:dimensions' with more detailed 'eo:bands' info
eo_band_names = [b.name for b in bands_detailed]
cube_dimension_band_names = [b.name for b in dim.bands]
if eo_band_names == cube_dimension_band_names:
dim.bands = bands_detailed
else:
complain("Band name mismatch: {a} != {b}".format(a=cube_dimension_band_names, b=eo_band_names))
elif len(band_dimensions) == 0:
if len(dimensions) == 0:
complain("Assuming name 'bands' for anonymous band dimension.")
dimensions.append(BandDimension(name="bands", bands=bands_detailed))
else:
complain("No 'bands' dimension in 'cube:dimensions' while having 'eo:bands'")
else:
complain("Multiple dimensions of type 'bands'")
return dimensions
def get(self, *args, default=None):
return deep_get(self._orig_metadata, *args, default=default)
@property
def extent(self) -> dict:
# TODO: is this currently used and relevant?
# TODO: check against extent metadata in dimensions
return self._orig_metadata.get('extent')
def dimension_names(self) -> List[str]:
return list(d.name for d in self._dimensions)
def assert_valid_dimension(self, dimension: str) -> str:
"""Make sure given dimension name is valid."""
names = self.dimension_names()
if dimension not in names:
raise ValueError("Invalid dimension {d!r}. Should be one of {n}".format(d=dimension, n=names))
return dimension
def has_band_dimension(self) -> bool:
return isinstance(self._band_dimension, BandDimension)
@property
def band_dimension(self) -> BandDimension:
"""Dimension corresponding to spectral/logic/thematic "bands"."""
if not self.has_band_dimension():
raise MetadataException("No band dimension")
return self._band_dimension
def has_temporal_dimension(self) -> bool:
return isinstance(self._temporal_dimension, TemporalDimension)
@property
def temporal_dimension(self) -> TemporalDimension:
if not self.has_temporal_dimension():
raise MetadataException("No temporal dimension")
return self._temporal_dimension
@property
def spatial_dimensions(self) -> List[SpatialDimension]:
return [d for d in self._dimensions if isinstance(d, SpatialDimension)]
@property
def bands(self) -> List[Band]:
"""Get band metadata as list of Band metadata tuples"""
return self.band_dimension.bands
@property
def band_names(self) -> List[str]:
"""Get band names of band dimension"""
return self.band_dimension.band_names
@property
def band_common_names(self) -> List[str]:
return self.band_dimension.common_names
def get_band_index(self, band: Union[int, str]) -> int:
return self.band_dimension.band_index(band)
def filter_bands(self, band_names: List[Union[int, str]]) -> 'CollectionMetadata':
"""
Create new `CollectionMetadata` with filtered band dimension
:param band_names: list of band names/indices to keep
:return:
"""
assert self.band_dimension
return self._clone_and_update(dimensions=[
d.filter_bands(band_names) if isinstance(d, BandDimension) else d
for d in self._dimensions
])
def append_band(self, band: Band) -> 'CollectionMetadata':
"""
Create new `CollectionMetadata` with given band added to band dimension.
"""
assert self.band_dimension
return self._clone_and_update(dimensions=[
d.append_band(band) if isinstance(d, BandDimension) else d
for d in self._dimensions
])
def rename_labels(self, dimension: str, target: list, source: list = None) -> 'CollectionMetadata':
"""
Renames the labels of the specified dimension from source to target.
:param dimension: Dimension name
:param target: The new names for the labels.
:param source: The names of the labels as they are currently in the data cube.
:return: Updated metadata
"""
self.assert_valid_dimension(dimension)
loc = self.dimension_names().index(dimension)
new_dimensions = self._dimensions.copy()
new_dimensions[loc] = new_dimensions[loc].rename_labels(target, source)
return self._clone_and_update(dimensions=new_dimensions)
def rename_dimension(self, source: str, target: str) -> 'CollectionMetadata':
"""
Rename source dimension into target, preserving other properties
"""
self.assert_valid_dimension(source)
loc = self.dimension_names().index(source)
new_dimensions = self._dimensions.copy()
new_dimensions[loc] = new_dimensions[loc].rename(target)
return self._clone_and_update(dimensions=new_dimensions)
def reduce_dimension(self, dimension_name: str) -> 'CollectionMetadata':
"""Create new metadata object by collapsing/reducing a dimension."""
# TODO: option to keep reduced dimension (with a single value)?
self.assert_valid_dimension(dimension_name)
loc = self.dimension_names().index(dimension_name)
dimensions = self._dimensions[:loc] + self._dimensions[loc + 1:]
return self._clone_and_update(dimensions=dimensions)
def add_dimension(self, name: str, label: Union[str, float], type: str = None) -> 'CollectionMetadata':
"""Create new metadata object with added dimension"""
if type == "bands":
dim = BandDimension(name=name, bands=[Band(label, None, None)])
elif type == "spatial":
dim = SpatialDimension(name=name, extent=[label, label])
elif type == "temporal":
dim = TemporalDimension(name=name, extent=[label, label])
else:
dim = Dimension(type=type or "other", name=name)
return self._clone_and_update(dimensions=self._dimensions + [dim])
def _repr_html_(self):
        return render_component('collection', data=self._orig_metadata)
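# Usage sketch (illustration only): assumes an existing `CollectionMetadata`
# instance `md` whose band dimension carries two bands named "B02" and "B04"
# (hypothetical ids); it exercises only the helpers defined above.
#
#     md.assert_valid_dimension("bands")                       # -> "bands"
#     md2 = md.rename_labels("bands", target=["blue", "red"])  # new metadata
#     md3 = md2.reduce_dimension("bands")                      # drop bands
#     md3.has_band_dimension()                                 # -> False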
|
[
"openeo.util.deep_get",
"openeo.internal.jupyter.render_component",
"collections.namedtuple"
] |
[((2358, 2429), 'collections.namedtuple', 'namedtuple', (['"""Band"""', "['name', 'common_name', 'wavelength_um', 'aliases']"], {}), "('Band', ['name', 'common_name', 'wavelength_um', 'aliases'])\n", (2368, 2429), False, 'from collections import namedtuple\n'), ((12065, 12118), 'openeo.util.deep_get', 'deep_get', (['self._orig_metadata', '*args'], {'default': 'default'}), '(self._orig_metadata, *args, default=default)\n', (12073, 12118), False, 'from openeo.util import deep_get\n'), ((17309, 17365), 'openeo.internal.jupyter.render_component', 'render_component', (['"""collection"""'], {'data': 'self._orig_metadata'}), "('collection', data=self._orig_metadata)\n", (17325, 17365), False, 'from openeo.internal.jupyter import render_component\n'), ((9087, 9134), 'openeo.util.deep_get', 'deep_get', (['spec', '"""cube:dimensions"""'], {'default': 'None'}), "(spec, 'cube:dimensions', default=None)\n", (9095, 9134), False, 'from openeo.util import deep_get\n'), ((9154, 9215), 'openeo.util.deep_get', 'deep_get', (['spec', '"""properties"""', '"""cube:dimensions"""'], {'default': 'None'}), "(spec, 'properties', 'cube:dimensions', default=None)\n", (9162, 9215), False, 'from openeo.util import deep_get\n'), ((10435, 10488), 'openeo.util.deep_get', 'deep_get', (['spec', '"""summaries"""', '"""eo:bands"""'], {'default': 'None'}), "(spec, 'summaries', 'eo:bands', default=None)\n", (10443, 10488), False, 'from openeo.util import deep_get\n'), ((10508, 10562), 'openeo.util.deep_get', 'deep_get', (['spec', '"""properties"""', '"""eo:bands"""'], {'default': 'None'}), "(spec, 'properties', 'eo:bands', default=None)\n", (10516, 10562), False, 'from openeo.util import deep_get\n')]
|
'''
My solution to Triangle Peg Solitaire by <NAME>.
https://www.think-maths.co.uk/coin-puzzle
https://www.youtube.com/watch?v=TEkJMFTyZwM
'''
import random
from math import ceil, sqrt
ROUNDS = 10 ** 3
BOARD_DIMENSION = 4
CELL_COUNT = BOARD_DIMENSION * (BOARD_DIMENSION + 1) // 2
def main():
scores = []
for _ in range(ROUNDS):
game = Game()
while game.makeRandomMove():
pass
pieces = game.pieces()
if pieces <= 1:
scores.append(game.moves)
# Combine double, triple, etc. moves into single tuples.
for moves in scores:
i = 0
while i < len(moves) - 1:
if moves[i][-1] == moves[i + 1][0]:
moves[i] = (*moves[i], moves[i + 1][1], moves[i + 1][2])
moves.pop(i + 1)
else:
i += 1
scores.sort(key=len)
cutoff = 3
print(f'Best {cutoff} scores:')
for moves in scores[:cutoff]:
print(f'{len(moves)}: {moves}')
class Game():
def __init__(self):
self.board = [True] * CELL_COUNT
# self.board[random.randrange(CELL_COUNT)] = False
self.board[1] = False
self.moves = []
def makeRandomMove(self):
moveOptions = []
for from_ in range(CELL_COUNT):
if self.board[from_]:
for over, to in self.validMoves(from_):
moveOptions.append((from_, over, to))
if moveOptions:
chosen = random.choice(moveOptions)
self._move(*chosen)
return chosen
else:
return False
def _move(self, from_, over, to):
self.board[from_] = False
self.board[to] = True
self.board[over] = False
self.moves.append((from_, over, to))
def validMoves(self, from_):
        # Row index from the quadratic formula for n^2 + n - 2x - 2 = 0;
        # ceil(...) - 1 (rather than a plain floor()) zero-indexes it correctly
        # even when the root is an exact integer (e.g. from_ = 0).
rowIndex = ceil((-1 + sqrt(9 + 8 * from_)) / 2) - 1
columnIndex = from_ - rowIndex * (rowIndex + 1) // 2
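        # Worked example (illustration only): with BOARD_DIMENSION = 4 the
        # rows are [0], [1, 2], [3, 4, 5], [6, 7, 8, 9]. For from_ = 7,
        # sqrt(9 + 8 * 7) ~= 8.06, so rowIndex = ceil(7.06 / 2) - 1 = 3 and
        # columnIndex = 7 - 3 * 4 // 2 = 1: the second cell of the bottom row.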
# The values that are added to a cell's index to get the indexes of the cell jumped over and the cell moved to.
possibleMoves = []
if rowIndex - columnIndex > 1:
# Up + right.
possibleMoves.append((-rowIndex, -(rowIndex * 2 - 1)))
# Right.
possibleMoves.append((1, 2))
# Down + right.
possibleMoves.append((rowIndex + 2, rowIndex * 2 + 5))
# Down + left.
possibleMoves.append((rowIndex + 1, rowIndex * 2 + 3))
if columnIndex > 1:
# Left.
possibleMoves.append((-1, -2))
# Up + left.
possibleMoves.append((-(rowIndex + 1), -(rowIndex * 2 + 1)))
# Check that `over` and `to` are not out of the bounds of `board`, `over` is occupied, and `to` is not.
validMoves = []
for overOffset, toOffset in possibleMoves:
over = from_ + overOffset
to = from_ + toOffset
try:
if over >= 0 and self.board[over] and to >= 0 and not self.board[to]:
validMoves.append((over, to))
except IndexError:
pass
return validMoves
def pieces(self):
'''Return the number of pieces remaining in self.board.'''
return sum(self.board)
if __name__ == '__main__':
main()
|
[
"random.choice",
"math.sqrt"
] |
[((1238, 1264), 'random.choice', 'random.choice', (['moveOptions'], {}), '(moveOptions)\n', (1251, 1264), False, 'import random\n'), ((1614, 1633), 'math.sqrt', 'sqrt', (['(9 + 8 * from_)'], {}), '(9 + 8 * from_)\n', (1618, 1633), False, 'from math import ceil, sqrt\n')]
|
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
return requests.get(url).json()
def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
"""
Get the top max_stories posts from HackerNews - https://news.ycombinator.com/
"""
url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
story_ids = requests.get(url).json()[:max_stories]
return [get_hackernews_story(story_id) for story_id in story_ids]
def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
stories = hackernews_top_stories(max_stories)
return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
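# Example session (illustration only; titles and URLs are invented):
#     >>> print(hackernews_top_stories_as_markdown(2))
#     * [Example story one](https://example.com/one)
#     * [Example story two](https://example.com/two)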
|
[
"requests.get"
] |
[((199, 216), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (211, 216), False, 'import requests\n'), ((484, 501), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (496, 501), False, 'import requests\n')]
|
"""
@file: get_circle_point.py
@author: Tatsumi0000
@brief: Obtain the coordinates of every circle simply by dragging a box around the block circles and cross circles.
"""
import cv2.cv2 as cv2
import numpy as np
class GetCirclePoint:
def __init__(self, window_name=None):
"""コンストラクタ
"""
self.CROSS_CIRCLE_POINTS = 16 # 交点サークルの個数
self.BLOCK_CIRCLE_POINTS = 8 # ブロックサークルの個数
self.POINTS_NUMBER = 2 # 座標の個数
self.cc_points = np.empty((self.CROSS_CIRCLE_POINTS, self.POINTS_NUMBER), dtype=int) # 交点サークル
self.bc_points = np.empty((self.BLOCK_CIRCLE_POINTS, self.POINTS_NUMBER), dtype=int) # ブロックサークル
self.ix = self.iy = 0 # 起点となる座標
# サークルを選択するモード.0だと.交点サークルを囲むモード.1だとブロックサークルを囲むモード.2以上だと終了モード
self.circle_mode = 0
self.mode = False # Trueだと青い囲む枠が出てくる
# 各サークルの座標がX,Y座標が入っている辞書型(BlockPointListの丸パクリ)
self.named_points = {
"c00": None, "c10": None, "c20": None, "c30": None,
"b1": None, "b2": None, "b3": None,
"c01": None, "c11": None, "c21": None, "c31": None,
"b4": None, "b5": None,
"c02": None, "c12": None, "c22": None, "c32": None,
"b6": None, "b7": None, "b8": None,
"c03": None, "c13": None, "c23": None, "c33": None,
}
self.window_name = window_name
def add(self):
"""
交点サークル,ブロックサークルの座標を辞書型に代入する.
きちんとソートされていないとめちゃくちゃになるので,取り扱い注意.
:return:
"""
# ブロックサークルの座標を代入.
for i in range(self.BLOCK_CIRCLE_POINTS):
key = 'b{0}'.format(i + 1)
self.named_points[key] = [int(p) for p in self.bc_points[i]]
# 交点サークルの座標を代入.
cross_circle_count = 0
for i in range(4):
for j in range(4):
key = 'c{0}{1}'.format(j, i)
self.named_points[key] = [int(p) for p in self.cc_points[cross_circle_count]]
cross_circle_count += 1
@staticmethod
def draw_points(img, x, y):
"""
座標を描画する
:param img: 画像
:type img: ndarray
:param x: x座標
:type x: int
:param y: y座標
:param y: int
:return None:
"""
cv2.putText(img, '({0}, {1})'.format(x, y), (x, y), cv2.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255))
def draw_all_points(self, img, points, win_name=None):
if win_name is None:
win_name = self.window_name
for i in range(points.shape[0]):
# print("({0}, {1})" .format(cc_points[i, 0], cc_points[i, 1]))
self.draw_points(img, points[i, 0], points[i, 1])
# print("{},{}".format(calc_cc_points[i, 0], calc_cc_points[i, 1]))
img = cv2.circle(img, (points[i, 0], points[i, 1]), 5, (255, 255, 0), -1)
cv2.imshow(win_name, img)
@staticmethod
def calc_points(points, column) -> np.ndarray:
"""
        Division results are truncated (floor division).
        If column is 8 the box is split for block circles; if 16, for cross circles; any other value returns all-zero coordinates.
:param points:
:param column:
:return:
"""
circle_points = np.zeros((column, 2), dtype=int)
        x_distance = points[1, 0] - points[0, 0]  # x span
        y_distance = points[2, 1] - points[0, 1]  # y span
        # print('before {0}'.format(circle_points))
        if column == 8:  # block circles
            # Block circles: split the box in half along each axis.
            # x coordinates
            circle_points[0, 0] = circle_points[3, 0] = circle_points[5, 0] = points[0, 0]
            circle_points[1, 0] = circle_points[6, 0] = x_distance * 1 // 2 + points[0, 0]
            circle_points[2, 0] = circle_points[4, 0] = circle_points[7, 0] = points[1, 0]
            # y coordinates
circle_points[0:4, 1] = points[0, 1]
circle_points[3:5, 1] = y_distance * 1 // 2 + points[0, 1]
circle_points[5:, 1] = points[3, 1]
        elif column == 16:  # cross circles
            # Cross circles: split the box into thirds along each axis.
            # x coordinates
circle_points[0:13:4, 0] = points[0, 0]
circle_points[1:14:4, 0] = x_distance * 1 // 3 + points[0, 0]
circle_points[2:15:4, 0] = x_distance * 2 // 3 + points[0, 0]
circle_points[3:16:4, 0] = points[3, 0]
            # y coordinates
circle_points[0:4, 1] = points[0, 1]
circle_points[4:8, 1] = y_distance * 1 // 3 + points[0, 1]
circle_points[8:12, 1] = y_distance * 2 // 3 + points[0, 1]
circle_points[12:, 1] = points[3, 1]
else:
return circle_points
# print("座標計算結果:{}".format(circle_points))
return circle_points
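    # Worked example for calc_points (illustration only): with corner points
    # [[0, 0], [6, 0], [0, 6], [6, 6]], column == 8 puts the 8 block circle
    # centres on the grid {0, 3, 6} x {0, 3, 6} with the centre cell skipped,
    # while column == 16 puts the 16 cross circle centres on the 4 x 4 grid
    # {0, 2, 4, 6} x {0, 2, 4, 6}.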
@staticmethod
def drag_and_drop_square(event, x, y, flags, param) -> None:
"""
        Draw a rectangle over the region selected by drag and drop on the image.
        :param event: one of the CV_EVENT_* constants
        :param x: x coordinate of the mouse pointer in image coordinates
        :param y: y coordinate of the mouse pointer in image coordinates
        :param flags: bitwise OR of CV_EVENT_FLAG_* constants
        :param param: user-defined parameter passed to the callback
:return: None
"""
window_name, img, get_circle_point = param
        h = img.shape[0]  # image height
        w = img.shape[1]  # image width
        if event == cv2.EVENT_MOUSEMOVE and get_circle_point.circle_mode <= 1:  # mouse moved while in an enclosing mode
            img_copy = np.copy(img)
            h = img_copy.shape[0]  # image height
            w = img_copy.shape[1]  # image width
            # cv2.line(image, (x1, y1), (x2, y2), (r, g, b))
            cv2.line(img_copy, (x, 0), (x, h), (0, 0, 255))  # vertical line
            cv2.line(img_copy, (0, y), (w, y), (0, 0, 255))  # horizontal line
            if get_circle_point.mode:  # while drag-and-drop selection mode is on
                # cv2.rectangle(image, start (x, y), end (x, y), line colour (r, g, b), thickness)
cv2.rectangle(img_copy, (get_circle_point.ix, get_circle_point.iy), (x, y), (255, 0, 0), thickness=2)
cv2.imshow(window_name, img_copy)
        if event == cv2.EVENT_LBUTTONDOWN and get_circle_point.circle_mode <= 1:  # left button pressed
get_circle_point.ix, get_circle_point.iy = x, y
            print('Start point: ({0}, {1})'.format(get_circle_point.ix, get_circle_point.iy))
            get_circle_point.mode = True  # turn drag-and-drop selection mode on
        if event == cv2.EVENT_LBUTTONUP and get_circle_point.circle_mode <= 1:  # left button released
            cv2.rectangle(img, (get_circle_point.ix, get_circle_point.iy), (x, y), (255, 0, 0), thickness=2)  # draw the rectangle
            get_circle_point.draw_points(img, get_circle_point.ix, get_circle_point.iy)  # start corner
            get_circle_point.draw_points(img, x, get_circle_point.iy)  # corner beside the start
            get_circle_point.draw_points(img, get_circle_point.ix, y)  # corner beside the end
            get_circle_point.draw_points(img, x, y)  # end corner
            # square_points = np.empty((4, 2))  # row 0 top-left, row 1 top-right, row 2 bottom-left, row 3 bottom-right
ix, iy = get_circle_point.ix, get_circle_point.iy
            if w / 2 <= ix and h / 2 >= iy:  # start point in the first quadrant
square_points = np.array([[x, iy], [ix, iy], [x, y], [ix, y]])
            elif w / 2 >= ix and h / 2 >= iy:  # start point in the second quadrant
                # print('{}'.format('second quadrant'))
square_points = np.array([[ix, iy], [x, iy], [ix, y], [x, y]])
            elif w / 2 >= ix and h / 2 <= iy:  # start point in the third quadrant
square_points = np.array([[ix, y], [x, y], [ix, iy], [x, iy]])
            else:  # otherwise (start point in the fourth quadrant)
square_points = np.array([[x, y], [ix, y], [x, iy], [ix, iy]])
            if get_circle_point.circle_mode == 0:  # mode: enclosing cross circles
get_circle_point.cc_points = get_circle_point.calc_points(points=square_points, column=16)
get_circle_point.draw_all_points(img=img, points=get_circle_point.cc_points, win_name=window_name)
            elif get_circle_point.circle_mode == 1:  # mode: enclosing block circles
get_circle_point.bc_points = get_circle_point.calc_points(points=square_points, column=8)
get_circle_point.draw_all_points(img=img, points=get_circle_point.bc_points, win_name=window_name)
                # print('before assignment: {0}'.format(get_circle_point.named_points))
                get_circle_point.add()
                # print('after assignment: {0}'.format(get_circle_point.named_points))
else:
                print('m9( ´,_ゝ`)pfft')
get_circle_point.circle_mode += 1
get_circle_point.mode = False # ドラッグ・アンド・ドロップで範囲指定モードをOFF
def run(self):
img = './../img/clip_field.png'
window_name = 'WindowDAYO'
img = cv2.imread(img)
get_circle_point = GetCirclePoint()
cv2.namedWindow(window_name)
cv2.setMouseCallback(window_name, get_circle_point.drag_and_drop_square,
[window_name, img, get_circle_point])
cv2.imshow(window_name, img)
        cv2.moveWindow(window_name, 100, 100)  # put the window at the top left
cv2.waitKey()
print(get_circle_point.named_points)
# A function that is convinced it is the main function.
def main():
img = './../img/clip_field.png'
window_name = 'WindowDAYO'
img = cv2.imread(img)
get_circle_point = GetCirclePoint()
cv2.namedWindow(window_name)
cv2.setMouseCallback(window_name, get_circle_point.drag_and_drop_square,
[window_name, img, get_circle_point])
cv2.imshow(window_name, img)
    cv2.moveWindow(window_name, 100, 100)  # put the window at the top left
cv2.waitKey()
print(get_circle_point.named_points)
cv2.destroyAllWindows()
if __name__ == '__main__':
main()
|
[
"cv2.cv2.namedWindow",
"cv2.cv2.destroyAllWindows",
"cv2.cv2.circle",
"cv2.cv2.waitKey",
"numpy.copy",
"cv2.cv2.line",
"numpy.empty",
"cv2.cv2.rectangle",
"cv2.cv2.setMouseCallback",
"numpy.zeros",
"cv2.cv2.moveWindow",
"numpy.array",
"cv2.cv2.imread",
"cv2.cv2.imshow"
] |
[((8904, 8919), 'cv2.cv2.imread', 'cv2.imread', (['img'], {}), '(img)\n', (8914, 8919), True, 'import cv2.cv2 as cv2\n'), ((8964, 8992), 'cv2.cv2.namedWindow', 'cv2.namedWindow', (['window_name'], {}), '(window_name)\n', (8979, 8992), True, 'import cv2.cv2 as cv2\n'), ((8997, 9112), 'cv2.cv2.setMouseCallback', 'cv2.setMouseCallback', (['window_name', 'get_circle_point.drag_and_drop_square', '[window_name, img, get_circle_point]'], {}), '(window_name, get_circle_point.drag_and_drop_square, [\n window_name, img, get_circle_point])\n', (9017, 9112), True, 'import cv2.cv2 as cv2\n'), ((9137, 9165), 'cv2.cv2.imshow', 'cv2.imshow', (['window_name', 'img'], {}), '(window_name, img)\n', (9147, 9165), True, 'import cv2.cv2 as cv2\n'), ((9170, 9207), 'cv2.cv2.moveWindow', 'cv2.moveWindow', (['window_name', '(100)', '(100)'], {}), '(window_name, 100, 100)\n', (9184, 9207), True, 'import cv2.cv2 as cv2\n'), ((9227, 9240), 'cv2.cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (9238, 9240), True, 'import cv2.cv2 as cv2\n'), ((9286, 9309), 'cv2.cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (9307, 9309), True, 'import cv2.cv2 as cv2\n'), ((406, 473), 'numpy.empty', 'np.empty', (['(self.CROSS_CIRCLE_POINTS, self.POINTS_NUMBER)'], {'dtype': 'int'}), '((self.CROSS_CIRCLE_POINTS, self.POINTS_NUMBER), dtype=int)\n', (414, 473), True, 'import numpy as np\n'), ((509, 576), 'numpy.empty', 'np.empty', (['(self.BLOCK_CIRCLE_POINTS, self.POINTS_NUMBER)'], {'dtype': 'int'}), '((self.BLOCK_CIRCLE_POINTS, self.POINTS_NUMBER), dtype=int)\n', (517, 576), True, 'import numpy as np\n'), ((3057, 3089), 'numpy.zeros', 'np.zeros', (['(column, 2)'], {'dtype': 'int'}), '((column, 2), dtype=int)\n', (3065, 3089), True, 'import numpy as np\n'), ((8379, 8394), 'cv2.cv2.imread', 'cv2.imread', (['img'], {}), '(img)\n', (8389, 8394), True, 'import cv2.cv2 as cv2\n'), ((8447, 8475), 'cv2.cv2.namedWindow', 'cv2.namedWindow', (['window_name'], {}), '(window_name)\n', (8462, 8475), True, 'import cv2.cv2 as cv2\n'), ((8484, 8599), 'cv2.cv2.setMouseCallback', 'cv2.setMouseCallback', (['window_name', 'get_circle_point.drag_and_drop_square', '[window_name, img, get_circle_point]'], {}), '(window_name, get_circle_point.drag_and_drop_square, [\n window_name, img, get_circle_point])\n', (8504, 8599), True, 'import cv2.cv2 as cv2\n'), ((8632, 8660), 'cv2.cv2.imshow', 'cv2.imshow', (['window_name', 'img'], {}), '(window_name, img)\n', (8642, 8660), True, 'import cv2.cv2 as cv2\n'), ((8669, 8706), 'cv2.cv2.moveWindow', 'cv2.moveWindow', (['window_name', '(100)', '(100)'], {}), '(window_name, 100, 100)\n', (8683, 8706), True, 'import cv2.cv2 as cv2\n'), ((8730, 8743), 'cv2.cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (8741, 8743), True, 'import cv2.cv2 as cv2\n'), ((2679, 2746), 'cv2.cv2.circle', 'cv2.circle', (['img', '(points[i, 0], points[i, 1])', '(5)', '(255, 255, 0)', '(-1)'], {}), '(img, (points[i, 0], points[i, 1]), 5, (255, 255, 0), -1)\n', (2689, 2746), True, 'import cv2.cv2 as cv2\n'), ((2759, 2784), 'cv2.cv2.imshow', 'cv2.imshow', (['win_name', 'img'], {}), '(win_name, img)\n', (2769, 2784), True, 'import cv2.cv2 as cv2\n'), ((5148, 5160), 'numpy.copy', 'np.copy', (['img'], {}), '(img)\n', (5155, 5160), True, 'import numpy as np\n'), ((5322, 5369), 'cv2.cv2.line', 'cv2.line', (['img_copy', '(x, 0)', '(x, h)', '(0, 0, 255)'], {}), '(img_copy, (x, 0), (x, h), (0, 0, 255))\n', (5330, 5369), True, 'import cv2.cv2 as cv2\n'), ((5389, 5436), 'cv2.cv2.line', 'cv2.line', (['img_copy', '(0, y)', '(w, y)', '(0, 0, 255)'], {}), '(img_copy, (0, y), (w, y), (0, 0, 255))\n', (5397, 5436), True, 'import cv2.cv2 as cv2\n'), ((5722, 5755), 'cv2.cv2.imshow', 'cv2.imshow', (['window_name', 'img_copy'], {}), '(window_name, img_copy)\n', (5732, 5755), True, 'import cv2.cv2 as cv2\n'), ((6179, 6279), 'cv2.cv2.rectangle', 'cv2.rectangle', (['img', '(get_circle_point.ix, get_circle_point.iy)', '(x, y)', '(255, 0, 0)'], {'thickness': '(2)'}), '(img, (get_circle_point.ix, get_circle_point.iy), (x, y), (255,\n 0, 0), thickness=2)\n', (6192, 6279), True, 'import cv2.cv2 as cv2\n'), ((5608, 5713), 'cv2.cv2.rectangle', 'cv2.rectangle', (['img_copy', '(get_circle_point.ix, get_circle_point.iy)', '(x, y)', '(255, 0, 0)'], {'thickness': '(2)'}), '(img_copy, (get_circle_point.ix, get_circle_point.iy), (x, y),\n (255, 0, 0), thickness=2)\n', (5621, 5713), True, 'import cv2.cv2 as cv2\n'), ((6835, 6881), 'numpy.array', 'np.array', (['[[x, iy], [ix, iy], [x, y], [ix, y]]'], {}), '([[x, iy], [ix, iy], [x, y], [ix, y]])\n', (6843, 6881), True, 'import numpy as np\n'), ((7022, 7068), 'numpy.array', 'np.array', (['[[ix, iy], [x, iy], [ix, y], [x, y]]'], {}), '([[ix, iy], [x, iy], [ix, y], [x, y]])\n', (7030, 7068), True, 'import numpy as np\n'), ((7164, 7210), 'numpy.array', 'np.array', (['[[ix, y], [x, y], [ix, iy], [x, iy]]'], {}), '([[ix, y], [x, y], [ix, iy], [x, iy]])\n', (7172, 7210), True, 'import numpy as np\n'), ((7284, 7330), 'numpy.array', 'np.array', (['[[x, y], [ix, y], [x, iy], [ix, iy]]'], {}), '([[x, y], [ix, y], [x, iy], [ix, iy]])\n', (7292, 7330), True, 'import numpy as np\n')]
|
# --------------------------------------------------------
# LightHuBERT: Lightweight and Configurable Speech Representation Learning with Once-for-All Hidden-Unit BERT (https://arxiv.org/pdf/2203.15610.pdf)
# Github source: https://github.com/mechanicalsea/lighthubert
# Licensed under The MIT License [see LICENSE for details]
# Based on fairseq code bases
# https://github.com/pytorch/fairseq
# --------------------------------------------------------
import math
from typing import Dict, Optional, Tuple
import torch
import torch.nn as nn
from lighthubert.functional.sliding_attn import (
global_attention_forward, slide_window_attention_forward)
from lighthubert.modules.fairseq_modules import quant_noise
from lighthubert.modules.scaling_linear import SLinear
from torch import Tensor
class SMHA(nn.Module):
"""SMHA (Scaling MultiheadAttention): variable input (i.e., output) size and heads number.
where in_embed_dim = out_embed_dim, qkv_embed_dim = 64 * num_heads
wav2vec2:MultiheadAttention(
embed_dim,
num_heads,
dropout=...,
self_attention=True,
)
Module: self.k_proj, self.v_proj, self.q_proj, self.out_proj
None: self.bias_k, self.bias_v
__base__: fairseq.modules.MultiheadAttention
"""
def __init__(
self,
embed_dim,
num_heads,
kdim=None,
vdim=None,
dropout=0.0,
bias=True,
add_bias_kv=False,
add_zero_attn=False,
self_attention=False,
encoder_decoder_attention=False,
q_noise=0.0, # if 0, no quant_noise
qn_block_size=8,
sliding_attn_window = "global",
slide_mode = "stride",
):
super().__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout_module = nn.Dropout(dropout)
self.head_dim = embed_dim // num_heads
assert (
self.head_dim * num_heads == self.embed_dim
), "embed_dim must be divisible by num_heads"
self.scaling = self.head_dim**-0.5
self.self_attention = self_attention
self.encoder_decoder_attention = encoder_decoder_attention
assert not self.self_attention or self.qkv_same_dim, (
"Self-attention requires query, key and " "value to be of the same size"
)
self.k_proj = quant_noise(
SLinear(self.kdim, embed_dim, bias=bias), q_noise, qn_block_size
) # lighthubert component
self.v_proj = quant_noise(
SLinear(self.vdim, embed_dim, bias=bias), q_noise, qn_block_size
) # lighthubert component
self.q_proj = quant_noise(
SLinear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size
) # lighthubert component
self.out_proj = quant_noise(
SLinear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size
) # lighthubert component
if add_bias_kv:
self.bias_k = nn.Parameter(torch.Tensor(1, 1, embed_dim))
self.bias_v = nn.Parameter(torch.Tensor(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self.reset_parameters()
self.onnx_trace = False
# scaling attention
self.slide_mode = slide_mode
self.sliding_attn_window = sliding_attn_window
self.sample_qkv_embed_dim = None
self.sample_num_heads = None
self.sample_in_embed_dim = None
self.sample_attn_swz = "global"
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def reset_parameters(self):
if self.qkv_same_dim:
# Empirically observed the convergence to be much better with
# the scaled initialization
nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))
else:
nn.init.xavier_uniform_(self.k_proj.weight)
nn.init.xavier_uniform_(self.v_proj.weight)
nn.init.xavier_uniform_(self.q_proj.weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if self.out_proj.bias is not None:
nn.init.constant_(self.out_proj.bias, 0.0)
if self.bias_k is not None:
nn.init.xavier_normal_(self.bias_k)
if self.bias_v is not None:
nn.init.xavier_normal_(self.bias_v)
def set_slide_mode(self, slide_mode):
assert slide_mode in ["stride", "mask"]
self.slide_mode = slide_mode
def set_sample_config(
self,
sample_qkv_embed_dim: int,
sample_num_heads: int,
sample_in_embed_dim: int,
sample_attn_swz = "global",
):
if sample_qkv_embed_dim is None:
sample_qkv_embed_dim = self.embed_dim
if sample_num_heads is None:
sample_num_heads = self.num_heads
if sample_in_embed_dim is None:
sample_in_embed_dim = self.embed_dim
if sample_attn_swz is None or sample_attn_swz == "global":
sample_attn_swz = "global"
assert sample_num_heads * 64 == sample_qkv_embed_dim, ValueError(
f"heads num {sample_num_heads} * 64 != qkv dim {sample_qkv_embed_dim}"
)
self.sample_qkv_embed_dim = sample_qkv_embed_dim
self.sample_num_heads = sample_num_heads
self.sample_in_embed_dim = sample_in_embed_dim
self.sample_attn_swz = sample_attn_swz
self._sample_parameters()
def _sample_parameters(self):
self.k_proj.set_sample_config(
self.sample_in_embed_dim, self.sample_qkv_embed_dim
)
self.v_proj.set_sample_config(
self.sample_in_embed_dim, self.sample_qkv_embed_dim
)
self.q_proj.set_sample_config(
self.sample_in_embed_dim, self.sample_qkv_embed_dim
)
self.out_proj.set_sample_config(
self.sample_qkv_embed_dim, self.sample_in_embed_dim
)
def forward(
self,
query,
key: Optional[Tensor],
value: Optional[Tensor],
key_padding_mask: Optional[Tensor] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
need_weights: bool = True,
static_kv: bool = False,
attn_mask: Optional[Tensor] = None,
before_softmax: bool = False,
need_head_weights: bool = False,
) -> Tuple[Tensor, Optional[Tensor]]:
"""Implement via fairseq multihead_attention with longformer's sliding attention window
Args:
query, key, value: (seq_len, batch_size, hidden_dim)
key_padding_mask: if provided, specified padding elements in the key will
be ignored by the attention. This is an binary mask. When the value is True,
the corresponding value on the attention layer will be filled with -inf.
binary ByteTensor of shape `(batch, seq_len)` where padding elements are indicated by ``1``.
attn_mask: 2D or 3D mask that prevents attention to certain positions.
A 2D mask will be broadcasted for all the batches while a 3D mask
allows to specify a different mask for the entries of each batch.
When the value is 1, the corresponding value on the attention
layer will be added with -1e4 (float16) or -1e8 (float32) or -1e2 (float8).
`attn_mask[tgt_i, src_j] = 1` means that when calculating the embedding for `tgt_i`,
we exclude (mask out) `src_j`. This is useful for strided self-attention.
"""
if need_head_weights:
need_weights = True
tgt_len, bsz, embed_dim = query.size()
src_len = tgt_len
if self.sample_in_embed_dim is None:
assert embed_dim == self.embed_dim, f"query dim {embed_dim} != {self.embed_dim}"
else:
assert embed_dim == self.sample_in_embed_dim, f"query dim {embed_dim} != {self.sample_in_embed_dim}"
if key is not None:
src_len, key_bsz, _ = key.size()
if not torch.jit.is_scripting():
assert key_bsz == bsz
assert value is not None
                assert (src_len, bsz) == value.shape[:2]  # tuple comparison; the bare comma made the original a no-op assert
# incremental_state is None
assert incremental_state is None
saved_state = None
if self.self_attention:
q = self.q_proj(query)
k = self.k_proj(query)
v = self.v_proj(query)
elif self.encoder_decoder_attention:
# encoder-decoder attention
q = self.q_proj(query)
if key is None:
assert value is None
k = v = None
else:
k = self.k_proj(key)
v = self.v_proj(key)
else:
assert key is not None and value is not None
q = self.q_proj(query)
k = self.k_proj(key)
v = self.v_proj(value)
q *= self.scaling
if self.bias_k is not None:
assert self.bias_v is not None
k = torch.cat([k, self.bias_k[:,:,:k.size(-1)].repeat(1, bsz, 1)])
v = torch.cat([v, self.bias_v[:,:,:v.size(-1)].repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = torch.cat(
[attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
)
if key_padding_mask is not None:
key_padding_mask = torch.cat(
[
key_padding_mask,
key_padding_mask.new_zeros(key_padding_mask.size(0), 1),
],
dim=1,
)
sample_num_heads = self.num_heads if self.sample_num_heads is None else self.sample_num_heads
q = (
q.contiguous()
.view(tgt_len, bsz * sample_num_heads, self.head_dim)
.transpose(0, 1)
)
if k is not None:
k = (
k.contiguous()
.view(-1, bsz * sample_num_heads, self.head_dim)
.transpose(0, 1)
)
if v is not None:
v = (
v.contiguous()
.view(-1, bsz * sample_num_heads, self.head_dim)
.transpose(0, 1)
)
assert k is not None
assert k.size(1) == src_len
# This is part of a workaround to get around fork/join parallelism
# not supporting Optional types.
if key_padding_mask is not None and key_padding_mask.dim() == 0:
key_padding_mask = None
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
if self.add_zero_attn:
assert v is not None
src_len += 1
k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)
v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)
if attn_mask is not None:
attn_mask = torch.cat(
[attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
)
if key_padding_mask is not None:
key_padding_mask = torch.cat(
[
key_padding_mask,
torch.zeros(key_padding_mask.size(0), 1).type_as(
key_padding_mask
),
],
dim=1,
)
if self.sample_attn_swz == "global":
attn, unnormalized_attn_weights = global_attention_forward(
q, k, v,
attn_mask=attn_mask, key_padding_mask=key_padding_mask,
num_heads=sample_num_heads, dropout_p=self.dropout_module.p,
training=self.training,
)
else:
attn, unnormalized_attn_weights = slide_window_attention_forward(
q, k, v, self.sample_attn_swz,
attn_mask=attn_mask, key_padding_mask=key_padding_mask,
num_heads=sample_num_heads, dropout_p=self.dropout_module.p,
training=self.training, mode=self.slide_mode,
)
assert list(attn.size()) == [bsz * sample_num_heads, tgt_len, self.head_dim]
attn_embed_dim = embed_dim
if self.sample_qkv_embed_dim is not None:
attn_embed_dim = self.sample_qkv_embed_dim
if self.onnx_trace and attn.size(1) == 1:
# when ONNX tracing a single decoder step (sequence length == 1)
# the transpose is a no-op copy before view, thus unnecessary
attn = attn.contiguous().view(tgt_len, bsz, attn_embed_dim)
else:
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, attn_embed_dim)
attn = self.out_proj(attn)
attn_weights: Optional[Tensor] = None
if need_weights and isinstance(unnormalized_attn_weights, Tensor):
wsz = src_len
if self.slide_mode == "stride" and not self.sample_attn_swz == "global":
wsz = self.sample_attn_swz + 1
attn_weights = unnormalized_attn_weights.view(
bsz, sample_num_heads, tgt_len, wsz
)
return attn, attn_weights
def calc_sampled_param_num(self):
total_params = 0
total_params += self.k_proj.calc_sampled_param_num()
total_params += self.v_proj.calc_sampled_param_num()
total_params += self.q_proj.calc_sampled_param_num()
total_params += self.out_proj.calc_sampled_param_num()
return total_params
def get_complexity(self, sequence_length):
total_flops = 0
total_flops += self.k_proj.get_complexity(sequence_length)
total_flops += self.v_proj.get_complexity(sequence_length)
total_flops += self.q_proj.get_complexity(sequence_length)
total_flops += self.out_proj.get_complexity(sequence_length)
# attn
swa = self.sample_attn_swz
if swa == "global" or sequence_length <= swa // 2 + 1:
swa = sequence_length
total_flops += sequence_length * swa * self.sample_qkv_embed_dim
# x
total_flops += sequence_length * swa * self.sample_qkv_embed_dim
return total_flops
if __name__ == "__main__":
m = SMHA(768, 12)
m.set_sample_config(768, 12, 768, 32)
x = torch.empty((2, 5, 768))
m(x, x, x)
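    # Illustration only: resample a narrower subnet. Any sampled config must
    # keep sample_num_heads * 64 == sample_qkv_embed_dim (asserted in
    # set_sample_config); whether SLinear supports these exact sampled sizes
    # is assumed here.
    m.set_sample_config(512, 8, 768, "global")  # 8 heads * 64 = 512
    m(x, x, x)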
|
[
"torch.nn.Dropout",
"math.sqrt",
"lighthubert.modules.scaling_linear.SLinear",
"torch.jit.is_scripting",
"torch.nn.init.xavier_uniform_",
"torch.nn.init.xavier_normal_",
"torch.empty",
"torch.nn.init.constant_",
"lighthubert.functional.sliding_attn.slide_window_attention_forward",
"torch.Tensor",
"lighthubert.functional.sliding_attn.global_attention_forward"
] |
[((14871, 14895), 'torch.empty', 'torch.empty', (['(2, 5, 768)'], {}), '((2, 5, 768))\n', (14882, 14895), False, 'import torch\n'), ((2024, 2043), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (2034, 2043), True, 'import torch.nn as nn\n'), ((4413, 4458), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['self.out_proj.weight'], {}), '(self.out_proj.weight)\n', (4436, 4458), True, 'import torch.nn as nn\n'), ((2582, 2622), 'lighthubert.modules.scaling_linear.SLinear', 'SLinear', (['self.kdim', 'embed_dim'], {'bias': 'bias'}), '(self.kdim, embed_dim, bias=bias)\n', (2589, 2622), False, 'from lighthubert.modules.scaling_linear import SLinear\n'), ((2728, 2768), 'lighthubert.modules.scaling_linear.SLinear', 'SLinear', (['self.vdim', 'embed_dim'], {'bias': 'bias'}), '(self.vdim, embed_dim, bias=bias)\n', (2735, 2768), False, 'from lighthubert.modules.scaling_linear import SLinear\n'), ((2874, 2914), 'lighthubert.modules.scaling_linear.SLinear', 'SLinear', (['embed_dim', 'embed_dim'], {'bias': 'bias'}), '(embed_dim, embed_dim, bias=bias)\n', (2881, 2914), False, 'from lighthubert.modules.scaling_linear import SLinear\n'), ((3023, 3063), 'lighthubert.modules.scaling_linear.SLinear', 'SLinear', (['embed_dim', 'embed_dim'], {'bias': 'bias'}), '(embed_dim, embed_dim, bias=bias)\n', (3030, 3063), False, 'from lighthubert.modules.scaling_linear import SLinear\n'), ((4248, 4291), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['self.k_proj.weight'], {}), '(self.k_proj.weight)\n', (4271, 4291), True, 'import torch.nn as nn\n'), ((4304, 4347), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['self.v_proj.weight'], {}), '(self.v_proj.weight)\n', (4327, 4347), True, 'import torch.nn as nn\n'), ((4360, 4403), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['self.q_proj.weight'], {}), '(self.q_proj.weight)\n', (4383, 4403), True, 'import torch.nn as nn\n'), ((4514, 4556), 'torch.nn.init.constant_', 'nn.init.constant_', (['self.out_proj.bias', '(0.0)'], {}), '(self.out_proj.bias, 0.0)\n', (4531, 4556), True, 'import torch.nn as nn\n'), ((4605, 4640), 'torch.nn.init.xavier_normal_', 'nn.init.xavier_normal_', (['self.bias_k'], {}), '(self.bias_k)\n', (4627, 4640), True, 'import torch.nn as nn\n'), ((4689, 4724), 'torch.nn.init.xavier_normal_', 'nn.init.xavier_normal_', (['self.bias_v'], {}), '(self.bias_v)\n', (4711, 4724), True, 'import torch.nn as nn\n'), ((12051, 12235), 'lighthubert.functional.sliding_attn.global_attention_forward', 'global_attention_forward', (['q', 'k', 'v'], {'attn_mask': 'attn_mask', 'key_padding_mask': 'key_padding_mask', 'num_heads': 'sample_num_heads', 'dropout_p': 'self.dropout_module.p', 'training': 'self.training'}), '(q, k, v, attn_mask=attn_mask, key_padding_mask=\n key_padding_mask, num_heads=sample_num_heads, dropout_p=self.\n dropout_module.p, training=self.training)\n', (12075, 12235), False, 'from lighthubert.functional.sliding_attn import global_attention_forward, slide_window_attention_forward\n'), ((12366, 12605), 'lighthubert.functional.sliding_attn.slide_window_attention_forward', 'slide_window_attention_forward', (['q', 'k', 'v', 'self.sample_attn_swz'], {'attn_mask': 'attn_mask', 'key_padding_mask': 'key_padding_mask', 'num_heads': 'sample_num_heads', 'dropout_p': 'self.dropout_module.p', 'training': 'self.training', 'mode': 'self.slide_mode'}), '(q, k, v, self.sample_attn_swz, attn_mask=\n attn_mask, key_padding_mask=key_padding_mask, num_heads=\n sample_num_heads, dropout_p=self.dropout_module.p, training=self.\n training, mode=self.slide_mode)\n', (12396, 12605), False, 'from lighthubert.functional.sliding_attn import global_attention_forward, slide_window_attention_forward\n'), ((3186, 3215), 'torch.Tensor', 'torch.Tensor', (['(1)', '(1)', 'embed_dim'], {}), '(1, 1, embed_dim)\n', (3198, 3215), False, 'import torch\n'), ((3256, 3285), 'torch.Tensor', 'torch.Tensor', (['(1)', '(1)', 'embed_dim'], {}), '(1, 1, embed_dim)\n', (3268, 3285), False, 'import torch\n'), ((8467, 8491), 'torch.jit.is_scripting', 'torch.jit.is_scripting', ([], {}), '()\n', (8489, 8491), False, 'import torch\n'), ((4050, 4062), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (4059, 4062), False, 'import math\n'), ((4129, 4141), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (4138, 4141), False, 'import math\n'), ((4208, 4220), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (4217, 4220), False, 'import math\n')]
|
#!/usr/bin/env python
# Copyright (C) 2009 <NAME>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA.
# Integrated terminal GUI and shell.
import cgi
import os
import fcntl
import signal
import struct
import termios
import time
import traceback
import gobject
import gtk
import vte
import pyrepl.unix_console
import errorgui
import jobcontrol
import shell
import shell_event
import shell_pyrepl
def openpty():
master_fd, slave_fd = os.openpty()
return os.fdopen(master_fd, "w"), os.fdopen(slave_fd, "w")
class VTEConsole(pyrepl.unix_console.UnixConsole):
def __init__(self, terminal):
self._terminal = terminal
pyrepl.unix_console.UnixConsole.__init__(self, f_in=None, term="xterm")
# TODO: Don't use __ attributes in UnixConsole
def flushoutput(self):
for text, iscode in self._UnixConsole__buffer:
self._terminal.feed(text.encode(self.encoding))
del self._UnixConsole__buffer[:]
def _update_size(self):
pass
class JobMessageOutput(object):
def __init__(self, terminal):
self._terminal = terminal
def write(self, data):
self._terminal.feed(data.replace("\n", "\n\r"))
def forward_output_to_terminal(master_fd, terminal):
def on_avail(*args):
try:
data = os.read(master_fd.fileno(), 1024)
except OSError:
return False
else:
terminal.feed(data)
return len(data) > 0
gobject.io_add_watch(
master_fd.fileno(), gobject.IO_IN | gobject.IO_HUP | gobject.IO_NVAL,
on_avail)
def set_terminal_size(tty_fd, width, height):
fcntl.ioctl(tty_fd, termios.TIOCSWINSZ,
struct.pack("HHHH", height, width, 0, 0))
# Tango theme, from gnome-terminal's terminal-profile.c.
colours = [
(0x2e2e, 0x3434, 0x3636),
(0xcccc, 0x0000, 0x0000),
(0x4e4e, 0x9a9a, 0x0606),
(0xc4c4, 0xa0a0, 0x0000),
(0x3434, 0x6565, 0xa4a4),
(0x7575, 0x5050, 0x7b7b),
(0x0606, 0x9820, 0x9a9a),
(0xd3d3, 0xd7d7, 0xcfcf),
(0x5555, 0x5757, 0x5353),
(0xefef, 0x2929, 0x2929),
(0x8a8a, 0xe2e2, 0x3434),
(0xfcfc, 0xe9e9, 0x4f4f),
(0x7272, 0x9f9f, 0xcfcf),
(0xadad, 0x7f7f, 0xa8a8),
(0x3434, 0xe2e2, 0xe2e2),
(0xeeee, 0xeeee, 0xecec),
]
# Constants apparently missing from Python bindings.
VTE_ERASE_AUTO = 0
VTE_ERASE_ASCII_BACKSPACE = 1
VTE_ERASE_ASCII_DELETE = 2
VTE_ERASE_DELETE_SEQUENCE = 3
VTE_ERASE_TTY = 4
class TerminalWidget(object):
def __init__(self, parts):
self._terminal = vte.Terminal()
# set_pty() seems to set up backspace, but we're not using it.
# Need ASCII_DELETE rather than ASCII_BACKSPACE if we want
# Alt-Backspace to work.
self._terminal.set_backspace_binding(VTE_ERASE_ASCII_DELETE)
self._writer = JobMessageOutput(self._terminal)
self._console = VTEConsole(self._terminal)
self.title = shell_event.ObservableCell("")
parts["job_output"] = self._writer
parts["job_tty"] = None
parts["job_spawner"] = None # There is no single job spawner.
environ = os.environ.copy()
environ["TERM"] = "xterm"
parts.setdefault("environ", environ)
parts.setdefault("real_cwd", shell.LocalCwdTracker())
self._shell = shell.Shell(parts)
self._reader = shell_pyrepl.Reader(
self._shell.get_prompt, self._shell.completer, self._console)
self._current_reader = None
self._current_resizer = lambda: None
self._read_pending = lambda: None
self._read_input()
self._shell.job_controller.add_done_handler(self._job_done)
self._terminal.connect("commit", self._on_user_input)
self._terminal.connect("size_allocate", self._on_size_change)
scrollbar = gtk.VScrollbar()
scrollbar.set_adjustment(self._terminal.get_adjustment())
self._hbox = gtk.HBox()
self._hbox.pack_start(self._terminal, expand=True, fill=True)
self._hbox.pack_start(scrollbar, expand=False)
foreground = gtk.gdk.Color(0, 0, 0)
background = gtk.gdk.Color(0xffff, 0xffff, 0xffff)
palette = [gtk.gdk.Color(*colour) for colour in colours]
self._terminal.set_colors(foreground, background, palette)
self._terminal.set_scrollback_lines(4000)
# VTE widget's default includes no punctuation.
self._terminal.set_word_chars("-A-Za-z0-9,./?%&#:_")
self._hbox.show_all()
self._on_finished = shell_event.EventDistributor()
self.add_finished_handler = self._on_finished.add
self._on_attention = shell_event.EventDistributor()
self.add_attention_handler = self._on_attention.add
def clone(self):
return TerminalWidget({"environ": self._shell.environ.copy(),
"real_cwd": self._shell.real_cwd.copy(),
"history": self._shell.history})
def set_hints(self, window):
pad_x, pad_y = self._terminal.get_padding()
char_x = self._terminal.get_char_width()
char_y = self._terminal.get_char_height()
window.set_geometry_hints(
self._terminal,
min_width=pad_x + char_x * 2,
min_height=pad_y + char_y * 2,
max_width=-1, max_height=-1,
base_width=pad_x, base_height=pad_y,
width_inc=char_x, height_inc=char_y,
min_aspect=-1, max_aspect=-1)
window.set_focus(self._terminal)
def get_widget(self):
return self._hbox
def get_terminal_widget(self):
return self._terminal
def _read_input(self):
self._shell.job_controller.print_messages()
self._reader.prepare()
self._reader.refresh()
self._current_reader = self._on_readline_input
self._current_resizer = lambda: None
self.title.set(self._shell.get_title())
def _on_user_input(self, widget_unused, data, size):
self._current_reader(data)
def _on_size_change(self, *args):
self._console.width = self._terminal.get_column_count()
self._console.height = self._terminal.get_row_count()
self._current_resizer()
def _on_readline_input(self, data):
# This is pretty ugly. This mixture of push and pull driven
# styles doesn't work very well.
for key in data:
self._console.event_queue.push(key)
while not self._console.event_queue.empty():
self._reader.clear_error()
event = self._console.event_queue.get()
if event is not None:
self._reader.input_trans.push(event)
cmd = self._reader.input_trans.get()
if cmd is not None:
try:
self._reader.do_cmd(cmd)
except EOFError:
self._on_finished.send()
if self._reader.finished:
self._reader.restore()
self._process_input(self._reader.get_buffer())
def _process_input(self, line):
master_fd, slave_fd = openpty()
# Setting O_NONBLOCK shouldn't be necessary, but poll() will
# sometimes report the FD as ready to read when reading it
# will block.
fcntl.fcntl(master_fd, fcntl.F_SETFL,
fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
forward_output_to_terminal(master_fd, self._terminal)
def on_input(data):
os.write(master_fd.fileno(), data)
def update_size():
set_terminal_size(slave_fd, self._terminal.get_column_count(),
self._terminal.get_row_count())
def to_foreground():
self._current_reader = on_input
self._current_resizer = update_size
update_size()
def read_pending():
# Read pending data in case we received the process's exit
# status before reading from the tty in the main loop.
try:
data = os.read(master_fd.fileno(), 4096)
except OSError:
pass
else:
self._terminal.feed(data)
fds = {0: slave_fd, 1: slave_fd, 2: slave_fd}
to_foreground()
# TODO: Handle reading pending data from re-foregrounded jobs.
self._read_pending = read_pending
job_spawner = jobcontrol.SessionJobSpawner(
self._shell.wait_dispatcher, self._shell.job_controller, slave_fd,
to_foreground)
self._shell.job_controller.stop_waiting()
try:
self._shell.run_job_command(line, fds, job_spawner)
except Exception:
self._writer.write("".join(traceback.format_exc()))
self._shell.job_controller.check_for_done()
def _job_done(self):
self._on_attention.send()
self._read_pending()
self._read_input()
def get_menu_items(self):
item = gtk.MenuItem("Job To Background")
item.connect("activate", lambda *args: self._read_input())
return [item]
# Alert the user to completed commands that occur this much time after
# the last input.
ATTENTION_INPUT_DELAY = 1 # seconds
class TabLabel(object):
def __init__(self, text_obs):
self._attention = False
self._text = text_obs
self.widget = gtk.Label(text_obs.get())
text_obs.add_observer(self._update)
def _update(self):
text = cgi.escape(self._text.get())
if self._attention:
text = "<b>%s</b>" % text
self.widget.set_markup(text)
def set_attention(self, attention):
self._attention = attention
self._update()
class TerminalWindow(object):
def __init__(self, terminal):
self._tabset = gtk.Notebook()
self._tab_map = {}
self._window = gtk.Window()
self._window.add(self._tabset)
self._title = shell_event.ObservableRedirector(
shell_event.ObservableCell(""))
self._title.add_observer(
lambda: self._window.set_title(self._title.get()))
self._add_tab(terminal)
self._tabset.set_show_border(False)
self._tabset.set_property("homogeneous", True)
self._tabset.set_scrollable(True)
self._tabset.show_all()
terminal.set_hints(self._window)
self._window.connect("hide", self._on_hidden)
self._window.connect("key_press_event", self._clear_attention)
self._window.connect("focus_in_event", self._clear_attention)
self._tabset.connect("switch_page", self._on_switch_tab)
def _on_hidden(self, widged_unused):
self._window.destroy()
if not any(window.get_property("visible")
for window in gtk.window_list_toplevels()):
gtk.main_quit()
def _get_current_tab(self):
tab_widget = self._tabset.get_nth_page(self._tabset.get_current_page())
return self._tab_map[tab_widget]
def _clear_attention(self, *args):
self._get_current_tab()["clear_attention"]()
self._window.set_urgency_hint(False)
return False
def _on_switch_tab(self, unused1, unused2, index):
tab = self._tab_map[self._tabset.get_nth_page(index)]
tab["clear_attention"]()
self._title.set(tab["terminal"].title)
self._window.set_urgency_hint(False)
def _on_button_press(self, widget_unused, event):
self._clear_attention()
if event.button == 3:
self._make_menu().popup(None, None, None, event.button, event.time)
return True
return False
def _make_menu(self):
tab = self._get_current_tab()["terminal"]
menu = gtk.Menu()
item = gtk.MenuItem("Open _Terminal")
def new_window(*args):
TerminalWindow(tab.clone()).get_widget().show_all()
item.connect("activate", new_window)
menu.add(item)
item = gtk.MenuItem("Open Ta_b")
item.connect("activate", lambda *args: self._add_tab(tab.clone()))
menu.add(item)
for item in tab.get_menu_items():
menu.add(item)
menu.show_all()
return menu
def _update_tabs(self):
self._tabset.set_show_tabs(len(self._tab_map) > 1)
def _add_tab(self, terminal):
tab_widget = terminal.get_widget()
def clear_attention():
label.set_attention(False)
tab["last_input_time"] = time.time()
tab = {"terminal": terminal,
"clear_attention": clear_attention,
"last_input_time": time.time()}
self._tab_map[tab_widget] = tab
self._update_tabs()
self._title.set(terminal.title)
label = TabLabel(terminal.title)
label.widget.set_alignment(0, 0.5)
label.widget.set_ellipsize(True)
index = self._tabset.append_page(tab_widget, label.widget)
self._tabset.set_tab_label_packing(tab_widget, expand=True, fill=True,
pack_type=gtk.PACK_START)
self._tabset.set_current_page(index)
self._tabset.set_tab_reorderable(tab_widget, True)
terminal.get_terminal_widget().connect("button_press_event",
self._on_button_press)
terminal.get_terminal_widget().connect(
"popup_menu",
lambda widget: self._make_menu().popup(None, None, None, 0, 0))
def remove_tab():
self._tabset.remove_page(self._tabset.page_num(tab_widget))
del self._tab_map[tab_widget]
self._update_tabs()
if len(self._tab_map) == 0:
self._window.destroy()
def set_attention():
if time.time() > tab["last_input_time"] + ATTENTION_INPUT_DELAY:
self._window.set_urgency_hint(True)
# Only highlight tabs other than the current one.
if self._get_current_tab() != tab:
label.set_attention(True)
terminal.add_finished_handler(remove_tab)
terminal.add_attention_handler(set_attention)
def get_widget(self):
return self._window
def make_terminal(parts):
return TerminalWindow(TerminalWidget(parts))
def main():
signal.signal(signal.SIGHUP, signal.SIG_IGN)
gtk.window_set_default_icon_name("gnome-terminal")
parts = {"history": shell.History()}
make_terminal(parts).get_widget().show_all()
errorgui.set_excepthook()
gtk.main()
if __name__ == "__main__":
main()
|
[
"vte.Terminal",
"os.openpty",
"shell_event.EventDistributor",
"os.environ.copy",
"gtk.Window",
"shell_event.ObservableCell",
"shell.History",
"errorgui.set_excepthook",
"gtk.main_quit",
"struct.pack",
"gtk.gdk.Color",
"traceback.format_exc",
"gtk.window_set_default_icon_name",
"shell.Shell",
"gtk.MenuItem",
"gtk.window_list_toplevels",
"gtk.Menu",
"gtk.main",
"gtk.Notebook",
"signal.signal",
"jobcontrol.SessionJobSpawner",
"fcntl.fcntl",
"gtk.HBox",
"gtk.VScrollbar",
"shell.LocalCwdTracker",
"shell_pyrepl.Reader",
"time.time",
"os.fdopen"
] |
[((1100, 1112), 'os.openpty', 'os.openpty', ([], {}), '()\n', (1110, 1112), False, 'import os\n'), ((14954, 14998), 'signal.signal', 'signal.signal', (['signal.SIGHUP', 'signal.SIG_IGN'], {}), '(signal.SIGHUP, signal.SIG_IGN)\n', (14967, 14998), False, 'import signal\n'), ((15003, 15053), 'gtk.window_set_default_icon_name', 'gtk.window_set_default_icon_name', (['"""gnome-terminal"""'], {}), "('gnome-terminal')\n", (15035, 15053), False, 'import gtk\n'), ((15148, 15173), 'errorgui.set_excepthook', 'errorgui.set_excepthook', ([], {}), '()\n', (15171, 15173), False, 'import errorgui\n'), ((15178, 15188), 'gtk.main', 'gtk.main', ([], {}), '()\n', (15186, 15188), False, 'import gtk\n'), ((1124, 1149), 'os.fdopen', 'os.fdopen', (['master_fd', '"""w"""'], {}), "(master_fd, 'w')\n", (1133, 1149), False, 'import os\n'), ((1151, 1175), 'os.fdopen', 'os.fdopen', (['slave_fd', '"""w"""'], {}), "(slave_fd, 'w')\n", (1160, 1175), False, 'import os\n'), ((2346, 2386), 'struct.pack', 'struct.pack', (['"""HHHH"""', 'height', 'width', '(0)', '(0)'], {}), "('HHHH', height, width, 0, 0)\n", (2357, 2386), False, 'import struct\n'), ((3213, 3227), 'vte.Terminal', 'vte.Terminal', ([], {}), '()\n', (3225, 3227), False, 'import vte\n'), ((3596, 3626), 'shell_event.ObservableCell', 'shell_event.ObservableCell', (['""""""'], {}), "('')\n", (3622, 3626), False, 'import shell_event\n'), ((3790, 3807), 'os.environ.copy', 'os.environ.copy', ([], {}), '()\n', (3805, 3807), False, 'import os\n'), ((3971, 3989), 'shell.Shell', 'shell.Shell', (['parts'], {}), '(parts)\n', (3982, 3989), False, 'import shell\n'), ((4013, 4099), 'shell_pyrepl.Reader', 'shell_pyrepl.Reader', (['self._shell.get_prompt', 'self._shell.completer', 'self._console'], {}), '(self._shell.get_prompt, self._shell.completer, self.\n _console)\n', (4032, 4099), False, 'import shell_pyrepl\n'), ((4479, 4495), 'gtk.VScrollbar', 'gtk.VScrollbar', ([], {}), '()\n', (4493, 4495), False, 'import gtk\n'), ((4583, 4593), 'gtk.HBox', 'gtk.HBox', ([], {}), '()\n', (4591, 4593), False, 'import gtk\n'), ((4740, 4762), 'gtk.gdk.Color', 'gtk.gdk.Color', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (4753, 4762), False, 'import gtk\n'), ((4784, 4818), 'gtk.gdk.Color', 'gtk.gdk.Color', (['(65535)', '(65535)', '(65535)'], {}), '(65535, 65535, 65535)\n', (4797, 4818), False, 'import gtk\n'), ((5179, 5209), 'shell_event.EventDistributor', 'shell_event.EventDistributor', ([], {}), '()\n', (5207, 5209), False, 'import shell_event\n'), ((5297, 5327), 'shell_event.EventDistributor', 'shell_event.EventDistributor', ([], {}), '()\n', (5325, 5327), False, 'import shell_event\n'), ((9073, 9188), 'jobcontrol.SessionJobSpawner', 'jobcontrol.SessionJobSpawner', (['self._shell.wait_dispatcher', 'self._shell.job_controller', 'slave_fd', 'to_foreground'], {}), '(self._shell.wait_dispatcher, self._shell.\n job_controller, slave_fd, to_foreground)\n', (9101, 9188), False, 'import jobcontrol\n'), ((9640, 9673), 'gtk.MenuItem', 'gtk.MenuItem', (['"""Job To Background"""'], {}), "('Job To Background')\n", (9652, 9673), False, 'import gtk\n'), ((10466, 10480), 'gtk.Notebook', 'gtk.Notebook', ([], {}), '()\n', (10478, 10480), False, 'import gtk\n'), ((10531, 10543), 'gtk.Window', 'gtk.Window', ([], {}), '()\n', (10541, 10543), False, 'import gtk\n'), ((12390, 12400), 'gtk.Menu', 'gtk.Menu', ([], {}), '()\n', (12398, 12400), False, 'import gtk\n'), ((12416, 12446), 'gtk.MenuItem', 'gtk.MenuItem', (['"""Open _Terminal"""'], {}), "('Open _Terminal')\n", (12428, 12446), False, 'import gtk\n'), ((12625, 12650), 'gtk.MenuItem', 'gtk.MenuItem', (['"""Open Ta_b"""'], {}), "('Open Ta_b')\n", (12637, 12650), False, 'import gtk\n'), ((15078, 15093), 'shell.History', 'shell.History', ([], {}), '()\n', (15091, 15093), False, 'import shell\n'), ((3924, 3947), 'shell.LocalCwdTracker', 'shell.LocalCwdTracker', ([], {}), '()\n', (3945, 3947), False, 'import shell\n'), ((4841, 4863), 'gtk.gdk.Color', 'gtk.gdk.Color', (['*colour'], {}), '(*colour)\n', (4854, 4863), False, 'import gtk\n'), ((10651, 10681), 'shell_event.ObservableCell', 'shell_event.ObservableCell', (['""""""'], {}), "('')\n", (10677, 10681), False, 'import shell_event\n'), ((11484, 11499), 'gtk.main_quit', 'gtk.main_quit', ([], {}), '()\n', (11497, 11499), False, 'import gtk\n'), ((13136, 13147), 'time.time', 'time.time', ([], {}), '()\n', (13145, 13147), False, 'import time\n'), ((13271, 13282), 'time.time', 'time.time', ([], {}), '()\n', (13280, 13282), False, 'import time\n'), ((8003, 8040), 'fcntl.fcntl', 'fcntl.fcntl', (['master_fd', 'fcntl.F_GETFL'], {}), '(master_fd, fcntl.F_GETFL)\n', (8014, 8040), False, 'import fcntl\n'), ((14422, 14433), 'time.time', 'time.time', ([], {}), '()\n', (14431, 14433), False, 'import time\n'), ((9401, 9423), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (9421, 9423), False, 'import traceback\n'), ((11442, 11469), 'gtk.window_list_toplevels', 'gtk.window_list_toplevels', ([], {}), '()\n', (11467, 11469), False, 'import gtk\n')]
|
import sys
import os
import shutil
import urllib.error
import urllib.request
import zipfile
# Download a file.
def fileDownload(url,filepath):
try:
urllib.request.urlretrieve(url, filepath)
return 0
    except urllib.error.URLError as e:  # urllib.error is a module; catch its URLError
print(e)
print("Failed to download [" + url + "].")
sys.exit()
return 0
# Check that the required C++ build tools are installed.
if not os.path.exists("C:/Program Files (x86)/Microsoft Visual Studio/2019/BuildTools/VC/Tools/MSVC"):
print("C++ build tool is not installed. Please install Microsoft C++ Build Tools.\nhttps://visualstudio.microsoft.com/ja/visual-cpp-build-tools/")
sys.exit()
# Create the working directory.
tempPath = "./temp/"
if os.path.exists(tempPath):
    # If it already exists, delete the whole thing first.
shutil.rmtree(tempPath)
os.mkdir(tempPath)
print("Start setup...")
# Download the required files.
print("Downloading lid.176.bin...")
fileDownload("https://dl.fbaipublicfiles.com/fasttext/supervised-models/lid.176.bin", "./lid.176.bin")
print("Downloading runtime...")
fileDownload("https://www.python.org/ftp/python/3.9.7/python-3.9.7-embed-amd64.zip", tempPath + "runtime.zip")
# Unzip the runtime.
print("Unzipping runtime...")
with zipfile.ZipFile(tempPath + "runtime.zip") as f:
f.extractall('./runtime')
# Additionally fetch fasttext into it.
print("Downloading fasttext...")
fileDownload("https://github.com/facebookresearch/fastText/archive/refs/heads/master.zip", tempPath + "fasttext.zip")
print("Unzipping fasttext...")
with zipfile.ZipFile(tempPath + "fasttext.zip") as f:
f.extractall(tempPath + "fasttext")
# Make pip usable inside the embedded runtime.
print("Downloading get-pip.py...")
fileDownload("https://bootstrap.pypa.io/get-pip.py", tempPath + "get-pip.py")
with open("./runtime/python39._pth", "a") as f:
f.write("import site")
# Hand off to the batch file.
print("Calling setruntime.bat...")
os.system(".\\setruntime.bat " + sys.exec_prefix)
# Delete the working folder.
shutil.rmtree(tempPath)
|
[
"os.mkdir",
"zipfile.ZipFile",
"os.path.exists",
"os.system",
"shutil.rmtree",
"sys.exit"
] |
[((680, 704), 'os.path.exists', 'os.path.exists', (['tempPath'], {}), '(tempPath)\n', (694, 704), False, 'import os\n'), ((760, 778), 'os.mkdir', 'os.mkdir', (['tempPath'], {}), '(tempPath)\n', (768, 778), False, 'import os\n'), ((1790, 1839), 'os.system', 'os.system', (["('.\\\\setruntime.bat ' + sys.exec_prefix)"], {}), "('.\\\\setruntime.bat ' + sys.exec_prefix)\n", (1799, 1839), False, 'import os\n'), ((1852, 1875), 'shutil.rmtree', 'shutil.rmtree', (['tempPath'], {}), '(tempPath)\n', (1865, 1875), False, 'import shutil\n'), ((369, 473), 'os.path.exists', 'os.path.exists', (['"""C:/Program Files (x86)/Microsoft Visual Studio/2019/BuildTools/VC/Tools/MSVC"""'], {}), "(\n 'C:/Program Files (x86)/Microsoft Visual Studio/2019/BuildTools/VC/Tools/MSVC'\n )\n", (383, 473), False, 'import os\n'), ((629, 639), 'sys.exit', 'sys.exit', ([], {}), '()\n', (637, 639), False, 'import sys\n'), ((734, 757), 'shutil.rmtree', 'shutil.rmtree', (['tempPath'], {}), '(tempPath)\n', (747, 757), False, 'import shutil\n'), ((1153, 1194), 'zipfile.ZipFile', 'zipfile.ZipFile', (["(tempPath + 'runtime.zip')"], {}), "(tempPath + 'runtime.zip')\n", (1168, 1194), False, 'import zipfile\n'), ((1442, 1484), 'zipfile.ZipFile', 'zipfile.ZipFile', (["(tempPath + 'fasttext.zip')"], {}), "(tempPath + 'fasttext.zip')\n", (1457, 1484), False, 'import zipfile\n'), ((318, 328), 'sys.exit', 'sys.exit', ([], {}), '()\n', (326, 328), False, 'import sys\n')]
|
from unittest import TestCase
from tests import MockApp
from i18n.i18n import I18n, translate, is_translation_available
class I18nTest(TestCase):
@classmethod
def setUpClass(klass):
I18n(MockApp())
# translation available
def test_returns_false_if_translation_not_available_for_symbol(self):
self.assertFalse(is_translation_available('foobar', 'en-US'))
def test_returns_false_if_translation_file_not_available(self):
self.assertFalse(is_translation_available('coursesLabel', 'foobar'))
def test_returns_true_if_locale_is_found_and_symbol_exists(self):
self.assertTrue(is_translation_available('coursesLabel', 'en-US'))
# translate
def test_returns_default_fallback_locale_when_locale_file_is_not_found(self):
self.assertEqual(
'Courses',
translate('coursesLabel', 'unknown-Locale')
)
def test_returns_an_error_string_when_symbol_is_not_found_for_locale(self):
self.assertEqual(
'Translation not found for doesntExist in en-US',
translate('doesntExist', 'en-US')
)
def test_returns_translated_string_when_symbol_is_found_for_locale(self):
self.assertEqual('Courses', translate('coursesLabel', 'en-US'))
self.assertEqual('Kurse', translate('coursesLabel', 'de-DE'))
|
[
"i18n.i18n.translate",
"tests.MockApp",
"i18n.i18n.is_translation_available"
] |
[((206, 215), 'tests.MockApp', 'MockApp', ([], {}), '()\n', (213, 215), False, 'from tests import MockApp\n'), ((345, 388), 'i18n.i18n.is_translation_available', 'is_translation_available', (['"""foobar"""', '"""en-US"""'], {}), "('foobar', 'en-US')\n", (369, 388), False, 'from i18n.i18n import I18n, translate, is_translation_available\n'), ((484, 534), 'i18n.i18n.is_translation_available', 'is_translation_available', (['"""coursesLabel"""', '"""foobar"""'], {}), "('coursesLabel', 'foobar')\n", (508, 534), False, 'from i18n.i18n import I18n, translate, is_translation_available\n'), ((631, 680), 'i18n.i18n.is_translation_available', 'is_translation_available', (['"""coursesLabel"""', '"""en-US"""'], {}), "('coursesLabel', 'en-US')\n", (655, 680), False, 'from i18n.i18n import I18n, translate, is_translation_available\n'), ((842, 885), 'i18n.i18n.translate', 'translate', (['"""coursesLabel"""', '"""unknown-Locale"""'], {}), "('coursesLabel', 'unknown-Locale')\n", (851, 885), False, 'from i18n.i18n import I18n, translate, is_translation_available\n'), ((1077, 1110), 'i18n.i18n.translate', 'translate', (['"""doesntExist"""', '"""en-US"""'], {}), "('doesntExist', 'en-US')\n", (1086, 1110), False, 'from i18n.i18n import I18n, translate, is_translation_available\n'), ((1236, 1270), 'i18n.i18n.translate', 'translate', (['"""coursesLabel"""', '"""en-US"""'], {}), "('coursesLabel', 'en-US')\n", (1245, 1270), False, 'from i18n.i18n import I18n, translate, is_translation_available\n'), ((1306, 1340), 'i18n.i18n.translate', 'translate', (['"""coursesLabel"""', '"""de-DE"""'], {}), "('coursesLabel', 'de-DE')\n", (1315, 1340), False, 'from i18n.i18n import I18n, translate, is_translation_available\n')]
|
from .base import Base
import os
class Prune(Base):
"""Prompts the user to prune their docker environment"""
def run(self):
os.system("docker system prune")
|
[
"os.system"
] |
[((144, 176), 'os.system', 'os.system', (['"""docker system prune"""'], {}), "('docker system prune')\n", (153, 176), False, 'import os\n')]
|
#!/usr/bin/env python3
"""
Author : <NAME> <<EMAIL>>
Date : 2021-03-14
Purpose: Rock the Casbah
"""
import argparse
# --------------------------------------------------
def get_args():
"""Get command-line arguments"""
parser = argparse.ArgumentParser(
description="Jump the Five",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("str", metavar="str", help="Input Text")
return parser.parse_args()
# --------------------------------------------------
def main():
"""Make a jazz noise here"""
args = get_args()
pos_arg = args.str
# print(f"{pos_arg}")
jumper = dict()
jumper["1"] = "9"
jumper["2"] = "8"
jumper["3"] = "7"
jumper["4"] = "6"
jumper["5"] = "0"
jumper["6"] = "4"
jumper["7"] = "3"
jumper["8"] = "2"
jumper["9"] = "1"
jumper["0"] = "5"
for char in pos_arg:
print(jumper.get(char, f"{char}"), end="")
# print()
# --------------------------------------------------
if __name__ == "__main__":
main()
|
[
"argparse.ArgumentParser"
] |
[((241, 354), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Jump the Five"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description='Jump the Five', formatter_class=\n argparse.ArgumentDefaultsHelpFormatter)\n", (264, 354), False, 'import argparse\n')]
|
from intMachine import intMachine
import sys
from time import sleep
from PIL import Image
from os import system
import imageio
ANIMATION = False
black_points = set()
new_points = set()
position = [0,5]
direction = 90
TX = 43
TY = 6
cont = 0
pics = []
def paintPoint(position, paint):
global black_points, new_points
new_points.add(tuple(position))
if paint == 0:
black_points.add(tuple(position))
else:
black_points.discard(tuple(position))
def move_robot(vector, steps=1):
global position, direction
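    # vector == 0 turns the robot left (+90 degrees); any other value turns it right (+270)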
direction += 90 if vector==0 else 270
direction %= 360
x,y = position
if direction == 0 : x+=steps
elif direction == 90 : y+=steps
elif direction == 180: x-=steps
elif direction == 270: y-=steps
else: print("mala direccion", direction); sys.exit()
position = [x,y]
def save_pic():
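    # Render the painted hull as a 1-bit image, upscale it, and queue the frame for the animation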
global black_points, cont, pics
wall = Image.new("1", (TX,TY))
for y in range(TY):
for x in range(TX):
c = 0 if (x,y) in black_points else 1
wall.putpixel((x,TY-1-y), c)
wall = wall.resize((wall.width * 50, wall.height*50))
wall.save(f"tmp/matricula{cont}.png")
pics.append(imageio.imread(f"tmp/matricula{cont}.png"))
cont += 1
with open("input", "r") as f:
program = [int(i) for i in f.readline().strip().split(',')]
color = 1
computer = intMachine(program, [color])
paint, vector = computer.run()
paintPoint(position, paint)
move_robot(vector)
if ANIMATION:
system("mkdir tmp")
save_pic()
while not computer.isHalted():
color = 0 if tuple(position) in black_points else 1
computer.appendStdin(color)
paint, vector = computer.re_run()[-2:]
paintPoint(position, paint)
move_robot(vector)
if ANIMATION: save_pic()
# Part 1
print(len(new_points))
# Part 2
wall = Image.new("1", (TX,TY))
for y in range(TY):
for x in range(TX):
c = 0 if (x,y) in black_points else 1
wall.putpixel((x,TY-1-y), c)
wall = wall.resize((wall.width * 50, wall.height*50))
wall.save("matricula.png")
if ANIMATION:
imageio.mimsave("animation.gif", pics, fps=10)
system("rm -rf tmp")
|
[
"PIL.Image.new",
"intMachine.intMachine",
"imageio.imread",
"os.system",
"sys.exit",
"imageio.mimsave"
] |
[((1378, 1406), 'intMachine.intMachine', 'intMachine', (['program', '[color]'], {}), '(program, [color])\n', (1388, 1406), False, 'from intMachine import intMachine\n'), ((1836, 1860), 'PIL.Image.new', 'Image.new', (['"""1"""', '(TX, TY)'], {}), "('1', (TX, TY))\n", (1845, 1860), False, 'from PIL import Image\n'), ((919, 943), 'PIL.Image.new', 'Image.new', (['"""1"""', '(TX, TY)'], {}), "('1', (TX, TY))\n", (928, 943), False, 'from PIL import Image\n'), ((1503, 1522), 'os.system', 'system', (['"""mkdir tmp"""'], {}), "('mkdir tmp')\n", (1509, 1522), False, 'from os import system\n'), ((2087, 2133), 'imageio.mimsave', 'imageio.mimsave', (['"""animation.gif"""', 'pics'], {'fps': '(10)'}), "('animation.gif', pics, fps=10)\n", (2102, 2133), False, 'import imageio\n'), ((2138, 2158), 'os.system', 'system', (['"""rm -rf tmp"""'], {}), "('rm -rf tmp')\n", (2144, 2158), False, 'from os import system\n'), ((1202, 1244), 'imageio.imread', 'imageio.imread', (['f"""tmp/matricula{cont}.png"""'], {}), "(f'tmp/matricula{cont}.png')\n", (1216, 1244), False, 'import imageio\n'), ((823, 833), 'sys.exit', 'sys.exit', ([], {}), '()\n', (831, 833), False, 'import sys\n')]
|
# -*- coding: utf-8 -*-
"""Project Training.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1Kr15oI3Jz958EB30I3We9V278wZnGd83
"""
#!pip install tensorflow-gpu
import sklearn as sk
from sklearn.metrics import confusion_matrix
import seaborn as sns
import sys
import numpy
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential # For constructing model
from tensorflow.keras.layers import Dense, Dropout, Flatten # Layer cores
from tensorflow.keras.layers import Conv2D, MaxPooling2D # CNN layers
from tensorflow.keras.utils import to_categorical # Extra utilities
import pickle
from sklearn.model_selection import train_test_split
import os
from google.colab import drive
drive.mount('/content/drive')
# GPU test code
device_name = tf.test.gpu_device_name()
if device_name != '/device:GPU:0':
raise SystemError('GPU device not found')
print('Found GPU at: {}'.format(device_name))
def loadData(fileName,size=0.2):
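    # Note: the fileName argument is ignored; the pickle path is hard-coded to Google Drive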
with open('/content/drive/My Drive/X_Y_Data.pickle', 'rb') as f:
X, Y = pickle.load(f)
X=X.reshape(-1,45,45,1)
X_train, X_test, y_train, y_test = train_test_split(X,Y,test_size = size)
return X_train, X_test, y_train, y_test
def createModel(size):
model = Sequential()
    # Images are 45 by 45
    model.add(Conv2D(32, (3,3), activation='relu', input_shape=size)) #43 by 43
    model.add(MaxPooling2D())
    model.add(Conv2D(64, (3,3), activation='relu')) #19 by 19
    model.add(MaxPooling2D())
    model.add(Conv2D(128, (3,3), activation='relu')) #7 by 7
    model.add(MaxPooling2D())
    model.add(Dropout(rate=0.15))
    model.add(Flatten()) #1152 by 1
    model.add(Dense(500, activation='relu')) #500 by 1
    model.add(Dropout(0.2))
    model.add(Dense(250, activation='relu')) #250 by 1
    model.add(Dropout(0.2))
    model.add(Dense(125, activation='relu')) #125 by 1
model.add(Dropout(0.2))
model.add(Dense(66, activation='softmax')) # 66 by 1 (only english, digits, and symbols)
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
return model
X_train, X_test, y_train, y_test = loadData('X_Y_Data.pickle')
model = createModel(X_train.shape[1:])
checkpoint_path = "training/cp-{epoch:04d}.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
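# Save model weights every 2 epochs through a checkpoint callback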
cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path, verbose=1, save_weights_only=True, period = 2)
model.save_weights(checkpoint_path.format(epoch=0))
model.fit(X_train, y_train, epochs=16, callbacks=[cp_callback], validation_split = .2)
loss, acc = model.evaluate(X_test, y_test)
# Get Confusion matrix
con_mat = confusion_matrix(y_test, model.predict(X_test).argmax(axis=1))
con_mat = con_mat.astype(float)
# numpy.set_printoptions(threshold=sys.maxsize)
# print(con_mat)
# Normalize confusion matrix row wise
for r in range(66):
s = float(sum(con_mat[r, :]))
for c in range(66):
con_mat[r, c] = float(con_mat[r, c])/s
#print(con_mat)
# Create heat map of confusion matrix
plt.figure(figsize=(18, 14))
sns.heatmap(con_mat, square=True, linewidths=0.01, linecolor='#A9A9A9')
|
[
"seaborn.heatmap",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.Dense",
"sklearn.model_selection.train_test_split",
"os.path.dirname",
"tensorflow.keras.callbacks.ModelCheckpoint",
"matplotlib.pyplot.figure",
"pickle.load",
"tensorflow.keras.models.Sequential",
"google.colab.drive.mount",
"tensorflow.test.gpu_device_name",
"tensorflow.keras.layers.Flatten"
] |
[((829, 858), 'google.colab.drive.mount', 'drive.mount', (['"""/content/drive"""'], {}), "('/content/drive')\n", (840, 858), False, 'from google.colab import drive\n'), ((891, 920), 'google.colab.drive.mount', 'drive.mount', (['"""/content/drive"""'], {}), "('/content/drive')\n", (902, 920), False, 'from google.colab import drive\n'), ((952, 977), 'tensorflow.test.gpu_device_name', 'tf.test.gpu_device_name', ([], {}), '()\n', (975, 977), True, 'import tensorflow as tf\n'), ((2503, 2535), 'os.path.dirname', 'os.path.dirname', (['checkpoint_path'], {}), '(checkpoint_path)\n', (2518, 2535), False, 'import os\n'), ((2551, 2660), 'tensorflow.keras.callbacks.ModelCheckpoint', 'tf.keras.callbacks.ModelCheckpoint', ([], {'filepath': 'checkpoint_path', 'verbose': '(1)', 'save_weights_only': '(True)', 'period': '(2)'}), '(filepath=checkpoint_path, verbose=1,\n save_weights_only=True, period=2)\n', (2585, 2660), True, 'import tensorflow as tf\n'), ((3250, 3278), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(18, 14)'}), '(figsize=(18, 14))\n', (3260, 3278), True, 'import matplotlib.pyplot as plt\n'), ((3279, 3350), 'seaborn.heatmap', 'sns.heatmap', (['con_mat'], {'square': '(True)', 'linewidths': '(0.01)', 'linecolor': '"""#A9A9A9"""'}), "(con_mat, square=True, linewidths=0.01, linecolor='#A9A9A9')\n", (3290, 3350), True, 'import seaborn as sns\n'), ((1308, 1346), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'Y'], {'test_size': 'size'}), '(X, Y, test_size=size)\n', (1324, 1346), False, 'from sklearn.model_selection import train_test_split\n'), ((1428, 1440), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1438, 1440), False, 'from tensorflow.keras.models import Sequential\n'), ((1221, 1235), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1232, 1235), False, 'import pickle\n'), ((1482, 1537), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""', 'input_shape': 'size'}), "(32, (3, 3), activation='relu', input_shape=size)\n", (1488, 1537), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D\n'), ((1562, 1576), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {}), '()\n', (1574, 1576), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D\n'), ((1592, 1629), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""'}), "(64, (3, 3), activation='relu')\n", (1598, 1629), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D\n'), ((1654, 1668), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {}), '()\n', (1666, 1668), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D\n'), ((1684, 1722), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(128)', '(3, 3)'], {'activation': '"""relu"""'}), "(128, (3, 3), activation='relu')\n", (1690, 1722), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D\n'), ((1747, 1761), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {}), '()\n', (1759, 1761), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D\n'), ((1777, 1795), 'tensorflow.keras.layers.Dropout', 'Dropout', ([], {'rate': '(0.15)'}), '(rate=0.15)\n', (1784, 1795), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten\n'), ((1811, 1820), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (1818, 1820), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten\n'), ((1847, 1876), 'tensorflow.keras.layers.Dense', 'Dense', (['(500)'], 
{'activation': '"""relu"""'}), "(500, activation='relu')\n", (1852, 1876), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten\n'), ((1902, 1914), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (1909, 1914), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten\n'), ((1930, 1959), 'tensorflow.keras.layers.Dense', 'Dense', (['(250)'], {'activation': '"""relu"""'}), "(250, activation='relu')\n", (1935, 1959), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten\n'), ((1985, 1997), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (1992, 1997), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten\n'), ((2013, 2042), 'tensorflow.keras.layers.Dense', 'Dense', (['(125)'], {'activation': '"""relu"""'}), "(125, activation='relu')\n", (2018, 2042), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten\n'), ((2068, 2080), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (2075, 2080), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten\n'), ((2096, 2127), 'tensorflow.keras.layers.Dense', 'Dense', (['(66)'], {'activation': '"""softmax"""'}), "(66, activation='softmax')\n", (2101, 2127), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten\n')]
|
"""
Lorenz system
"""
from scipy.integrate import odeint
from scipy.stats import norm
class lorenz_system_63:
def __init__(self, rho = None, sigma = None, beta = None):
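        # Default to the canonical Lorenz-63 parameters (rho=28, sigma=10, beta=8/3)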
        self.rho = rho if rho is not None else 28.0
        self.sigma = sigma if sigma is not None else 10.0
        self.beta = beta if beta is not None else 8.0 / 3.0
self.N = 3
self.x0 = norm.rvs(size = self.N).reshape((self.N,))*4.
def f(self,state, t):
x, y, z = state # unpack the state vector
return self.sigma * (y - x), x * (self.rho - z) - y, x * y - self.beta * z # derivatives
if __name__ == '__main__':
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
    ls = lorenz_system_63()
state0 = [1.0, 1.0, 1.0]
t = np.arange(0.0, 40.0, 0.01)
states = odeint(ls.f, state0, t)
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot(states[:,0], states[:,1], states[:,2])
plt.show()
|
[
"matplotlib.pyplot.show",
"scipy.stats.norm.rvs",
"scipy.integrate.odeint",
"matplotlib.pyplot.figure",
"numpy.arange"
] |
[((705, 731), 'numpy.arange', 'np.arange', (['(0.0)', '(40.0)', '(0.01)'], {}), '(0.0, 40.0, 0.01)\n', (714, 731), True, 'import numpy as np\n'), ((745, 768), 'scipy.integrate.odeint', 'odeint', (['ls.f', 'state0', 't'], {}), '(ls.f, state0, t)\n', (751, 768), False, 'from scipy.integrate import odeint\n'), ((780, 792), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (790, 792), True, 'import matplotlib.pyplot as plt\n'), ((882, 892), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (890, 892), True, 'import matplotlib.pyplot as plt\n'), ((293, 314), 'scipy.stats.norm.rvs', 'norm.rvs', ([], {'size': 'self.N'}), '(size=self.N)\n', (301, 314), False, 'from scipy.stats import norm\n')]
|
from keras.models import Sequential, Model
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.utils import np_utils
import numpy as np
import sys
from collections import Counter
import types
from PIL import Image
def test_image():
size = (256,250)
color = (210,150,160)
img = Image.new("RGBA",size,color)
return np.asarray(img)[:, :, :3]
def to_cpp_tensor(readable_tensor, is_bias=False, strip_channels=False):
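    # Emit a C++ "Numeric tensor[...]" declaration with a brace-initialised literal for the given array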
tensor = readable_tensor
if isinstance(readable_tensor, list):
if is_bias:
tensor = readable_tensor[1]
else:
tensor = readable_tensor[0]
if strip_channels and len(tensor.shape) == 4 and tensor.shape[0] == 1:
tensor = tensor[0]
declaration = 'Numeric tensor' + str(tensor.shape)
declaration = declaration.replace('(', '[')
declaration = declaration.replace(', ', '][')
declaration = declaration.replace(',', '')
declaration = declaration.replace(')', ']')
tstr = str(repr(tensor))
tstr = tstr.replace(')', '')
tstr = tstr.replace('array(', '')
tstr = tstr.replace('[', '{')
tstr = tstr.replace(']', '}')
tstr = tstr.replace(', dtype=float32', '')
return '{} =\n {};'.format(declaration, tstr)
def list_lambda(func, value):
if isinstance(value, list):
return [func(x) for x in value]
else:
return func(value)
# Translates from Keras Default
# (rows, cols, depth, nr_filters)
# to
# (nr_filters, depth, rows, cols)
def to_readable_weight(tensor):
def to_readable_arr(arr):
arr = np.swapaxes(arr, 3, 0)
arr = np.swapaxes(arr, 2, 1)
arr = np.swapaxes(arr, 2, 3)
return arr
return list_lambda(to_readable_arr, tensor)
# Translates from readable
# (nr_filters, depth, rows, cols)
# to
# (rows, cols, depth, nr_filters)
def to_keras_weight(tensor):
def to_keras_arr(arr):
arr = np.swapaxes(arr, 0, 3)
arr = np.swapaxes(arr, 1, 2)
arr = np.swapaxes(arr, 0, 1)
return arr
return list_lambda(to_keras_arr, tensor)
###########################################
# Translates from readable
# (nr_inputs, depth, rows, cols)
# to
# (nr_inputs, rows, cols, depth)
def to_keras_tensor(tensor):
def to_keras_arr(arr):
arr = np.swapaxes(arr, 1, 2)
arr = np.swapaxes(arr, 3, 2)
return arr
return list_lambda(to_keras_arr, tensor)
# Translates from Keras
# (nr_inputs, rows, cols, depth)
# to
# (nr_inputs, depth, rows, cols)
def to_readable_tensor(tensor, batch=True):
if batch:
def to_readable_arr(arr):
arr = np.swapaxes(arr, 3, 1)
arr = np.swapaxes(arr, 2, 3)
return arr
else:
# (rows, cols, depth) to (depth, rows, cols)
def to_readable_arr(arr):
arr = np.swapaxes(arr, 2, 0)
return arr
return list_lambda(to_readable_arr, tensor)
readable_input = np.array(
[[
[
[ 0., 1., 2., 3., 4. ],
[ 5., 6., 7., 8., 9. ],
[ 10., 11., 12., 13., 14. ],
[ 15., 16., 17., 18., 19. ],
[ 20., 21., 22., 23., 24. ]
],
[
[ 0., 1., 2., 3., 4. ],
[ 5., 6., 7., 8., 9. ],
[ 10., 11., 12., 13., 14. ],
[ 15., 16., 17., 18., 19. ],
[ 20., 21., 22., 23., 24. ]
],
[
[ 0., 1., 2., 3., 4. ],
[ 5., 6., 7., 8., 9. ],
[ 10., 11., 12., 13., 14. ],
[ 15., 16., 17., 18., 19. ],
[ 20., 21., 22., 23., 24. ]
],
]]
)
test_weights = [np.array(
[
[
[ [0., 0.], [0., 0.], [0., 0.] ],
[ [1., 1.], [1., 1.], [1., 1.] ],
[ [2., 2.], [2., 2.], [2., 2.] ],
],
[
[ [3., 3.], [3., 3.], [3., 3.] ],
[ [4., 4.], [4., 4.], [4., 4.] ],
[ [5., 5.], [5., 5.], [5., 5.] ],
],
[
[ [6., 6.], [6., 6.], [6., 6.] ],
[ [7., 7.], [7., 7.], [7., 7.] ],
[ [8., 8.], [8., 8.], [8., 8.] ],
]
]
)]
readable_test_weights = [np.array(
[
[ # Kernel 1
[
[ 0., 1., 2. ],
[ 3., 4., 5. ],
[ 6., 7., 8. ]
],
[
[ 0., 1., 2. ],
[ 3., 4., 5. ],
[ 6., 7., 8. ]
],
[
[ 0., 1., 2. ],
[ 3., 4., 5. ],
[ 6., 7., 8. ]
],
],
[ # Kernel 2
[
[ 0., 1., 2. ],
[ 3., 4., 5. ],
[ 6., 7., 8. ]
],
[
[ 0., 1., 2. ],
[ 3., 4., 5. ],
[ 6., 7., 8. ]
],
[
[ 0., 1., 2. ],
[ 3., 4., 5. ],
[ 6., 7., 8. ]
],
]
]
)]
# Channels last is DEFAULT
# Input shape: (rows, cols, depth)
#
# Weights: [(rows, cols, depth, nr_filters)]
model = Sequential()
# weights = [np.ones((3, 3, 3, 1))]
weights = test_weights
weights.append(np.array([0, 1]))
# weights = to_keras_tensor(readable_test_weights)
print(to_cpp_tensor(readable_test_weights))
print(to_cpp_tensor(readable_input))
# print(to_keras_weight(readable_test_weights))
test_layer = Conv2D(2, (3, 3), input_shape=(5, 5, 3), weights=weights, use_bias=True, name='conv')
# test_layer.set_weights(test_weights_2)
# print(test_layer.get_weights())
# test_layer.set_weights(test_weights)
# print(test_layer)
model.add(test_layer)
model.compile(optimizer='sgd', loss='mean_squared_error')
# print(test_layer.get_weights())
print(to_cpp_tensor(test_layer.get_weights(), is_bias=True))
out = model.predict(to_keras_tensor(readable_input))
print(to_readable_tensor(out))
np.set_printoptions(threshold=sys.maxsize)  # np.nan is not a valid threshold in current numpy
print(to_readable_tensor(test_image(), False))
print(test_image().shape)
print(to_cpp_tensor(to_readable_tensor(test_image(), False)))
|
[
"PIL.Image.new",
"numpy.set_printoptions",
"numpy.asarray",
"numpy.array",
"keras.layers.Conv2D",
"numpy.swapaxes",
"keras.models.Sequential"
] |
[((2812, 3309), 'numpy.array', 'np.array', (['[[[[0.0, 1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0, 9.0], [10.0, 11.0, 12.0,\n 13.0, 14.0], [15.0, 16.0, 17.0, 18.0, 19.0], [20.0, 21.0, 22.0, 23.0, \n 24.0]], [[0.0, 1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0, 9.0], [10.0, \n 11.0, 12.0, 13.0, 14.0], [15.0, 16.0, 17.0, 18.0, 19.0], [20.0, 21.0, \n 22.0, 23.0, 24.0]], [[0.0, 1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0, \n 9.0], [10.0, 11.0, 12.0, 13.0, 14.0], [15.0, 16.0, 17.0, 18.0, 19.0], [\n 20.0, 21.0, 22.0, 23.0, 24.0]]]]'], {}), '([[[[0.0, 1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0, 9.0], [10.0, \n 11.0, 12.0, 13.0, 14.0], [15.0, 16.0, 17.0, 18.0, 19.0], [20.0, 21.0, \n 22.0, 23.0, 24.0]], [[0.0, 1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0, \n 9.0], [10.0, 11.0, 12.0, 13.0, 14.0], [15.0, 16.0, 17.0, 18.0, 19.0], [\n 20.0, 21.0, 22.0, 23.0, 24.0]], [[0.0, 1.0, 2.0, 3.0, 4.0], [5.0, 6.0, \n 7.0, 8.0, 9.0], [10.0, 11.0, 12.0, 13.0, 14.0], [15.0, 16.0, 17.0, 18.0,\n 19.0], [20.0, 21.0, 22.0, 23.0, 24.0]]]])\n', (2820, 3309), True, 'import numpy as np\n'), ((4596, 4608), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (4606, 4608), False, 'from keras.models import Sequential, Model\n'), ((4895, 4984), 'keras.layers.Conv2D', 'Conv2D', (['(2)', '(3, 3)'], {'input_shape': '(5, 5, 3)', 'weights': 'weights', 'use_bias': '(True)', 'name': '"""conv"""'}), "(2, (3, 3), input_shape=(5, 5, 3), weights=weights, use_bias=True,\n name='conv')\n", (4901, 4984), False, 'from keras.layers import Conv2D, MaxPooling2D\n'), ((5381, 5418), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'np.nan'}), '(threshold=np.nan)\n', (5400, 5418), True, 'import numpy as np\n'), ((433, 463), 'PIL.Image.new', 'Image.new', (['"""RGBA"""', 'size', 'color'], {}), "('RGBA', size, color)\n", (442, 463), False, 'from PIL import Image\n'), ((3410, 3788), 'numpy.array', 'np.array', (['[[[[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]], [[1.0, 1.0], [1.0, 1.0], [1.0, 1.0]\n ], [[2.0, 2.0], [2.0, 2.0], [2.0, 2.0]]], [[[3.0, 3.0], [3.0, 3.0], [\n 3.0, 3.0]], [[4.0, 4.0], [4.0, 4.0], [4.0, 4.0]], [[5.0, 5.0], [5.0, \n 5.0], [5.0, 5.0]]], [[[6.0, 6.0], [6.0, 6.0], [6.0, 6.0]], [[7.0, 7.0],\n [7.0, 7.0], [7.0, 7.0]], [[8.0, 8.0], [8.0, 8.0], [8.0, 8.0]]]]'], {}), '([[[[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]], [[1.0, 1.0], [1.0, 1.0], [\n 1.0, 1.0]], [[2.0, 2.0], [2.0, 2.0], [2.0, 2.0]]], [[[3.0, 3.0], [3.0, \n 3.0], [3.0, 3.0]], [[4.0, 4.0], [4.0, 4.0], [4.0, 4.0]], [[5.0, 5.0], [\n 5.0, 5.0], [5.0, 5.0]]], [[[6.0, 6.0], [6.0, 6.0], [6.0, 6.0]], [[7.0, \n 7.0], [7.0, 7.0], [7.0, 7.0]], [[8.0, 8.0], [8.0, 8.0], [8.0, 8.0]]]])\n', (3418, 3788), True, 'import numpy as np\n'), ((3873, 4222), 'numpy.array', 'np.array', (['[[[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[0.0, 1.0, 2.0], [\n 3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [\n 6.0, 7.0, 8.0]]], [[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]],\n [[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[0.0, 1.0, 2.0],\n [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]]]'], {}), '([[[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[0.0, 1.0,\n 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[0.0, 1.0, 2.0], [3.0, 4.0, \n 5.0], [6.0, 7.0, 8.0]]], [[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0,\n 8.0]], [[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[0.0, 1.0,\n 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]]])\n', (3881, 4222), True, 'import numpy as np\n'), ((4683, 4699), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (4691, 4699), True, 'import numpy as 
np\n'), ((470, 485), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (480, 485), True, 'import numpy as np\n'), ((1597, 1619), 'numpy.swapaxes', 'np.swapaxes', (['arr', '(3)', '(0)'], {}), '(arr, 3, 0)\n', (1608, 1619), True, 'import numpy as np\n'), ((1628, 1650), 'numpy.swapaxes', 'np.swapaxes', (['arr', '(2)', '(1)'], {}), '(arr, 2, 1)\n', (1639, 1650), True, 'import numpy as np\n'), ((1659, 1681), 'numpy.swapaxes', 'np.swapaxes', (['arr', '(2)', '(3)'], {}), '(arr, 2, 3)\n', (1670, 1681), True, 'import numpy as np\n'), ((1904, 1926), 'numpy.swapaxes', 'np.swapaxes', (['arr', '(0)', '(3)'], {}), '(arr, 0, 3)\n', (1915, 1926), True, 'import numpy as np\n'), ((1935, 1957), 'numpy.swapaxes', 'np.swapaxes', (['arr', '(1)', '(2)'], {}), '(arr, 1, 2)\n', (1946, 1957), True, 'import numpy as np\n'), ((1966, 1988), 'numpy.swapaxes', 'np.swapaxes', (['arr', '(0)', '(1)'], {}), '(arr, 0, 1)\n', (1977, 1988), True, 'import numpy as np\n'), ((2253, 2275), 'numpy.swapaxes', 'np.swapaxes', (['arr', '(1)', '(2)'], {}), '(arr, 1, 2)\n', (2264, 2275), True, 'import numpy as np\n'), ((2284, 2306), 'numpy.swapaxes', 'np.swapaxes', (['arr', '(3)', '(2)'], {}), '(arr, 3, 2)\n', (2295, 2306), True, 'import numpy as np\n'), ((2552, 2574), 'numpy.swapaxes', 'np.swapaxes', (['arr', '(3)', '(1)'], {}), '(arr, 3, 1)\n', (2563, 2574), True, 'import numpy as np\n'), ((2584, 2606), 'numpy.swapaxes', 'np.swapaxes', (['arr', '(2)', '(3)'], {}), '(arr, 2, 3)\n', (2595, 2606), True, 'import numpy as np\n'), ((2712, 2734), 'numpy.swapaxes', 'np.swapaxes', (['arr', '(2)', '(0)'], {}), '(arr, 2, 0)\n', (2723, 2734), True, 'import numpy as np\n')]
|
import numpy as np
import tensorflow as tf
from tensorflow.contrib.layers import apply_regularization, l2_regularizer
class MultiDAE(object):
def __init__(self, p_dims, q_dims=None, lam=0.01, lr=1e-3, random_seed=None):
self.p_dims = p_dims
if q_dims is None:
self.q_dims = p_dims[::-1]
else:
assert q_dims[0] == p_dims[-1], "Input and output dimension must equal each other for autoencoders."
assert q_dims[-1] == p_dims[0], "Latent dimension for p- and q-network mismatches."
self.q_dims = q_dims
self.dims = self.q_dims + self.p_dims[1:]
self.lam = lam
self.lr = lr
self.random_seed = random_seed
self.construct_placeholders()
def construct_placeholders(self):
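        # Batch of input vectors plus a dropout keep-probability (defaults to 1.0, i.e. no dropout)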
self.input_ph = tf.placeholder(
dtype=tf.float32, shape=[None, self.dims[0]])
self.keep_prob_ph = tf.placeholder_with_default(1.0, shape=None)
def build_graph(self):
self.construct_weights()
saver, logits = self.forward_pass()
log_softmax_var = tf.nn.log_softmax(logits)
# per-user average negative log-likelihood
neg_ll = -tf.reduce_mean(tf.reduce_sum(
log_softmax_var * self.input_ph, axis=1))
# apply regularization to weights
reg = l2_regularizer(self.lam)
reg_var = apply_regularization(reg, self.weights)
# tensorflow l2 regularization multiply 0.5 to the l2 norm
# multiply 2 so that it is back in the same scale
loss = neg_ll + 2 * reg_var
train_op = tf.train.AdamOptimizer(self.lr).minimize(loss)
# add summary statistics
tf.summary.scalar('negative_multi_ll', neg_ll)
tf.summary.scalar('loss', loss)
merged = tf.summary.merge_all()
return saver, logits, loss, train_op, merged
def forward_pass(self):
# construct forward graph
h = tf.nn.l2_normalize(self.input_ph, 1)
h = tf.nn.dropout(h, self.keep_prob_ph)
for i, (w, b) in enumerate(zip(self.weights, self.biases)):
h = tf.matmul(h, w) + b
if i != len(self.weights) - 1:
h = tf.nn.tanh(h)
return tf.train.Saver(), h
def construct_weights(self):
self.weights = []
self.biases = []
# define weights
for i, (d_in, d_out) in enumerate(zip(self.dims[:-1], self.dims[1:])):
weight_key = "weight_{}to{}".format(i, i+1)
bias_key = "bias_{}".format(i+1)
self.weights.append(tf.get_variable(
name=weight_key, shape=[d_in, d_out],
initializer=tf.contrib.layers.xavier_initializer(
seed=self.random_seed)))
self.biases.append(tf.get_variable(
name=bias_key, shape=[d_out],
initializer=tf.truncated_normal_initializer(
stddev=0.001, seed=self.random_seed)))
# add summary stats
tf.summary.histogram(weight_key, self.weights[-1])
tf.summary.histogram(bias_key, self.biases[-1])
|
[
"tensorflow.contrib.layers.xavier_initializer",
"tensorflow.contrib.layers.apply_regularization",
"tensorflow.reduce_sum",
"tensorflow.truncated_normal_initializer",
"tensorflow.summary.scalar",
"tensorflow.contrib.layers.l2_regularizer",
"tensorflow.nn.log_softmax",
"tensorflow.train.Saver",
"tensorflow.placeholder_with_default",
"tensorflow.nn.tanh",
"tensorflow.nn.l2_normalize",
"tensorflow.nn.dropout",
"tensorflow.placeholder",
"tensorflow.matmul",
"tensorflow.summary.histogram",
"tensorflow.train.AdamOptimizer",
"tensorflow.summary.merge_all"
] |
[((850, 910), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '[None, self.dims[0]]'}), '(dtype=tf.float32, shape=[None, self.dims[0]])\n', (864, 910), True, 'import tensorflow as tf\n'), ((954, 998), 'tensorflow.placeholder_with_default', 'tf.placeholder_with_default', (['(1.0)'], {'shape': 'None'}), '(1.0, shape=None)\n', (981, 998), True, 'import tensorflow as tf\n'), ((1139, 1164), 'tensorflow.nn.log_softmax', 'tf.nn.log_softmax', (['logits'], {}), '(logits)\n', (1156, 1164), True, 'import tensorflow as tf\n'), ((1381, 1405), 'tensorflow.contrib.layers.l2_regularizer', 'l2_regularizer', (['self.lam'], {}), '(self.lam)\n', (1395, 1405), False, 'from tensorflow.contrib.layers import apply_regularization, l2_regularizer\n'), ((1425, 1464), 'tensorflow.contrib.layers.apply_regularization', 'apply_regularization', (['reg', 'self.weights'], {}), '(reg, self.weights)\n', (1445, 1464), False, 'from tensorflow.contrib.layers import apply_regularization, l2_regularizer\n'), ((1751, 1797), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""negative_multi_ll"""', 'neg_ll'], {}), "('negative_multi_ll', neg_ll)\n", (1768, 1797), True, 'import tensorflow as tf\n'), ((1807, 1838), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss"""', 'loss'], {}), "('loss', loss)\n", (1824, 1838), True, 'import tensorflow as tf\n'), ((1857, 1879), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (1877, 1879), True, 'import tensorflow as tf\n'), ((2021, 2057), 'tensorflow.nn.l2_normalize', 'tf.nn.l2_normalize', (['self.input_ph', '(1)'], {}), '(self.input_ph, 1)\n', (2039, 2057), True, 'import tensorflow as tf\n'), ((2071, 2106), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['h', 'self.keep_prob_ph'], {}), '(h, self.keep_prob_ph)\n', (2084, 2106), True, 'import tensorflow as tf\n'), ((2332, 2348), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (2346, 2348), True, 'import tensorflow as tf\n'), ((3186, 3236), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['weight_key', 'self.weights[-1]'], {}), '(weight_key, self.weights[-1])\n', (3206, 3236), True, 'import tensorflow as tf\n'), ((3250, 3297), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['bias_key', 'self.biases[-1]'], {}), '(bias_key, self.biases[-1])\n', (3270, 3297), True, 'import tensorflow as tf\n'), ((1253, 1307), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(log_softmax_var * self.input_ph)'], {'axis': '(1)'}), '(log_softmax_var * self.input_ph, axis=1)\n', (1266, 1307), True, 'import tensorflow as tf\n'), ((1659, 1690), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['self.lr'], {}), '(self.lr)\n', (1681, 1690), True, 'import tensorflow as tf\n'), ((2203, 2218), 'tensorflow.matmul', 'tf.matmul', (['h', 'w'], {}), '(h, w)\n', (2212, 2218), True, 'import tensorflow as tf\n'), ((2302, 2315), 'tensorflow.nn.tanh', 'tf.nn.tanh', (['h'], {}), '(h)\n', (2312, 2315), True, 'import tensorflow as tf\n'), ((2810, 2869), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {'seed': 'self.random_seed'}), '(seed=self.random_seed)\n', (2846, 2869), True, 'import tensorflow as tf\n'), ((3033, 3101), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'stddev': '(0.001)', 'seed': 'self.random_seed'}), '(stddev=0.001, seed=self.random_seed)\n', (3064, 3101), True, 'import tensorflow as tf\n')]
|
#! /usr/bin/env python
import colorsys
import time
import random
from rclpy.qos import (
QoSProfile,
QoSDurabilityPolicy,
QoSHistoryPolicy,
QoSReliabilityPolicy,
)
from rclpy.node import Node
from std_msgs.msg import Header, Char, ColorRGBA, Float32, String
from geometry_msgs.msg import TwistStamped, Twist, Vector3
from builtin_interfaces.msg import Time
from px4_msgs.msg import OffboardControlMode
from px4_msgs.msg import TrajectorySetpoint
from px4_msgs.msg import Timesync
from px4_msgs.msg import VehicleCommand
from px4_msgs.msg import VehicleControlMode
from px4_msgs.msg import Cpuload
from .base_visualization import BaseVisualization
CUSTOM_QOS = QoSProfile(
history=QoSHistoryPolicy.RMW_QOS_POLICY_HISTORY_KEEP_LAST,
reliability=QoSReliabilityPolicy.
RMW_QOS_POLICY_RELIABILITY_BEST_EFFORT,
# RMW_QOS_POLICY_RELIABILITY_RELIABLE,
depth=1,
durability=QoSDurabilityPolicy.RMW_QOS_POLICY_DURABILITY_VOLATILE,
)
# TODO adjust for PX4, for now only copied version for Balboas
class PX4Visualization(BaseVisualization):
ALTITUDE = 15
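    # Target altitude above the origin, in metres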
def __init__(self, namespace):
self.node = Node('px4_visualization', namespace=namespace)
self.timestamp_ = 0
self.vx, self.vy, self.vz = (0, 0, 0)
self.offboard_control_mode_publisher_ = self.node.create_publisher(
OffboardControlMode, 'OffboardControlMode_PubSubTopic', 10
)
self.trajectory_setpoint_publisher_ = self.node.create_publisher(
TrajectorySetpoint, 'TrajectorySetpoint_PubSubTopic', 10
)
self.vehicle_command_publisher_ = self.node.create_publisher(
VehicleCommand, 'VehicleCommand_PubSubTopic', 10
)
self.offboard_setpoint_counter_ = 0
def timesync_callback(msg):
self.timestamp_ = msg.timestamp
def timer_callback():
# if self.offboard_setpoint_counter_ == 10:
# # Change to Offboard mode after 10 setpoints
# self._publish_vehicle_command(
# VehicleCommand.VEHICLE_CMD_DO_SET_MODE, 1.0, 6.0
# )
# # Arm the vehicle
# self._arm()
# Offboard_control_mode needs to be paired with trajectory_setpoint
self._publish_offboard_control_mode()
self._publish_trajectory_setpoint()
# Stop the counter after reaching 11
if self.offboard_setpoint_counter_ < 11:
self.offboard_setpoint_counter_ += 1
self.timesync_sub_ = self.node.create_subscription(
Timesync, 'Timesync_PubSubTopic', timesync_callback, 10
)
self.log_ = self.node.get_logger()
self.timer_ = self.node.create_timer(0.1, timer_callback)
def update(self, states, t=None):
# self.node.get_logger().info(
# 'state received'
# )
assert len(states) == 1
for state in states.values():
self.vx, self.vy, self.vz = state.velocity
'''
Publish the offboard control mode.
    For this visualization, only velocity control is active.
'''
def _publish_offboard_control_mode(self):
msg = OffboardControlMode()
msg.timestamp = self.timestamp_
msg.position = False
msg.velocity = True
msg.acceleration = False
msg.attitude = False
msg.body_rate = False
self.offboard_control_mode_publisher_.publish(msg)
    '''
    Publish a trajectory setpoint: hold a fixed altitude while tracking
    the commanded horizontal velocity.
    '''
def _publish_trajectory_setpoint(self):
msg = TrajectorySetpoint()
msg.timestamp = self.timestamp_
msg.x = float("NaN")
msg.y = float("NaN")
msg.z = -float(self.ALTITUDE)
msg.vx = float(self.vx)
msg.vy = float(self.vy)
msg.vz = float("NaN")
msg.yaw = 3.14
self.trajectory_setpoint_publisher_.publish(msg)
'''
Publish vehicle commands
command Command code (matches VehicleCommand and MAVLink MAV_CMD codes)
param1 Command parameter 1
param2 Command parameter 2
'''
def _publish_vehicle_command(self, command, param1, param2):
msg = VehicleCommand()
msg.timestamp = self.timestamp_
msg.param1 = param1
msg.param2 = param2
msg.command = command
msg.target_system = 1
msg.target_component = 1
msg.source_system = 1
msg.source_component = 1
msg.from_external = True
self.vehicle_command_publisher_.publish(msg)
def _arm(self):
self._publish_vehicle_command(
VehicleCommand.VEHICLE_CMD_COMPONENT_ARM_DISARM, 1.0, 0.0
)
self.log_.info("Arm command send")
def _disarm(self):
self._publish_vehicle_command(
VehicleCommand.VEHICLE_CMD_COMPONENT_ARM_DISARM, 0.0, 0.0
)
self.log_.info("Disarm command send")
|
[
"px4_msgs.msg.VehicleCommand",
"px4_msgs.msg.TrajectorySetpoint",
"rclpy.node.Node",
"px4_msgs.msg.OffboardControlMode",
"rclpy.qos.QoSProfile"
] |
[((680, 906), 'rclpy.qos.QoSProfile', 'QoSProfile', ([], {'history': 'QoSHistoryPolicy.RMW_QOS_POLICY_HISTORY_KEEP_LAST', 'reliability': 'QoSReliabilityPolicy.RMW_QOS_POLICY_RELIABILITY_BEST_EFFORT', 'depth': '(1)', 'durability': 'QoSDurabilityPolicy.RMW_QOS_POLICY_DURABILITY_VOLATILE'}), '(history=QoSHistoryPolicy.RMW_QOS_POLICY_HISTORY_KEEP_LAST,\n reliability=QoSReliabilityPolicy.RMW_QOS_POLICY_RELIABILITY_BEST_EFFORT,\n depth=1, durability=QoSDurabilityPolicy.RMW_QOS_POLICY_DURABILITY_VOLATILE)\n', (690, 906), False, 'from rclpy.qos import QoSProfile, QoSDurabilityPolicy, QoSHistoryPolicy, QoSReliabilityPolicy\n'), ((1149, 1195), 'rclpy.node.Node', 'Node', (['"""px4_visualization"""'], {'namespace': 'namespace'}), "('px4_visualization', namespace=namespace)\n", (1153, 1195), False, 'from rclpy.node import Node\n'), ((3220, 3241), 'px4_msgs.msg.OffboardControlMode', 'OffboardControlMode', ([], {}), '()\n', (3239, 3241), False, 'from px4_msgs.msg import OffboardControlMode\n'), ((3566, 3586), 'px4_msgs.msg.TrajectorySetpoint', 'TrajectorySetpoint', ([], {}), '()\n', (3584, 3586), False, 'from px4_msgs.msg import TrajectorySetpoint\n'), ((4169, 4185), 'px4_msgs.msg.VehicleCommand', 'VehicleCommand', ([], {}), '()\n', (4183, 4185), False, 'from px4_msgs.msg import VehicleCommand\n')]
|
from cradmin_legacy import crinstance, crapp
from cradmin_legacy.crinstance import reverse_cradmin_url
from django.http import Http404
from devilry.apps.core.models import Period, Assignment
from devilry.devilry_account.models import PeriodPermissionGroup, PermissionGroup
from devilry.devilry_admin.cradminextensions import devilry_crmenu_admin
from devilry.devilry_cradmin import devilry_crmenu
from devilry.devilry_cradmin import devilry_crinstance
from devilry.devilry_admin.views.period import admins
from devilry.devilry_admin.views.period import createassignment
from devilry.devilry_admin.views.period import examiners
from devilry.devilry_admin.views.period import overview
from devilry.devilry_admin.views.period import students
from devilry.devilry_admin.views.period import edit
from devilry.devilry_admin.views.period import overview_all_results
from devilry.devilry_qualifiesforexam import cradmin_app as qualifiesforexam
from devilry.devilry_admin.views.period.manage_tags import manage_tags
class Menu(devilry_crmenu_admin.Menu):
def build_menu(self):
super(Menu, self).build_menu()
period = self.request.cradmin_role
self.add_role_menuitem_object()
self.add_subject_breadcrumb_item(subject=period.subject)
self.add_period_breadcrumb_item(period=period, active=True)
def add_subject_breadcrumb_item(self, subject, active=False):
if self.cradmin_instance.get_devilryrole_for_requestuser() == 'periodadmin':
return self.add_headeritem_object(devilry_crmenu.BreadcrumbMenuItem(
label=subject.short_name,
url=reverse_cradmin_url(
instanceid='devilry_admin_subject_for_periodadmin',
appname='overview',
roleid=subject.id,
viewname=crapp.INDEXVIEW_NAME
),
active=active
))
else:
return self.add_headeritem_object(devilry_crmenu.BreadcrumbMenuItem(
label=subject.short_name,
url=reverse_cradmin_url(
instanceid='devilry_admin_subjectadmin',
appname='overview',
roleid=subject.id,
viewname=crapp.INDEXVIEW_NAME
),
active=active
))
class CrAdminInstance(devilry_crinstance.BaseCrInstanceAdmin):
menuclass = Menu
roleclass = Period
apps = [
('overview', overview.App),
('students', students.App),
('examiners', examiners.App),
('admins', admins.App),
('createassignment', createassignment.App),
('edit', edit.App),
('overview_all_results', overview_all_results.App),
('qualifiesforexam', qualifiesforexam.App),
('manage_tags', manage_tags.App),
]
id = 'devilry_admin_periodadmin'
rolefrontpage_appname = 'overview'
def get_rolequeryset(self):
return Period.objects.filter_user_is_admin(user=self.request.user)\
.order_by('-start_time')
def get_titletext_for_role(self, role):
"""
Get a short title briefly describing the given ``role``.
        Remember that the role is a Period.
"""
period = role
return period
@classmethod
def matches_urlpath(cls, urlpath):
return urlpath.startswith('/devilry_admin/period')
def __get_devilryrole_for_requestuser(self, period=None):
period = period or self.request.cradmin_role
devilryrole = PeriodPermissionGroup.objects.get_devilryrole_for_user_on_period(
user=self.request.user,
period=period
)
if devilryrole is None:
raise ValueError('Could not find a devilryrole for request.user. This must be a bug in '
'get_rolequeryset().')
return devilryrole
def get_devilryrole_for_requestuser(self, period=None):
"""
Get the devilryrole for the requesting user on the current
period (request.cradmin_instance).
        The return value is the same as for
        :meth:`devilry.devilry_account.models.PeriodPermissionGroupQuerySet.get_devilryrole_for_user_on_period`,
        except that this method raises ValueError if it does not find a role.
"""
if not hasattr(self, '_devilryrole_for_requestuser'):
self._devilryrole_for_requestuser = self.__get_devilryrole_for_requestuser(period=period)
return self._devilryrole_for_requestuser
def period_admin_access_semi_anonymous_assignments_restricted(self, period=None):
"""
        Check if an admin's access should be restricted because the user is a
        period-admin only and the period has one or more semi-anonymous
        assignments.
This method can be used to check whether access should be restricted for
some elements, e.g. in a view or a template.
Returns:
            (bool): ``True`` if access should be restricted, else ``False``.
"""
devilryrole = self.get_devilryrole_for_requestuser(period=period)
period = period or self.request.cradmin_role
semi_anonymous_assignments_exist = Assignment.objects\
.filter(parentnode=period)\
.filter(anonymizationmode=Assignment.ANONYMIZATIONMODE_SEMI_ANONYMOUS)\
.exists()
if semi_anonymous_assignments_exist and devilryrole == PermissionGroup.GROUPTYPE_PERIODADMIN:
return True
return False
def get_role_from_rolequeryset(self, role):
"""
        Overridden to check if the requestuser has access to specific apps using this
CrAdmin-instance if the requestuser is a period-admin and the period has any
semi-anonymous assignments.
"""
role = super().get_role_from_rolequeryset(role=role)
requesting_appname = self.request.cradmin_app.appname
if requesting_appname in ['qualifiesforexam', 'overview_all_results']:
if self.period_admin_access_semi_anonymous_assignments_restricted(period=role):
raise Http404()
return role
|
[
"devilry.apps.core.models.Assignment.objects.filter",
"cradmin_legacy.crinstance.reverse_cradmin_url",
"devilry.devilry_account.models.PeriodPermissionGroup.objects.get_devilryrole_for_user_on_period",
"django.http.Http404",
"devilry.apps.core.models.Period.objects.filter_user_is_admin"
] |
[((3545, 3653), 'devilry.devilry_account.models.PeriodPermissionGroup.objects.get_devilryrole_for_user_on_period', 'PeriodPermissionGroup.objects.get_devilryrole_for_user_on_period', ([], {'user': 'self.request.user', 'period': 'period'}), '(user=self.\n request.user, period=period)\n', (3609, 3653), False, 'from devilry.devilry_account.models import PeriodPermissionGroup, PermissionGroup\n'), ((2971, 3030), 'devilry.apps.core.models.Period.objects.filter_user_is_admin', 'Period.objects.filter_user_is_admin', ([], {'user': 'self.request.user'}), '(user=self.request.user)\n', (3006, 3030), False, 'from devilry.apps.core.models import Period, Assignment\n'), ((6120, 6129), 'django.http.Http404', 'Http404', ([], {}), '()\n', (6127, 6129), False, 'from django.http import Http404\n'), ((1625, 1770), 'cradmin_legacy.crinstance.reverse_cradmin_url', 'reverse_cradmin_url', ([], {'instanceid': '"""devilry_admin_subject_for_periodadmin"""', 'appname': '"""overview"""', 'roleid': 'subject.id', 'viewname': 'crapp.INDEXVIEW_NAME'}), "(instanceid='devilry_admin_subject_for_periodadmin',\n appname='overview', roleid=subject.id, viewname=crapp.INDEXVIEW_NAME)\n", (1644, 1770), False, 'from cradmin_legacy.crinstance import reverse_cradmin_url\n'), ((2068, 2203), 'cradmin_legacy.crinstance.reverse_cradmin_url', 'reverse_cradmin_url', ([], {'instanceid': '"""devilry_admin_subjectadmin"""', 'appname': '"""overview"""', 'roleid': 'subject.id', 'viewname': 'crapp.INDEXVIEW_NAME'}), "(instanceid='devilry_admin_subjectadmin', appname=\n 'overview', roleid=subject.id, viewname=crapp.INDEXVIEW_NAME)\n", (2087, 2203), False, 'from cradmin_legacy.crinstance import reverse_cradmin_url\n'), ((5211, 5255), 'devilry.apps.core.models.Assignment.objects.filter', 'Assignment.objects.filter', ([], {'parentnode': 'period'}), '(parentnode=period)\n', (5236, 5255), False, 'from devilry.apps.core.models import Period, Assignment\n')]
|
"""Setup initial crack schema
Revision ID: 406c96d25e5e
Revises:
Create Date: 2020-07-31 15:23:28.430194+00:00
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '406c96d25e5e'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('artist',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=64), nullable=True),
sa.Column('year_founded', sa.Date(), nullable=True),
sa.Column('country', sa.String(length=32), nullable=True),
sa.Column('about', sa.Text(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('genre',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=64), nullable=True),
sa.Column('highlights', sa.Text(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('release',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=64), nullable=True),
sa.Column('year', sa.Integer(), nullable=True),
sa.Column('label', sa.String(length=64), nullable=True),
sa.Column('type', sa.String(length=32), nullable=True),
sa.Column('album_kind', sa.String(length=32), nullable=True),
sa.Column('release_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['release_id'], ['release.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('song',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=64), nullable=True),
sa.Column('trivia', sa.Text(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('artist_release',
sa.Column('artist_id', sa.Integer(), nullable=False),
sa.Column('release_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['artist_id'], ['artist.id'], ),
sa.ForeignKeyConstraint(['release_id'], ['release.id'], ),
sa.PrimaryKeyConstraint('artist_id', 'release_id')
)
op.create_table('genre_artist',
sa.Column('genre_id', sa.Integer(), nullable=False),
sa.Column('artist_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['artist_id'], ['artist.id'], ),
sa.ForeignKeyConstraint(['genre_id'], ['genre.id'], ),
sa.PrimaryKeyConstraint('genre_id', 'artist_id')
)
op.create_table('genre_release',
sa.Column('genre_id', sa.Integer(), nullable=False),
sa.Column('release_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['genre_id'], ['genre.id'], ),
sa.ForeignKeyConstraint(['release_id'], ['release.id'], ),
sa.PrimaryKeyConstraint('genre_id', 'release_id')
)
op.create_table('release_song',
sa.Column('release_id', sa.Integer(), nullable=False),
sa.Column('song_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['release_id'], ['release.id'], ),
sa.ForeignKeyConstraint(['song_id'], ['song.id'], ),
sa.PrimaryKeyConstraint('release_id', 'song_id')
)
op.create_table('sheet',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('date_uploaded', sa.Date(), nullable=True),
sa.Column('bpm', sa.Integer(), nullable=True),
sa.Column('song_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['song_id'], ['song.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('tracktab',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('instrument', sa.String(length=32), nullable=True),
sa.Column('time_start', sa.Time(), nullable=True),
sa.Column('tuning', sa.String(length=64), nullable=True),
sa.Column('gp5', sa.LargeBinary(), nullable=True),
sa.Column('sheet_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['sheet_id'], ['sheet.id'], ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('tracktab')
op.drop_table('sheet')
op.drop_table('release_song')
op.drop_table('genre_release')
op.drop_table('genre_artist')
op.drop_table('artist_release')
op.drop_table('song')
op.drop_table('release')
op.drop_table('genre')
op.drop_table('artist')
# ### end Alembic commands ###
|
[
"alembic.op.drop_table",
"sqlalchemy.Time",
"sqlalchemy.Date",
"sqlalchemy.LargeBinary",
"sqlalchemy.PrimaryKeyConstraint",
"sqlalchemy.Text",
"sqlalchemy.ForeignKeyConstraint",
"sqlalchemy.String",
"sqlalchemy.Integer"
] |
[((4030, 4055), 'alembic.op.drop_table', 'op.drop_table', (['"""tracktab"""'], {}), "('tracktab')\n", (4043, 4055), False, 'from alembic import op\n'), ((4060, 4082), 'alembic.op.drop_table', 'op.drop_table', (['"""sheet"""'], {}), "('sheet')\n", (4073, 4082), False, 'from alembic import op\n'), ((4087, 4116), 'alembic.op.drop_table', 'op.drop_table', (['"""release_song"""'], {}), "('release_song')\n", (4100, 4116), False, 'from alembic import op\n'), ((4121, 4151), 'alembic.op.drop_table', 'op.drop_table', (['"""genre_release"""'], {}), "('genre_release')\n", (4134, 4151), False, 'from alembic import op\n'), ((4156, 4185), 'alembic.op.drop_table', 'op.drop_table', (['"""genre_artist"""'], {}), "('genre_artist')\n", (4169, 4185), False, 'from alembic import op\n'), ((4190, 4221), 'alembic.op.drop_table', 'op.drop_table', (['"""artist_release"""'], {}), "('artist_release')\n", (4203, 4221), False, 'from alembic import op\n'), ((4226, 4247), 'alembic.op.drop_table', 'op.drop_table', (['"""song"""'], {}), "('song')\n", (4239, 4247), False, 'from alembic import op\n'), ((4252, 4276), 'alembic.op.drop_table', 'op.drop_table', (['"""release"""'], {}), "('release')\n", (4265, 4276), False, 'from alembic import op\n'), ((4281, 4303), 'alembic.op.drop_table', 'op.drop_table', (['"""genre"""'], {}), "('genre')\n", (4294, 4303), False, 'from alembic import op\n'), ((4308, 4331), 'alembic.op.drop_table', 'op.drop_table', (['"""artist"""'], {}), "('artist')\n", (4321, 4331), False, 'from alembic import op\n'), ((691, 720), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (714, 720), True, 'import sqlalchemy as sa\n'), ((926, 955), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (949, 955), True, 'import sqlalchemy as sa\n'), ((1405, 1460), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['release_id']", "['release.id']"], {}), "(['release_id'], ['release.id'])\n", (1428, 1460), True, 'import sqlalchemy as sa\n'), ((1468, 1497), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (1491, 1497), True, 'import sqlalchemy as sa\n'), ((1698, 1727), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (1721, 1727), True, 'import sqlalchemy as sa\n'), ((1893, 1946), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['artist_id']", "['artist.id']"], {}), "(['artist_id'], ['artist.id'])\n", (1916, 1946), True, 'import sqlalchemy as sa\n'), ((1954, 2009), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['release_id']", "['release.id']"], {}), "(['release_id'], ['release.id'])\n", (1977, 2009), True, 'import sqlalchemy as sa\n'), ((2017, 2067), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""artist_id"""', '"""release_id"""'], {}), "('artist_id', 'release_id')\n", (2040, 2067), True, 'import sqlalchemy as sa\n'), ((2229, 2282), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['artist_id']", "['artist.id']"], {}), "(['artist_id'], ['artist.id'])\n", (2252, 2282), True, 'import sqlalchemy as sa\n'), ((2290, 2341), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['genre_id']", "['genre.id']"], {}), "(['genre_id'], ['genre.id'])\n", (2313, 2341), True, 'import sqlalchemy as sa\n'), ((2349, 2397), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""genre_id"""', '"""artist_id"""'], {}), "('genre_id', 'artist_id')\n", (2372, 2397), 
True, 'import sqlalchemy as sa\n'), ((2561, 2612), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['genre_id']", "['genre.id']"], {}), "(['genre_id'], ['genre.id'])\n", (2584, 2612), True, 'import sqlalchemy as sa\n'), ((2620, 2675), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['release_id']", "['release.id']"], {}), "(['release_id'], ['release.id'])\n", (2643, 2675), True, 'import sqlalchemy as sa\n'), ((2683, 2732), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""genre_id"""', '"""release_id"""'], {}), "('genre_id', 'release_id')\n", (2706, 2732), True, 'import sqlalchemy as sa\n'), ((2894, 2949), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['release_id']", "['release.id']"], {}), "(['release_id'], ['release.id'])\n", (2917, 2949), True, 'import sqlalchemy as sa\n'), ((2957, 3006), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['song_id']", "['song.id']"], {}), "(['song_id'], ['song.id'])\n", (2980, 3006), True, 'import sqlalchemy as sa\n'), ((3014, 3062), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""release_id"""', '"""song_id"""'], {}), "('release_id', 'song_id')\n", (3037, 3062), True, 'import sqlalchemy as sa\n'), ((3318, 3367), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['song_id']", "['song.id']"], {}), "(['song_id'], ['song.id'])\n", (3341, 3367), True, 'import sqlalchemy as sa\n'), ((3375, 3404), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (3398, 3404), True, 'import sqlalchemy as sa\n'), ((3793, 3864), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['sheet_id']", "['sheet.id']"], {'ondelete': '"""CASCADE"""'}), "(['sheet_id'], ['sheet.id'], ondelete='CASCADE')\n", (3816, 3864), True, 'import sqlalchemy as sa\n'), ((3870, 3899), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (3893, 3899), True, 'import sqlalchemy as sa\n'), ((426, 438), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (436, 438), True, 'import sqlalchemy as sa\n'), ((479, 499), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (488, 499), True, 'import sqlalchemy as sa\n'), ((547, 556), 'sqlalchemy.Date', 'sa.Date', ([], {}), '()\n', (554, 556), True, 'import sqlalchemy as sa\n'), ((599, 619), 'sqlalchemy.String', 'sa.String', ([], {'length': '(32)'}), '(length=32)\n', (608, 619), True, 'import sqlalchemy as sa\n'), ((660, 669), 'sqlalchemy.Text', 'sa.Text', ([], {}), '()\n', (667, 669), True, 'import sqlalchemy as sa\n'), ((776, 788), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (786, 788), True, 'import sqlalchemy as sa\n'), ((829, 849), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (838, 849), True, 'import sqlalchemy as sa\n'), ((895, 904), 'sqlalchemy.Text', 'sa.Text', ([], {}), '()\n', (902, 904), True, 'import sqlalchemy as sa\n'), ((1013, 1025), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (1023, 1025), True, 'import sqlalchemy as sa\n'), ((1066, 1086), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (1075, 1086), True, 'import sqlalchemy as sa\n'), ((1126, 1138), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (1136, 1138), True, 'import sqlalchemy as sa\n'), ((1179, 1199), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (1188, 1199), True, 'import sqlalchemy as sa\n'), ((1239, 1259), 'sqlalchemy.String', 
'sa.String', ([], {'length': '(32)'}), '(length=32)\n', (1248, 1259), True, 'import sqlalchemy as sa\n'), ((1305, 1325), 'sqlalchemy.String', 'sa.String', ([], {'length': '(32)'}), '(length=32)\n', (1314, 1325), True, 'import sqlalchemy as sa\n'), ((1371, 1383), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (1381, 1383), True, 'import sqlalchemy as sa\n'), ((1552, 1564), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (1562, 1564), True, 'import sqlalchemy as sa\n'), ((1605, 1625), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (1614, 1625), True, 'import sqlalchemy as sa\n'), ((1667, 1676), 'sqlalchemy.Text', 'sa.Text', ([], {}), '()\n', (1674, 1676), True, 'import sqlalchemy as sa\n'), ((1799, 1811), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (1809, 1811), True, 'import sqlalchemy as sa\n'), ((1858, 1870), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (1868, 1870), True, 'import sqlalchemy as sa\n'), ((2136, 2148), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (2146, 2148), True, 'import sqlalchemy as sa\n'), ((2194, 2206), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (2204, 2206), True, 'import sqlalchemy as sa\n'), ((2467, 2479), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (2477, 2479), True, 'import sqlalchemy as sa\n'), ((2526, 2538), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (2536, 2538), True, 'import sqlalchemy as sa\n'), ((2803, 2815), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (2813, 2815), True, 'import sqlalchemy as sa\n'), ((2859, 2871), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (2869, 2871), True, 'import sqlalchemy as sa\n'), ((3118, 3130), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (3128, 3130), True, 'import sqlalchemy as sa\n'), ((3180, 3189), 'sqlalchemy.Date', 'sa.Date', ([], {}), '()\n', (3187, 3189), True, 'import sqlalchemy as sa\n'), ((3228, 3240), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (3238, 3240), True, 'import sqlalchemy as sa\n'), ((3283, 3295), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (3293, 3295), True, 'import sqlalchemy as sa\n'), ((3463, 3475), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (3473, 3475), True, 'import sqlalchemy as sa\n'), ((3522, 3542), 'sqlalchemy.String', 'sa.String', ([], {'length': '(32)'}), '(length=32)\n', (3531, 3542), True, 'import sqlalchemy as sa\n'), ((3588, 3597), 'sqlalchemy.Time', 'sa.Time', ([], {}), '()\n', (3595, 3597), True, 'import sqlalchemy as sa\n'), ((3639, 3659), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (3648, 3659), True, 'import sqlalchemy as sa\n'), ((3698, 3714), 'sqlalchemy.LargeBinary', 'sa.LargeBinary', ([], {}), '()\n', (3712, 3714), True, 'import sqlalchemy as sa\n'), ((3758, 3770), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (3768, 3770), True, 'import sqlalchemy as sa\n')]
|
from Trimports import trimport
from Trimports.arg_process import parse_argument
def main():
file_path = parse_argument()
trimport.run(file_path)
if __name__ == "__main__":
main()
|
[
"Trimports.arg_process.parse_argument",
"Trimports.trimport.run"
] |
[((110, 126), 'Trimports.arg_process.parse_argument', 'parse_argument', ([], {}), '()\n', (124, 126), False, 'from Trimports.arg_process import parse_argument\n'), ((131, 154), 'Trimports.trimport.run', 'trimport.run', (['file_path'], {}), '(file_path)\n', (143, 154), False, 'from Trimports import trimport\n')]
|